########################################################################################################
# data_sql.py - Data pull from json, clean it up and upload to SQL
# by <NAME>
#
# This Python script pulls the metadata (link) from the following three JSON endpoints:
# 1. https://api.weather.gov/points/31.7276,-110.8754
# 2. https://api.weather.gov/points/32.395,-110.6911
# 3. https://api.weather.gov/points/32.4186,-110.7383
#
# The links pulled from the above three JSON responses are the grid-data links, which are
# used to pull all the weather-related data for the three campgrounds:
# 1. https://api.weather.gov/gridpoints/TWC/91,26
# 2. https://api.weather.gov/gridpoints/TWC/101,54
# 3. https://api.weather.gov/gridpoints/TWC/100,56
#
# From the above grid data, 4 DataFrames are created. The challenge was pulling the data from the
# above JSON links and then converting the date-time columns to a datetime format that can be
# uploaded to SQL and used for the graphs. Temperatures also need to be converted to degrees
# Fahrenheit and wind speeds to miles per hour:
# 1. A campground information DataFrame with lat, lon, elevation, meta URL, grid URL,
#    forest URL, campsite URL, fire danger and map code.
# 2. One DataFrame for each campground (bs_grid_df, rc_grid_df, sc_grid_df). These DataFrames
#    have columns for temperature in degF, temperature time, wind speed, wind speed time,
#    wind gust, wind gust time, probability of precipitation, probability of precipitation time,
#    quantity of precipitation and quantity of precipitation time.
#
# SQLAlchemy was used to create 4 tables in PostgreSQL, and the above 4 DataFrames were then
# uploaded to PostgreSQL (a minimal sketch of this step is included after the imports below).
# The table names in SQL are:
# 1. camp_wx
# 2. cg_bog_spring
# 3. cg_rose_canyon
# 4. cg_spencer_canyon
#
# This script was converted from data_sql.ipynb
##########################################################################################################
# %%
# ------------------------
# Dependencies and Setup
# ------------------------
import pandas as pd
import json
import requests
import numpy as np
import datetime
from datetime import timedelta
from splinter import Browser
from bs4 import BeautifulSoup
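# ----------------------------------------------------------------------------------------------------------
# NOTE: The SQLAlchemy upload described in the header happens at the end of the original notebook and is
# not part of this excerpt. The helper below is only a minimal sketch of that step, assuming a local
# PostgreSQL database and the four table names listed above; the connection string is a placeholder and
# must be replaced with the real credentials. The original script does not call this helper.
# ----------------------------------------------------------------------------------------------------------
def upload_campground_tables(camp_df, bs_grid, rc_grid, sc_grid,
                             conn_str="postgresql://postgres:postgres@localhost:5432/campsites"):
    """Sketch: create/replace the four PostgreSQL tables from the DataFrames built in this script."""
    from sqlalchemy import create_engine
    engine = create_engine(conn_str)
    camp_df.to_sql("camp_wx", engine, if_exists="replace", index=False)
    bs_grid.to_sql("cg_bog_spring", engine, if_exists="replace", index=False)
    rc_grid.to_sql("cg_rose_canyon", engine, if_exists="replace", index=False)
    sc_grid.to_sql("cg_spencer_canyon", engine, if_exists="replace", index=False)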
# %%
# --------------------------------------------------------------------
# Bog Spring CAMPGROUND
# --------------------------------------------------------------------
# ---------------------------------------------
# Pull Grid Data URL from metadata URL for Bog Springs Campground
# ---------------------------------------------
bs_url = "https://api.weather.gov/points/31.7276,-110.8754"
response_bs = requests.get(bs_url)
data_bs = response_bs.json()
data_bs
grid_data_bs = data_bs["properties"]["forecastGridData"]
grid_data_bs
# %%
# ------------------------------------------------------------------------
# Pull latitude, Longitude and Elevation data for Bog Springs Campground
# ------------------------------------------------------------------------
bs_forcast_url = grid_data_bs
response_bs_forecast = requests.get(bs_forcast_url)
data_bs_forecast = response_bs_forecast.json()
data_bs_forecast
lat_bs = data_bs_forecast["geometry"]["coordinates"][0][0][1]
lat_bs
lng_bs = data_bs_forecast["geometry"]["coordinates"][0][0][0]
lng_bs
elevation_bs = data_bs_forecast["properties"]["elevation"]["value"]
elevation_bs
# ---------------------------------------------------------------------------------
# Create a Dataframe with Latitude, Longitude Elevation and all other related URL
# ---------------------------------------------------------------------------------
bs_df = pd.DataFrame({"id": 1,
"campground": "Bog Springs",
"lat": [lat_bs],
"lon": [lng_bs],
"elevation": [elevation_bs],
"nws_meta_url": [bs_url],
"nws_grid_url": [grid_data_bs],
"forest_url":"https://www.fs.usda.gov/recarea/coronado/recreation/camping-cabins/recarea/?recid=25732&actid=29",
"campsite_url": "https://www.fs.usda.gov/Internet/FSE_MEDIA/fseprd746637.jpg",
"fire_danger": "Very High",
"map_code": '<iframe src="https://www.google.com/maps/embed?pb=!1m18!1m12!1m3!1d3393.5714340164473!2d-110.87758868361043!3d31.72759998130141!2m3!1f0!2f0!3f0!3m2!1i1024!2i768!4f13.1!3m3!1m2!1s0x86d6970db0a5e44d%3A0x1b48084e4d6db970!2sBog%20Springs%20Campground!5e0!3m2!1sen!2sus!4v1626560932236!5m2!1sen!2sus" width="600" height="450" style="border:0;" allowfullscreen="" loading="lazy"></iframe>'
})
bs_df
# %%
# -------------------------------------------------------------------------------------------------
# Pull Temperature, Wind Speed, Wind Gust, Probability of Precipitation, and Quantity of Precipitation
# data along with the date and time for each.
# -------------------------------------------------------------------------------------------------
# =================== Temperature Data ======================
temp = []
for i in data_bs_forecast["properties"]["temperature"]["values"]:
temp.append(i)
temp_df = pd.DataFrame(temp)
temp_df
# Temperature conversion to Degree Fahrenheit
temp_df['degF'] = (temp_df['value'] * 9 / 5) + 32
temp_df
# validTime Column split to date and time for Temperature
date_temp = temp_df['validTime'].str.split('T', n=1, expand=True)
time_temp = date_temp[1].str.split('+', n=1, expand=True)
time_temp
temp_df['date_temp'] = date_temp[0]
temp_df['time_temp'] = time_temp[0]
# Combine date and time with a space in between the two
temp_df['date_time_temp'] = temp_df['date_temp'] + ' ' + temp_df['time_temp']
# Convert the above to datetime format so it can be recognized by PostgreSQL and JS
temp_df['date_time_temp'] = pd.to_datetime(temp_df['date_time_temp'])
# Pull all the data for today + 3 days
time_delta_temp = datetime.datetime.strptime(temp_df['date_temp'][0],"%Y-%m-%d") + timedelta(days = 4)
temp_df['times_temp'] = time_delta_temp.strftime("%Y-%m-%d")
temp_df = temp_df.loc[temp_df['date_temp'] < temp_df['times_temp']]
temp_df
# temp_df.dtypes
# =================== Wind Speed Data ======================
wind_speed = []
for i in data_bs_forecast["properties"]["windSpeed"]["values"]:
wind_speed.append(i)
windSpeed_df = pd.DataFrame(wind_speed)
windSpeed_df
# Converting KM/hour to Miles/hour
windSpeed_df['miles/hour'] = windSpeed_df['value'] * 0.621371
windSpeed_df
# validTime Column split to date and time for Wind Speed
date_ws = windSpeed_df['validTime'].str.split('T', n=1, expand=True)
time_ws = date_ws[1].str.split('+', n=1, expand=True)
time_ws
windSpeed_df['date_ws'] = date_ws[0]
windSpeed_df['time_ws'] = time_ws[0]
# Combine date and time with a space in between the two
windSpeed_df['date_time_ws'] = windSpeed_df['date_ws'] + ' ' + windSpeed_df['time_ws']
# Convert the above to datetime format so it can be recognized by PostgreSQL and JS
windSpeed_df['date_time_ws'] = pd.to_datetime(windSpeed_df['date_time_ws'])
# Pull all the data for today + 3 days
time_delta_ws = datetime.datetime.strptime(windSpeed_df['date_ws'][0],"%Y-%m-%d") + timedelta(days = 4)
windSpeed_df['times_ws'] = time_delta_ws.strftime("%Y-%m-%d")
windSpeed_df = windSpeed_df.loc[windSpeed_df['date_ws'] < windSpeed_df['times_ws']]
windSpeed_df
# windSpeed_df.dtypes
# =================== Wind Gust Data ======================
wind_gust = []
for i in data_bs_forecast["properties"]["windGust"]["values"]:
wind_gust.append(i)
wind_gust_df = pd.DataFrame(wind_gust)
wind_gust_df
# Converting KM/hour to Miles/hour
wind_gust_df['m/h'] = wind_gust_df['value'] * 0.621371
wind_gust_df
# # validTime Column split to date and time for Wind Gusts
date_wg = wind_gust_df['validTime'].str.split('T', n=1, expand=True)
time_wg = date_wg[1].str.split('+', n=1, expand=True)
time_wg
wind_gust_df['date_wg'] = date_wg[0]
wind_gust_df['time_wg'] = time_wg[0]
# Combine date and time with a space in between the two
wind_gust_df['date_time_wg'] = wind_gust_df['date_wg'] + ' ' + wind_gust_df['time_wg']
# Convert the above to datetime format so it can be recognized by PostgreSQL and JS
wind_gust_df['date_time_wg'] = pd.to_datetime(wind_gust_df['date_time_wg'])
wind_gust_df
# Pull all the data for today + 3 days
time_delta_wg = datetime.datetime.strptime(wind_gust_df['date_wg'][0],"%Y-%m-%d") + timedelta(days = 4)
wind_gust_df['times_wg'] = time_delta_wg.strftime("%Y-%m-%d")
wind_gust_df = wind_gust_df.loc[wind_gust_df['date_wg'] < wind_gust_df['times_wg']]
wind_gust_df
# wind_gust_df.dtypes
# =================== Probability of Precipitation Data ======================
prob_precip = []
for i in data_bs_forecast["properties"]["probabilityOfPrecipitation"]["values"]:
prob_precip.append(i)
prob_precip_df = pd.DataFrame(prob_precip)
prob_precip_df
# # validTime Column split to date and time for Probability Precipitation
date_pp = prob_precip_df['validTime'].str.split('T', n=1, expand=True)
time_pp = date_pp[1].str.split('+', n=1, expand=True)
time_pp
prob_precip_df['date_pp'] = date_pp[0]
prob_precip_df['time_pp'] = time_pp[0]
# Combine date and time with a space in between the two
prob_precip_df['date_time_pp'] = prob_precip_df['date_pp'] + ' ' + prob_precip_df['time_pp']
# Convert the above to datetime format so it can be recognized by PostgreSQL and JS
prob_precip_df['date_time_pp'] = pd.to_datetime(prob_precip_df['date_time_pp'])
prob_precip_df
# Pull all the data for today + 3 days
time_delta_pp = datetime.datetime.strptime(prob_precip_df['date_pp'][0],"%Y-%m-%d") + timedelta(days = 4)
prob_precip_df['times_pp'] = time_delta_pp.strftime("%Y-%m-%d")
prob_precip_df = prob_precip_df.loc[prob_precip_df['date_pp'] < prob_precip_df['times_pp']]
prob_precip_df
# prob_precip_df.dtypes
# =================== Quantity of Precipitation Data ======================
qty_precip = []
for i in data_bs_forecast["properties"]["quantitativePrecipitation"]["values"]:
qty_precip.append(i)
qty_precip_df = pd.DataFrame(qty_precip)
qty_precip_df
# # validTime Column split to date and time for quantity Precipitation
date_qp = qty_precip_df['validTime'].str.split('T', n=1, expand=True)
time_qp = date_qp[1].str.split('+', n=1, expand=True)
time_qp
qty_precip_df['date_qp'] = date_qp[0]
qty_precip_df['time_qp'] = time_qp[0]
# Combine date and time with a space in between the two
qty_precip_df['date_time_qp'] = qty_precip_df['date_qp'] + ' ' + qty_precip_df['time_qp']
# Convert the above to datetime format so it can be recognized by PostgreSQL and JS
qty_precip_df['date_time_qp'] = pd.to_datetime(qty_precip_df['date_time_qp'])
qty_precip_df
# Pull all the data for today + 3 days
time_delta_qp = datetime.datetime.strptime(qty_precip_df['date_qp'][0],"%Y-%m-%d") + timedelta(days = 4)
qty_precip_df['times_qp'] = time_delta_qp.strftime("%Y-%m-%d")
qty_precip_df = qty_precip_df.loc[qty_precip_df['date_qp'] < qty_precip_df['times_qp']]
qty_precip_df
# qty_precip_df.dtypes
# =================== Create DataFrame with all the above data for Bog Spring Campground ======================
bs_grid_df = pd.DataFrame({"id":1,
"campground": "Bog Springs",
"forecasted_temperature_degF": temp_df['degF'],
"forecastTime_temperature": temp_df['date_time_temp'],
"forecasted_windSpeed_miles_per_h": windSpeed_df['miles/hour'],
"forecastTime_windSpeed": windSpeed_df['date_time_ws'],
"forecasted_windGust_miles_per_h": wind_gust_df['m/h'],
"forecastTime_windGust": wind_gust_df['date_time_wg'],
"forecasted_probabilityOfPrecipitation": prob_precip_df['value'],
"forecastTime_probabilityOfPrecipitation": prob_precip_df['date_time_pp'],
"forecasted_quantityOfPrecipitation_mm": qty_precip_df['value'],
"forecastTime_quantityOfPrecipitation": qty_precip_df['date_time_qp'],
})
bs_grid_df
# bs_grid_df.dtypes
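# ----------------------------------------------------------------------------------------------------------
# NOTE: The five blocks above (temperature, wind speed, wind gust, probability of precipitation and
# quantity of precipitation) repeat the same validTime handling. The helper below is only a sketch of how
# that repetition could be factored out, assuming the same NWS "values" structure used above; the original
# script does not call it.
# ----------------------------------------------------------------------------------------------------------
def build_forecast_df(values, convert=None, days=4):
    """Sketch: DataFrame with a parsed date_time column, keeping rows earlier than first date + `days`."""
    df = pd.DataFrame(values)
    if convert is not None:
        df['converted'] = convert(df['value'])
    date_part = df['validTime'].str.split('T', n=1, expand=True)
    time_part = date_part[1].str.split('+', n=1, expand=True)
    df['date'] = date_part[0]
    df['date_time'] = pd.to_datetime(df['date'] + ' ' + time_part[0])
    cutoff = datetime.datetime.strptime(df['date'][0], "%Y-%m-%d") + timedelta(days=days)
    return df.loc[df['date'] < cutoff.strftime("%Y-%m-%d")]

# Example (equivalent to the temperature block above):
# temp_df = build_forecast_df(data_bs_forecast["properties"]["temperature"]["values"],
#                             convert=lambda c: (c * 9 / 5) + 32)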
# %%
# --------------------------------------------------------------------
# ROSE CANYON CAMPGROUND
# --------------------------------------------------------------------
# -------------------------------------------
# Pull Grid Data URL From Metadata url
# -------------------------------------------
rc_url = "https://api.weather.gov/points/32.395,-110.6911"
response_rc = requests.get(rc_url)
data_rc = response_rc.json()
data_rc
grid_data_rc = data_rc["properties"]["forecastGridData"]
grid_data_rc
# %%
# ------------------------------------------------------------------------
# Pull latitude, Longitude and Elevation data for Rose Canyon Campground
# ------------------------------------------------------------------------
rc_forcast_url = grid_data_rc
response_rc_forecast = requests.get(rc_forcast_url)
data_rc_forecast = response_rc_forecast.json()
data_rc_forecast
lat_rc = data_rc_forecast["geometry"]["coordinates"][0][0][1]
lat_rc
lng_rc = data_rc_forecast["geometry"]["coordinates"][0][0][0]
lng_rc
elevation_rc = data_rc_forecast["properties"]["elevation"]["value"]
elevation_rc
# ---------------------------------------------------------------------------------
# Create a Dataframe with Latitude, Longitude Elevation and all other related URL
# ---------------------------------------------------------------------------------
rc_df = pd.DataFrame({"id": 2,
"campground": "Rose Canyon",
"lat": [lat_rc],
"lon": [lng_rc],
"elevation": [elevation_rc],
"nws_meta_url": [rc_url],
"nws_grid_url": [grid_data_rc],
"forest_url":"https://www.fs.usda.gov/recarea/coronado/recreation/camping-cabins/recarea/?recid=25698&actid=29",
"campsite_url": "https://cdn.recreation.gov/public/2019/06/20/00/19/232284_beeddff5-c966-49e2-93a8-c63c1cf21294_700.jpg",
# "nws_meta_json":[data_rc],
# "nws_grid_json": [data_rc_forecast],
"fire_danger": "Very High",
"map_code": '<iframe src="https://www.google.com/maps/embed?pb=!1m18!1m12!1m3!1d3368.97130566869!2d-110.70672358360277!3d32.39313088108983!2m3!1f0!2f0!3f0!3m2!1i1024!2i768!4f13.1!3m3!1m2!1s0x86d6400421614087%3A0xb6cfb84a4b05c95b!2sRose%20Canyon%20Campground!5e0!3m2!1sen!2sus!4v1626560965073!5m2!1sen!2sus" width="600" height="450" style="border:0;" allowfullscreen="" loading="lazy"></iframe>'
})
rc_df
# %%
# -------------------------------------------------------------------------------------------------
# Pull Temperature, Wind Speed, Wind Gust, Probability of Precipitation, and Quantity of Precipitation
# data along with the date and time for each.
# -------------------------------------------------------------------------------------------------
# =================== Temperature Data ======================
temp_rc = []
for i in data_rc_forecast["properties"]["temperature"]["values"]:
temp_rc.append(i)
temp_rc_df = pd.DataFrame(temp_rc)
temp_rc_df
# Temperature conversion to Degree Fahrenheit
temp_rc_df['degF_rc'] = (temp_rc_df['value'] * 9 / 5) + 32
temp_rc_df
# validTime Column split to date and time for Temperature
date_temp_rc = temp_rc_df['validTime'].str.split('T', n=1, expand=True)
time_temp_rc = date_temp_rc[1].str.split('+', n=1, expand=True)
time_temp_rc
temp_rc_df['date_temp_rc'] = date_temp_rc[0]
temp_rc_df['time_temp_rc'] = time_temp_rc[0]
# Combine date and time with a space in between the two
temp_rc_df['date_time_temp_rc'] = temp_rc_df['date_temp_rc'] + ' ' + temp_rc_df['time_temp_rc']
# Convert the above to datetime format so it can be recognized by PostgreSQL and JS
temp_rc_df['date_time_temp_rc'] = pd.to_datetime(temp_rc_df['date_time_temp_rc'])
# Pull all the data for today + 3 days
time_delta_temp_rc = datetime.datetime.strptime(temp_rc_df['date_temp_rc'][0],"%Y-%m-%d") + timedelta(days = 4)
temp_rc_df['times_temp_rc'] = time_delta_temp_rc.strftime("%Y-%m-%d")
temp_rc_df = temp_rc_df.loc[temp_rc_df['date_temp_rc'] < temp_rc_df['times_temp_rc']]
temp_rc_df
temp_rc_df.dtypes
# =================== Wind Speed Data ======================
wind_speed_rc = []
for i in data_rc_forecast["properties"]["windSpeed"]["values"]:
wind_speed_rc.append(i)
windSpeed_rc_df = pd.DataFrame(wind_speed_rc)
windSpeed_rc_df
# Converting KM/hour to Miles/hour
windSpeed_rc_df['miles/hour_rc'] = windSpeed_rc_df['value'] * 0.621371
windSpeed_rc_df
# validTime Column split to date and time for wind Speed
date_ws_rc = windSpeed_rc_df['validTime'].str.split('T', n=1, expand=True)
time_ws_rc = date_ws_rc[1].str.split('+', n=1, expand=True)
time_ws_rc
windSpeed_rc_df['date_ws_rc'] = date_ws_rc[0]
windSpeed_rc_df['time_ws_rc'] = time_ws_rc[0]
# Combine date and time with a space in between the two
windSpeed_rc_df['date_time_ws_rc'] = windSpeed_rc_df['date_ws_rc'] + ' ' + windSpeed_rc_df['time_ws_rc']
# Convert the above to datetime format so it can be recognized by PostgreSQL and JS
windSpeed_rc_df['date_time_ws_rc'] = pd.to_datetime(windSpeed_rc_df['date_time_ws_rc'])
# Pull all the data for today + 3 days
time_delta_ws = datetime.datetime.strptime(windSpeed_rc_df['date_ws_rc'][0],"%Y-%m-%d") + timedelta(days = 4)
windSpeed_rc_df['times_ws_rc'] = time_delta_ws.strftime("%Y-%m-%d")
windSpeed_rc_df = windSpeed_rc_df.loc[windSpeed_rc_df['date_ws_rc'] < windSpeed_rc_df['times_ws_rc']]
windSpeed_rc_df
# windSpeed_rc_df.dtypes
# =================== Wind Gust Data ======================
wind_gust_rc = []
for i in data_rc_forecast["properties"]["windGust"]["values"]:
wind_gust_rc.append(i)
wind_gust_rc_df = pd.DataFrame(wind_gust_rc)
wind_gust_rc_df
# Converting KM/hour to Miles/hour
wind_gust_rc_df['m/h_rc'] = wind_gust_rc_df['value'] * 0.621371
wind_gust_rc_df
# # validTime Column split to date and time for wind Gusts
date_wg_rc = wind_gust_rc_df['validTime'].str.split('T', n=1, expand=True)
time_wg_rc = date_wg_rc[1].str.split('+', n=1, expand=True)
time_wg_rc
wind_gust_rc_df['date_wg_rc'] = date_wg_rc[0]
wind_gust_rc_df['time_wg_rc'] = time_wg_rc[0]
# Combine date and time with a space in between the two
wind_gust_rc_df['date_time_wg_rc'] = wind_gust_rc_df['date_wg_rc'] + ' ' + wind_gust_rc_df['time_wg_rc']
# Convert the above to datetime format so it can be recognized by PostgreSQL and JS
wind_gust_rc_df['date_time_wg_rc'] = pd.to_datetime(wind_gust_rc_df['date_time_wg_rc'])
wind_gust_rc_df
# Pull all the data for today + 3 days
time_delta_wg = datetime.datetime.strptime(wind_gust_rc_df['date_wg_rc'][0],"%Y-%m-%d") + timedelta(days = 4)
wind_gust_rc_df['times_wg_rc'] = time_delta_wg.strftime("%Y-%m-%d")
wind_gust_rc_df = wind_gust_rc_df.loc[wind_gust_rc_df['date_wg_rc'] < wind_gust_rc_df['times_wg_rc']]
wind_gust_rc_df
# wind_gust_rc_df.dtypes
# =================== Probability of Precipitation ======================
prob_precip_rc = []
for i in data_rc_forecast["properties"]["probabilityOfPrecipitation"]["values"]:
prob_precip_rc.append(i)
prob_precip_rc_df = pd.DataFrame(prob_precip_rc)
prob_precip_rc_df
# # validTime Column split to date and time for Probability Precipitation
date_pp_rc = prob_precip_rc_df['validTime'].str.split('T', n=1, expand=True)
time_pp_rc = date_pp_rc[1].str.split('+', n=1, expand=True)
time_pp_rc
prob_precip_rc_df['date_pp_rc'] = date_pp_rc[0]
prob_precip_rc_df['time_pp_rc'] = time_pp_rc[0]
# Combine date and time with a space in between the two
prob_precip_rc_df['date_time_pp_rc'] = prob_precip_rc_df['date_pp_rc'] + ' ' + prob_precip_rc_df['time_pp_rc']
# Convert the above to datetime format so it can be recognized by PostgreSQL and JS
prob_precip_rc_df['date_time_pp_rc'] = pd.to_datetime(prob_precip_rc_df['date_time_pp_rc'])
prob_precip_rc_df
# Pull all the data for today + 3 days
time_delta_pp = datetime.datetime.strptime(prob_precip_rc_df['date_pp_rc'][0],"%Y-%m-%d") + timedelta(days = 4)
prob_precip_rc_df['times_pp_rc'] = time_delta_pp.strftime("%Y-%m-%d")
prob_precip_rc_df = prob_precip_rc_df.loc[prob_precip_rc_df['date_pp_rc'] < prob_precip_rc_df['times_pp_rc']]
prob_precip_rc_df
# prob_precip_rc_df.dtypes
# =================== Quantity of Precipitation ======================
qty_precip_rc = []
for i in data_rc_forecast["properties"]["quantitativePrecipitation"]["values"]:
qty_precip_rc.append(i)
qty_precip_rc_df = pd.DataFrame(qty_precip_rc)
qty_precip_rc_df
# # validTime Column split to date and time for quantity Precipitation
date_qp_rc = qty_precip_rc_df['validTime'].str.split('T', n=1, expand=True)
time_qp_rc = date_qp_rc[1].str.split('+', n=1, expand=True)
time_qp_rc
qty_precip_rc_df['date_qp_rc'] = date_qp_rc[0]
qty_precip_rc_df['time_qp_rc'] = time_qp_rc[0]
# Combine date and time with a space in between the two
qty_precip_rc_df['date_time_qp_rc'] = qty_precip_rc_df['date_qp_rc'] + ' ' + qty_precip_rc_df['time_qp_rc']
# Convert the above to datetime format so it can be recognized by PostgreSQL and JS
qty_precip_rc_df['date_time_qp_rc'] = pd.to_datetime(qty_precip_rc_df['date_time_qp_rc'])
qty_precip_rc_df
# Pull all the data for today + 3 days
time_delta_qp = datetime.datetime.strptime(qty_precip_rc_df['date_qp_rc'][0],"%Y-%m-%d") + timedelta(days = 4)
qty_precip_rc_df['times_qp_rc'] = time_delta_qp.strftime("%Y-%m-%d")
qty_precip_rc_df = qty_precip_rc_df.loc[qty_precip_rc_df['date_qp_rc'] < qty_precip_rc_df['times_qp_rc']]
qty_precip_rc_df
# qty_precip_rc_df.dtypes
# =================== Create DataFrame with all the above data for Rose Canyon Campground ======================
rc_grid_df = pd.DataFrame({"id":2,
"campground": "Rose Canyon",
"forecasted_temperature_degF": temp_rc_df['degF_rc'],
"forecastTime_temperature": temp_rc_df['date_time_temp_rc'],
"forecasted_windSpeed_miles_per_h": windSpeed_rc_df['miles/hour_rc'],
"forecastTime_windSpeed": windSpeed_rc_df['date_time_ws_rc'],
"forecasted_windGust_miles_per_h": wind_gust_rc_df['m/h_rc'],
"forecastTime_windGust": wind_gust_rc_df['date_time_wg_rc'],
"forecasted_probabilityOfPrecipitation": prob_precip_rc_df['value'],
"forecastTime_probabilityOfPrecipitation": prob_precip_rc_df['date_time_pp_rc'],
"forecasted_quantityOfPrecipitation_mm": qty_precip_rc_df['value'],
"forecastTime_quantityOfPrecipitation": qty_precip_rc_df['date_time_qp_rc'],
})
rc_grid_df
# rc_grid_df.dtypes
# %%
# --------------------------------------------------------------------
# SPENCER CANYON CAMPGROUND
# --------------------------------------------------------------------
# -------------------------------------------
# Pull Grid Data URL From Metadata url
# -------------------------------------------
sc_url = "https://api.weather.gov/points/32.4186,-110.7383"
response_sc = requests.get(sc_url)
data_sc = response_sc.json()
data_sc
grid_data_sc = data_sc["properties"]["forecastGridData"]
grid_data_sc
# %%
# ------------------------------------------------------------------------
# Pull latitude, Longitude and Elevation data for Spencer Canyon Campground
# ------------------------------------------------------------------------
sc_forcast_url = grid_data_sc
response_sc_forecast = requests.get(sc_forcast_url)
data_sc_forecast = response_sc_forecast.json()
data_sc_forecast
lat_sc = data_sc_forecast["geometry"]["coordinates"][0][0][1]
lat_sc
lng_sc = data_sc_forecast["geometry"]["coordinates"][0][0][0]
lng_sc
elevation_sc = data_sc_forecast["properties"]["elevation"]["value"]
elevation_sc
# ---------------------------------------------------------------------------------
# Create a Dataframe with Latitude, Longitude Elevation and all other related URL
# ---------------------------------------------------------------------------------
sc_df = pd.DataFrame({"id": 3,
"campground": "Spencer Canyon",
"lat": [lat_sc],
"lon": [lng_sc],
"elevation": [elevation_sc],
"nws_meta_url": [sc_url],
"nws_grid_url": [grid_data_sc],
"forest_url":"https://www.fs.usda.gov/recarea/coronado/recreation/camping-cabins/recarea/?recid=25710&actid=29",
"campsite_url": "https://www.fs.usda.gov/Internet/FSE_MEDIA/fseprd746608.jpg",
# "nws_meta_json":[data_sc],
# "nws_grid_json": [data_sc_forecast],
"fire_danger": "Very High",
"map_code": '<iframe src="https://www.google.com/maps/embed?pb=!1m18!1m12!1m3!1d3368.0814680369876!2d-110.74302428360251!3d32.41697578108229!2m3!1f0!2f0!3f0!3m2!1i1024!2i768!4f13.1!3m3!1m2!1s0x86d61515ca1f56fd%3A0x242e26b2f2f72242!2sSpencer%20Canyon%20Campground!5e0!3m2!1sen!2sus!4v1626560995515!5m2!1sen!2sus" width="600" height="450" style="border:0;" allowfullscreen="" loading="lazy"></iframe>'
})
sc_df
# %%
# -------------------------------------------------------------------------------------------------
# Pull Temperature, Wind Speed, Wind Gust, Probability of Precipitation, and Quantity of Precipitation
# data along with the date and time for each.
# -------------------------------------------------------------------------------------------------
# =================== Temperature Data ======================
temp_sc = []
for i in data_sc_forecast["properties"]["temperature"]["values"]:
temp_sc.append(i)
temp_sc_df = pd.DataFrame(temp_sc)
from pathlib import Path
from typing import Union, List
import xarray as xr
import numpy as np
import pandas as pd
import dask
from dask.distributed import progress
from loguru import logger
from reki.format.grib.eccodes import load_field_from_file as load_grib2_field_from_file
from reki.format.grads import load_field_from_file as load_grads_field_from_file
from reki_data_tool.utils import (
extract_domain,
combine_fields,
compute_field,
)
from reki_data_tool.postprocess.station.winter.meso1km.utils import (
standard_station,
standard_lat_section,
standard_lon_section,
)
from reki_data_tool.postprocess.station.winter.meso1km.common import (
LEVELS,
NAMES,
DATASET_NAMES,
STATIONS
)
from reki_data_tool.utils import cal_run_time, create_dask_client
@cal_run_time
def create_station_dask_v1(
output_file: Union[str, Path],
station_id: str,
start_time: pd.Timestamp,
grib2_files: List[Union[str, Path]],
postvar_file: Union[str, Path] = None,
engine: str = "local",
threads_per_worker: int = 1,
n_workers: int = None,
):
logger.info(f"create dask client with engine {engine}...")
if engine == "local":
client_kwargs = dict(threads_per_worker=threads_per_worker, n_workers=n_workers)
else:
client_kwargs = dict()
client = create_dask_client(engine, client_kwargs=client_kwargs)
logger.info("create dask client with engine {engine}...done")
logger.info(f"client: {client}")
logger.info("program begin")
# Station information
station_lat_index = STATIONS[station_id]["point"]["lat_index"]
station_lon_index = STATIONS[station_id]["point"]["lon_index"]
# Cross-section index ranges
lat_index_range = STATIONS[station_id]["section"]["lat_index_range"]
lon_index_range = STATIONS[station_id]["section"]["lon_index_range"]
logger.info("loading fields from files...")
data_list = dict()
for field_record in NAMES:
data_source = field_record.get("data_source", "grib2")
field_name = field_record["field_name"]
stations = []
lat_sections = []
lon_sections = []
if data_source == "grib2":
for file_path in grib2_files:
field = dask.delayed(load_grib2_field_from_file)(
file_path,
parameter=field_name,
level_type="pl",
level=LEVELS
)
# level_field = dask.delayed(extract_level)(field, levels)
field_station = dask.delayed(extract_domain)(field, station_lat_index, station_lon_index)
field_lat_section = dask.delayed(extract_domain)(field, lat_index_range, station_lon_index)
field_lon_section = dask.delayed(extract_domain)(field, station_lat_index, lon_index_range)
stations.append(field_station)
lat_sections.append(field_lat_section)
lon_sections.append(field_lon_section)
elif data_source == "postvar":
for forecast_hour in pd.to_timedelta(np.arange(0, 25, 1), unit="h"):
field = dask.delayed(load_grads_field_from_file)(
postvar_file,
parameter=field_name,
level_type="pl",
forecast_time=forecast_hour,
level=LEVELS
)
if field is None:
raise ValueError("field not found!")
# level_field = extract_level(field, levels)
field_station = dask.delayed(extract_domain)(field, station_lat_index, station_lon_index)
field_lat_section = dask.delayed(extract_domain)(field, lat_index_range, station_lon_index)
field_lon_section = dask.delayed(extract_domain)(field, station_lat_index, lon_index_range)
stations.append(field_station)
lat_sections.append(field_lat_section)
lon_sections.append(field_lon_section)
else:
raise ValueError(f"data source is not supported: {data_source}")
data_list[f"{field_name}_0"] = dask.delayed(combine_fields)(lat_sections, field_record, dim="valid_time")
data_list[f"{field_name}_9"] = dask.delayed(combine_fields)(lon_sections, field_record, dim="valid_time")
data_list[f"{field_name}"] = dask.delayed(combine_fields)(stations, field_record, dim="valid_time")
logger.info("loading fields from files...done")
logger.info("generating dataset fields...")
dataset_list = dict()
for record in DATASET_NAMES:
name = record["name"]
if "fields" not in record:
field_name = record["field_name"]
current_station = data_list[f"{field_name}"]
current_lat_section = data_list[f"{field_name}_0"]
current_lon_section = data_list[f"{field_name}_9"]
else:
op = record["operator"]
current_station = dask.delayed(compute_field)(op,
*[data_list[f"{f['field_name']}"] for f in record["fields"]])
current_lat_section = dask.delayed(compute_field)(op, *[data_list[f"{f['field_name']}_0"] for f in
record["fields"]])
current_lon_section = dask.delayed(compute_field)(op, *[data_list[f"{f['field_name']}_9"] for f in
record["fields"]])
dataset_list[f"{name}_0"] = dask.delayed(standard_lat_section)(current_lat_section, record)
dataset_list[f"{name}_9"] = dask.delayed(standard_lon_section)(current_lon_section, record)
dataset_list[f"{name}"] = dask.delayed(standard_station)(current_station, record)
logger.info("generating dataset fields...done")
def get_data_list(dataset_list):
return dataset_list
t = dask.delayed(get_data_list)(dataset_list)
logger.info("run DAG...")
result = t.persist()
if engine == "local":
progress(result)
else:
# progress(result)
pass
r = result.compute()
logger.info("run DAG...done")
client.close()
logger.info("creating xarray.Dataset...")
ds = xr.Dataset(r)
# Dimension attributes and variables
ds.coords["level"].attrs = {
"long_name": "Isobaric surface",
"units": "hPa"
}
ds["level"] = ds.coords["level"]
# Dataset attributes
ds.attrs = {
"model": "GRAPES-1KM",
"initial_time": f"{start_time}0000"
}
logger.info("creating xarray.Dataset...done")
logger.info("saving to NetCDF file...")
ds.to_netcdf(output_file, format="NETCDF3_CLASSIC")
logger.info(f"saving to NetCDF file...done, {output_file}")
logger.info("program end")
if __name__ == "__main__":
grib2_data_path = Path(
# "/g11/wangdp/project/work/data/playground/station/ncl/data",
# "grib2-orig"
"/g2/nwp_sp/OPER_ARCHIVE/GRAPES_MESO_1KM/Prod-grib/2022031300/ORIG"
)
grib2_files = list(grib2_data_path.glob("rmf.hgra.*.grb2"))
grib2_files = sorted(grib2_files)
# postvar_file_path = Path(
# "/g11/wangdp/project/work/data/playground/station/ncl/data",
# "postvar/postvar.ctl_202108251200000"
# )
station_id = "54406"
from reki_data_tool.postprocess.station.winter.meso1km.config import (
OUTPUT_DIRECTORY
)
output_file_path = Path(OUTPUT_DIRECTORY, f"station_{station_id}_11_dask_v1.nc")
create_station_dask_v1(
output_file=output_file_path,
station_id=station_id,
start_time=pd.to_datetime("2022-03-13 00:00:00"),
grib2_files=grib2_files,
)
import fitbit
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
from ast import literal_eval
# For managing update times, etc.
import datetime
import time
# For handling exit on Ctrl+C
import signal
import sys
# Module variable definitions
import config
# For OSC-related functionality
import argparse
from pythonosc import osc_message_builder
from pythonosc import udp_client
# Token files and credentials used
TOKEN_FILE = "tokens.token"
ACCESS_TOKEN_FILE = "access.token"
CLIENT_ID = ""
CLIENT_SECRET = ""
ACCESS_TOKEN = ""
REFRESH_TOKEN = ""
# Date to fetch (set to today), e.g. "2018-02-26"
DATE = datetime.datetime.now().strftime( '%Y-%m-%d' )
DATE = "2018-02-27"  # hard-coded override of the date to fetch
print(DATE)
# Handling for exit with Ctrl+C
def handler(signal, frame):
print('Exit with Ctrl+C / sys.exit(0) ')
# config.output_file.close()
sys.exit(0)
# Updater for the request-token file, which gets refreshed
def updateToken(token):
f = open(TOKEN_FILE, 'w')
f.write(str(token))
f.close()
return
def init_fitbit():
with open(ACCESS_TOKEN_FILE, "r") as f: # Read the token file
for line in f:
start_index=line.find("access_token")
if start_index == 0:
split_list = line.split()
ACCESS_TOKEN = split_list[2]
start_index=line.find("refresh_token")
if start_index == 0:
split_list = line.split()
REFRESH_TOKEN = split_list[2]
start_index=line.find("clientid")
if start_index == 0:
split_list = line.split()
CLIENT_ID = split_list[2]
start_index=line.find("clientsecret")
if start_index == 0:
split_list = line.split()
CLIENT_SECRET = split_list[2]
# The refresh token is only valid for 8 hours, so always refresh it
tokens = open(TOKEN_FILE).read()
token_dict = literal_eval(tokens)
# access_token = token_dict['access_token']
REFRESH_TOKEN = token_dict['refresh_token']
# For verifying the recorded IDs, etc.
print("CLIENT_ID = {0}".format(CLIENT_ID))
print("CLIENT_SECRET = {0}".format(CLIENT_SECRET))
print("ACCESS_TOKEN = {0}".format(ACCESS_TOKEN))
print("REFRESH_TOKEN = {0}".format(REFRESH_TOKEN))
# Configure the IDs, tokens, etc.
authd_client = fitbit.Fitbit(CLIENT_ID, CLIENT_SECRET,access_token=ACCESS_TOKEN, refresh_token=REFRESH_TOKEN, refresh_cb = updateToken)
return authd_client
def init_osc():
parser = argparse.ArgumentParser()
parser.add_argument("--ip", default="127.0.0.1", help="The ip of th OSC Server")
parser.add_argument("--port", type=int, default=config.port_num, help="The port the OSC server is listening on")
args = parser.parse_args()
osc_client = udp_client.UDPClient(args.ip, args.port)
# Log the settings
print("ip:127.0.0.1, port:" + str(config.port_num) + ", address:/data")
return osc_client
# Task to run repeatedly
def task(authd_client, osc_client, start_time="16:15", end_time="16:40"):
data_sec = authd_client.intraday_time_series('activities/heart', DATE, detail_level='1sec',start_time=start_time, end_time=end_time) #'1sec', '1min', or '15min'
heart_sec = data_sec["activities-heart-intraday"]["dataset"]
# Convert the fetched data to a DataFrame
heart_df = pd.DataFrame.from_dict(heart_sec)
# -*- coding: utf-8 -*-
"""
Copyright 2018 Infosys Ltd.
Use of this source code is governed by MIT license that can be found in the LICENSE file or at
https://opensource.org/licenses/MIT.
@author: zineb , Mohan
# -*- coding: utf-8 -*-
"""
#%%
import xml.dom.minidom
import pandas as pd
import requests
import datetime as DT
from dateutil.parser import parse
import win32com.client as win32
import newspaper
from newspaper import Article
import nltk
nltk.download('all')
from TwitterSearch import TwitterSearchOrder
from TwitterSearch import TwitterUserOrder
from TwitterSearch import TwitterSearchException
from TwitterSearch import TwitterSearch
from bs4 import BeautifulSoup as bs
import urllib3
import xmltodict
import traceback2 as traceback
import re
import warnings
import contextlib
from urllib3.exceptions import InsecureRequestWarning
import Algo_4
#%%
old_merge_environment_settings = requests.Session.merge_environment_settings
@contextlib.contextmanager
def no_ssl_verification():
opened_adapters = set()
def merge_environment_settings(self, url, proxies, stream, verify, cert):
# Verification happens only once per connection so we need to close
# all the opened adapters once we're done. Otherwise, the effects of
# verify=False persist beyond the end of this context manager.
opened_adapters.add(self.get_adapter(url))
settings = old_merge_environment_settings(self, url, proxies, stream, verify, cert)
settings['verify'] = False
return settings
requests.Session.merge_environment_settings = merge_environment_settings
try:
with warnings.catch_warnings():
warnings.simplefilter('ignore', InsecureRequestWarning)
yield
finally:
requests.Session.merge_environment_settings = old_merge_environment_settings
for adapter in opened_adapters:
try:
adapter.close()
except:
pass
keywords=[]
companies_names=[]
days_count=[]
emails=[]
persons_names=[]
connections=[]
companies_ids=[]
relevantsubject=[]
twitter_list=[]
main_list=[]
log_date=[]
def main():
#COMPANIES.XML
doc_comp = xml.dom.minidom.parse("companies.xml");
# print(doc.nodeName)
# print(doc.firstChild.tagName)
companies=doc_comp.getElementsByTagName("company")
for company in companies:
#print(company.getElementsByTagName("name"))
company_name=company.getElementsByTagName("c_name")[0].childNodes[0]
companies_names.append(company_name.nodeValue)
keyword=company.getElementsByTagName("keyword")
x=[]
for word in keyword:
x.append(word.childNodes[0].nodeValue)
keywords.append(x)
company_id=company.getElementsByTagName("c_id")[0].childNodes[0]
companies_ids.append(company_id.nodeValue)
twitter=company.getElementsByTagName("twitter_name")[0].childNodes[0].nodeValue
youtube=company.getElementsByTagName("youtube")[0].childNodes[0].nodeValue
hashtag=company.getElementsByTagName("hashtag")
z=[]
for word in hashtag:
z.append(word.childNodes[0].nodeValue)
twitter_list.append([twitter,z])
main_list.append([company_name.nodeValue,x,twitter,z,youtube])
#NEW DATE
doc_log = xml.dom.minidom.parse("log.xml");
log_date.append(doc_log.getElementsByTagName('day')[0].childNodes[0].nodeValue)
#PEOPLE.XML
doc = xml.dom.minidom.parse("people_v2.xml");
#print(doc.nodeName)
#print(doc.firstChild.tagName)
person=doc.getElementsByTagName("person")
for info in person:
# print(company.getElementsByTagName("name"))
person_name=info.getElementsByTagName("p_name")[0].childNodes[0]
#print(person_name)
persons_names.append(person_name.nodeValue)
email=info.getElementsByTagName("email")[0].childNodes[0]
emails.append(email.nodeValue)
grouped_company=info.getElementsByTagName("group")
group=[]
for g in grouped_company:
group_name=g.getElementsByTagName("g_name")[0].childNodes[0]
#group.append(group_name.nodeValue)
comp_name=g.getElementsByTagName("comp_id")
comp=[]
for c in range(len(comp_name)):
comp.append(comp_name[c].childNodes[0].nodeValue)
group.append([group_name.nodeValue,comp])
#connections.append(group)
single_companies=info.getElementsByTagName("single")[0]
cs_name=single_companies.getElementsByTagName("comp_id")
single_comp=[]
for s in range(len(cs_name)):
single_name=cs_name[s].childNodes[0].nodeValue
single_comp.append(single_name)
group.append(single_comp)
connections.append(group)
#Keywords.XML
doc_words = xml.dom.minidom.parse("keywords_list.xml");
#print(doc_date.nodeName)
for i in range(len(doc_words.getElementsByTagName('word'))):
word=doc_words.getElementsByTagName('word')[i]
l=word.childNodes[0].nodeValue
relevantsubject.append(l)
if __name__ == "__main__":
main();
#%%
urls=[]
current_companies=[]
datasets={}
API_KEY = ''
def content():
today = DT.date.today()
# days_ago = today - DT.timedelta(days=int(days_count[0]))
todayf = today.strftime("%Y-%m-%d")
# days_agof = days_ago.strftime("%Y-%m-%d")
#URLS
url = 'https://newsapi.org/v2/everything?q='
url_p2='&from='+log_date[0]+'&to='+todayf+'+&sortBy=publishedAt&language=en&apiKey='+ API_KEY
for company in range(len(keywords)):
# print(company)
# print(len(company))
if len(keywords[company]) == 0 :
print('no keywords given')
if len(keywords[company]) > 1 :
new_url = url + keywords[company][0]
for i in range(1,len(keywords[company])):
new_url = new_url + "%20AND%20"+ keywords[company][i]
final_url = new_url + url_p2
else:
final_url= url + keywords[company][0] + url_p2
# print(url)
urls.append(final_url)
# Build df with article info + create excel sheet
count = 0
# current_companies=[]
# datasets={}
for url in urls:
JSONContent = requests.get(url).json()
#content = json.dumps(JSONContent, indent = 4, sort_keys=True)
article_list = []
for i in range(len(JSONContent['articles'])):
article_list.append([JSONContent['articles'][i]['source']['name'],
JSONContent['articles'][i]['title'],
JSONContent['articles'][i]['publishedAt'],
JSONContent['articles'][i]['url']
])
#print(article_list)
if article_list != []:
datasets[companies_names[count]]= pd.DataFrame(article_list)
datasets[companies_names[count]].columns = ['Source/User','Title/Tweet','Date','Link']
datasets[companies_names[count]]['Date']=datasets[companies_names[count]]['Date'].str.replace('T',' ')
datasets[companies_names[count]]['Date']=datasets[companies_names[count]]['Date'].str.replace('Z','')
datasets[companies_names[count]]['Date']=datasets[companies_names[count]]['Date'].str.split(expand=True)
for i in range(len(datasets[companies_names[count]]['Date'])):
datasets[companies_names[count]]['Date'][i]=parse(datasets[companies_names[count]]['Date'][i])
datasets[companies_names[count]]['Date'][i]=datasets[companies_names[count]]['Date'][i].date()
#datasets[companies_names[count]]['Date'][i]=datasets[companies_names[count]]['Date'][i].str.split(expand=True)
#ds = '2012-03-01T10:00:00Z' # or any date sting of differing formats.
#date = parser.parse(ds)
#datasets[companies_names[count]]['Date']=pd.to_datetime(datasets[companies_names[count]]['Date'])
#print(datasets[companies_names[count]])
current_companies.append(companies_names[count])
count=count+1
else:
None
count=count+1
content()
duplicate_df=[]
def duplicate_check():
for article in datasets:
d=datasets[article][datasets[article].duplicated(['Title/Tweet'],keep='first')==True]
print(d)
if d.empty == False:
duplicate_df.append(d)
else:
None
#duplicate_article.append(d)
#duplicate_article = duplicate_article.concat([duplicate_article,d], axis=0)
#print(d)
duplicate_check()
def duplicate_drop():
for article in datasets:
datasets[article]=datasets[article].drop_duplicates(['Title/Tweet'],keep='first')
datasets[article]=datasets[article].reset_index()
datasets[article]=datasets[article].drop(['index'], axis=1)
duplicate_drop()
#%%
def Scoring():
for a in datasets:
try:
datasets[a].insert(0,'Category','Article')
datasets[a].insert(1,'Company',str(a))
datasets[a].insert(3,'Keywords','none')
datasets[a].insert(4,'Subjects/Views','none')
for i in range(len(datasets[a]['Link'])):
r=[]
article = Article(datasets[a]['Link'][i])
article.download()
article.html
article.parse()
txt=article.text.encode('ascii','ignore').decode('ascii')
#f=requests.get(datasets[article]['Link'][i])
#txt=f.text.encode('ascii','ignore').decode('ascii')
txt=txt.lower()
#total_word= wordcounter(txt).get_word_count()
for word in relevantsubject:
result=txt.count(word)
if result != 0:
r.append(word +'('+ str(txt.count(word)) +')')
else:
None
# relevanceLink.append(r)
r=', '.join(word for word in r)
if r != []:
datasets[a]['Subjects/Views'][i]=str(r + ' (totalWords:'+ str(len(txt.split()))+')')
else:
datasets[a]['Subjects/Views'][i]=str('None')
article.nlp()
k=', '.join(keyword for keyword in article.keywords)
datasets[a]['Keywords'][i]=str(k)
except newspaper.article.ArticleException:
None
#k= []
#for keyword in article.keywords:
# k.append[keyword]
# k=', '.join(keyword for keyword in k)
# datasets[a]['Keywords'][i]=str(k)
Scoring()
# datasets[article]
#%% Formatting
companies_dic=dict(zip(companies_names, companies_ids))
people_comp_dic=dict(zip(persons_names, connections))
people_email_dic=dict(zip(persons_names, emails))
Subject = pd.DataFrame(relevantsubject)
Subject.columns=['Subject Interest']
Companies = pd.DataFrame(companies_names)
Companies.columns=['Companies Interest']
CS = pd.concat([Subject, Companies], axis=1)
CS.fillna('',inplace=True)
MainDF=pd.DataFrame(main_list)
MainDF.columns=['company','keywords','twitter','hashtag','youtube']
#import re
def Find(string):
url = re.findall('http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+] |[!*\(\), ]|(?:%[0-9a-fA-F][0-9a-fA-F])|(?:%[0-9a-fA-F]|[$-_@.&+]|[!*\(\), ]|[0-9a-fA-F]))+', string)
return url
#%%
tweets_datasets={}
tw_current_companies=[]
today = DT.date.today()
#days_ago = today - DT.timedelta(days=int(days_count[0]))
new_date = parse(log_date[0]).date()
def Tweets():
try:
max_feeds=10
tso = TwitterSearchOrder() # create a TwitterSearchOrder object
tso.set_language('en')
tso.set_include_entities(False) # and don't give us all those entity information
tso.set_until(new_date)
tso.arguments.update({'tweet_mode':'extended'})
tso.arguments.update({'truncated': 'False' })
ts = TwitterSearch(
consumer_key = '',
consumer_secret = '',
access_token = '',
access_token_secret = '',
proxy='http://proxy_address'
)
for c in range(len(MainDF)):
count=0
#kw=[MainDF['twitter'][c]]
#for h in MainDF['hashtag'][c]:
# kw.append(h)
tso.set_keywords(MainDF['hashtag'][c])
tweets_list=[]
tuo = TwitterUserOrder(MainDF['twitter'][c])
# tuo.set_language('en')
tuo.set_include_entities(False) # and don't give us all those entity information
# tuo.set_until(days_ago)
# tuo.set_count(15)
tuo.arguments.update({'tweet_mode':'extended'})
tuo.arguments.update({'truncated': 'False' })
#for tweet in ts.search_tweets_iterable(tso):
# print(tweet)
# tweets_list.append([tweet['user']['screen_name'],tweet['full_text']])
for tweet in ts.search_tweets_iterable(tso):
if 'retweeted_status' in tweet:
None
#tweets_list.append([tweet['user']['screen_name'],tweet['retweeted_status']['full_text'],'Retweet of ' + tweet['retweeted_status']['user']['screen_name']])
else:
links=Find(tweet['full_text'])
links=', '.join(link for link in links)
#print(tweet)
tweets_list.append([MainDF['company'][c],tweet['user']['screen_name'],tweet['full_text'],tweet['created_at'],links])
for tweet in ts.search_tweets_iterable(tuo):
if tweet['lang'] != 'en':
#print(tweet)
None
else:
# print(tweet)
links=Find(tweet['full_text'])
links=', '.join(link for link in links)
tweets_list.append([MainDF['company'][c],tweet['user']['screen_name'],tweet['full_text'],tweet['created_at'],links])
count=count+1
if count == max_feeds:
break
if tweets_list != []:
tweets_datasets[MainDF['company'][c]]= pd.DataFrame(tweets_list)
tweets_datasets[MainDF['company'][c]].columns = ['Company','Source/User','Title/Tweet','Date','Link']
tweets_datasets[MainDF['company'][c]].insert(0,'Category','Twitter')
for i in range(len(tweets_datasets[MainDF['company'][c]]['Date'])):
tweets_datasets[MainDF['company'][c]]['Date'][i]=parse(tweets_datasets[MainDF['company'][c]]['Date'][i])
tweets_datasets[MainDF['company'][c]]['Date'][i]=tweets_datasets[MainDF['company'][c]]['Date'][i].date()
#print(datasets[companies_names[count]])
tw_current_companies.append(MainDF['company'][c])
else:
None
#tweets_list.append()
#print( '@%s tweeted: %s' % ( tweet['user']['screen_name'], tweet['text'] ) )
except TwitterSearchException as e: # take care of all those ugly errors if there are some
print(e)
with no_ssl_verification():
Tweets()
#%% Filter to keep only today's items
for comp in tweets_datasets:
tweets_datasets[comp]=tweets_datasets[comp].loc[tweets_datasets[comp]['Date'] >= new_date]
for comp in list(tweets_datasets.keys()):
if tweets_datasets[comp].empty == True:
del tweets_datasets[comp]
#re-indexing
for comp in tweets_datasets:
tweets_datasets[comp]=tweets_datasets[comp].reset_index()
tweets_datasets[comp]=tweets_datasets[comp].drop(['index'], axis=1)
#tweets_datasets = tweets_datasets.loc[tweets_datasets[comp].empty == False]
#%%
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
Double_df=[]
for comp in tweets_datasets:
for i in range(len(tweets_datasets[comp])):
doubles=[]
#doubles.append(comp)
X =tweets_datasets[comp]['Title/Tweet'][i]
X_list = word_tokenize(X)
sw = stopwords.words('english')
X_set = {w for w in X_list if not w in sw}
for n in range(len(tweets_datasets[comp])):
Y =tweets_datasets[comp]['Title/Tweet'][n]
# tokenization
Y_list = word_tokenize(Y)
# sw contains the list of stopwords
# sw = stopwords.words('english')
l1 =[];l2 =[]
# remove stop words from string
#X_set = {w for w in X_list if not w in sw}
Y_set = {w for w in Y_list if not w in sw}
# form a set containing keywords of both strings
rvector = X_set.union(Y_set)
for w in rvector:
if w in X_set: l1.append(1) # create a vector
else: l1.append(0)
if w in Y_set: l2.append(1)
else: l2.append(0)
c = 0
# cosine formula
for i in range(len(rvector)):
c+= l1[i]*l2[i]
cosine = c / float((sum(l1)*sum(l2))**0.5)
print(tweets_datasets[comp]['Title/Tweet'][n])
print("similarity: ", cosine)
if (Y == X)== True:
#None
print('Same')
else:
if 0.80 <= cosine <= 0.99 :
print('Yes!')
doubles.append(tweets_datasets[comp].iloc[[n]])
#d=tweets_datasets[comp][tweets_datasets[comp]['Title/Tweet'][n]]
#doubles.append(d)
else:
None
if doubles != []:
d=pd.concat(doubles)
d=d.reset_index()
d=d.drop(['index'],axis=1)
Double_df.append(d)
else:
None
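# The nested loops above compute a binary bag-of-words cosine similarity by hand to flag near-duplicate
# tweets (0.80 <= similarity <= 0.99). The function below is only a sketch of the same idea using
# scikit-learn, which the original script does not import; it is shown for reference and is not called.
def near_duplicate_pairs(texts, low=0.80, high=0.99):
    """Sketch: index pairs (i, j) of texts whose binary bag-of-words cosine similarity lies in [low, high]."""
    from sklearn.feature_extraction.text import CountVectorizer
    from sklearn.metrics.pairwise import cosine_similarity
    vectors = CountVectorizer(stop_words='english', binary=True).fit_transform(texts)
    sim = cosine_similarity(vectors)
    return [(i, j) for i in range(len(texts)) for j in range(i + 1, len(texts))
            if low <= sim[i, j] <= high]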
def drop_similar():
for comp in tweets_datasets:
for i in range(len(Double_df)):
for n in range(len(Double_df[i])):
for r in range(len(tweets_datasets[comp].copy())):
if Double_df[i]['Title/Tweet'][n] != tweets_datasets[comp]['Title/Tweet'][r]:
None
else:
tweets_datasets[comp]=tweets_datasets[comp].drop(r)
tweets_datasets[comp]=tweets_datasets[comp].reset_index()
tweets_datasets[comp]=tweets_datasets[comp].drop(['index'], axis=1)
drop_similar()
#%%
tw_duplicate_df=[]
def tw_duplicate_check():
try:
for article in tweets_datasets:
d=tweets_datasets[article][tweets_datasets[article].duplicated(subset=['Title/Tweet'],keep='first')==True]
print(d)
if d.empty == False:
tw_duplicate_df.append(d)
else:
None
except:
None
tw_duplicate_check()
def tw_duplicate_drop():
if tw_duplicate_df != []:
for article in tweets_datasets:
tweets_datasets[article]=tweets_datasets[article].drop_duplicates(subset=['Title/Tweet'],keep='first')
tweets_datasets[article]=tweets_datasets[article].reset_index()
tweets_datasets[article]=tweets_datasets[article].drop(['index'], axis=1)
else:
None
tw_duplicate_drop()
#%%
def Scoring_Tweet():
for a in tweets_datasets:
#datasets[a].insert(0,'Company',str(a))
tweets_datasets[a].insert(3,'Subjects/Views','none')
for i in range(len(tweets_datasets[a]['Title/Tweet'])):
r=[]
txt=tweets_datasets[a]['Title/Tweet'][i].encode('ascii','ignore').decode('ascii')
#f=requests.get(datasets[article]['Link'][i])
#txt=f.text.encode('ascii','ignore').decode('ascii')
txt=txt.lower()
#total_word= wordcounter(txt).get_word_count()
for word in relevantsubject:
result=txt.count(word)
if result != 0:
r.append(word +'('+ str(txt.count(word)) +')')
else:
None
# relevanceLink.append(r)
r=', '.join(word for word in r)
if r != []:
tweets_datasets[a]['Subjects/Views'][i]=str(r + ' (totalWords:'+ str(len(txt.split()))+')')
else:
tweets_datasets[a]['Subjects/Views'][i]=str('None')
Scoring_Tweet()
#%%
general_df = {}
general_df = tweets_datasets.copy()
for n in datasets:
if n in general_df:
general_df[n]=pd.concat([datasets[n],general_df[n]], axis=0, sort=False)
else:
general_df.update({str(n):datasets[n]})
for comp in general_df:
general_df[comp]=general_df[comp].reset_index()
general_df[comp]=general_df[comp].drop(['index'], axis=1)
#%%
Youtube_dataset ={}
base = "https://www.youtube.com/user/{}/videos"
from textblob import TextBlob
#qstring = "snowflakecomputing"
for i in range(len(MainDF)):
qstring= MainDF['youtube'][i]
with no_ssl_verification():
r = requests.get(base.format(qstring) )
page = r.text
soup=bs(page,'html.parser')
vids= soup.findAll('a',attrs={'class':'yt-uix-tile-link'})
duration=soup.findAll('span',attrs={'class':'accessible description'})
date=soup.findAll('ul',attrs={'class':'yt-lockup-meta-info'})
videolist=[]
for v in vids:
tmp = 'https://www.youtube.com' + v['href']
videolist.append([v['title'],tmp])
infos=[]
for d in date:
x=d.findAll('li')
infos.append([x[0].text,x[1].text])
youtubeDF=pd.DataFrame(videolist)
infosDF=pd.DataFrame(infos)
youtubeDF=pd.concat([youtubeDF,infosDF],axis=1)
#print(youtubeDF)
if youtubeDF.empty == False :
youtubeDF.columns=['Title/Tweet','Link','Subjects/Views','Date']
youtubeDF.insert(0,'Company',str(MainDF['company'][i]))
youtubeDF.insert(0,'Category','youtube')
youtubeDF.insert(2,'Source/User',base.format(qstring))
last = youtubeDF.loc[youtubeDF['Date']=='1 day ago']
last['Date']=last['Date'].replace('1 day ago',log_date[0], regex=True)
if last.empty == False:
Youtube_dataset[MainDF['company'][i]]=pd.DataFrame(last)
else:
None
else:
None
#%%
#re-indexing
for comp in Youtube_dataset:
Youtube_dataset[comp]=Youtube_dataset[comp].reset_index()
Youtube_dataset[comp]=Youtube_dataset[comp].drop(['index'], axis=1)
#%%
for i in list(Youtube_dataset.keys()):
for n in range(len(Youtube_dataset[i])):
if TextBlob(Youtube_dataset[i]['Title/Tweet'][n]).detect_language() != 'en':
Youtube_dataset[i]=Youtube_dataset[i].drop(n)
Youtube_dataset[i]=Youtube_dataset[i].reset_index()
Youtube_dataset[i]=Youtube_dataset[i].drop(['index'], axis=1)
else:
None
#%%
for comp in list(Youtube_dataset.keys()):
if Youtube_dataset[comp].empty == True:
del Youtube_dataset[comp]
#%%
#re-indexing
for comp in Youtube_dataset:
Youtube_dataset[comp]=Youtube_dataset[comp].reset_index()
Youtube_dataset[comp]=Youtube_dataset[comp].drop(['index'], axis=1)
#%%
for n in Youtube_dataset:
if n in general_df:
general_df[n]=pd.concat([Youtube_dataset[n],general_df[n]], axis=0, sort=False)
else:
general_df.update({str(n):Youtube_dataset[n]})
for comp in general_df:
general_df[comp]=general_df[comp].reset_index()
general_df[comp]=general_df[comp].drop(['index'], axis=1)
#%% ITUNES
urls_itunes=[]
itunes_content={}
url_1='https://itunes.apple.com/search?lang=en_us&term='
for i in range(len(companies_names)):
url_final= url_1 + companies_names[i]
#print(url_final)
urls_itunes.append(url_final)
i = 0
for url in urls_itunes:
response = requests.get(url)
content = response.json()
podcast_list=[]
for n in range(len(content['results'])):
c = content['results'][n]
#print(content['results'][n].items())
if content['results'][n]['wrapperType'] == 'audiobook':
podcast_list.append(['audiobook',companies_names[i],c['artistName'],c['collectionName'],c['releaseDate'],c['collectionViewUrl']])
else:
if content['results'][n]['kind'] == 'podcast':
podcast_list.append(['podcast',companies_names[i],c['artistName'],c['collectionName'],c['releaseDate'],c['trackViewUrl']])
else:
None
if podcast_list != []:
itunes_content[companies_names[i]] = pd.DataFrame(podcast_list)
itunes_content[companies_names[i]].columns =['Category','Company','Source/User','Title/Tweet','Date','Link']
itunes_content[companies_names[i]]['Date']=itunes_content[companies_names[i]]['Date'].str.replace('T',' ')
itunes_content[companies_names[i]]['Date']=itunes_content[companies_names[i]]['Date'].str.replace('Z',' ')
itunes_content[companies_names[i]]['Date']=itunes_content[companies_names[i]]['Date'].str.split(expand=True)
for d in range(len(itunes_content[companies_names[i]]['Date'])):
itunes_content[companies_names[i]]['Date'][d]=parse(itunes_content[companies_names[i]]['Date'][d])
itunes_content[companies_names[i]]['Date'][d]=itunes_content[companies_names[i]]['Date'][d].date()
i = i +1
else:
i = i +1
# Keep only items within the desired period
for comp in itunes_content:
itunes_content[comp]=itunes_content[comp].loc[itunes_content[comp]['Date'] >= new_date]
for comp in list(itunes_content.keys()):
if itunes_content[comp].empty == True:
del itunes_content[comp]
#re-indexing
for comp in itunes_content:
itunes_content[comp]=itunes_content[comp].reset_index()
itunes_content[comp]=itunes_content[comp].drop(['index'], axis=1)
#%%
for n in itunes_content:
if n in general_df:
general_df[n]=pd.concat([itunes_content[n],general_df[n]], axis=0, sort=False)
else:
general_df.update({str(n):itunes_content[n]})
for comp in general_df:
general_df[comp]=general_df[comp].reset_index()
general_df[comp]=general_df[comp].drop(['index'], axis=1)
#%% RSS FEED
#import urlopen
url='https://blogs.gartner.com/gbn-feed/'
proxy = urllib3.ProxyManager('http://proxy_address')
http = urllib3.PoolManager()
response = proxy.request('GET', url)
try:
data = xmltodict.parse(response.data)
except:
print("Failed to parse xml from response (%s)" % traceback.format_exc())
RSSfeed=pd.DataFrame(data['rss']['channel']['item'])
for r in range(len(RSSfeed)):
RSSfeed['pubDate'][r]=parse(RSSfeed['pubDate'][r])
RSSfeed['pubDate'][r]=RSSfeed['pubDate'][r].date()
RSSfeed=RSSfeed.loc[RSSfeed['pubDate'] >= new_date]
#RSSfeed=RSSfeed.loc[RSSfeed['pubDate'] >= parse('2019-09-12').date()]
RSSfeed=RSSfeed.drop(['guid','author','headshot','category'], axis=1)
#%%
def RSS_scoring():
try:
if RSSfeed.empty == True:
None
else:
RSSfeed.insert(0,'Category','Gartner')
#RSSfeed.insert(1,'Company','/')
RSSfeed.insert(3,'Keywords','none')
RSSfeed.insert(4,'Subjects/Views','none')
for i in range(len(RSSfeed['link'])):
article = Article(RSSfeed['link'][i])
article.download()
article.html
article.parse()
txt=article.text.encode('ascii','ignore').decode('ascii')
#f=requests.get(datasets[article]['Link'][i])
#txt=f.text.encode('ascii','ignore').decode('ascii')
txt=txt.lower()
#total_word= wordcounter(txt).get_word_count()
r=[]
for word in relevantsubject:
result=txt.count(word)
if result != 0:
r.append(word +'('+ str(txt.count(word)) +')')
else:
None
# relevanceLink.append(r)
r=', '.join(word for word in r)
if r != []:
RSSfeed['Subjects/Views'][i]=str(r + ' (totalWords:'+ str(len(txt.split()))+')')
else:
RSSfeed['Subjects/Views'][i]=str('None')
article.nlp()
k=', '.join(keyword for keyword in article.keywords)
RSSfeed['Keywords'][i]=str(k)
except:
None
RSS_scoring()
#%%
if RSSfeed.empty != True:
RSSfeed=RSSfeed.loc[RSSfeed['Subjects/Views'].str.contains('data') == True]
if RSSfeed.empty == True:
RSSfeed = RSSfeed.drop(['Category', 'title', 'link', 'Keywords', 'Subjects/Views', 'description','pubDate'], axis=1)
else:
None
else:
RSSfeed = RSSfeed.drop([ 'title', 'link', 'description','pubDate'], axis=1)
#%%
for i in RSSfeed:
RSSfeed[i].fillna(" ", inplace=True)
NEWS = RSSfeed['title'] + RSSfeed['Keywords'] + RSSfeed['Subjects/Views']
NEWS = NEWS.apply(Algo_4.clean_text)
NEWS_cv = Algo_4.cv.transform(NEWS)
NEWS_tfdi = Algo_4.tfidf_transformer.transform(NEWS_cv)
predictions_news = Algo_4.naive_bayes.predict(NEWS_cv)
predictions_news=pd.Series(predictions_news)
RSSfeed=pd.concat([RSSfeed,predictions_news], axis=1)
RSSfeed.rename(columns={0:'Relevant(1)/Irrelevant(0)'}, inplace=True)
#%%
for i in general_df:
for n in general_df[i]:
general_df[i][n].fillna(" ", inplace=True)
for i in general_df:
if 'Keywords' in general_df[i]:
NEWS = general_df[i]['Title/Tweet'] + general_df[i]['Source/User']+ general_df[i]['Keywords'] + general_df[i]['Subjects/Views']
else:
NEWS = general_df[i]['Title/Tweet'] + general_df[i]['Source/User'] + general_df[i]['Subjects/Views']
NEWS = NEWS.apply(Algo_4.clean_text)
NEWS_cv = Algo_4.cv.transform(NEWS)
NEWS_tfdi = Algo_4.tfidf_transformer.transform(NEWS_cv)
predictions_news = Algo_4.naive_bayes.predict(NEWS_cv)
predictions_news=pd.Series(predictions_news)
general_df[i]=pd.concat([general_df[i],predictions_news], axis=1)
general_df[i].rename(columns={0:'Relevant(1)/Irrelevant(0)'}, inplace=True)
#%%
def ToExcel():
for p in people_comp_dic:
with pd.ExcelWriter('NewsFor' + str(p)+'-'+ DT.date.today().strftime("%d-%m-%Y") +'.xlsx',engine='xlsxwriter',options={'strings_to_urls': False}) as writer:
workbook=writer.book
cell_format = workbook.add_format()
cell_format.set_text_wrap({'text_wrap':True})
col_format = workbook.add_format({
'align': 'vcenter',
'text_wrap': 'vjustify',
'num_format':'@'})
#print(i)
for c in range(len(people_comp_dic[p])) :
if len(people_comp_dic[p][c]) == 2 and type(people_comp_dic[p][c][1]) == list:
GroupedDataset=pd.DataFrame()
#print(GroupedDataset)
for sc in range(len(people_comp_dic[p][c][1])):
for i in general_df:
if str(i) == str(people_comp_dic[p][c][1][sc]):
GroupedDataset=pd.concat([GroupedDataset,general_df[i]],axis=0,sort=False)
else:
None
#if GroupeDataset = []:
#print(GroupedDataset)
if GroupedDataset.empty == False:
GroupedDataset.to_excel(writer,sheet_name=str(people_comp_dic[p][c][0]), index=False)
worksheet=writer.sheets[str(people_comp_dic[p][c][0])]
worksheet.autofilter('A1:H20')
worksheet.set_column('A:A',10,col_format)
worksheet.set_column('B:B',10,col_format)
worksheet.set_column('C:C',10,col_format)
worksheet.set_column('D:D',40,col_format)
worksheet.set_column('E:E',30,col_format)
worksheet.set_column('F:F',40,col_format)
worksheet.set_column('G:G',10,col_format)
worksheet.set_column('H:H',30,col_format)
worksheet.set_column('I:I',30,col_format)
else:
#print('df empty')
GroupedDataset.to_excel(writer,sheet_name=str(people_comp_dic[p][c][0]), index=False)
worksheet=writer.sheets[str(people_comp_dic[p][c][0])]
worksheet.write('A1', 'No news about this group.')
elif len(people_comp_dic[p][c]) > 1:
for sc in range(len(people_comp_dic[p][c])):
for i in general_df:
if str(i) == str(people_comp_dic[p][c][sc]):
general_df[i].to_excel(writer,sheet_name=str(i), index=False)
worksheet=writer.sheets[str(i)]
worksheet.autofilter('A1:H20')
worksheet.set_column('A:A',10,col_format)
worksheet.set_column('B:B',10,col_format)
worksheet.set_column('C:C',10,col_format)
worksheet.set_column('D:D',40,col_format)
#worksheet.set_column('D:D',20,col_format)
worksheet.set_column('E:E',30,col_format)
worksheet.set_column('F:F',40,col_format)
worksheet.set_column('G:G',10,col_format)
worksheet.set_column('H:H',30,col_format)
worksheet.set_column('I:I',30,col_format)
else:
None
else:
None
if RSSfeed.empty == True:
RSSfeed.to_excel(writer,sheet_name='Gartner', index=False)
worksheet=writer.sheets['Gartner']
worksheet.write('A1', 'No Gartner feeds for today!')
else:
RSSfeed.to_excel(writer,sheet_name='Gartner',index=False)
worksheet=writer.sheets['Gartner']
worksheet.autofilter('A1:G20')
worksheet.set_column('A:A',10,col_format)
worksheet.set_column('B:B',40,col_format)
worksheet.set_column('C:C',30,col_format)
worksheet.set_column('D:D',30,col_format)
worksheet.set_column('E:E',30,col_format)
worksheet.set_column('F:F',40,col_format)
worksheet.set_column('G:G',10,col_format)
worksheet.set_column('H:H',30,col_format)
CS.to_excel(writer,sheet_name='Info',index=False)
worksheet=writer.sheets['Info']
worksheet.set_column('A:B',30,col_format)
GroupedDuplicate=pd.DataFrame()
for d in range(len(duplicate_df)):
GroupedDuplicate=pd.concat([GroupedDuplicate,duplicate_df[d]],sort=False,axis=0)
for tw in range(len(tw_duplicate_df)):
GroupedDuplicate=pd.concat([GroupedDuplicate,tw_duplicate_df[tw]],sort=False,axis=0)
for db in range(len(Double_df)):
#for i in range(len(Double_df[db])):
GroupedDuplicate=pd.concat([GroupedDuplicate,Double_df[db]],sort=False,axis=0)
if GroupedDuplicate.empty == False:
GroupedDuplicate.to_excel(writer,sheet_name='Backlog', index=False)
worksheet=writer.sheets['Backlog']
worksheet.autofilter('A1:H20')
worksheet.set_column('A:A',10,col_format)
worksheet.set_column('B:B',10,col_format)
worksheet.set_column('C:C',10,col_format)
worksheet.set_column('D:D',40,col_format)
worksheet.set_column('E:E',30,col_format)
worksheet.set_column('F:F',40,col_format)
worksheet.set_column('G:G',10,col_format)
worksheet.set_column('H:H',30,col_format)
else:
GroupedDuplicate.to_excel(writer,sheet_name='Backlog', index=False)
worksheet=writer.sheets['Backlog']
worksheet.write('A1', 'No duplicates.')
# ALL COMPANIES
    with pd.ExcelWriter('AllCompaniesNews.xlsx', engine='xlsxwriter') as writer:
# coding: utf-8
"""
"""
import pandas as pd
import numpy as np
import re
import csv
import io
import time
import traceback
import logging
logging.basicConfig(format="%(asctime)s %(levelname)s: %(message)s", level=logging.DEBUG)
def cal_jlr_zengzhanglv(data, col1, col2):
kuisun_count = 0
if not (data.iat[0] > 0 and data.iat[-1] > 0):
fhzzl = np.nan
else:
        # compound growth rate (CAGR)
fhzzl = ((data.iat[0] / data.iat[-1]) ** (1.0 / (len(data) - 1)) - 1) * 100
for d in data[:-1]:
if d < 0:
kuisun_count += 1
return pd.Series({col1: fhzzl, col2: kuisun_count})
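# Illustrative check of cal_jlr_zengzhanglv (hypothetical profits, newest first,
# not taken from the input files): for pd.Series([2.0, 1.5, -0.2, 1.0]) the
# compound growth rate is ((2.0 / 1.0) ** (1.0 / 3) - 1) * 100 ≈ 26.0 and one
# loss year is counted from the negative entry:
#   cal_jlr_zengzhanglv(pd.Series([2.0, 1.5, -0.2, 1.0]), 'growth', 'loss_years')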
def cal_PEG(data, col1, col2):
# data.iat[0] is PE
if not (data.iat[0] > 0 and data.iat[1] > 0 and data.iat[-1] > 0):
peg = np.nan
fhzzl = np.nan
else:
        # compound growth rate (CAGR)
fhzzl = ((data.iat[1] / data.iat[-1]) ** (1.0 / (len(data) - 2)) - 1) * 100
if fhzzl == 0:
peg = np.nan
else:
peg = data.iat[0] / fhzzl
return pd.Series({col1: fhzzl, col2: peg})
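# Illustrative check of cal_PEG (hypothetical numbers, not from the input files):
# for a series [PE, profit_t, ..., profit_t-3] = [15.0, 2.0, 1.5, 1.2, 1.0],
# fhzzl = ((2.0 / 1.0) ** (1.0 / 3) - 1) * 100 ≈ 26.0 and PEG ≈ 15.0 / 26.0 ≈ 0.58:
#   cal_PEG(pd.Series([15.0, 2.0, 1.5, 1.2, 1.0]), 'growth', 'PEG')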
def generate_date_label(start_label):
dongtai_label = start_label
if pd.Timestamp(dongtai_label).is_year_end:
jingtai_label = dongtai_label
else:
jingtai_label = (pd.Timestamp(dongtai_label)- pd.offsets.YearEnd(1)).strftime("%Y%m%d")
# q4 ttm = q4 + last q4 - last q4
# q3 ttm = q3 + last q4 - last q3
# q2 ttm = q2 + last q4 - last q2
# q1 ttm = q1 + last q4 - last q1
ttm_label1 = dongtai_label
ttm_label2 = (pd.Timestamp(dongtai_label) -
pd.offsets.YearEnd(1)).strftime("%Y%m%d")
ttm_label3 = (pd.Timestamp(dongtai_label) -
pd.offsets.DateOffset(years=1)).strftime("%Y%m%d")
y4_label = pd.date_range(end=jingtai_label, freq='Y', periods=4).strftime("%Y%m%d")[::-1].tolist()
y3_label = y4_label[:-1]
y6_label = pd.date_range(end=jingtai_label, freq='Y', periods=6).strftime("%Y%m%d")[::-1].tolist()
y5_label = y6_label[:-1]
y11_label = pd.date_range(end=jingtai_label, freq='Y', periods=11).strftime("%Y%m%d")[::-1].tolist()
y10_label = y11_label[:-1]
yall_label = pd.date_range(end=jingtai_label, freq='Y', periods=30).strftime("%Y%m%d")[::-1].tolist()
return dongtai_label, jingtai_label, ttm_label1, ttm_label2, ttm_label3, \
y3_label, y4_label, y5_label, y6_label, y10_label, y11_label, yall_label
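# Example of the labels produced (illustrative, assuming start_label='20200630',
# which is not a year end): dongtai='20200630', jingtai='20191231',
# ttm labels = ('20200630', '20191231', '20190630'),
# y3_label = ['20191231', '20181231', '20171231'], and so on for the longer windows.
#   labels = generate_date_label('20200630')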
if __name__ == '__main__':
logging.debug("read all input.")
with io.open(r'..\all_other_data\symbol.txt', 'r', encoding='utf-8') as f:
symbol = [s.strip()[2:] for s in f.readlines()]
# symbol = symbol[0:1]
zhibiao_df = pd.read_csv(r"..\all_other_data\all_zhibiao_for_cwfx_tdx.csv", encoding='gbk', dtype={u'代码': str})
zhibiao_df = zhibiao_df.set_index(keys=[u'代码', u'指标'])
# zhibiao_df = zhibiao_df.reindex(index=[u'000001', u'000002', u'000003', u'000004'], level=0)
    finance_df = pd.read_csv(r"..\all_other_data\all_finance_info.csv", encoding='gbk', dtype={u'代码': str})
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import pickle
import shutil
import sys
import tempfile
import numpy as np
from numpy import arange, nan
import pandas.testing as pdt
from pandas import DataFrame, MultiIndex, Series, to_datetime
# dependencies testing specific
import pytest
import recordlinkage
from recordlinkage.base import BaseCompareFeature
STRING_SIM_ALGORITHMS = [
'jaro', 'q_gram', 'cosine', 'jaro_winkler', 'dameraulevenshtein',
'levenshtein', 'lcs', 'smith_waterman'
]
NUMERIC_SIM_ALGORITHMS = ['step', 'linear', 'squared', 'exp', 'gauss']
FIRST_NAMES = [
u'Ronald', u'Amy', u'Andrew', u'William', u'Frank', u'Jessica', u'Kevin',
u'Tyler', u'Yvonne', nan
]
LAST_NAMES = [
u'Graham', u'Smith', u'Holt', u'Pope', u'Hernandez', u'Gutierrez',
u'Rivera', nan, u'Crane', u'Padilla'
]
STREET = [
u'<NAME>', nan, u'<NAME>', u'<NAME>', u'<NAME>',
u'<NAME>', u'Williams Trail', u'Durham Mountains', u'Anna Circle',
u'<NAME>'
]
JOB = [
u'Designer, multimedia', u'Designer, blown glass/stained glass',
u'Chiropractor', u'Engineer, mining', u'Quantity surveyor',
u'Phytotherapist', u'Teacher, English as a foreign language',
u'Electrical engineer', u'Research officer, government', u'Economist'
]
AGES = [23, 40, 70, 45, 23, 57, 38, nan, 45, 46]
# Run all tests in this file with:
# nosetests tests/test_compare.py
class TestData(object):
@classmethod
def setup_class(cls):
N_A = 100
N_B = 100
cls.A = DataFrame({
'age': np.random.choice(AGES, N_A),
'given_name': np.random.choice(FIRST_NAMES, N_A),
'lastname': np.random.choice(LAST_NAMES, N_A),
'street': np.random.choice(STREET, N_A)
})
cls.B = DataFrame({
'age': np.random.choice(AGES, N_B),
'given_name': np.random.choice(FIRST_NAMES, N_B),
'lastname': np.random.choice(LAST_NAMES, N_B),
'street': np.random.choice(STREET, N_B)
})
cls.A.index.name = 'index_df1'
cls.B.index.name = 'index_df2'
cls.index_AB = MultiIndex.from_arrays(
[arange(len(cls.A)), arange(len(cls.B))],
names=[cls.A.index.name, cls.B.index.name])
# Create a temporary directory
cls.test_dir = tempfile.mkdtemp()
@classmethod
def teardown_class(cls):
# Remove the test directory
shutil.rmtree(cls.test_dir)
class TestCompareApi(TestData):
"""General unittest for the compare API."""
def test_repr(self):
comp = recordlinkage.Compare()
comp.exact('given_name', 'given_name')
comp.string('given_name', 'given_name', method='jaro')
comp.numeric('age', 'age', method='step', offset=3, origin=2)
comp.numeric('age', 'age', method='step', offset=0, origin=2)
c_str = str(comp)
c_repr = repr(comp)
assert c_str == c_repr
start_str = '<{}'.format(comp.__class__.__name__)
assert c_str.startswith(start_str)
def test_instance_linking(self):
comp = recordlinkage.Compare()
comp.exact('given_name', 'given_name')
comp.string('given_name', 'given_name', method='jaro')
comp.numeric('age', 'age', method='step', offset=3, origin=2)
comp.numeric('age', 'age', method='step', offset=0, origin=2)
result = comp.compute(self.index_AB, self.A, self.B)
# returns a Series
assert isinstance(result, DataFrame)
# resulting series has a MultiIndex
assert isinstance(result.index, MultiIndex)
        # index names are ok
assert result.index.names == [self.A.index.name, self.B.index.name]
assert len(result) == len(self.index_AB)
def test_instance_dedup(self):
comp = recordlinkage.Compare()
comp.string('given_name', 'given_name', method='jaro')
comp.numeric('age', 'age', method='step', offset=3, origin=2)
comp.numeric('age', 'age', method='step', offset=0, origin=2)
result = comp.compute(self.index_AB, self.A)
# returns a Series
assert isinstance(result, DataFrame)
# resulting series has a MultiIndex
assert isinstance(result.index, MultiIndex)
        # index names are ok
assert result.index.names == [self.A.index.name, self.B.index.name]
assert len(result) == len(self.index_AB)
def test_label_linking(self):
comp = recordlinkage.Compare()
comp.compare_vectorized(
lambda s1, s2: np.ones(len(s1), dtype=np.int),
'given_name',
'given_name',
label='my_feature_label')
result = comp.compute(self.index_AB, self.A, self.B)
assert "my_feature_label" in result.columns.tolist()
def test_label_dedup(self):
comp = recordlinkage.Compare()
comp.compare_vectorized(
lambda s1, s2: np.ones(len(s1), dtype=np.int),
'given_name',
'given_name',
label='my_feature_label')
result = comp.compute(self.index_AB, self.A)
assert "my_feature_label" in result.columns.tolist()
def test_multilabel_none_linking(self):
def ones_np_multi(s1, s2):
return np.ones(len(s1)), np.ones((len(s1), 3))
def ones_pd_multi(s1, s2):
return (Series(np.ones(len(s1))), DataFrame(np.ones((len(s1), 3))))
comp = recordlinkage.Compare()
comp.string('given_name', 'given_name', method='jaro')
comp.compare_vectorized(
ones_np_multi,
'given_name',
'given_name')
comp.compare_vectorized(
ones_pd_multi,
'given_name',
'given_name')
result = comp.compute(self.index_AB, self.A, self.B)
assert [0, 1, 2, 3, 4, 5, 6, 7, 8] == \
result.columns.tolist()
def test_multilabel_linking(self):
def ones_np_multi(s1, s2):
return np.ones(len(s1)), np.ones((len(s1), 3))
def ones_pd_multi(s1, s2):
return (Series(np.ones(len(s1))), DataFrame(np.ones((len(s1), 3))))
comp = recordlinkage.Compare()
comp.string('given_name', 'given_name', method='jaro')
comp.compare_vectorized(
ones_np_multi,
'given_name',
'given_name',
label=['a', ['b', 'c', 'd']])
comp.compare_vectorized(
ones_pd_multi,
'given_name',
'given_name',
label=['e', ['f', 'g', 'h']])
result = comp.compute(self.index_AB, self.A, self.B)
assert [0, 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h'] == \
result.columns.tolist()
def test_multilabel_dedup(self):
def ones_np_multi(s1, s2):
return np.ones(len(s1)), np.ones((len(s1), 3))
def ones_pd_multi(s1, s2):
return (Series(np.ones(len(s1))), DataFrame(np.ones((len(s1), 3))))
comp = recordlinkage.Compare()
comp.string('given_name', 'given_name', method='jaro')
comp.compare_vectorized(
ones_np_multi,
'given_name',
'given_name',
label=['a', ['b', 'c', 'd']])
comp.compare_vectorized(
ones_pd_multi,
'given_name',
'given_name',
label=['e', ['f', 'g', 'h']])
result = comp.compute(self.index_AB, self.A)
assert [0, 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h'] == \
result.columns.tolist()
def test_multilabel_none_dedup(self):
def ones_np_multi(s1, s2):
return np.ones(len(s1)), np.ones((len(s1), 3))
def ones_pd_multi(s1, s2):
return (Series(np.ones(len(s1))), DataFrame(np.ones((len(s1), 3))))
comp = recordlinkage.Compare()
comp.string('given_name', 'given_name', method='jaro')
comp.compare_vectorized(
ones_np_multi,
'given_name',
'given_name')
comp.compare_vectorized(
ones_pd_multi,
'given_name',
'given_name')
result = comp.compute(self.index_AB, self.A)
assert [0, 1, 2, 3, 4, 5, 6, 7, 8] == \
result.columns.tolist()
def test_multilabel_error_dedup(self):
def ones(s1, s2):
return np.ones((len(s1), 2))
comp = recordlinkage.Compare()
comp.string('given_name', 'given_name', method='jaro')
comp.compare_vectorized(
ones, 'given_name', 'given_name', label=['a', 'b', 'c'])
with pytest.raises(ValueError):
comp.compute(self.index_AB, self.A)
def test_incorrect_collabels_linking(self):
comp = recordlinkage.Compare()
comp.string('given_name', 'given_name', method='jaro')
comp.compare_vectorized(lambda s1, s2: np.ones(len(s1), dtype=np.int),
"given_name", "not_existing_label")
with pytest.raises(KeyError):
comp.compute(self.index_AB, self.A, self.B)
def test_incorrect_collabels_dedup(self):
comp = recordlinkage.Compare()
comp.compare_vectorized(lambda s1, s2: np.ones(len(s1), dtype=np.int),
"given_name", "not_existing_label")
with pytest.raises(KeyError):
comp.compute(self.index_AB, self.A)
def test_compare_custom_vectorized_linking(self):
A = DataFrame({'col': ['abc', 'abc', 'abc', 'abc', 'abc']})
B = DataFrame({'col': ['abc', 'abd', 'abc', 'abc', '123']})
ix = MultiIndex.from_arrays([A.index.values, B.index.values])
# test without label
comp = recordlinkage.Compare()
comp.compare_vectorized(lambda s1, s2: np.ones(len(s1), dtype=np.int),
'col', 'col')
result = comp.compute(ix, A, B)
expected = DataFrame([1, 1, 1, 1, 1], index=ix)
pdt.assert_frame_equal(result, expected)
# test with label
comp = recordlinkage.Compare()
comp.compare_vectorized(
lambda s1, s2: np.ones(len(s1), dtype=np.int),
'col',
'col',
label='my_feature_label')
result = comp.compute(ix, A, B)
expected = DataFrame(
[1, 1, 1, 1, 1], index=ix, columns=['my_feature_label'])
pdt.assert_frame_equal(result, expected)
# def test_compare_custom_nonvectorized_linking(self):
# A = DataFrame({'col': [1, 2, 3, 4, 5]})
# B = DataFrame({'col': [1, 2, 3, 4, 5]})
# ix = MultiIndex.from_arrays([A.index.values, B.index.values])
# def custom_func(a, b):
# return np.int64(1)
# # test without label
# comp = recordlinkage.Compare()
# comp.compare_single(
# custom_func,
# 'col',
# 'col'
# )
# result = comp.compute(ix, A, B)
# expected = DataFrame([1, 1, 1, 1, 1], index=ix)
# pdt.assert_frame_equal(result, expected)
# # test with label
# comp = recordlinkage.Compare()
# comp.compare_single(
# custom_func,
# 'col',
# 'col',
# label='test'
# )
# result = comp.compute(ix, A, B)
# expected = DataFrame([1, 1, 1, 1, 1], index=ix, columns=['test'])
# pdt.assert_frame_equal(result, expected)
def test_compare_custom_instance_type(self):
A = DataFrame({'col': ['abc', 'abc', 'abc', 'abc', 'abc']})
B = DataFrame({'col': ['abc', 'abd', 'abc', 'abc', '123']})
ix = MultiIndex.from_arrays([A.index.values, B.index.values])
def call(s1, s2):
# this should raise on incorrect types
assert isinstance(s1, np.ndarray)
assert isinstance(s2, np.ndarray)
return np.ones(len(s1), dtype=np.int)
comp = recordlinkage.Compare()
comp.compare_vectorized(lambda s1, s2: np.ones(len(s1), dtype=np.int),
'col', 'col')
result = comp.compute(ix, A, B)
expected = DataFrame([1, 1, 1, 1, 1], index=ix)
pdt.assert_frame_equal(result, expected)
def test_compare_custom_vectorized_arguments_linking(self):
A = DataFrame({'col': ['abc', 'abc', 'abc', 'abc', 'abc']})
B = DataFrame({'col': ['abc', 'abd', 'abc', 'abc', '123']})
ix = MultiIndex.from_arrays([A.index.values, B.index.values])
# test without label
comp = recordlinkage.Compare()
comp.compare_vectorized(
lambda s1, s2, x: np.ones(len(s1), dtype=np.int) * x, 'col', 'col',
5)
result = comp.compute(ix, A, B)
expected = DataFrame([5, 5, 5, 5, 5], index=ix)
pdt.assert_frame_equal(result, expected)
# test with label
comp = recordlinkage.Compare()
comp.compare_vectorized(
lambda s1, s2, x: np.ones(len(s1), dtype=np.int) * x,
'col',
'col',
5,
label='test')
result = comp.compute(ix, A, B)
expected = DataFrame([5, 5, 5, 5, 5], index=ix, columns=['test'])
pdt.assert_frame_equal(result, expected)
# test with kwarg
comp = recordlinkage.Compare()
comp.compare_vectorized(
lambda s1, s2, x: np.ones(len(s1), dtype=np.int) * x,
'col',
'col',
x=5,
label='test')
result = comp.compute(ix, A, B)
expected = DataFrame([5, 5, 5, 5, 5], index=ix, columns=['test'])
pdt.assert_frame_equal(result, expected)
def test_compare_custom_vectorized_dedup(self):
A = DataFrame({'col': ['abc', 'abc', 'abc', 'abc', 'abc']})
ix = MultiIndex.from_arrays([[0, 1, 2, 3, 4], [1, 2, 3, 4, 0]])
# test without label
comp = recordlinkage.Compare()
comp.compare_vectorized(lambda s1, s2: np.ones(len(s1), dtype=np.int),
'col', 'col')
result = comp.compute(ix, A)
expected = DataFrame([1, 1, 1, 1, 1], index=ix)
pdt.assert_frame_equal(result, expected)
# test with label
comp = recordlinkage.Compare()
comp.compare_vectorized(
lambda s1, s2: np.ones(len(s1), dtype=np.int),
'col',
'col',
label='test')
result = comp.compute(ix, A)
expected = DataFrame([1, 1, 1, 1, 1], index=ix, columns=['test'])
pdt.assert_frame_equal(result, expected)
def test_compare_custom_vectorized_arguments_dedup(self):
A = DataFrame({'col': ['abc', 'abc', 'abc', 'abc', 'abc']})
ix = MultiIndex.from_arrays([[0, 1, 2, 3, 4], [1, 2, 3, 4, 0]])
# test without label
comp = recordlinkage.Compare()
comp.compare_vectorized(
lambda s1, s2, x: np.ones(len(s1), dtype=np.int) * x, 'col', 'col',
5)
result = comp.compute(ix, A)
expected = DataFrame([5, 5, 5, 5, 5], index=ix)
pdt.assert_frame_equal(result, expected)
# test with label
comp = recordlinkage.Compare()
comp.compare_vectorized(
lambda s1, s2, x: np.ones(len(s1), dtype=np.int) * x,
'col',
'col',
5,
label='test')
result = comp.compute(ix, A)
expected = DataFrame([5, 5, 5, 5, 5], index=ix, columns=['test'])
pdt.assert_frame_equal(result, expected)
def test_parallel_comparing_api(self):
# use single job
comp = recordlinkage.Compare(n_jobs=1)
comp.exact('given_name', 'given_name', label='my_feature_label')
result_single = comp.compute(self.index_AB, self.A, self.B)
result_single.sort_index(inplace=True)
# use two jobs
comp = recordlinkage.Compare(n_jobs=2)
comp.exact('given_name', 'given_name', label='my_feature_label')
result_2processes = comp.compute(self.index_AB, self.A, self.B)
result_2processes.sort_index(inplace=True)
# compare results
pdt.assert_frame_equal(result_single, result_2processes)
def test_parallel_comparing(self):
# use single job
comp = recordlinkage.Compare(n_jobs=1)
comp.exact('given_name', 'given_name', label='my_feature_label')
result_single = comp.compute(self.index_AB, self.A, self.B)
result_single.sort_index(inplace=True)
# use two jobs
comp = recordlinkage.Compare(n_jobs=2)
comp.exact('given_name', 'given_name', label='my_feature_label')
result_2processes = comp.compute(self.index_AB, self.A, self.B)
result_2processes.sort_index(inplace=True)
# use two jobs
comp = recordlinkage.Compare(n_jobs=4)
comp.exact('given_name', 'given_name', label='my_feature_label')
result_4processes = comp.compute(self.index_AB, self.A, self.B)
result_4processes.sort_index(inplace=True)
# compare results
pdt.assert_frame_equal(result_single, result_2processes)
pdt.assert_frame_equal(result_single, result_4processes)
def test_pickle(self):
# test if it is possible to pickle the Compare class
comp = recordlinkage.Compare()
comp.string('given_name', 'given_name')
comp.numeric('number', 'number')
comp.geo('lat', 'lng', 'lat', 'lng')
comp.date('before', 'after')
# do the test
pickle_path = os.path.join(self.test_dir, 'pickle_compare_obj.pickle')
pickle.dump(comp, open(pickle_path, 'wb'))
def test_manual_parallel_joblib(self):
# test if it is possible to pickle the Compare class
# This is only available for python 3. For python 2, it is not
# possible to pickle instancemethods. A workaround can be found at
# https://stackoverflow.com/a/29873604/8727928
if sys.version.startswith("3"):
# import joblib dependencies
from joblib import Parallel, delayed
# split the data into smaller parts
len_index = int(len(self.index_AB) / 2)
df_chunks = [self.index_AB[0:len_index], self.index_AB[len_index:]]
comp = recordlinkage.Compare()
comp.string('given_name', 'given_name')
comp.string('lastname', 'lastname')
comp.exact('street', 'street')
# do in parallel
Parallel(n_jobs=2)(
delayed(comp.compute)(df_chunks[i], self.A, self.B)
for i in [0, 1])
def test_indexing_types(self):
# test the two types of indexing
# this test needs improvement
A = DataFrame({'col': ['abc', 'abc', 'abc', 'abc', 'abc']})
B = DataFrame({'col': ['abc', 'abc', 'abc', 'abc', 'abc']})
B_reversed = B[::-1].copy()
ix = MultiIndex.from_arrays([np.arange(5), np.arange(5)])
# test with label indexing type
comp_label = recordlinkage.Compare(indexing_type='label')
comp_label.exact('col', 'col')
result_label = comp_label.compute(ix, A, B_reversed)
# test with position indexing type
comp_position = recordlinkage.Compare(indexing_type='position')
comp_position.exact('col', 'col')
result_position = comp_position.compute(ix, A, B_reversed)
assert (result_position.values == 1).all(axis=0)
pdt.assert_frame_equal(result_label, result_position)
def test_pass_list_of_features(self):
from recordlinkage.compare import FrequencyA, VariableA, VariableB
# setup datasets and record pairs
A = DataFrame({'col': ['abc', 'abc', 'abc', 'abc', 'abc']})
B = DataFrame({'col': ['abc', 'abc', 'abc', 'abc', 'abc']})
ix = MultiIndex.from_arrays([np.arange(5), np.arange(5)])
# test with label indexing type
features = [
VariableA('col', label='y1'),
VariableB('col', label='y2'),
FrequencyA('col', label='y3')
]
comp_label = recordlinkage.Compare(features=features)
result_label = comp_label.compute(ix, A, B)
assert list(result_label) == ["y1", "y2", "y3"]
class TestCompareFeatures(TestData):
def test_feature(self):
# test using classes and the base class
A = DataFrame({'col': ['abc', 'abc', 'abc', 'abc', 'abc']})
B = DataFrame({'col': ['abc', 'abd', 'abc', 'abc', '123']})
ix = MultiIndex.from_arrays([A.index.values, B.index.values])
feature = BaseCompareFeature('col', 'col')
feature._f_compare_vectorized = lambda s1, s2: np.ones(len(s1))
feature.compute(ix, A, B)
def test_feature_multicolumn_return(self):
# test using classes and the base class
A = DataFrame({'col': ['abc', 'abc', 'abc', 'abc', 'abc']})
B = DataFrame({'col': ['abc', 'abd', 'abc', 'abc', '123']})
ix = MultiIndex.from_arrays([A.index.values, B.index.values])
def ones(s1, s2):
return DataFrame(np.ones((len(s1), 3)))
feature = BaseCompareFeature('col', 'col')
feature._f_compare_vectorized = ones
result = feature.compute(ix, A, B)
assert result.shape == (5, 3)
def test_feature_multicolumn_input(self):
# test using classes and the base class
A = DataFrame({
'col1': ['abc', 'abc', 'abc', 'abc', 'abc'],
'col2': ['abc', 'abc', 'abc', 'abc', 'abc']
})
B = DataFrame({
'col1': ['abc', 'abd', 'abc', 'abc', '123'],
'col2': ['abc', 'abd', 'abc', 'abc', '123']
})
ix = MultiIndex.from_arrays([A.index.values, B.index.values])
feature = BaseCompareFeature(['col1', 'col2'], ['col1', 'col2'])
feature._f_compare_vectorized = \
lambda s1_1, s1_2, s2_1, s2_2: np.ones(len(s1_1))
feature.compute(ix, A, B)
class TestCompareExact(TestData):
"""Test the exact comparison method."""
def test_exact_str_type(self):
A = DataFrame({'col': ['abc', 'abc', 'abc', 'abc', 'abc']})
B = DataFrame({'col': ['abc', 'abd', 'abc', 'abc', '123']})
ix = MultiIndex.from_arrays([A.index.values, B.index.values])
expected = DataFrame([1, 0, 1, 1, 0], index=ix)
comp = recordlinkage.Compare()
comp.exact('col', 'col')
result = comp.compute(ix, A, B)
pdt.assert_frame_equal(result, expected)
def test_exact_num_type(self):
A = DataFrame({'col': [42, 42, 41, 43, nan]})
B = DataFrame({'col': [42, 42, 42, 42, 42]})
ix = MultiIndex.from_arrays([A.index.values, B.index.values])
expected = DataFrame([1, 1, 0, 0, 0], index=ix)
comp = recordlinkage.Compare()
comp.exact('col', 'col')
result = comp.compute(ix, A, B)
pdt.assert_frame_equal(result, expected)
def test_link_exact_missing(self):
A = DataFrame({'col': [u'a', u'b', u'c', u'd', nan]})
B = DataFrame({'col': [u'a', u'b', u'd', nan, nan]})
ix = MultiIndex.from_arrays([A.index.values, B.index.values])
comp = recordlinkage.Compare()
comp.exact('col', 'col', label='na_')
comp.exact('col', 'col', missing_value=0, label='na_0')
comp.exact('col', 'col', missing_value=9, label='na_9')
comp.exact('col', 'col', missing_value=nan, label='na_na')
comp.exact('col', 'col', missing_value='str', label='na_str')
result = comp.compute(ix, A, B)
# Missing values as default
expected = Series([1, 1, 0, 0, 0], index=ix, name='na_')
pdt.assert_series_equal(result['na_'], expected)
# Missing values as 0
expected = Series([1, 1, 0, 0, 0], index=ix, name='na_0')
pdt.assert_series_equal(result['na_0'], expected)
# Missing values as 9
expected = Series([1, 1, 0, 9, 9], index=ix, name='na_9')
pdt.assert_series_equal(result['na_9'], expected)
# Missing values as nan
expected = Series([1, 1, 0, nan, nan], index=ix, name='na_na')
pdt.assert_series_equal(result['na_na'], expected)
# Missing values as string
expected = Series([1, 1, 0, 'str', 'str'], index=ix, name='na_str')
pdt.assert_series_equal(result['na_str'], expected)
def test_link_exact_disagree(self):
A = DataFrame({'col': [u'a', u'b', u'c', u'd', nan]})
B = DataFrame({'col': [u'a', u'b', u'd', nan, nan]})
ix = MultiIndex.from_arrays([A.index.values, B.index.values])
comp = recordlinkage.Compare()
comp.exact('col', 'col', label='d_')
comp.exact('col', 'col', disagree_value=0, label='d_0')
comp.exact('col', 'col', disagree_value=9, label='d_9')
comp.exact('col', 'col', disagree_value=nan, label='d_na')
comp.exact('col', 'col', disagree_value='str', label='d_str')
result = comp.compute(ix, A, B)
# disagree values as default
expected = Series([1, 1, 0, 0, 0], index=ix, name='d_')
pdt.assert_series_equal(result['d_'], expected)
# disagree values as 0
expected = Series([1, 1, 0, 0, 0], index=ix, name='d_0')
pdt.assert_series_equal(result['d_0'], expected)
# disagree values as 9
expected = Series([1, 1, 9, 0, 0], index=ix, name='d_9')
        pdt.assert_series_equal(result['d_9'], expected)
import numpy as np
import pandas as pd
import networkx as nx
import numpy.linalg as la
class DataSimulation:
def __init__(self,p,n_days,t=None,road_props=None, noise_scale=1,t_switch=0,test_frac=0.8):
self.p=p
self.n_days=n_days
if not t is None:
self.t=t
else :
self.t = np.arange(14.75,20,0.25)
if not road_props is None:
self.road_props=road_props
else :
self.road_props= dict(zip([30, 50, 80, 130],np.random.multinomial(self.p,[0,0.25,0.5,0.25])))
self.noise_scale=noise_scale
self.t_switch=t_switch
self.tau = len(self.t)
self.test_frac = test_frac
def rearange_data(data,p):
data=data.swapaxes(0,2)
data=data.swapaxes(1,2)
data=data.reshape(p,-1)
return data
def generate_date_index(n_days):
start_date = pd.datetime(2020,1,1,15,0,0)
dinx=pd.date_range('2020-1-1 15:00:00+01:00',periods=4*24*n_days,freq='15min')
return dinx[(dinx.time>= pd.datetime(2020,1,1,15,0,0).time()) & (dinx.time< pd.datetime(2020,1,1,20,0,0).time()) ]
def gen_one_instant_speed(max_speed,normal_center=0.9,size=1):
normal_values = np.random.normal(0,(max_speed-normal_center*max_speed)/2,size)
return normal_center*max_speed+normal_values
def run_generation_formula(A,b,w,tau,p,A_R=None,t_switch=None,noise_scale=1):
data=[]
cur_A=A
for i in range(tau-1):
if t_switch is not None and i>t_switch :
cur_A=A_R
x= w-b[i][:]
noise = np.random.normal(0,noise_scale,size=p)
w= b[i+1][:] + cur_A.dot(x) + noise
data.append(w)
return np.array(data)
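    # The recursion above is, in equation form (illustrative note):
    #   w_{t+1} = b_{t+1} + A (w_t - b_t) + eps_t,   eps_t ~ N(0, noise_scale^2 I),
    # with A replaced by A_R for every step after t_switch, so the generated
    # speeds mean-revert towards the intercept profile b while propagating
    # neighbour effects through A.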
def generate_intercept(t,road_props,tau):
b_t= (2.5**2-(t-17.5)**2)
b_t=np.reshape(b_t,(1,-1))
b_t_difference = b_t[0][1:]-b_t[0][0:-1]
b_p = np.concatenate([DataSimulation.gen_one_instant_speed(max_speed,normal_center=0.9,size=prop) for max_speed,prop in road_props.items()])
b_p=np.reshape(b_p,(-1,1))
b_p=b_p.repeat(tau,axis=1)
b=b_p.T-b_t.T
return b
def generate_graph(p):
g = nx.gnm_random_graph(p, 8*p,directed=True)
return g
def generate_A_matrix(g,p):
A=np.random.uniform(-1,1,size=(p,p))*(np.array([[1 if i in g.adj[j] else 0 for i in range(p)] for j in g.adj])+np.diag([1]*p))
A=(A.T/la.norm(A,axis=1)).T
return A
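    # Construction note (illustrative): generate_A_matrix draws A_ij ~ U(-1, 1),
    # masks it with the adjacency of g (plus the diagonal), and then divides each
    # row by its Euclidean norm, so every row of the returned matrix satisfies
    #   A_i <- A_i / ||A_i||_2.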
def generate_data(self):
self.g = DataSimulation.generate_graph(self.p)
self.b = DataSimulation.generate_intercept(self.t,self.road_props,self.tau)
self.A_L = DataSimulation.generate_A_matrix(self.g,self.p)
self.A_R = DataSimulation.generate_A_matrix(self.g,self.p)
full_days_data = []
for i in range(self.n_days):
w0 = np.concatenate([DataSimulation.gen_one_instant_speed(max_speed,normal_center=0.9,size=prop) for max_speed,prop in self.road_props.items()])
data= DataSimulation.run_generation_formula(self.A_L,self.b,w0,self.tau,self.p,A_R = self.A_R,t_switch= self.t_switch+1)
full_days_data.append(data)
self.full_days_data=np.array(full_days_data)
return self.full_days_data
def split_center_data(self):
full_days_data_train=self.full_days_data[:int(self.test_frac*self.n_days)]
full_days_data_test=self.full_days_data[int(self.test_frac*self.n_days):]
full_days_data_train = DataSimulation.rearange_data(full_days_data_train,self.p)
full_days_data_test = DataSimulation.rearange_data(full_days_data_test,self.p)
sim_train_df = pd.DataFrame(data= full_days_data_train,columns=DataSimulation.generate_date_index(self.n_days)[:int(self.test_frac*self.n_days*(self.tau-1))])
sim_test_df = pd.DataFrame(data= full_days_data_test,columns=DataSimulation.generate_date_index(self.n_days)[int(self.test_frac*self.n_days*(self.tau-1)):])
        intercept = pd.concat([sim_train_df.groupby(pd.to_datetime(sim_train_df.columns)
import pickle
from threading import Lock
from django.contrib.gis.db.models import Avg
from pandas import DataFrame
from app.external_sources.cadastres.models import Cadastre
from app.external_sources.csv.services.csv_service import get_load_properties
from app.external_sources.idealista.services.idealista_service import get_last_info
from app.external_sources.ine.services.ine_service import get_demographic_info
from app.external_sources.mi_cole.services.school_service import get_schools_in_circle
from app.external_sources.places.services.places_service import get_places_in_circle
from app.geo.services.geo_service import get_zone_containing_point
from app.main.settings import BASE_DIR
from app.properties.services.property_service import update_estimated_price
__cached_model = None
__lock_model = Lock()
def get_model():
global __cached_model
__lock_model.acquire()
try:
if __cached_model is None:
__cached_model = load_model()
finally:
__lock_model.release()
return __cached_model
def load_model():
"""Read model file
Returns:
Pipeline: model pipeline
"""
with open((BASE_DIR / "app/model_files/model_v1.pkl").resolve(), "rb") as f:
return pickle.load(f)[0]
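# Illustrative usage of the lazily cached model (sketch, no new behaviour assumed):
#   pipeline = get_model()        # first call loads model_v1.pkl under __lock_model
#   pipeline_again = get_model()  # later calls reuse the cached object
#   assert pipeline is pipeline_again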
def createDataFrame(load_id: int):
"""Create dataframe with model variables from a load
Args:
load_id (int): load to filter
Returns:
DataFrame: DataFrame with model variables
"""
dirty_properties = list(get_load_properties(load_id))
rows = list()
for dirty_property in dirty_properties:
row = prepare_data(dirty_property.property.cadastre)
row["property_id"] = dirty_property.property.id
rows.append(row)
    return DataFrame(rows)
#!/usr/bin/env python3.7
# -*- coding: utf-8 -*-
"""
Created on Mon Feb 11 11:14:12 2019
@author: ejreidelbach
:DESCRIPTION:
:REQUIRES:
:TODO:
"""
#==============================================================================
# Package Import
#==============================================================================
import json
import os
import pandas as pd
import pathlib
import tqdm
import urllib.request
#==============================================================================
# Reference Variable Declaration
#==============================================================================
#==============================================================================
# Function Definitions
#==============================================================================
def downloadPictures(dict_url, folder_name):
'''
    Purpose: Download pictures from the URL provided for each image
Inputs
------
dict_url : dictionary of URLs
dictionary in which the name of the image is the key and the
associated value is the URL of the image
folder_name : string
name of the folder in which downloaded images should be stored
Outputs
-------
files are saved to a folder named after the input dictionary
'''
# Check to see if the desired folder exists and create it if it doesn't
pathlib.Path('pictures/',folder_name).mkdir(parents=True, exist_ok=True)
# Download all the images at the associated URLs
for name, url in tqdm.tqdm(dict_url.items()):
if not pd.isna(url):
urllib.request.urlretrieve(url, 'pictures/' + folder_name + '/'
+ name + '.jpg')
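# Illustrative usage (hypothetical names and URLs, not part of the scraped data):
#   downloadPictures({'nebraska': 'https://example.com/nebraska.jpg',
#                     'iowa': 'https://example.com/iowa.jpg'},
#                    'schools')
#   # saves pictures/schools/nebraska.jpg and pictures/schools/iowa.jpg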
#==============================================================================
# Working Code
#==============================================================================
# Set the project working directory
path_dir = pathlib.Path('/home/ejreidelbach/Projects/draft-gem/src/static')
os.chdir(path_dir)
#------------------------------------------------------------------------------
# Download Pictures of Schools
#------------------------------------------------------------------------------
# Ingest the listing of schools and their associated pictures
df_pics_school = pd.read_csv('positionData/school_pictures.csv')
from dateutil import parser
import datetime
import pandas as pd
import json
from matplotlib.dates import DateFormatter
def fridge_plot(directory, variable, start_date, end_date = None):
begin = parser.parse(start_date)
if end_date is None:
end = begin
else:
end = parser.parse(end_date)
delta = end - begin
date_list = []
for i in range(delta.days + 2):
date_list.append((begin + datetime.timedelta(days = i)).date().strftime('%Y%m%d'))
date_set = set(date_list)
data_dict = []
for date in date_set:
try:
with open(directory + 'triton_' + date, 'r') as f:
for line in f:
data_dict.append(json.loads(line[:-2]))
except IOError:
pass
df = pd.DataFrame.from_dict(data_dict)
if len(data_dict) == 0:
print("Error: No data")
return df
    df['time'] = pd.to_datetime(df['time'])
from pandas import read_csv, merge
import csv
import sys
from pandas.core.frame import DataFrame
def average_submissions():
with open('submission_avg_13Aug.csv', 'wb') as f:
writer = csv.writer(f)
writer.writerow(['clip', 'seizure', 'early'])
    df1 = read_csv('submission_late_loader_newa.csv')
from datetime import datetime
import os
import re
import numpy as np
import pandas as pd
from fetcher.extras.common import atoi, MaRawData, zipContextManager
from fetcher.utils import Fields, extract_arcgis_attributes, extract_attributes
NULL_DATE = datetime(2020, 1, 1)
DATE = Fields.DATE.name
TS = Fields.TIMESTAMP.name
DATE_USED = Fields.DATE_USED.name
def add_query_constants(df, query):
for k, v in query.constants.items():
df[k] = v
return df
def build_leveled_mapping(mapping):
tab_mapping = {x.split(":")[0]: {} for x in mapping.keys() if x.find(':') > 0}
for k, v in mapping.items():
if k.find(':') < 0:
continue
tab, field = k.split(":")
tab_mapping[tab][field] = v
return tab_mapping
def prep_df(values, mapping):
df = pd.DataFrame(values).rename(columns=mapping).set_index(DATE)
for c in df.columns:
if c.find('status') >= 0:
continue
# convert to numeric
df[c] = pd.to_numeric(df[c])
df.index = pd.to_datetime(df.index, errors='coerce')
return df
def make_cumsum_df(data, timestamp_field=Fields.TIMESTAMP.name):
df = pd.DataFrame(data)
df.set_index(timestamp_field, inplace=True)
df.sort_index(inplace=True)
df = df.select_dtypes(exclude=['string', 'object'])
# .groupby(level=0).last() # can do it here, but not mandatory
cumsum_df = df.cumsum()
cumsum_df[Fields.TIMESTAMP.name] = cumsum_df.index
return cumsum_df
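# Illustrative behaviour of make_cumsum_df (hypothetical records, not real data):
#   records = [{'TIMESTAMP': '2020-03-01', 'POSITIVE': 2},
#              {'TIMESTAMP': '2020-03-02', 'POSITIVE': 3}]
#   make_cumsum_df(records) yields POSITIVE = [2, 5] indexed by timestamp,
#   with the timestamp copied back into the TIMESTAMP column.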
def handle_ak(res, mapping, queries):
tests = res[0]
collected = [x['attributes'] for x in tests['features']]
df = pd.DataFrame(collected)
df = df.pivot(columns='Test_Result', index='Date_Collected')
df.columns = df.columns.droplevel()
df['tests_total'] = df.sum(axis=1)
df = df.rename(columns=mapping).cumsum()
df[TS] = df.index
add_query_constants(df, queries[0])
tagged = df.to_dict(orient='records')
# cases
cases = pd.DataFrame([x['attributes'] for x in res[1]['features']]).rename(columns=mapping)
cases[TS] = pd.to_datetime(cases[TS], unit='ms')
cases = cases.set_index(TS).sort_index().cumsum().resample('1d').ffill()
cases[TS] = cases.index
add_query_constants(cases, queries[1])
tagged.extend(cases.to_dict(orient='records'))
# last item: already cumulative
data = extract_arcgis_attributes(res[2], mapping)
for x in data:
x[DATE_USED] = queries[2].constants[DATE_USED]
tagged.extend(data)
return tagged
def handle_ar(res, mapping):
# simply a cumsum table
data = extract_arcgis_attributes(res[0], mapping)
cumsum_df = make_cumsum_df(data)
return cumsum_df.to_dict(orient='records')
def handle_az(res, mapping, queries):
mapped = []
for i, df in enumerate(res[0]):
# minor cheating for same column names
df.columns = ["{}-{}".format(c, i) for c in df.columns]
df = df.rename(columns=mapping)
df[DATE] = pd.to_datetime(df[DATE])
df = df.set_index(DATE).sort_index().cumsum()
df[TS] = df.index
add_query_constants(df, queries[i])
mapped.extend(df.to_dict(orient='records'))
return mapped
def handle_ca(res, mapping, queries):
# need to cumsum
mapped = []
for query, result in zip(queries, res):
# extract also maps
items = extract_attributes(result, query.data_path, mapping, 'CA')
df = prep_df(items, mapping).sort_index(na_position='first').drop(columns=TS).cumsum()
df = df.loc[df.index.notna()]
add_query_constants(df, query)
df[TS] = df.index
mapped.extend(df.to_dict(orient='records'))
return mapped
def handle_ct(res, mapping, queries):
tests = res[0]
df = pd.DataFrame(tests).rename(columns=mapping).set_index(DATE)
for c in df.columns:
# convert to numeric
df[c] = pd.to_numeric(df[c])
df.index = df.index.fillna(NULL_DATE.strftime(mapping.get('__strptime')))
df = df.sort_index().cumsum()
df[TS] = pd.to_datetime(df.index)
df[TS] = df[TS].values.astype(np.int64) // 10 ** 9
add_query_constants(df, queries[0])
tagged = df.to_dict(orient='records')
# by report
df = res[1].rename(columns=mapping).sort_values('DATE')
add_query_constants(df, queries[1])
df[TS] = df['DATE']
tagged.extend(df.to_dict(orient='records'))
# death + cases
for i, df in enumerate(res[2:]):
df = res[2+i].rename(columns=mapping).set_index('DATE').sort_index().cumsum()
add_query_constants(df, queries[2+i])
df[TS] = df.index
tagged.extend(df.to_dict(orient='records'))
return tagged
def handle_dc(res, mapping, queries):
df = res[0]
# make it pretty
df = df[df['Unnamed: 0'] == 'Testing'].T
df.columns = df.loc['Unnamed: 1']
df = df.iloc[2:]
df.index = pd.to_datetime(df.index, errors='coerce')
df = df.loc[df.index.dropna()].rename(columns=mapping)
add_query_constants(df, queries[0])
df[TS] = df.index
return df.to_dict(orient='records')
def handle_de(res, mapping):
df = res[0]
df['Date'] = pd.to_datetime(df[['Year', 'Month', 'Day']])
df = df[df['Statistic'].isin(mapping.keys())]
# changing the order of operations here is probably better
def prepare_values(df):
df = df.pivot(
index=['Date', 'Date used'], values='Value', columns=['Statistic'])
df[DATE_USED] = df.index.get_level_values(1)
df = df.droplevel(1)
df['Date'] = df.index
df = df.replace(mapping).rename(columns=mapping)
return df.to_dict(orient='records')
# Death
deaths_df = df[(df['Statistic'].str.find('Death') >= 0) & (df['Unit'] == 'people')]
tagged = prepare_values(deaths_df)
# testing
tests_df = df[df['Statistic'].str.find('Test') >= 0]
for x in ['people', 'tests']:
partial = prepare_values(tests_df[tests_df['Unit'] == x])
tagged.extend(partial)
# cases
cases = df[df['Unit'] == 'people'][df['Statistic'].str.find('Cases') >= 0]
partial = prepare_values(cases)
tagged.extend(partial)
return tagged
def handle_fl(res, mapping, queries):
# simply a cumsum table
tagged = []
for i, data in enumerate(res[:-1]):
df = extract_arcgis_attributes(res[i], mapping)
cumsum_df = make_cumsum_df(df)
add_query_constants(cumsum_df, queries[i])
tagged.extend(cumsum_df.to_dict(orient='records'))
# The last item is the aggregated case-line data
df = pd.DataFrame([x['attributes'] for x in res[-1]['features']])
df = df.rename(
columns={**{'EXPR_1': 'Year', 'EXPR_2': 'Month', 'EXPR_3': 'Day'}, **mapping})
df[DATE] = pd.to_datetime(df[['Year', 'Month', 'Day']])
df = df.set_index(DATE).sort_index().cumsum()
add_query_constants(df, queries[-1])
df[TS] = df.index
tagged.extend(df.to_dict(orient='records'))
return tagged
def handle_ga(res, mapping):
tagged = []
file_mapping = build_leveled_mapping(mapping)
with zipContextManager(res[0]) as zipdir:
for filename in file_mapping.keys():
date_fields = [k for k, v in file_mapping[filename].items() if v == 'TIMESTAMP']
df = pd.read_csv(os.path.join(zipdir, filename), parse_dates=date_fields)
df = df[df['county'] == 'Georgia']
by_date = file_mapping[filename].pop(DATE_USED)
df = df.rename(columns=file_mapping[filename])
df[DATE_USED] = by_date
tagged.extend(df.to_dict(orient='records'))
return tagged
def handle_il(res, mapping, queries):
df = res[0].rename(columns=mapping)
df[TS] = df[DATE]
add_query_constants(df, queries[0])
mapped = df.to_dict(orient='records')
# testing
df = pd.DataFrame(res[1].get('test_group_counts')).rename(columns=mapping)
df = df[df['regionID'] == 0]
df[DATE] = pd.to_datetime(df[DATE])
df = df.set_index(DATE).sort_index().cumsum()
df[TS] = df.index
mapped.extend(df.to_dict(orient='records'))
return mapped
def handle_in(res, mapping):
tagged = []
df = prep_df(res[0]['result']['records'], mapping).sort_index().cumsum()
# need to assign dating correctly
assignments = [
('SPECIMENS', 'Specimen Collection'),
('POSITIVE_BY_SPECIMEN', 'Specimen Collection'),
(['POSITIVE', 'TOTAL'], 'Report'),
('DEATH', 'Death'),
]
for key, by_date in assignments:
if isinstance(key, list):
subset = df.filter(key)
else:
subset = df.filter(like=key)
if subset.columns[0] == 'POSITIVE_BY_SPECIMEN':
subset.columns = ['POSITIVE']
subset[DATE_USED] = by_date
subset[TS] = subset.index
tagged.extend(subset.to_dict(orient='records'))
return tagged
def handle_ks(res, mapping, queries):
testing = res[0][0].filter(like='alias')
testing.columns = [c.replace('-alias', '') for c in testing.columns]
testing = testing.rename(columns=mapping).groupby(DATE).last()
testing.index = pd.to_datetime(testing.index)
testing[TS] = testing.index
add_query_constants(testing, queries[0])
return testing.to_dict(orient='records')
def handle_la(res, mapping):
df = res[0].rename(columns=mapping).groupby(DATE).sum()
df = df.sort_index().cumsum()
df[TS] = df.index
df[DATE_USED] = 'Specimen Collection'
return df.to_dict(orient='records')
def handle_ma(res, mapping):
'''Returning a list of dictionaries (records)
'''
tagged = []
# break the mapping to {file -> {mapping}}
# not the most efficient, but the data is tiny
tab_mapping = build_leveled_mapping(mapping)
tabs = MaRawData(res[0])
for tabname in tab_mapping.keys():
df = tabs[tabname].rename(columns=tab_mapping[tabname])
        df[DATE] = pd.to_datetime(df[DATE])
import sys
import pandas as pd
import numpy as np
from sqlalchemy import create_engine
def load_data(messages_filepath, categories_filepath):
"""
    Loads data from the messages and categories CSV files and merges them
Args:
messages_filepath: file path to the messages csv file
categories_filepath: file path to the categories csv file
Returns:
df: merged dataframe of the messages.csv and categories.csv files
"""
    messages = pd.read_csv(messages_filepath)
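    # Sketch of the remaining steps (assumed; the original function is truncated here,
    # and the 'id' join key is an assumption, not confirmed by the source):
    #   categories = pd.read_csv(categories_filepath)
    #   df = messages.merge(categories, on='id')
    #   return df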
# RHR Online Anomaly Detection & Alert Monitoring
######################################################
# Author: <NAME> #
# Email: <EMAIL> #
# Location: Dept.of Genetics, Stanford University #
# Date: Oct 29 2020 #
######################################################
# uses raw heart rate and steps data (this steps data doesn't have zeroes, so zero-step minutes need to be inferred from the hr datetime stamps)
## simple command
# python rhrad_online_alerts.py --heart_rate hr.csv --steps steps.csv
## full command
# python rhrad_online_alerts.py --heart_rate pbb_fitbit_oldProtocol_hr.csv --steps pbb_fitbit_oldProtocol_steps.csv --myphd_id pbb_RHR_online --figure1 pbb_RHR_online_anomalies.pdf --anomalies pbb_RHR_online_anomalies.csv --symptom_date 2020-01-10 --diagnosis_date 2020-01-11 --outliers_fraction 0.1 --random_seed 10 --baseline_window 744 --sliding_window 1 --alerts pbb_RHR_online_alerts.csv --figure2 pbb_RHR_online_alerts.pdf
# python rhrad_online_alerts.py --heart_rate pbb_fitbit_oldProtocol_hr.csv \
# --steps pbb_fitbit_oldProtocol_steps.csv \
# --myphd_id pbb_RHR_online \
# --figure1 pbb_RHR_online_anomalies.pdf \
# --anomalies pbb_RHR_online_anomalies.csv \
# --symptom_date 2020-01-10 --diagnosis_date 2020-01-11 \
# --outliers_fraction 0.1 \
# --random_seed 10 \
# --baseline_window 744 --sliding_window 1
# --alerts pbb_RHR_online_alerts.csv \
# --figure2 pbb_RHR_online_alerts.pdf
import warnings
warnings.filterwarnings('ignore')
import sys
import argparse
import pandas as pd
import numpy as np
import matplotlib
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
#%matplotlib inline
import seaborn as sns
from statsmodels.tsa.seasonal import seasonal_decompose
from sklearn.preprocessing import StandardScaler
from sklearn.covariance import EllipticEnvelope
####################################
parser = argparse.ArgumentParser(description='Find anomalies in wearables time-series data.')
parser.add_argument('--heart_rate', metavar='', help ='raw heart rate count with a header = heartrate')
parser.add_argument('--steps',metavar='', help ='raw steps count with a header = steps')
parser.add_argument('--myphd_id',metavar='', default = 'myphd_id', help ='user myphd_id')
parser.add_argument('--anomalies', metavar='', default = 'myphd_id_anomalies.csv', help='save predicted anomalies as a CSV file')
parser.add_argument('--figure1', metavar='', default = 'myphd_id_anomalies.pdf', help='save predicted anomalies as a PDF file')
parser.add_argument('--symptom_date', metavar='', default = 'NaN', help = 'symptom date with y-m-d format')
parser.add_argument('--diagnosis_date', metavar='', default = 'NaN', help='diagnosis date with y-m-d format')
parser.add_argument('--outliers_fraction', metavar='', type=float, default=0.1, help='fraction of outliers or anomalies')
parser.add_argument('--random_seed', metavar='', type=int, default=10, help='random seed')
parser.add_argument('--baseline_window', metavar='',type=int, default=744, help='baseline window is used for training (in hours)')
parser.add_argument('--sliding_window', metavar='',type=int, default=1, help='sliding window is used to slide the testing process each hour')
parser.add_argument('--alerts', metavar='', default = 'myphd_id_alerts.csv', help='save predicted anomalies as a CSV file')
parser.add_argument('--figure2', metavar='', default = 'myphd_id_alerts.pdf', help='save predicted anomalies as a PDF file')
args = parser.parse_args()
# as arguments
fitbit_oldProtocol_hr = args.heart_rate
fitbit_oldProtocol_steps = args.steps
myphd_id = args.myphd_id
myphd_id_anomalies = args.anomalies
myphd_id_figure1 = args.figure1
symptom_date = args.symptom_date
diagnosis_date = args.diagnosis_date
RANDOM_SEED = args.random_seed
outliers_fraction = args.outliers_fraction
baseline_window = args.baseline_window
sliding_window = args.sliding_window
myphd_id_alerts = args.alerts
myphd_id_figure2 = args.figure2
####################################
class RHRAD_online:
# Infer resting heart rate ------------------------------------------------------
def resting_heart_rate(self, heartrate, steps):
"""
This function uses heart rate and steps data to infer resting heart rate.
        It keeps heart-rate samples whose step count is zero over the preceding 12 minutes (including the current minute).
"""
# heart rate data
df_hr = pd.read_csv(fitbit_oldProtocol_hr)
df_hr = df_hr.set_index('datetime')
df_hr.index.name = None
df_hr.index = pd.to_datetime(df_hr.index)
# steps data
df_steps = pd.read_csv(fitbit_oldProtocol_steps)
df_steps = df_steps.set_index('datetime')
df_steps.index.name = None
df_steps.index = pd.to_datetime(df_steps.index)
# merge dataframes
#df_hr = df_hr.resample('1min').mean()
#df_steps = df_steps.resample('1min').mean()
# added "outer" paramter for merge function to adjust the script to the new steps format
#df1 = pd.merge(df_hr, df_steps, left_index=True, right_index=True)
df1 = pd.merge(df_hr, df_steps, left_index=True, right_index=True, how="outer")
df1 = df1[pd.isnull(df1).any(axis=1)].fillna(0)
df1 = df1.rename(columns={"value_x": "heartrate", "value_y": "steps"})
df1 = df1.resample('1min').mean()
print(myphd_id)
print("Data size (in miutes) before removing missing data")
print(df1.shape)
ax = df1.plot(figsize=(20,4), title=myphd_id)
ax.figure.savefig(myphd_id+'_data.png')
#print(df1)
df1 = df1.dropna(how='any')
df1 = df1.loc[df1['heartrate']!=0]
print("Data size (in miutes) after removing missing data")
print(df1.shape)
#print(df1)
# define RHR as the HR measurements recorded when there were less than two steps taken during a rolling time window of the preceding 12 minutes (including the current minute)
df1['steps'] = df1['steps'].apply(np.int64)
df1['steps_window_12'] = df1['steps'].rolling(12).sum()
df1 = df1.loc[(df1['steps_window_12'] == 0 )]
print(df1['heartrate'].describe())
print(df1['steps_window_12'].describe())
# impute missing data
#df1 = df1.resample('1min').mean()
#df1 = df1.ffill()
print("No.of timesteps for RHR (in minutes)")
print(df1.shape)
return df1
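    # Worked illustration of the RHR rule above (values are illustrative):
    # a heart-rate sample at minute t is kept as "resting" only when
    # df1['steps'].rolling(12).sum() == 0, i.e. no steps were recorded in the
    # 12-minute window ending at (and including) minute t.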
# Pre-processing ------------------------------------------------------
def pre_processing(self, resting_heart_rate):
"""
        This function takes resting heart rate data, applies a moving average to smooth it,
        and downsamples to one-hour resolution by taking the average values.
"""
# smooth data
df_nonas = df1.dropna()
df1_rom = df_nonas.rolling(400).mean()
# resample
df1_resmp = df1_rom.resample('1H').mean()
df2 = df1_resmp.drop(['steps'], axis=1)
df2 = df2.dropna()
print("No.of timesteps for RHR (in hours)")
print(df2.shape)
return df2
# Seasonality correction ------------------------------------------------------
def seasonality_correction(self, resting_heart_rate, steps):
"""
        This function takes the output of pre-processing and applies seasonality correction
"""
sdHR_decomposition = seasonal_decompose(sdHR, model='additive', freq=1)
sdSteps_decomposition = seasonal_decompose(sdSteps, model='additive', freq=1)
sdHR_decomp = pd.DataFrame(sdHR_decomposition.resid + sdHR_decomposition.trend)
sdHR_decomp.rename(columns={sdHR_decomp.columns[0]:'heartrate'}, inplace=True)
sdSteps_decomp = pd.DataFrame(sdSteps_decomposition.resid + sdSteps_decomposition.trend)
sdSteps_decomp.rename(columns={sdSteps_decomp.columns[0]:'steps_window_12'}, inplace=True)
frames = [sdHR_decomp, sdSteps_decomp]
data = pd.concat(frames, axis=1)
#print(data)
#print(data.shape)
return data
# Train model and predict anomalies ------------------------------------------------------
def online_anomaly_detection(self, data_seasnCorec, baseline_window, sliding_window):
"""
# split the data, standardize the data inside a sliding window
# parameters - 1 month baseline window and 1 hour sliding window
# fit the model and predict the test set
"""
for i in range(baseline_window, len(data_seasnCorec)):
data_train_w = data_seasnCorec[i-baseline_window:i]
# train data normalization ------------------------------------------------------
data_train_w += 0.1
standardizer = StandardScaler().fit(data_train_w.values)
data_train_scaled = standardizer.transform(data_train_w.values)
data_train_scaled_features = pd.DataFrame(data_train_scaled, index=data_train_w.index, columns=data_train_w.columns)
            data = pd.DataFrame(data_train_scaled_features)
from __future__ import unicode_literals, division, print_function
import json
import os
import pandas as pd
import unittest
from matminer.featurizers.dos import DOSFeaturizer, DopingFermi, \
Hybridization, SiteDOS, DosAsymmetry
from pymatgen.electronic_structure.dos import CompleteDos
from pymatgen.util.testing import PymatgenTest
test_dir = os.path.join(os.path.dirname(__file__))
class DOSFeaturesTest(PymatgenTest):
def setUp(self):
with open(os.path.join(test_dir, 'si_dos.json'), 'r') as sDOS:
si_dos = CompleteDos.from_dict(json.load(sDOS))
self.df = pd.DataFrame({'dos': [si_dos], 'site': [0]})
with open(os.path.join(test_dir, 'nb3sn_dos.json'), 'r') as sDOS:
nb3sn_dos = CompleteDos.from_dict(json.load(sDOS))
        self.nb3sn_df = pd.DataFrame({'dos': [nb3sn_dos]})
import nose
import os
import string
from distutils.version import LooseVersion
from datetime import datetime, date, timedelta
from pandas import Series, DataFrame, MultiIndex, PeriodIndex, date_range
from pandas.compat import range, lrange, StringIO, lmap, lzip, u, zip
import pandas.util.testing as tm
from pandas.util.testing import ensure_clean
from pandas.core.config import set_option
import numpy as np
from numpy import random
from numpy.random import randn
from numpy.testing import assert_array_equal
from numpy.testing.decorators import slow
import pandas.tools.plotting as plotting
def _skip_if_no_scipy():
try:
import scipy
except ImportError:
raise nose.SkipTest("no scipy")
@tm.mplskip
class TestSeriesPlots(tm.TestCase):
def setUp(self):
import matplotlib as mpl
self.mpl_le_1_2_1 = str(mpl.__version__) <= LooseVersion('1.2.1')
self.ts = tm.makeTimeSeries()
self.ts.name = 'ts'
self.series = tm.makeStringSeries()
self.series.name = 'series'
self.iseries = tm.makePeriodSeries()
self.iseries.name = 'iseries'
def tearDown(self):
tm.close()
@slow
def test_plot(self):
_check_plot_works(self.ts.plot, label='foo')
_check_plot_works(self.ts.plot, use_index=False)
_check_plot_works(self.ts.plot, rot=0)
_check_plot_works(self.ts.plot, style='.', logy=True)
_check_plot_works(self.ts.plot, style='.', logx=True)
_check_plot_works(self.ts.plot, style='.', loglog=True)
_check_plot_works(self.ts[:10].plot, kind='bar')
_check_plot_works(self.iseries.plot)
_check_plot_works(self.series[:5].plot, kind='bar')
_check_plot_works(self.series[:5].plot, kind='line')
_check_plot_works(self.series[:5].plot, kind='barh')
_check_plot_works(self.series[:10].plot, kind='barh')
_check_plot_works(Series(randn(10)).plot, kind='bar', color='black')
@slow
def test_plot_figsize_and_title(self):
# figsize and title
import matplotlib.pyplot as plt
ax = self.series.plot(title='Test', figsize=(16, 8))
self.assertEqual(ax.title.get_text(), 'Test')
assert_array_equal(np.round(ax.figure.get_size_inches()),
np.array((16., 8.)))
@slow
def test_bar_colors(self):
import matplotlib.pyplot as plt
import matplotlib.colors as colors
default_colors = plt.rcParams.get('axes.color_cycle')
custom_colors = 'rgcby'
df = DataFrame(randn(5, 5))
ax = df.plot(kind='bar')
rects = ax.patches
conv = colors.colorConverter
for i, rect in enumerate(rects[::5]):
xp = conv.to_rgba(default_colors[i % len(default_colors)])
rs = rect.get_facecolor()
self.assertEqual(xp, rs)
tm.close()
ax = df.plot(kind='bar', color=custom_colors)
rects = ax.patches
conv = colors.colorConverter
for i, rect in enumerate(rects[::5]):
xp = conv.to_rgba(custom_colors[i])
rs = rect.get_facecolor()
self.assertEqual(xp, rs)
tm.close()
from matplotlib import cm
# Test str -> colormap functionality
ax = df.plot(kind='bar', colormap='jet')
rects = ax.patches
rgba_colors = lmap(cm.jet, np.linspace(0, 1, 5))
for i, rect in enumerate(rects[::5]):
xp = rgba_colors[i]
rs = rect.get_facecolor()
self.assertEqual(xp, rs)
tm.close()
# Test colormap functionality
ax = df.plot(kind='bar', colormap=cm.jet)
rects = ax.patches
rgba_colors = lmap(cm.jet, np.linspace(0, 1, 5))
for i, rect in enumerate(rects[::5]):
xp = rgba_colors[i]
rs = rect.get_facecolor()
self.assertEqual(xp, rs)
tm.close()
df.ix[:, [0]].plot(kind='bar', color='DodgerBlue')
@slow
def test_bar_linewidth(self):
df = DataFrame(randn(5, 5))
# regular
ax = df.plot(kind='bar', linewidth=2)
for r in ax.patches:
self.assertEqual(r.get_linewidth(), 2)
# stacked
ax = df.plot(kind='bar', stacked=True, linewidth=2)
for r in ax.patches:
self.assertEqual(r.get_linewidth(), 2)
# subplots
axes = df.plot(kind='bar', linewidth=2, subplots=True)
for ax in axes:
for r in ax.patches:
self.assertEqual(r.get_linewidth(), 2)
@slow
def test_bar_log(self):
expected = np.array([1., 10., 100., 1000.])
if not self.mpl_le_1_2_1:
expected = np.hstack((.1, expected, 1e4))
ax = Series([200, 500]).plot(log=True, kind='bar')
assert_array_equal(ax.yaxis.get_ticklocs(), expected)
def test_rotation(self):
df = DataFrame(randn(5, 5))
ax = df.plot(rot=30)
for l in ax.get_xticklabels():
self.assertEqual(l.get_rotation(), 30)
def test_irregular_datetime(self):
rng = date_range('1/1/2000', '3/1/2000')
rng = rng[[0, 1, 2, 3, 5, 9, 10, 11, 12]]
ser = Series(randn(len(rng)), rng)
ax = ser.plot()
xp = datetime(1999, 1, 1).toordinal()
ax.set_xlim('1/1/1999', '1/1/2001')
self.assertEqual(xp, ax.get_xlim()[0])
@slow
def test_hist(self):
_check_plot_works(self.ts.hist)
_check_plot_works(self.ts.hist, grid=False)
_check_plot_works(self.ts.hist, figsize=(8, 10))
_check_plot_works(self.ts.hist, by=self.ts.index.month)
import matplotlib.pyplot as plt
fig, ax = plt.subplots(1, 1)
_check_plot_works(self.ts.hist, ax=ax)
_check_plot_works(self.ts.hist, ax=ax, figure=fig)
_check_plot_works(self.ts.hist, figure=fig)
tm.close()
fig, (ax1, ax2) = plt.subplots(1, 2)
_check_plot_works(self.ts.hist, figure=fig, ax=ax1)
_check_plot_works(self.ts.hist, figure=fig, ax=ax2)
with tm.assertRaises(ValueError):
self.ts.hist(by=self.ts.index, figure=fig)
@slow
def test_hist_layout(self):
n = 10
gender = tm.choice(['Male', 'Female'], size=n)
df = DataFrame({'gender': gender,
'height': random.normal(66, 4, size=n), 'weight':
random.normal(161, 32, size=n)})
with tm.assertRaises(ValueError):
df.height.hist(layout=(1, 1))
with tm.assertRaises(ValueError):
df.height.hist(layout=[1, 1])
@slow
def test_hist_layout_with_by(self):
import matplotlib.pyplot as plt
n = 10
gender = tm.choice(['Male', 'Female'], size=n)
df = DataFrame({'gender': gender,
'height': random.normal(66, 4, size=n), 'weight':
random.normal(161, 32, size=n),
'category': random.randint(4, size=n)})
_check_plot_works(df.height.hist, by=df.gender, layout=(2, 1))
tm.close()
_check_plot_works(df.height.hist, by=df.gender, layout=(1, 2))
tm.close()
_check_plot_works(df.weight.hist, by=df.category, layout=(1, 4))
tm.close()
_check_plot_works(df.weight.hist, by=df.category, layout=(4, 1))
tm.close()
@slow
def test_hist_no_overlap(self):
from matplotlib.pyplot import subplot, gcf, close
x = Series(randn(2))
y = Series(randn(2))
subplot(121)
x.hist()
subplot(122)
y.hist()
fig = gcf()
axes = fig.get_axes()
self.assertEqual(len(axes), 2)
@slow
def test_plot_fails_with_dupe_color_and_style(self):
x = Series(randn(2))
with tm.assertRaises(ValueError):
x.plot(style='k--', color='k')
@slow
def test_hist_by_no_extra_plots(self):
import matplotlib.pyplot as plt
n = 10
df = DataFrame({'gender': tm.choice(['Male', 'Female'], size=n),
'height': random.normal(66, 4, size=n)})
axes = df.height.hist(by=df.gender)
self.assertEqual(len(plt.get_fignums()), 1)
def test_plot_fails_when_ax_differs_from_figure(self):
from pylab import figure, close
fig1 = figure()
fig2 = figure()
ax1 = fig1.add_subplot(111)
with tm.assertRaises(AssertionError):
self.ts.hist(ax=ax1, figure=fig2)
@slow
def test_kde(self):
_skip_if_no_scipy()
_check_plot_works(self.ts.plot, kind='kde')
_check_plot_works(self.ts.plot, kind='density')
ax = self.ts.plot(kind='kde', logy=True)
self.assertEqual(ax.get_yscale(), 'log')
@slow
def test_kde_kwargs(self):
_skip_if_no_scipy()
from numpy import linspace
_check_plot_works(self.ts.plot, kind='kde', bw_method=.5, ind=linspace(-100,100,20))
_check_plot_works(self.ts.plot, kind='density', bw_method=.5, ind=linspace(-100,100,20))
ax = self.ts.plot(kind='kde', logy=True, bw_method=.5, ind=linspace(-100,100,20))
self.assertEqual(ax.get_yscale(), 'log')
@slow
def test_kde_color(self):
_skip_if_no_scipy()
ax = self.ts.plot(kind='kde', logy=True, color='r')
lines = ax.get_lines()
self.assertEqual(len(lines), 1)
self.assertEqual(lines[0].get_color(), 'r')
@slow
def test_autocorrelation_plot(self):
from pandas.tools.plotting import autocorrelation_plot
_check_plot_works(autocorrelation_plot, self.ts)
_check_plot_works(autocorrelation_plot, self.ts.values)
@slow
def test_lag_plot(self):
from pandas.tools.plotting import lag_plot
_check_plot_works(lag_plot, self.ts)
_check_plot_works(lag_plot, self.ts, lag=5)
@slow
def test_bootstrap_plot(self):
from pandas.tools.plotting import bootstrap_plot
_check_plot_works(bootstrap_plot, self.ts, size=10)
def test_invalid_plot_data(self):
s = Series(list('abcd'))
kinds = 'line', 'bar', 'barh', 'kde', 'density'
for kind in kinds:
with tm.assertRaises(TypeError):
s.plot(kind=kind)
@slow
def test_valid_object_plot(self):
s = Series(lrange(10), dtype=object)
kinds = 'line', 'bar', 'barh', 'kde', 'density'
for kind in kinds:
_check_plot_works(s.plot, kind=kind)
def test_partially_invalid_plot_data(self):
s = Series(['a', 'b', 1.0, 2])
kinds = 'line', 'bar', 'barh', 'kde', 'density'
for kind in kinds:
with tm.assertRaises(TypeError):
s.plot(kind=kind)
def test_invalid_kind(self):
s = Series([1, 2])
with tm.assertRaises(ValueError):
s.plot(kind='aasdf')
@slow
def test_dup_datetime_index_plot(self):
dr1 = date_range('1/1/2009', periods=4)
dr2 = date_range('1/2/2009', periods=4)
index = dr1.append(dr2)
values = randn(index.size)
s = Series(values, index=index)
_check_plot_works(s.plot)
@tm.mplskip
class TestDataFramePlots(tm.TestCase):
def setUp(self):
import matplotlib as mpl
self.mpl_le_1_2_1 = str(mpl.__version__) <= LooseVersion('1.2.1')
def tearDown(self):
tm.close()
@slow
def test_plot(self):
df = tm.makeTimeDataFrame()
_check_plot_works(df.plot, grid=False)
_check_plot_works(df.plot, subplots=True)
_check_plot_works(df.plot, subplots=True, use_index=False)
df = DataFrame({'x': [1, 2], 'y': [3, 4]})
self._check_plot_fails(df.plot, kind='line', blarg=True)
df = DataFrame(np.random.rand(10, 3),
index=list(string.ascii_letters[:10]))
_check_plot_works(df.plot, use_index=True)
_check_plot_works(df.plot, sort_columns=False)
_check_plot_works(df.plot, yticks=[1, 5, 10])
_check_plot_works(df.plot, xticks=[1, 5, 10])
_check_plot_works(df.plot, ylim=(-100, 100), xlim=(-100, 100))
_check_plot_works(df.plot, subplots=True, title='blah')
_check_plot_works(df.plot, title='blah')
tuples = lzip(string.ascii_letters[:10], range(10))
df = DataFrame(np.random.rand(10, 3),
index=MultiIndex.from_tuples(tuples))
_check_plot_works(df.plot, use_index=True)
# unicode
index = MultiIndex.from_tuples([(u('\u03b1'), 0),
(u('\u03b1'), 1),
(u('\u03b2'), 2),
(u('\u03b2'), 3),
(u('\u03b3'), 4),
(u('\u03b3'), 5),
(u('\u03b4'), 6),
(u('\u03b4'), 7)], names=['i0', 'i1'])
columns = MultiIndex.from_tuples([('bar', u('\u0394')),
('bar', u('\u0395'))], names=['c0',
'c1'])
df = DataFrame(np.random.randint(0, 10, (8, 2)),
columns=columns,
index=index)
_check_plot_works(df.plot, title=u('\u03A3'))
def test_nonnumeric_exclude(self):
import matplotlib.pyplot as plt
df = DataFrame({'A': ["x", "y", "z"], 'B': [1, 2, 3]})
ax = df.plot()
self.assertEqual(len(ax.get_lines()), 1) # B was plotted
@slow
def test_implicit_label(self):
df = DataFrame(randn(10, 3), columns=['a', 'b', 'c'])
ax = df.plot(x='a', y='b')
self.assertEqual(ax.xaxis.get_label().get_text(), 'a')
@slow
def test_explicit_label(self):
df = DataFrame(randn(10, 3), columns=['a', 'b', 'c'])
ax = df.plot(x='a', y='b', label='LABEL')
self.assertEqual(ax.xaxis.get_label().get_text(), 'LABEL')
@slow
def test_plot_xy(self):
import matplotlib.pyplot as plt
# columns.inferred_type == 'string'
df = tm.makeTimeDataFrame()
self._check_data(df.plot(x=0, y=1),
df.set_index('A')['B'].plot())
self._check_data(df.plot(x=0), df.set_index('A').plot())
self._check_data(df.plot(y=0), df.B.plot())
self._check_data(df.plot(x='A', y='B'),
df.set_index('A').B.plot())
self._check_data(df.plot(x='A'), df.set_index('A').plot())
self._check_data(df.plot(y='B'), df.B.plot())
# columns.inferred_type == 'integer'
df.columns = lrange(1, len(df.columns) + 1)
self._check_data(df.plot(x=1, y=2),
df.set_index(1)[2].plot())
self._check_data(df.plot(x=1), df.set_index(1).plot())
self._check_data(df.plot(y=1), df[1].plot())
# figsize and title
ax = df.plot(x=1, y=2, title='Test', figsize=(16, 8))
self.assertEqual(ax.title.get_text(), 'Test')
assert_array_equal(np.round(ax.figure.get_size_inches()),
np.array((16., 8.)))
# columns.inferred_type == 'mixed'
# TODO add MultiIndex test
@slow
def test_xcompat(self):
import pandas as pd
import matplotlib.pyplot as plt
df = tm.makeTimeDataFrame()
ax = df.plot(x_compat=True)
lines = ax.get_lines()
self.assert_(not isinstance(lines[0].get_xdata(), PeriodIndex))
tm.close()
pd.plot_params['xaxis.compat'] = True
ax = df.plot()
lines = ax.get_lines()
self.assert_(not isinstance(lines[0].get_xdata(), PeriodIndex))
tm.close()
pd.plot_params['x_compat'] = False
ax = df.plot()
lines = ax.get_lines()
tm.assert_isinstance(lines[0].get_xdata(), PeriodIndex)
tm.close()
# useful if you're plotting a bunch together
with pd.plot_params.use('x_compat', True):
ax = df.plot()
lines = ax.get_lines()
self.assert_(not isinstance(lines[0].get_xdata(), PeriodIndex))
tm.close()
ax = df.plot()
lines = ax.get_lines()
tm.assert_isinstance(lines[0].get_xdata(), PeriodIndex)
def test_unsorted_index(self):
df = DataFrame({'y': np.arange(100)},
index=np.arange(99, -1, -1), dtype=np.int64)
ax = df.plot()
l = ax.get_lines()[0]
rs = l.get_xydata()
rs = Series(rs[:, 1], rs[:, 0], dtype=np.int64)
tm.assert_series_equal(rs, df.y)
def _check_data(self, xp, rs):
xp_lines = xp.get_lines()
rs_lines = rs.get_lines()
def check_line(xpl, rsl):
xpdata = xpl.get_xydata()
rsdata = rsl.get_xydata()
assert_array_equal(xpdata, rsdata)
[check_line(xpl, rsl) for xpl, rsl in zip(xp_lines, rs_lines)]
tm.close()
@slow
def test_subplots(self):
df = DataFrame(np.random.rand(10, 3),
index=list(string.ascii_letters[:10]))
axes = df.plot(subplots=True, sharex=True, legend=True)
for ax in axes:
self.assert_(ax.get_legend() is not None)
axes = df.plot(subplots=True, sharex=True)
for ax in axes[:-2]:
[self.assert_(not label.get_visible())
for label in ax.get_xticklabels()]
[self.assert_(label.get_visible())
for label in ax.get_yticklabels()]
[self.assert_(label.get_visible())
for label in axes[-1].get_xticklabels()]
[self.assert_(label.get_visible())
for label in axes[-1].get_yticklabels()]
axes = df.plot(subplots=True, sharex=False)
for ax in axes:
[self.assert_(label.get_visible())
for label in ax.get_xticklabels()]
[self.assert_(label.get_visible())
for label in ax.get_yticklabels()]
@slow
def test_plot_scatter(self):
from matplotlib.pylab import close
df = DataFrame(randn(6, 4),
index=list(string.ascii_letters[:6]),
columns=['x', 'y', 'z', 'four'])
_check_plot_works(df.plot, x='x', y='y', kind='scatter')
_check_plot_works(df.plot, x=1, y=2, kind='scatter')
with tm.assertRaises(ValueError):
df.plot(x='x', kind='scatter')
with tm.assertRaises(ValueError):
df.plot(y='y', kind='scatter')
@slow
def test_plot_bar(self):
from matplotlib.pylab import close
df = DataFrame(randn(6, 4),
index=list(string.ascii_letters[:6]),
columns=['one', 'two', 'three', 'four'])
_check_plot_works(df.plot, kind='bar')
_check_plot_works(df.plot, kind='bar', legend=False)
_check_plot_works(df.plot, kind='bar', subplots=True)
_check_plot_works(df.plot, kind='bar', stacked=True)
df = DataFrame(randn(10, 15),
index=list(string.ascii_letters[:10]),
columns=lrange(15))
_check_plot_works(df.plot, kind='bar')
df = DataFrame({'a': [0, 1], 'b': [1, 0]})
_check_plot_works(df.plot, kind='bar')
def test_bar_stacked_center(self):
# GH2157
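# with stacked bars the first x tick should sit at the horizontal center of the first bar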
df = DataFrame({'A': [3] * 5, 'B': lrange(5)}, index=lrange(5))
ax = df.plot(kind='bar', stacked='True', grid=True)
self.assertEqual(ax.xaxis.get_ticklocs()[0],
ax.patches[0].get_x() + ax.patches[0].get_width() / 2)
def test_bar_center(self):
df = DataFrame({'A': [3] * 5, 'B': lrange(5)}, index=lrange(5))
ax = df.plot(kind='bar', grid=True)
self.assertEqual(ax.xaxis.get_ticklocs()[0],
ax.patches[0].get_x() + ax.patches[0].get_width())
@slow
def test_bar_log_no_subplots(self):
# GH3254, GH3298 matplotlib/matplotlib#1882, #1892
# regressions in 1.2.1
expected = np.array([1., 10.])
if not self.mpl_le_1_2_1:
expected = np.hstack((.1, expected, 100))
# no subplots
df = DataFrame({'A': [3] * 5, 'B': lrange(1, 6)}, index=lrange(5))
ax = df.plot(kind='bar', grid=True, log=True)
assert_array_equal(ax.yaxis.get_ticklocs(), expected)
@slow
def test_bar_log_subplots(self):
expected = np.array([1., 10., 100., 1000.])
if not self.mpl_le_1_2_1:
expected = np.hstack((.1, expected, 1e4))
ax = DataFrame([Series([200, 300]),
Series([300, 500])]).plot(log=True, kind='bar',
subplots=True)
assert_array_equal(ax[0].yaxis.get_ticklocs(), expected)
assert_array_equal(ax[1].yaxis.get_ticklocs(), expected)
@slow
def test_boxplot(self):
df = DataFrame(randn(6, 4),
index=list(string.ascii_letters[:6]),
columns=['one', 'two', 'three', 'four'])
df['indic'] = ['foo', 'bar'] * 3
df['indic2'] = ['foo', 'bar', 'foo'] * 2
_check_plot_works(df.boxplot)
_check_plot_works(df.boxplot, column=['one', 'two'])
_check_plot_works(df.boxplot, column=['one', 'two'], by='indic')
_check_plot_works(df.boxplot, column='one', by=['indic', 'indic2'])
_check_plot_works(df.boxplot, by='indic')
_check_plot_works(df.boxplot, by=['indic', 'indic2'])
_check_plot_works(plotting.boxplot, df['one'])
_check_plot_works(df.boxplot, notch=1)
_check_plot_works(df.boxplot, by='indic', notch=1)
df = DataFrame(np.random.rand(10, 2), columns=['Col1', 'Col2'])
df['X'] = Series(['A', 'A', 'A', 'A', 'A', 'B', 'B', 'B', 'B', 'B'])
_check_plot_works(df.boxplot, by='X')
@slow
def test_kde(self):
_skip_if_no_scipy()
df = DataFrame(randn(100, 4))
_check_plot_works(df.plot, kind='kde')
_check_plot_works(df.plot, kind='kde', subplots=True)
ax = df.plot(kind='kde')
self.assert_(ax.get_legend() is not None)
axes = df.plot(kind='kde', logy=True, subplots=True)
for ax in axes:
self.assertEqual(ax.get_yscale(), 'log')
@slow
def test_hist(self):
import matplotlib.pyplot as plt
df = DataFrame(randn(100, 4))
_check_plot_works(df.hist)
_check_plot_works(df.hist, grid=False)
# make sure layout is handled
df = DataFrame(randn(100, 3))
_check_plot_works(df.hist)
axes = df.hist(grid=False)
self.assert_(not axes[1, 1].get_visible())
df = DataFrame(randn(100, 1))
_check_plot_works(df.hist)
# make sure layout is handled
df = DataFrame(randn(100, 6))
_check_plot_works(df.hist)
# make sure sharex, sharey is handled
_check_plot_works(df.hist, sharex=True, sharey=True)
# handle figsize arg
_check_plot_works(df.hist, figsize=(8, 10))
# make sure xlabelsize and xrot are handled
ser = df[0]
xf, yf = 20, 20
xrot, yrot = 30, 30
ax = ser.hist(xlabelsize=xf, xrot=30, ylabelsize=yf, yrot=30)
ytick = ax.get_yticklabels()[0]
xtick = ax.get_xticklabels()[0]
self.assertAlmostEqual(ytick.get_fontsize(), yf)
self.assertAlmostEqual(ytick.get_rotation(), yrot)
self.assertAlmostEqual(xtick.get_fontsize(), xf)
self.assertAlmostEqual(xtick.get_rotation(), xrot)
xf, yf = 20, 20
xrot, yrot = 30, 30
axes = df.hist(xlabelsize=xf, xrot=30, ylabelsize=yf, yrot=30)
for i, ax in enumerate(axes.ravel()):
if i < len(df.columns):
ytick = ax.get_yticklabels()[0]
xtick = ax.get_xticklabels()[0]
self.assertAlmostEqual(ytick.get_fontsize(), yf)
self.assertAlmostEqual(ytick.get_rotation(), yrot)
self.assertAlmostEqual(xtick.get_fontsize(), xf)
self.assertAlmostEqual(xtick.get_rotation(), xrot)
tm.close()
# make sure kwargs to hist are handled
ax = ser.hist(normed=True, cumulative=True, bins=4)
# height of last bin (index 5) must be 1.0
self.assertAlmostEqual(ax.get_children()[5].get_height(), 1.0)
tm.close()
ax = ser.hist(log=True)
# scale of y must be 'log'
self.assertEqual(ax.get_yscale(), 'log')
tm.close()
# propagate attr exception from matplotlib.Axes.hist
with tm.assertRaises(AttributeError):
ser.hist(foo='bar')
@slow
def test_hist_layout(self):
import matplotlib.pyplot as plt
df = DataFrame(randn(100, 4))
layout_to_expected_size = (
{'layout': None, 'expected_size': (2, 2)}, # default is 2x2
{'layout': (2, 2), 'expected_size': (2, 2)},
{'layout': (4, 1), 'expected_size': (4, 1)},
{'layout': (1, 4), 'expected_size': (1, 4)},
{'layout': (3, 3), 'expected_size': (3, 3)},
)
for layout_test in layout_to_expected_size:
ax = df.hist(layout=layout_test['layout'])
self.assertEqual(len(ax), layout_test['expected_size'][0])
self.assertEqual(len(ax[0]), layout_test['expected_size'][1])
# layout too small for all 4 plots
with tm.assertRaises(ValueError):
df.hist(layout=(1, 1))
# invalid format for layout
with tm.assertRaises(ValueError):
df.hist(layout=(1,))
@slow
def test_scatter(self):
_skip_if_no_scipy()
df = DataFrame(randn(100, 2))
import pandas.tools.plotting as plt
def scat(**kwds):
return plt.scatter_matrix(df, **kwds)
_check_plot_works(scat)
_check_plot_works(scat, marker='+')
_check_plot_works(scat, vmin=0)
_check_plot_works(scat, diagonal='kde')
_check_plot_works(scat, diagonal='density')
_check_plot_works(scat, diagonal='hist')
def scat2(x, y, by=None, ax=None, figsize=None):
return plt.scatter_plot(df, x, y, by, ax, figsize=None)
_check_plot_works(scat2, 0, 1)
grouper = Series(np.repeat([1, 2, 3, 4, 5], 20), df.index)
_check_plot_works(scat2, 0, 1, by=grouper)
@slow
def test_andrews_curves(self):
from pandas import read_csv
from pandas.tools.plotting import andrews_curves
path = os.path.join(curpath(), 'data', 'iris.csv')
df = read_csv(path)
_check_plot_works(andrews_curves, df, 'Name')
@slow
def test_parallel_coordinates(self):
from pandas import read_csv
from pandas.tools.plotting import parallel_coordinates
from matplotlib import cm
path = os.path.join(curpath(), 'data', 'iris.csv')
df = read_csv(path)
_check_plot_works(parallel_coordinates, df, 'Name')
_check_plot_works(parallel_coordinates, df, 'Name',
colors=('#556270', '#4ECDC4', '#C7F464'))
_check_plot_works(parallel_coordinates, df, 'Name',
colors=['dodgerblue', 'aquamarine', 'seagreen'])
_check_plot_works(parallel_coordinates, df, 'Name',
colors=('#556270', '#4ECDC4', '#C7F464'))
_check_plot_works(parallel_coordinates, df, 'Name',
colors=['dodgerblue', 'aquamarine', 'seagreen'])
_check_plot_works(parallel_coordinates, df, 'Name', colormap=cm.jet)
df = read_csv(path, header=None, skiprows=1, names=[1, 2, 4, 8,
'Name'])
_check_plot_works(parallel_coordinates, df, 'Name', use_columns=True)
_check_plot_works(parallel_coordinates, df, 'Name',
xticks=[1, 5, 25, 125])
@slow
def test_radviz(self):
from pandas import read_csv
from pandas.tools.plotting import radviz
from matplotlib import cm
path = os.path.join(curpath(), 'data', 'iris.csv')
df = read_csv(path)
_check_plot_works(radviz, df, 'Name')
_check_plot_works(radviz, df, 'Name', colormap=cm.jet)
@slow
def test_plot_int_columns(self):
df = DataFrame(randn(100, 4)).cumsum()
_check_plot_works(df.plot, legend=True)
def test_legend_name(self):
multi = DataFrame(randn(4, 4),
columns=[np.array(['a', 'a', 'b', 'b']),
np.array(['x', 'y', 'x', 'y'])])
multi.columns.names = ['group', 'individual']
ax = multi.plot()
leg_title = ax.legend_.get_title()
self.assertEqual(leg_title.get_text(), 'group,individual')
def _check_plot_fails(self, f, *args, **kwargs):
with tm.assertRaises(Exception):
f(*args, **kwargs)
@slow
def test_style_by_column(self):
import matplotlib.pyplot as plt
fig = plt.gcf()
df = DataFrame(randn(100, 3))
for markers in [{0: '^', 1: '+', 2: 'o'},
{0: '^', 1: '+'},
['^', '+', 'o'],
['^', '+']]:
fig.clf()
fig.add_subplot(111)
ax = df.plot(style=markers)
for i, l in enumerate(ax.get_lines()[:len(markers)]):
self.assertEqual(l.get_marker(), markers[i])
@slow
def test_line_colors(self):
import matplotlib.pyplot as plt
import sys
from matplotlib import cm
custom_colors = 'rgcby'
df = DataFrame(randn(5, 5))
ax = df.plot(color=custom_colors)
lines = ax.get_lines()
for i, l in enumerate(lines):
xp = custom_colors[i]
rs = l.get_color()
self.assertEqual(xp, rs)
tmp = sys.stderr
sys.stderr = StringIO()
try:
tm.close()
ax2 = df.plot(colors=custom_colors)
lines2 = ax2.get_lines()
for l1, l2 in zip(lines, lines2):
self.assertEqual(l1.get_color(), l2.get_color())
finally:
sys.stderr = tmp
tm.close()
import pandas
try:
from typing import List, Any
except ImportError:
pass
from .ReportParser import ReportParser
class ReportParserEFM(ReportParser):
def __init__(self):
ReportParser.__init__(self)
def parseLines(self, lines):
# type: ([str]) -> None
current = 2
self.status = lines[current].strip()
# read list of modes
current = self.skip_until(lines, current, '#\t\tReactions\tEquations')
current = current + 1
end = self.find_empty(lines, current)
modes = [] # type: List[Any]
reversibility = []
reactions = []
equations = []
last = -1
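# rows with fewer than four columns are continuation lines: their reaction and
# equation fragments are appended to the previously seen mode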
for no in range(current, end):
parts = lines[no].strip().split('\t')
col1 = parts[0].strip()
col2 = parts[1].strip()
if len(parts) != 4:
reactions[last] = reactions[last] + ' + ' + col1
equations[last] = equations[last] + '; ' + col2
continue
col3 = parts[2].strip()
col4 = parts[3].strip()
if col1 != '':
last = int(col1) - 1
modes.append(col1)
reversibility.append(col2)
reactions.append(col3)
equations.append(col4)
df = pandas.DataFrame(
{'Mode': modes, 'Reversibility': reversibility, 'Reactions': reactions, 'Equations': equations})
df = df.set_index('Mode')
self.data_frames.append(df)
self.data_descriptions.append({'desc': "Flux modes"})
current = end + 1
# read net reactions
current = self.skip_until(lines, current, '#\tNet Reaction\tInternal Species')
current = current + 1
end = self.find_empty(lines, current)
modes = []
species = []
for no in range(current, end):
parts = lines[no].strip().split('\t')
if len(parts) != 3:
continue
modes.append(parts[0])
species.append(parts[2])
df = pandas.DataFrame({'Mode': modes, 'Internal Species': species})
import pandas as pd
import pytest
from pandas.testing import assert_frame_equal
from contextlib import contextmanager
from gptables import GPTable
# TODO: These should be stored in GPTable
gptable_text_attrs = ["title", "scope", "units", "source"]
gptable_list_text_attrs = ["subtitles", "legend", "notes"]
valid_index_columns = [
{},
{1: "one"},
{1: "one", 2: "two"},
{1: "one", 2: "two", 3: "three"}
]
valid_text_elements = [
"This is a string",
["This is ", {"bold": True}, "rich", "text"],
None
]
invalid_text_elements = [
dict(),
set(),
42,
3.14,
True
]
@contextmanager
def does_not_raise():
yield
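# no-op context manager, used as the "expectation" placeholder in parametrized
# tests for cases that should not raise (the counterpart of pytest.raises)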
@pytest.fixture(scope="function")
def create_gptable_with_kwargs():
def generate_gptable(format_dict=None):
base_gptable = {
"table": pd.DataFrame(),
"title": "",
"scope": "",
"units": "",
"source": "",
"index_columns": {} # Override default, as no columns in table
}
if format_dict is not None:
base_gptable.update(format_dict)
return GPTable(**base_gptable)
return generate_gptable
def test_init_defaults(create_gptable_with_kwargs):
"""
Test that given a minimal input, default attributes are correct types.
"""
empty_gptable = create_gptable_with_kwargs()
# Required args
assert empty_gptable.title == ""
assert empty_gptable.scope == ""
assert empty_gptable.units == ""
assert empty_gptable.source == ""
assert_frame_equal(
empty_gptable.table, pd.DataFrame())
import functools
import inspect
from abc import ABC
from abc import abstractmethod
from copy import deepcopy
from typing import Any
from typing import Dict
from typing import List
from typing import Optional
from typing import Sequence
from typing import Union
import numpy as np
import pandas as pd
from etna.core.mixins import BaseMixin
from etna.datasets.tsdataset import TSDataset
from etna.loggers import tslogger
# TODO: make PyCharm see signature of decorated method
def log_decorator(f):
"""Add logging for method of the model."""
patch_dict = {"function": f.__name__, "line": inspect.getsourcelines(f)[1], "name": inspect.getmodule(f).__name__}
@functools.wraps(f)
def wrapper(self, *args, **kwargs):
tslogger.log(f"Calling method {f.__name__} of {self.__class__.__name__}", **patch_dict)
result = f(self, *args, **kwargs)
return result
return wrapper
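# the decorator logs the wrapped method's name, source line and module before delegating to it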
class Model(ABC, BaseMixin):
"""Class for holding specific models - autoregression and simple regressions."""
def __init__(self):
self._models = None
@abstractmethod
def fit(self, ts: TSDataset) -> "Model":
"""Fit model.
Parameters
----------
ts:
Dataframe with features
Returns
-------
:
Model after fit
"""
pass
@abstractmethod
def forecast(
self, ts: TSDataset, prediction_interval: bool = False, quantiles: Sequence[float] = (0.025, 0.975)
) -> TSDataset:
"""Make predictions.
Parameters
----------
ts:
Dataframe with features
prediction_interval:
If True returns prediction interval for forecast
quantiles:
Levels of prediction distribution. By default 2.5% and 97.5% taken to form a 95% prediction interval
Returns
-------
TSDataset
Models result
"""
pass
@staticmethod
def _forecast_segment(model, segment: Union[str, List[str]], ts: TSDataset) -> pd.DataFrame:
segment_features = ts[:, segment, :]
segment_features = segment_features.droplevel("segment", axis=1)
segment_features = segment_features.reset_index()
dates = segment_features["timestamp"]
dates.reset_index(drop=True, inplace=True)
segment_predict = model.predict(df=segment_features)
segment_predict = pd.DataFrame({"target": segment_predict})
segment_predict["segment"] = segment
segment_predict["timestamp"] = dates
return segment_predict
class FitAbstractModel(ABC):
"""Interface for model with fit method."""
@abstractmethod
def fit(self, ts: TSDataset) -> "FitAbstractModel":
"""Fit model.
Parameters
----------
ts:
Dataset with features
Returns
-------
:
Model after fit
"""
pass
@abstractmethod
def get_model(self) -> Union[Any, Dict[str, Any]]:
"""Get internal model/models that are used inside etna class.
Internal model is a model that is used inside etna to forecast segments,
e.g. :py:class:`catboost.CatBoostRegressor` or :py:class:`sklearn.linear_model.Ridge`.
Returns
-------
:
The result can be of two types:
* if model is multi-segment, then the result is internal model
* if model is per-segment, then the result is dictionary where key is segment and value is internal model
"""
pass
class ForecastAbstractModel(ABC):
"""Interface for model with forecast method."""
@abstractmethod
def forecast(self, ts: TSDataset) -> TSDataset:
"""Make predictions.
Parameters
----------
ts:
Dataset with features
Returns
-------
:
Dataset with predictions
"""
pass
class PredictIntervalAbstractModel(ABC):
"""Interface for model with forecast method that creates prediction interval."""
@abstractmethod
def forecast(
self, ts: TSDataset, prediction_interval: bool = False, quantiles: Sequence[float] = (0.025, 0.975)
) -> TSDataset:
"""Make predictions.
Parameters
----------
ts:
Dataset with features
prediction_interval:
If True returns prediction interval for forecast
quantiles:
Levels of prediction distribution. By default 2.5% and 97.5% are taken to form a 95% prediction interval
Returns
-------
:
Dataset with predictions
"""
pass
class PerSegmentBaseModel(FitAbstractModel, BaseMixin):
"""Base class for holding specific models for per-segment prediction."""
def __init__(self, base_model: Any):
"""
Init PerSegmentBaseModel.
Parameters
----------
base_model:
Internal model which will be used to forecast segments, expected to have fit/predict interface
"""
self._base_model = base_model
self._models: Optional[Dict[str, Any]] = None
@log_decorator
def fit(self, ts: TSDataset) -> "PerSegmentBaseModel":
"""Fit model.
Parameters
----------
ts:
Dataset with features
Returns
-------
:
Model after fit
"""
self._models = {}
for segment in ts.segments:
self._models[segment] = deepcopy(self._base_model)
for segment, model in self._models.items():
segment_features = ts[:, segment, :]
segment_features = segment_features.dropna() # TODO: https://github.com/tinkoff-ai/etna/issues/557
segment_features = segment_features.droplevel("segment", axis=1)
segment_features = segment_features.reset_index()
model.fit(df=segment_features, regressors=ts.regressors)
return self
def _get_model(self) -> Dict[str, Any]:
"""Get internal etna base models that are used inside etna class.
Returns
-------
:
dictionary where key is segment and value is internal model
"""
if self._models is None:
raise ValueError("Can not get the dict with base models, the model is not fitted!")
return self._models
def get_model(self) -> Dict[str, Any]:
"""Get internal models that are used inside etna class.
Internal model is a model that is used inside etna to forecast segments,
e.g. :py:class:`catboost.CatBoostRegressor` or :py:class:`sklearn.linear_model.Ridge`.
Returns
-------
:
dictionary where key is segment and value is internal model
"""
internal_models = {}
for segment, base_model in self._get_model().items():
if not hasattr(base_model, "get_model"):
raise NotImplementedError(
f"get_model method is not implemented for {self._base_model.__class__.__name__}"
)
internal_models[segment] = base_model.get_model()
return internal_models
@staticmethod
def _forecast_segment(model: Any, segment: str, ts: TSDataset, *args, **kwargs) -> pd.DataFrame:
"""Make predictions for one segment."""
segment_features = ts[:, segment, :]
segment_features = segment_features.droplevel("segment", axis=1)
segment_features = segment_features.reset_index()
dates = segment_features["timestamp"]
dates.reset_index(drop=True, inplace=True)
segment_predict = model.predict(df=segment_features, *args, **kwargs)
if isinstance(segment_predict, np.ndarray):
segment_predict = pd.DataFrame({"target": segment_predict})
segment_predict["segment"] = segment
segment_predict["timestamp"] = dates
return segment_predict
class PerSegmentModel(PerSegmentBaseModel, ForecastAbstractModel):
"""Class for holding specific models for per-segment prediction."""
def __init__(self, base_model: Any):
"""
Init PerSegmentBaseModel.
Parameters
----------
base_model:
Internal model which will be used to forecast segments, expected to have fit/predict interface
"""
super().__init__(base_model=base_model)
@log_decorator
def forecast(self, ts: TSDataset) -> TSDataset:
"""Make predictions.
Parameters
----------
ts:
Dataframe with features
Returns
-------
:
Dataset with predictions
"""
result_list = list()
for segment, model in self._get_model().items():
segment_predict = self._forecast_segment(model=model, segment=segment, ts=ts)
result_list.append(segment_predict)
result_df = pd.concat(result_list, ignore_index=True)
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import analyze
from utils import plot_collections, bin, modify, plotting
"""
Blue: #0C5DA5
Green: #00B945
"""
plt.style.use(['science', 'ieee', 'std-colors'])
fig, ax = plt.subplots()
size_x_inches, size_y_inches = fig.get_size_inches()
plt.close(fig)
sciblue = '#0C5DA5'
scigreen = '#00B945'
# --- --- SETUP
# --- files to read
dataset_dir = '/Users/mackenzie/Desktop/gdpyt-characterization/datasets/synthetic_overlap_noise-level1/'
base_dir = '/Users/mackenzie/Desktop/gdpyt-characterization/publication data/iteration 5/synthetic grid overlap random z nl1/'
# test coords
base_testcoords = 'test_coords/'
idpt1 = 'dx-5-60-5/test_id1_coords_static_grid-overlap-random-z-nl1.xlsx'
spct1 = 'dx-5-60-5/test_id11_coords_SPC_grid-overlap-random-z-nl1.xlsx'
idpt2 = 'dx-7.5-57.5-5/test_id2_coords_static_grid-overlap-random-z-nl1.xlsx'
spct2 = 'dx-7.5-57.5-5/test_id12_coords_SPC_grid-overlap-random-z-nl1.xlsx'
# true coords
true1 = dataset_dir + 'grid-random-z/calibration_input/calib_-15.0.txt'
true2 = dataset_dir + 'grid-random-z/test_input_dx7.5/B0000.txt'
# diameter parameters
path_diameter_params = dataset_dir + 'grid/results/calibration-SPC-spct-no_noise_cal/calib_spct_pop_defocus_stats_GridOverlapSPC_calib_nll1_SPC_no_noise_cal.xlsx'
# save ids
save_ids = ['test_id1_coords_static',
'test_id11_coords_SPC',
'test_id2_coords_static',
'test_id12_coords_SPC',
]
modifiers = [True, True, False, False]
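# True marks the dx-5-60-5 datasets (presumably the ones needing the x < 820 px filter
# and the dx-5-60-5 splits below); False marks the dx-7.5-57.5-5 datasets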
# --- --- PERCENT DIAMETER OVERLAP
export_percent_diameter_overlap = False
plot_percent_diameter_overlap = False
if plot_percent_diameter_overlap or export_percent_diameter_overlap:
# --- read each percent diameter overlap dataframe (if available)
calculate_percent_overlap = False
for sid, modifier in zip(save_ids, modifiers):
if calculate_percent_overlap:
# --- For each test coords, calculate percent diameter overlap
for test_coord, true_coord, filt, sid in zip([idpt1, spct1, idpt2, spct2],
[true1, true1, true2, true2],
modifiers,
save_ids):
dfo = analyze.calculate_particle_to_particle_spacing(
test_coords_path=base_dir + base_testcoords + test_coord,
theoretical_diameter_params_path=path_diameter_params,
mag_eff=10,
z_param='z_true',
zf_at_zero=False,
zf_param='zf_from_dia',
max_n_neighbors=5,
true_coords_path=true_coord,
maximum_allowable_diameter=55)
# filter dx 5 60 5 coords
if filt:
dfo = dfo[dfo['x'] < 820]
# save to excel
dfo.to_excel(base_dir + 'percent-overlap/{}_grid-overlap-random-z-nl1_percent_overlap.xlsx'.format(sid),
index=False)
else:
dfo = pd.read_excel(base_dir + 'percent-overlap/test_coords_percent_overlap/'
'{}_grid-overlap-random-z-nl1_percent_overlap.xlsx'.format(sid))
# --- --- EVALUATE RMSE Z
# limit percent diameter overlap to -50% (particles are not overlapping below this)
dfo['percent_dx_diameter'] = dfo['percent_dx_diameter'].where(dfo['percent_dx_diameter'] > -0.5, -0.5)
# binning
columns_to_bin = ['z_true', 'percent_dx_diameter']
bin_z = [-27.5, -15, -2.5, 10, 22.5]
bin_pdo = 3
dfbicts = analyze.evaluate_2d_bin_local_rmse_z(df=dfo,
columns_to_bin=columns_to_bin,
bins=[bin_z, bin_pdo],
round_to_decimals=[3, 4],
min_cm=0.5,
equal_bins=[False, True])
# --- --- PLOT RMSE Z
if plot_percent_diameter_overlap:
# Plot rmse z + number of particles binned as a function of percent diameter overlap for different z bins
fig, [ax, ax2] = plt.subplots(nrows=2, sharex=True, figsize=(size_x_inches*1.35, size_y_inches*1.5))
for name, df in dfbicts.items():
ax.plot(df.bin, df.rmse_z, '-o', label=name)
ax2.plot(df.bin, df.num_bind, '-o')
ax.set_ylabel(r'z r.m.s. error ($\mu m$)')
ax.set_yscale('log')
ax.legend(loc='upper left', bbox_to_anchor=(1, 1), title=r'$z_{bin}$')
ax2.set_xlabel(r'$\gamma \: $(\%)')
ax2.set_ylabel(r'$N_{p}$')
plt.tight_layout()
plt.savefig(base_dir + 'percent-overlap/{}_rmsez_num-binned_pdo.png'.format(sid))
plt.show()
# --- --- EXPORT RMSE Z TO EXCEL
# dfstack = modify.stack_dficts_by_key(dfbicts, drop_filename=False)
# dfstack.to_excel(base_dir + 'percent-overlap/{}_binned_rmsez_by_z_pdo.xlsx'.format(sid), index=False)
# --- --- PLOT OTHER METRICS
# --- calculate the local rmse_z uncertainty
num_bins = 25
bin_list = np.round(np.linspace(-1.25, 1, 10), 4)
min_cm = 0.5
z_range = [-40.1, 40.1]
round_to_decimal = 4
df_ground_truth = None
# bin by percent diameter overlap
if plot_percent_diameter_overlap:
dfob = bin.bin_local_rmse_z(df=dfo, column_to_bin='percent_dx_diameter', bins=bin_list, min_cm=min_cm, z_range=z_range,
round_to_decimal=round_to_decimal, df_ground_truth=df_ground_truth)
fig, ax = plt.subplots()
ax.plot(dfob.index, dfob.rmse_z, '-o')
ax.set_xlabel(r'$\gamma \: $(\%)')
ax.set_ylabel(r'z r.m.s. error ($\mu m$)')
plt.tight_layout()
plt.savefig(base_dir + 'percent-overlap/{}_binned_rmsez_by_pdo.png'.format(sid))
plt.show()
# bin by z
dfobz = bin.bin_local_rmse_z(df=dfo, column_to_bin='z_true', bins=num_bins, min_cm=min_cm, z_range=z_range,
round_to_decimal=round_to_decimal, df_ground_truth=df_ground_truth)
fig, ax = plt.subplots()
ax.plot(dfobz.index, dfobz.rmse_z, '-o')
ax.set_xlabel(r'$z_{true} \:$ ($\mu m$)')
ax.set_ylabel(r'z r.m.s. error ($\mu m$)')
plt.tight_layout()
plt.savefig(base_dir + 'percent-overlap/{}_binned_rmsez_by_z.png'.format(sid))
plt.show()
# --- --- CALCULATE RMSE BY PARTICLE TO PARTICLE SPACINGS
# --- setup
column_to_split = 'x'
round_x_to_decimal = 0
if modifier:
splits = np.array([93.0, 189.0, 284.0, 380.0, 475.0, 571.0, 666.0, 762.0, 858.0, 930]) # , 900.0
keys = [60, 5, 10, 15, 20, 25, 30, 35, 40, 50] # , 47.5
else:
splits = np.array([79.0, 163.5, 254.0, 348.5, 447.0, 555.5, 665.0, 777.5, 900.0])
keys = [7.5, 12.5, 17.5, 22.5, 27.5, 32.5, 37.5, 42.5, 47.5]
# --- split df into dictionary
dfsplicts_gdpyt = modify.split_df_and_merge_dficts(dfo, keys, column_to_split, splits, round_x_to_decimal)
# --- rmse z by binning x
dfmbicts_gdpyt = analyze.calculate_bin_local_rmse_z(dfsplicts_gdpyt, column_to_split, splits, min_cm, z_range,
round_x_to_decimal, dficts_ground_truth=None)
# --- plot global uncertainty - gdpyt
if plot_percent_diameter_overlap:
xlabel_for_keys = r'$\delta x (pix)$'
h = 80
scale_fig_dim = [1, 1]
fig, ax, ax2 = plotting.plot_dfbicts_global(dfmbicts_gdpyt, parameters='rmse_z', xlabel=xlabel_for_keys, h=h,
scale=scale_fig_dim)
plt.tight_layout()
plt.savefig(base_dir + 'percent-overlap/{}_global_binned_rmsez_by_z.png'.format(sid))
plt.show()
# --- --- EXPORT GLOBAL RMSE Z TO EXCEL
if export_percent_diameter_overlap:
dfstack = modify.stack_dficts_by_key(dfmbicts_gdpyt, drop_filename=False)
dfstack.to_excel(base_dir + 'percent-overlap/{}_global_binned_rmsez_by_particle_spacing.xlsx'.format(sid), index=False)
# --- --- COMBINED PARTICLE TO PARTICLE SPACING
plot_particle_spacing = False
if plot_particle_spacing:
# read files
read_dir = base_dir + 'percent-overlap/particle-to-particle-spacing/'
fn1 = 'test_id1_coords_static_global_binned_rmsez_by_particle_spacing'
fn2 = 'test_id2_coords_static_global_binned_rmsez_by_particle_spacing'
fn11 = 'test_id11_coords_SPC_global_binned_rmsez_by_particle_spacing'
fn12 = 'test_id12_coords_SPC_global_binned_rmsez_by_particle_spacing'
df1 = pd.read_excel(read_dir + fn1 + '.xlsx')
df2 = pd.read_excel(read_dir + fn2 + '.xlsx')
df11 = pd.read_excel(read_dir + fn11 + '.xlsx')
# Written by: <NAME>, @dataoutsider
# Viz: "Good Read", enjoy!
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import os
from math import pi, cos, sin, exp, sqrt, atan2
#pd.set_option('display.max_rows', None)
class point:
def __init__(self, index, item, x, y, path = -1, value = ''):
self.index = index
self.item = item
self.x = x
self.y = y
self.path = path
self.value = value
def to_dict(self):
return {
'index' : self.index,
'item' : self.item,
'x' : self.x,
'y' : self.y,
'path' : self.path,
'value' : self.value }
def ellipse_y(a, b, xs, xe, points):
x = np.linspace(xs, xe, num=points)
y = (b/a)*np.sqrt(a**2-x**2)
return x, y
def ellipse_x(a, b, y):
x = sqrt(-((a**2.*y**2.-b**2.*a**2.)/(b**2.)))
return x
def feather(x, length, cutoff_0_to_2):
xt = x/length
xf = xt*cutoff_0_to_2
# x space
xr = [0., 2.0]
# paramters
xi = [1.5, 3.5]
a = [0., 40.]
f = [2.0, 2.0]
# parameter linear transform
xi_t = np.interp(xf, xr, xi)
a_t = np.interp(xf, xr, a)
f_t = np.interp(xf, xr, f)
# Nick's feather equation
y = (1./xi_t**f_t)*sin(a_t*pi/180.)*25.
return y
#region data prep
df = pd.read_csv(os.path.dirname(__file__) + '/1001bookreviews_google_clean.csv')
df_cat_count = df.groupby('category').count().reset_index()
# Lump singles into 'Other'
df_cat_count['type'] = ['[\'All Single-Book Categories\']' if x == 1 else y for x, y in zip(df_cat_count['google_id'], df_cat_count['category'])]
# Save out lumped categories
df_cat_count.to_csv(os.path.dirname(__file__) + '/1001bookreviews_quill_cat.csv', encoding='utf-8', index=False)
# Final group
df_cat_count = df_cat_count.groupby('type').sum()
group_sort = df_cat_count.sort_values(by=['google_id'], ascending=[True]).reset_index()
group_sort['cat_perc'] = group_sort['google_id']/len(df.index)
print(group_sort)
#endregion
#region constants
top_x = 0.15 # 0.15 0.3
top_y = 8.5 #6 19.5
btm_x = 0.15 # 0.15 0.3
btm_y = -3.25 #-2.5 -7.0
#endregion
#region algorithm
upper = top_y - 0.01
lower = 0.0
delta = upper-lower
offset = 0
list_xy = []
ix = 0
resolution = 100
spine_b = 0.05 #0.05 0.15
path = 1
xt = []
yt = []
for index, row in group_sort.iterrows():
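# each category becomes one band of the pen nib: its vertical extent on the
# outer ellipse is proportional to the category's share of the books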
y_high = upper-offset
y_low = y_high-delta*row['cat_perc']
offset += delta*row['cat_perc']
x_high = ellipse_x(top_x, top_y, y_high)
x_low = ellipse_x(top_x, top_y, y_low)
# v (top down bend) >list_xy.append(point(ix, index, -x_high, y_high, 1, 0))
x, y = ellipse_y(x_high, -spine_b, -x_high, x_high, resolution)
for i in range(resolution):
list_xy.append(point(ix, -(index+1), x[i], y_high+y[i], path, row['type']))
path += 1
# / (right side inward bend) >list_xy.append(point(ix, index, x_high, y_high, 2, 0))
x, y = ellipse_y(top_x, top_y, x_high, x_low, resolution)
for i in range(resolution):
list_xy.append(point(ix, -(index+1), x[i], y[i], path, row['type']))
path += 1
# ^ (bottom down bend) >list_xy.append(point(ix, index, -x_high, y_low, 4, 0))
x, y = ellipse_y(x_low, -spine_b, x_low, -x_low, resolution)
for i in range(resolution):
list_xy.append(point(ix, -(index+1), x[i], y_low+y[i], path, row['type']))
path += 1
if index == len(group_sort.index)-1:
xt = x
yt = y
# \ (left side inward bend) >list_xy.append(point(ix, index, x_high, y_low, 3, 0))
x, y = ellipse_y(top_x, top_y, -x_low, -x_high, resolution)
for i in range(resolution):
list_xy.append(point(ix, -(index+1), x[i], y[i], path, row['type']))
path += 1
path = 1
# pen tip
for i in range(resolution):
list_xy.append(point(ix, -(index+2), xt[i], yt[i], path, '[\'aaa\']'))
path += 1
x, y = ellipse_y(btm_x, btm_y, -btm_x, btm_x, resolution)
for i in range(resolution):
list_xy.append(point(ix, -(index+2), x[i], y[i], path, '[\'aaa\']'))
path += 1
#endregion
#region output
df_quill = pd.DataFrame.from_records([s.to_dict() for s in list_xy])
df_quill['color'] = df_quill['value']
df_feather = pd.read_csv(os.path.dirname(__file__) + '/test_feather.csv')
df_out = pd.concat([df_feather, df_quill], ignore_index=True)
# Arithmetic tests for DataFrame/Series/Index/Array classes that should
# behave identically.
# Specifically for datetime64 and datetime64tz dtypes
from datetime import (
datetime,
time,
timedelta,
)
from itertools import (
product,
starmap,
)
import operator
import warnings
import numpy as np
import pytest
import pytz
from pandas._libs.tslibs.conversion import localize_pydatetime
from pandas._libs.tslibs.offsets import shift_months
from pandas.errors import PerformanceWarning
import pandas as pd
from pandas import (
DateOffset,
DatetimeIndex,
NaT,
Period,
Series,
Timedelta,
TimedeltaIndex,
Timestamp,
date_range,
)
import pandas._testing as tm
from pandas.core.arrays import (
DatetimeArray,
TimedeltaArray,
)
from pandas.core.ops import roperator
from pandas.tests.arithmetic.common import (
assert_cannot_add,
assert_invalid_addsub_type,
assert_invalid_comparison,
get_upcast_box,
)
# ------------------------------------------------------------------
# Comparisons
class TestDatetime64ArrayLikeComparisons:
# Comparison tests for datetime64 vectors fully parametrized over
# DataFrame/Series/DatetimeIndex/DatetimeArray. Ideally all comparison
# tests will eventually end up here.
def test_compare_zerodim(self, tz_naive_fixture, box_with_array):
# Test comparison with zero-dimensional array is unboxed
tz = tz_naive_fixture
box = box_with_array
dti = date_range("20130101", periods=3, tz=tz)
other = np.array(dti.to_numpy()[0])
dtarr = tm.box_expected(dti, box)
xbox = get_upcast_box(dtarr, other, True)
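# xbox is the container type the comparison result is expected to be boxed in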
result = dtarr <= other
expected = np.array([True, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
@pytest.mark.parametrize(
"other",
[
"foo",
-1,
99,
4.0,
object(),
timedelta(days=2),
# GH#19800, GH#19301 datetime.date comparison raises to
# match DatetimeIndex/Timestamp. This also matches the behavior
# of stdlib datetime.datetime
datetime(2001, 1, 1).date(),
# GH#19301 None and NaN are *not* cast to NaT for comparisons
None,
np.nan,
],
)
def test_dt64arr_cmp_scalar_invalid(self, other, tz_naive_fixture, box_with_array):
# GH#22074, GH#15966
tz = tz_naive_fixture
rng = date_range("1/1/2000", periods=10, tz=tz)
dtarr = tm.box_expected(rng, box_with_array)
assert_invalid_comparison(dtarr, other, box_with_array)
@pytest.mark.parametrize(
"other",
[
# GH#4968 invalid date/int comparisons
list(range(10)),
np.arange(10),
np.arange(10).astype(np.float32),
np.arange(10).astype(object),
pd.timedelta_range("1ns", periods=10).array,
np.array(pd.timedelta_range("1ns", periods=10)),
list(pd.timedelta_range("1ns", periods=10)),
pd.timedelta_range("1 Day", periods=10).astype(object),
pd.period_range("1971-01-01", freq="D", periods=10).array,
pd.period_range("1971-01-01", freq="D", periods=10).astype(object),
],
)
def test_dt64arr_cmp_arraylike_invalid(
self, other, tz_naive_fixture, box_with_array
):
tz = tz_naive_fixture
dta = date_range("1970-01-01", freq="ns", periods=10, tz=tz)._data
obj = tm.box_expected(dta, box_with_array)
assert_invalid_comparison(obj, other, box_with_array)
def test_dt64arr_cmp_mixed_invalid(self, tz_naive_fixture):
tz = tz_naive_fixture
dta = date_range("1970-01-01", freq="h", periods=5, tz=tz)._data
other = np.array([0, 1, 2, dta[3], Timedelta(days=1)])
result = dta == other
expected = np.array([False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = dta != other
tm.assert_numpy_array_equal(result, ~expected)
msg = "Invalid comparison between|Cannot compare type|not supported between"
with pytest.raises(TypeError, match=msg):
dta < other
with pytest.raises(TypeError, match=msg):
dta > other
with pytest.raises(TypeError, match=msg):
dta <= other
with pytest.raises(TypeError, match=msg):
dta >= other
def test_dt64arr_nat_comparison(self, tz_naive_fixture, box_with_array):
# GH#22242, GH#22163 DataFrame considered NaT == ts incorrectly
tz = tz_naive_fixture
box = box_with_array
ts = Timestamp("2021-01-01", tz=tz)
ser = Series([ts, NaT])
obj = tm.box_expected(ser, box)
xbox = get_upcast_box(obj, ts, True)
expected = Series([True, False], dtype=np.bool_)
expected = tm.box_expected(expected, xbox)
result = obj == ts
tm.assert_equal(result, expected)
class TestDatetime64SeriesComparison:
# TODO: moved from tests.series.test_operators; needs cleanup
@pytest.mark.parametrize(
"pair",
[
(
[Timestamp("2011-01-01"), NaT, Timestamp("2011-01-03")],
[NaT, NaT, Timestamp("2011-01-03")],
),
(
[Timedelta("1 days"), NaT, Timedelta("3 days")],
[NaT, NaT, Timedelta("3 days")],
),
(
[Period("2011-01", freq="M"), NaT, Period("2011-03", freq="M")],
[NaT, NaT, Period("2011-03", freq="M")],
),
],
)
@pytest.mark.parametrize("reverse", [True, False])
@pytest.mark.parametrize("dtype", [None, object])
@pytest.mark.parametrize(
"op, expected",
[
(operator.eq, Series([False, False, True])),
(operator.ne, Series([True, True, False])),
(operator.lt, Series([False, False, False])),
(operator.gt, Series([False, False, False])),
(operator.ge, Series([False, False, True])),
(operator.le, Series([False, False, True])),
],
)
def test_nat_comparisons(
self,
dtype,
index_or_series,
reverse,
pair,
op,
expected,
):
box = index_or_series
l, r = pair
if reverse:
# add lhs / rhs switched data
l, r = r, l
left = Series(l, dtype=dtype)
right = box(r, dtype=dtype)
result = op(left, right)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"data",
[
[Timestamp("2011-01-01"), NaT, Timestamp("2011-01-03")],
[Timedelta("1 days"), NaT, Timedelta("3 days")],
[Period("2011-01", freq="M"), NaT, Period("2011-03", freq="M")],
],
)
@pytest.mark.parametrize("dtype", [None, object])
def test_nat_comparisons_scalar(self, dtype, data, box_with_array):
box = box_with_array
left = Series(data, dtype=dtype)
left = tm.box_expected(left, box)
xbox = get_upcast_box(left, NaT, True)
expected = [False, False, False]
expected = tm.box_expected(expected, xbox)
if box is pd.array and dtype is object:
expected = pd.array(expected, dtype="bool")
tm.assert_equal(left == NaT, expected)
tm.assert_equal(NaT == left, expected)
expected = [True, True, True]
expected = tm.box_expected(expected, xbox)
if box is pd.array and dtype is object:
expected = pd.array(expected, dtype="bool")
tm.assert_equal(left != NaT, expected)
tm.assert_equal(NaT != left, expected)
expected = [False, False, False]
expected = tm.box_expected(expected, xbox)
if box is pd.array and dtype is object:
expected = pd.array(expected, dtype="bool")
tm.assert_equal(left < NaT, expected)
tm.assert_equal(NaT > left, expected)
tm.assert_equal(left <= NaT, expected)
tm.assert_equal(NaT >= left, expected)
tm.assert_equal(left > NaT, expected)
tm.assert_equal(NaT < left, expected)
tm.assert_equal(left >= NaT, expected)
tm.assert_equal(NaT <= left, expected)
@pytest.mark.parametrize("val", [datetime(2000, 1, 4), datetime(2000, 1, 5)])
def test_series_comparison_scalars(self, val):
series = Series(date_range("1/1/2000", periods=10))
result = series > val
expected = Series([x > val for x in series])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"left,right", [("lt", "gt"), ("le", "ge"), ("eq", "eq"), ("ne", "ne")]
)
def test_timestamp_compare_series(self, left, right):
# see gh-4982
# Make sure we can compare Timestamps on the right AND left hand side.
ser = Series(date_range("20010101", periods=10), name="dates")
s_nat = ser.copy(deep=True)
ser[0] = Timestamp("nat")
ser[3] = Timestamp("nat")
left_f = getattr(operator, left)
right_f = getattr(operator, right)
# No NaT
expected = left_f(ser, Timestamp("20010109"))
result = right_f(Timestamp("20010109"), ser)
tm.assert_series_equal(result, expected)
# NaT
expected = left_f(ser, Timestamp("nat"))
result = right_f(Timestamp("nat"), ser)
tm.assert_series_equal(result, expected)
# Compare to Timestamp with series containing NaT
expected = left_f(s_nat, Timestamp("20010109"))
result = right_f(Timestamp("20010109"), s_nat)
tm.assert_series_equal(result, expected)
# Compare to NaT with series containing NaT
expected = left_f(s_nat, NaT)
result = right_f(NaT, s_nat)
tm.assert_series_equal(result, expected)
def test_dt64arr_timestamp_equality(self, box_with_array):
# GH#11034
ser = Series([Timestamp("2000-01-29 01:59:00"), Timestamp("2000-01-30"), NaT])
ser = tm.box_expected(ser, box_with_array)
xbox = get_upcast_box(ser, ser, True)
result = ser != ser
expected = tm.box_expected([False, False, True], xbox)
tm.assert_equal(result, expected)
warn = FutureWarning if box_with_array is pd.DataFrame else None
with tm.assert_produces_warning(warn):
# alignment for frame vs series comparisons deprecated
result = ser != ser[0]
expected = tm.box_expected([False, True, True], xbox)
tm.assert_equal(result, expected)
with tm.assert_produces_warning(warn):
# alignment for frame vs series comparisons deprecated
result = ser != ser[2]
expected = tm.box_expected([True, True, True], xbox)
tm.assert_equal(result, expected)
result = ser == ser
expected = tm.box_expected([True, True, False], xbox)
tm.assert_equal(result, expected)
with tm.assert_produces_warning(warn):
# alignment for frame vs series comparisons deprecated
result = ser == ser[0]
expected = tm.box_expected([True, False, False], xbox)
tm.assert_equal(result, expected)
with tm.assert_produces_warning(warn):
# alignment for frame vs series comparisons deprecated
result = ser == ser[2]
expected = tm.box_expected([False, False, False], xbox)
tm.assert_equal(result, expected)
@pytest.mark.parametrize(
"datetimelike",
[
Timestamp("20130101"),
datetime(2013, 1, 1),
np.datetime64("2013-01-01T00:00", "ns"),
],
)
@pytest.mark.parametrize(
"op,expected",
[
(operator.lt, [True, False, False, False]),
(operator.le, [True, True, False, False]),
(operator.eq, [False, True, False, False]),
(operator.gt, [False, False, False, True]),
],
)
def test_dt64_compare_datetime_scalar(self, datetimelike, op, expected):
# GH#17965, test for ability to compare datetime64[ns] columns
# to datetimelike
ser = Series(
[
Timestamp("20120101"),
Timestamp("20130101"),
np.nan,
Timestamp("20130103"),
],
name="A",
)
result = op(ser, datetimelike)
expected = Series(expected, name="A")
tm.assert_series_equal(result, expected)
class TestDatetimeIndexComparisons:
# TODO: moved from tests.indexes.test_base; parametrize and de-duplicate
def test_comparators(self, comparison_op):
index = tm.makeDateIndex(100)
element = index[len(index) // 2]
element = Timestamp(element).to_datetime64()
arr = np.array(index)
arr_result = comparison_op(arr, element)
index_result = comparison_op(index, element)
assert isinstance(index_result, np.ndarray)
tm.assert_numpy_array_equal(arr_result, index_result)
@pytest.mark.parametrize(
"other",
[datetime(2016, 1, 1), Timestamp("2016-01-01"), np.datetime64("2016-01-01")],
)
def test_dti_cmp_datetimelike(self, other, tz_naive_fixture):
tz = tz_naive_fixture
dti = date_range("2016-01-01", periods=2, tz=tz)
if tz is not None:
if isinstance(other, np.datetime64):
# no tzaware version available
return
other = localize_pydatetime(other, dti.tzinfo)
result = dti == other
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
result = dti > other
expected = np.array([False, True])
tm.assert_numpy_array_equal(result, expected)
result = dti >= other
expected = np.array([True, True])
tm.assert_numpy_array_equal(result, expected)
result = dti < other
expected = np.array([False, False])
tm.assert_numpy_array_equal(result, expected)
result = dti <= other
expected = np.array([True, False])
tm.assert_numpy_array_equal(result, expected)
@pytest.mark.parametrize("dtype", [None, object])
def test_dti_cmp_nat(self, dtype, box_with_array):
left = DatetimeIndex([Timestamp("2011-01-01"), NaT, Timestamp("2011-01-03")])
right = DatetimeIndex([NaT, NaT, Timestamp("2011-01-03")])
left = tm.box_expected(left, box_with_array)
right = tm.box_expected(right, box_with_array)
xbox = get_upcast_box(left, right, True)
lhs, rhs = left, right
if dtype is object:
lhs, rhs = left.astype(object), right.astype(object)
result = rhs == lhs
expected = np.array([False, False, True])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
result = lhs != rhs
expected = np.array([True, True, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
expected = np.array([False, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(lhs == NaT, expected)
"""Create a synthetic population that is representative of Germany."""
from pathlib import Path
import numpy as np
import pandas as pd
import pytask
import sid
from sid.shared import factorize_assortative_variables
from src.config import BLD
from src.config import N_HOUSEHOLDS
from src.config import SRC
from src.create_initial_states.create_contact_model_group_ids import (
add_contact_model_group_ids,
)
from src.create_initial_states.create_vaccination_priority import (
create_vaccination_group,
)
from src.create_initial_states.create_vaccination_priority import (
create_vaccination_rank,
)
from src.prepare_data.task_prepare_rki_data import TRANSLATE_STATES
from src.shared import create_age_groups
from src.shared import create_age_groups_rki
_DEPENDENCIES = {
# py files
"sid_shared.py": Path(sid.__file__).parent.resolve() / "shared.py",
"shared.py": SRC / "shared.py",
"create_contact_model_group_ids": SRC
/ "create_initial_states"
/ "create_contact_model_group_ids.py",
"add_weekly_ids": SRC / "create_initial_states" / "add_weekly_ids.py",
"make_educ_group_columns": SRC
/ "create_initial_states"
/ "make_educ_group_columns.py",
"create_vaccination_priority": SRC
/ "create_initial_states"
/ "create_vaccination_priority.py",
"translations": SRC / "prepare_data" / "task_prepare_rki_data.py",
#
# data
"hh_data": SRC
/ "original_data"
/ "population_structure"
/ "microcensus2010_cf.dta",
"county_probabilities": BLD / "data" / "population_structure" / "counties.parquet",
"work_daily_dist": BLD
/ "contact_models"
/ "empirical_distributions"
/ "work_recurrent_daily.pkl",
"work_weekly_dist": BLD
/ "contact_models"
/ "empirical_distributions"
/ "work_recurrent_weekly.pkl",
"other_daily_dist": BLD
/ "contact_models"
/ "empirical_distributions"
/ "other_recurrent_daily.pkl",
"other_weekly_dist": BLD
/ "contact_models"
/ "empirical_distributions"
/ "other_recurrent_weekly.pkl",
"params": BLD / "params.pkl",
}
@pytask.mark.depends_on(_DEPENDENCIES)
@pytask.mark.parametrize(
"n_hhs, produces",
[
(N_HOUSEHOLDS, BLD / "data" / "initial_states.parquet"),
(100_000, BLD / "data" / "debug_initial_states.parquet"),
],
)
def task_create_initial_states_microcensus(depends_on, n_hhs, produces):
mc = pd.read_stata(depends_on["hh_data"])
county_probabilities = pd.read_parquet(depends_on["county_probabilities"])
work_daily_dist = | pd.read_pickle(depends_on["work_daily_dist"]) | pandas.read_pickle |
import pandas as pd
import urllib as ul
# Class definition
class SipSymp:
# Set the base URL
__base_url = 'http://www.ieice.org/ess/sip/symp/'
# Replacement string for <br />
__brstr = '@'
def __init__(self, year):
self.__year = year
# Build the program URL
self.__url = self.__base_url + str(self.__year) + '/?cmd=program'
# Read the HTML and replace line-break tags
fp = ul.request.urlopen(self.__url)
html = fp.read()
html = html.replace(b'<br />', self.__brstr.encode())
fp.close()
# Extract the program tables
tables = pd.read_html(html)
nTables = len(tables)
# Extract the title/author list (first evaluation year)
nRows = len(tables[1])
if nRows > 1:
self.__ser = tables[1].iloc[1:,1].str.strip()
else:
self.__ser = | pd.Series([]) | pandas.Series |
import os
import warnings
from typing import Iterator, List, Tuple
import numpy as np
import pandas as pd
from pandas import read_sql_query
from sqlalchemy import create_engine
from zipline.data.bundles import ingest, register
from zipline.utils.cli import maybe_show_progress
from app.models import Database
warnings.filterwarnings("ignore")
class DatabaseEngine:
def __init__(self, db_config: Database):
self._db_config = db_config
self._engine = create_engine(self._get_db_url())
def _get_db_url(self) -> str:
return f"postgresql://{self._db_config.user}:{self._db_config.password}@{self._db_config.netloc}:{self._db_config.port}/{self._db_config.dbname}" # noqa
def get_data(self, isins, from_date, to_date):
query = f"""SELECT open, high, low, close, volume, time FROM ohlc
INNER JOIN instrument ON instrument.isin = ohlc.isin
WHERE instrument.isin='{isins}' AND time BETWEEN '{from_date}' AND '{to_date}'
ORDER BY time asc"""
return read_sql_query(query, self._engine)
class SQLIngester:
def __init__(self, isins: List[str], from_date: str, to_date: str, engine: DatabaseEngine, **kwargs):
self.isins = isins
self.from_date = from_date
self.to_date = to_date
self._engine = engine
def create_metadata(self) -> pd.DataFrame:
return pd.DataFrame(
np.empty(
len(self.isins),
dtype=[
("start_date", "datetime64[ns]"),
("end_date", "datetime64[ns]"),
("auto_close_date", "datetime64[ns]"),
("symbol", "object"),
("exchange", "object"),
],
)
)
def get_stock_data(self, isins: str) -> pd.DataFrame:
data = self._engine.get_data(isins, self.from_date, self.to_date)
data["time"] = pd.to_datetime(data["time"])
data.rename(columns={"time": "Date"}, inplace=True, copy=False)
data.set_index("Date", inplace=True)
return data
def writer(self, show_progress: bool) -> Iterator[Tuple[int, pd.DataFrame]]:
with maybe_show_progress(self.isins, show_progress, label="Downloading from Yahoo") as it:
for index, isins in enumerate(it):
data = self.get_stock_data(isins)
data.dropna(
inplace=True
) # Yahoo can sometimes add duplicate rows on same date, one which is full or NaN
start_date = data.index[0]
end_date = data.index[-1]
autoclose_date = end_date + pd.Timedelta(days=1)
self._df_metadata.iloc[index] = start_date, end_date, autoclose_date, isins, "NASDAQ"
yield index, data
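# Editor's note (not part of the original ingester): zipline's daily_bar_writer.write()
# expects an iterable of (sid, dataframe) pairs, which is what writer() yields above --
# the enumeration index acts as the security id and `data` carries open/high/low/close/volume
# indexed by date. A hypothetical standalone call, with placeholder ISIN and dates:
#
#   ingester = SQLIngester(["ISIN0001"], "2020-01-01", "2020-12-31", engine)
#   ingester._df_metadata = ingester.create_metadata()
#   for sid, bars in ingester.writer(show_progress=False):
#       print(sid, bars.head())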
def __call__(
self,
environ,
asset_db_writer,
minute_bar_writer,
daily_bar_writer,
adjustment_writer,
calendar,
start_session,
end_session,
cache,
show_progress,
output_dir,
):
self._df_metadata = self.create_metadata()
daily_bar_writer.write(self.writer(show_progress), show_progress=show_progress)
asset_db_writer.write(equities=self._df_metadata)
adjustment_writer.write()
if __name__ == "__main__":
symbols = [sym.strip() for sym in os.environ.get("SYMBOLS").split(",")]
register(
"foreverbull",
SQLIngester(symbols, os.environ.get("FROM_DATE"), os.environ.get("TO_DATE")),
calendar_name="XFRA",
)
ingest("foreverbull", os.environ, | pd.Timestamp.utcnow() | pandas.Timestamp.utcnow |
# -*- coding: utf-8 -*-
"""
Created on Mar 8 2019
@author: <NAME>
email : <EMAIL>
"""
################################################################################
#Analysis with Random Forest
#Tested with Python 2.7 and Python 3.5 on Ubuntu Mate Release 16.04.5 LTS (Xenial Xerus) 64-bit
###############################################################################
'''
IMPORT LIBRARIES
'''
import numpy as np
import pandas as pd
import os
from cvd_ids_in_ukbb_normal_pca import find_cvds_ukbb
#from analyze_plots_ukbb import *
from mlxtend.feature_selection import SequentialFeatureSelector as SFS
from sklearn.preprocessing import MinMaxScaler, StandardScaler
from sklearn.model_selection import StratifiedKFold
import matplotlib.pyplot as plt
from sklearn.metrics import roc_curve, auc
from scipy import interp
from sklearn.ensemble import RandomForestClassifier
from mlxtend.plotting import plot_sequential_feature_selection as plot_sfs
def ROC_curve(X, y,setA,label,clf,name,path_to_save):
cv=StratifiedKFold(n_splits=10)
classifier = clf
tprs = []
aucs = []
mean_fpr = np.linspace(0, 1, 100)
i=0
for train, test in cv.split(X,y):
probas_ = classifier.fit(X[train], y[train]).predict_proba(X[test])
# Compute ROC curve and area the curve
fpr, tpr, thresholds = roc_curve(y[test], probas_[:, 1])
tprs.append(interp(mean_fpr, fpr, tpr))
tprs[-1][0] = 0.0
roc_auc = auc(fpr, tpr)
aucs.append(roc_auc)
# plt.plot(fpr, tpr, lw=1, alpha=0.3,
# label='ROC fold %d (AUC = %0.2f)' % (i, roc_auc))
i += 1
plt.plot([0, 1], [0, 1], linestyle='--', lw=2, color='r',
label='Chance', alpha=.8)
mean_tpr = np.mean(tprs, axis=0)
mean_tpr[-1] = 1.0
mean_auc = auc(mean_fpr, mean_tpr)
std_auc = np.std(aucs)
plt.plot(mean_fpr, mean_tpr, color='b',
label=r'Mean ROC (AUC = %0.2f $\pm$ %0.2f)' % (mean_auc, std_auc),
lw=2, alpha=.8)
std_tpr = np.std(tprs, axis=0)
tprs_upper = np.minimum(mean_tpr + std_tpr, 1)
tprs_lower = np.maximum(mean_tpr - std_tpr, 0)
plt.fill_between(mean_fpr, tprs_lower, tprs_upper, color='grey', alpha=.2,
label=r'$\pm$ 1 std. dev.')
plt.xlim([-0.05, 1.05])
plt.ylim([-0.05, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('ROC curve of %s using %s'%(setA[0], label))
plt.legend(loc="lower right")
#plt.show()
title='%s_%s_%s'%(setA[0],label,name)
title = title.replace(' ', '_')
plt.savefig(path_to_save+'ROC_%s.png'%title)
plt.close()
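# Editor's note (illustrative, not from the original script): the mean ROC curve above is
# built by interpolating each fold's TPR onto the shared mean_fpr grid so the folds can be
# averaged point-wise. That step in isolation, with made-up fold values:
#
# mean_fpr = np.linspace(0, 1, 100)
# fold_fpr = np.array([0.0, 0.2, 1.0]) # hypothetical single-fold result
# fold_tpr = np.array([0.0, 0.8, 1.0])
# tpr_on_grid = np.interp(mean_fpr, fold_fpr, fold_tpr)
# tpr_on_grid[0] = 0.0 # pin the curve at the origin, as done with tprs[-1][0] above
#
# np.interp behaves like the scipy.interp used above, which is deprecated in recent SciPy.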
def find_min_overlap(overlap_angina):
min_overlap_id = overlap_angina.values.argmin()
min_overlap_name =overlap_angina.columns[min_overlap_id]
return min_overlap_name
def get_conventional_indices (convention, nor_df_training):
'''
Get conventional indices for Normal Training
'''
conventional_indices_training_nor = convention.loc[convention['f.eid'].isin(nor_df_training['patient'])]
conventional_indices_training_nor = conventional_indices_training_nor.set_index('f.eid')
conventional_indices_training_nor = conventional_indices_training_nor.reindex(index = nor_df_training['patient'])
conventional_indices_training_nor=conventional_indices_training_nor.fillna(conventional_indices_training_nor.mean())
conventional_indices_LV_training = conventional_indices_training_nor.filter(regex=( 'LV'))
conventional_indices_LV_training =conventional_indices_LV_training.iloc[:,:-1]
conventional_indices_RV_training = conventional_indices_training_nor.filter(regex=('RV'))
conventional_indices_RV_training =conventional_indices_RV_training.iloc[:,:-1]
conventional_all_training_nor = pd.concat([conventional_indices_LV_training,conventional_indices_RV_training],axis=1)
return conventional_all_training_nor
file_feat=open('.../conditions.txt')
conditions=[]
with open(".../conditions.txt") as feat:
for line in feat:
f=line.strip()
f=line.split(",")
for i in range (len(f)):
conditions.append(str(f[i]))
os.chdir(".../Risk Factors_new conditions_even_cases/Diabetes_ML/")
### Define Risk factors to analyze
risk_factors =[
# ['high cholesterol'],
['diabetes'],
# ['hypertension',\
# 'essential hypertension']
]
cvds_samples=[]
cvd_classifier_acc=[]
acc_all=[]
models=[]
#### Take the UKBB files ##########################################################################################
#### these 3 files will be check for the cardiovascular diseases
## to find the samples
main_path_files = '.../Data Information/Files/'
#1-
conditions=pd.read_csv(main_path_files+'medical_conditions_.csv', low_memory=False)
#2-
history=pd.read_csv(main_path_files+'health_and_medical_history_.csv', low_memory=False)
#3-
outcomes=pd.read_csv(main_path_files+'health_related_outcomes_.csv', low_memory=False)
###
#### Take the conventional clinical indices to make the comparison of the results
convention =pd.read_csv(main_path_files+'imaging_heart_mri_qmul_oxford_decoded_headings_decoded_data_2017-May-17_1445_r4d.csv', low_memory=False)
## Get genetics data (if needed)
#genomics = pd.read_csv('genomics_decoded_headings_decoded_data_2017-May-17_1445_r4d.csv', low_memory=False)
#genomics=genomics.fillna(genomics.mean())
#genomics.drop(genomics.select_dtypes(['object']), inplace=True, axis=1)
#### Take the calculated radiomics features
radiomics_ukbb=pd.read_csv('.../cardiac radiomics for Application 2964/1. Radiomics results calculated.csv', low_memory=False)
#### Define the paths for the figures
#save_path_main='.../Risk Factors/Even_cases_reproduce/'
'''
Define different RF classifiers
'''
names = [
'RF_n_est1_maxdepth3_maxfeat_auto_gini','RF_n_est1_maxdepth3_maxfeat_sqrt_gini',
'RF_n_est1_maxdepth3_maxfeat_02_gini',
'RF_n_est16_maxdepth3_maxfeat_auto_gini','RF_n_est16_maxdepth3_maxfeat_sqrt_gini','RF_n_est16_maxdepth3_maxfeat_02_gini',
'RF_n_est32_maxdepth3_maxfeat_auto_gini','RF_n_est32_maxdepth3_maxfeat_sqrt_gini','RF_n_est32_maxdepth3_maxfeat_02_gini',
'RF_n_est100_maxdepth3_maxfeat_auto_gini','RF_n_est100_maxdepth3_maxfeat_sqrt_gini','RF_n_est100_maxdepth3_maxfeat_02_gini',
'RF_n_est1_maxdepth5_maxfeat_auto_gini','RF_n_est1_maxdepth5_maxfeat_sqrt_gini',
'RF_n_est1_maxdepth5_maxfeat_02_gini',
'RF_n_est16_maxdepth5_maxfeat_auto_gini','RF_n_est16_maxdepth5_maxfeat_sqrt_gini','RF_n_est16_maxdepth5_maxfeat_02_gini',
'RF_n_est32_maxdepth5_maxfeat_auto_gini','RF_n_est32_maxdepth5_maxfeat_sqrt_gini','RF_n_est32_maxdepth5_maxfeat_02_gini',
'RF_n_est100_maxdepth5_maxfeat_auto_gini','RF_n_est100_maxdepth5_maxfeat_sqrt_gini','RF_n_est100_maxdepth5_maxfeat_02_gini',
'RF_n_est1_maxdepth3_maxfeat_auto_entropy','RF_n_est1_maxdepth3_maxfeat_sqrt_entropy','RF_n_est1_maxdepth3_maxfeat_02_entropy',
'RF_n_est16_maxdepth3_maxfeat_auto_entropy','RF_n_est16_maxdepth3_maxfeat_sqrt_entropy','RF_n_est16_maxdepth3_maxfeat_02_entropy',
'RF_n_est32_maxdepth3_maxfeat_auto_entropy','RF_n_est32_maxdepth3_maxfeat_sqrt_entropy','RF_n_est32_maxdepth3_maxfeat_02_entropy',
'RF_n_est100_maxdepth3_maxfeat_auto_entropy','RF_n_est100_maxdepth3_maxfeat_sqrt_entropy','RF_n_est100_maxdepth3_maxfeat_02_entropy',
'RF_n_est1_maxdepth5_maxfeat_auto_entropy','RF_n_est1_maxdepth5_maxfeat_sqrt_entropy','RF_n_est1_maxdepth5_maxfeat_02_entropy',
'RF_n_est16_maxdepth5_maxfeat_auto_entropy','RF_n_est16_maxdepth5_maxfeat_sqrt_entropy','RF_n_est16_maxdepth5_maxfeat_02_entropy',
'RF_n_est32_maxdepth5_maxfeat_auto_entropy',
'RF_n_est32_maxdepth5_maxfeat_sqrt_entropy','RF_n_est32_maxdepth5_maxfeat_02_entropy',
'RF_n_est100_maxdepth5_maxfeat_auto_entropy','RF_n_est100_maxdepth5_maxfeat_sqrt_entropy','RF_n_est100_maxdepth5_maxfeat_02_entropy'
# "Random Forest"
# , "Neural Net",
]
classifiers = [
RandomForestClassifier(n_estimators=1, max_depth=3, max_features='auto', criterion='gini'),
RandomForestClassifier(n_estimators=1, max_depth=3, max_features='sqrt', criterion='gini'),
RandomForestClassifier(n_estimators=1, max_depth=3, max_features=0.2, criterion='gini'),
RandomForestClassifier(n_estimators=16, max_depth=3, max_features='auto', criterion='gini'),
RandomForestClassifier(n_estimators=16, max_depth=3, max_features='sqrt', criterion='gini'),
RandomForestClassifier(n_estimators=16, max_depth=3, max_features=0.2, criterion='gini'),
RandomForestClassifier(n_estimators=32, max_depth=3, max_features='auto', criterion='gini'),
RandomForestClassifier(n_estimators=32, max_depth=3, max_features='sqrt', criterion='gini'),
RandomForestClassifier(n_estimators=32, max_depth=3, max_features=0.2, criterion='gini'),
RandomForestClassifier(n_estimators=100, max_depth=3, max_features='auto', criterion='gini'),
RandomForestClassifier(n_estimators=100, max_depth=3, max_features='sqrt', criterion='gini'),
RandomForestClassifier(n_estimators=100, max_depth=3, max_features=0.2, criterion='gini'),
RandomForestClassifier(n_estimators=1, max_depth=5, max_features='auto', criterion='gini'),
RandomForestClassifier(n_estimators=1, max_depth=5, max_features='sqrt', criterion='gini'),
RandomForestClassifier(n_estimators=1, max_depth=5, max_features=0.2, criterion='gini'),
RandomForestClassifier(n_estimators=16, max_depth=5, max_features='auto', criterion='gini'),
RandomForestClassifier(n_estimators=16, max_depth=5, max_features='sqrt', criterion='gini'),
RandomForestClassifier(n_estimators=16, max_depth=5, max_features=0.2, criterion='gini'),
RandomForestClassifier(n_estimators=32, max_depth=5, max_features='auto', criterion='gini'),
RandomForestClassifier(n_estimators=32, max_depth=5, max_features='sqrt', criterion='gini'),
RandomForestClassifier(n_estimators=32, max_depth=5, max_features=0.2, criterion='gini'),
RandomForestClassifier(n_estimators=100, max_depth=5, max_features='auto', criterion='gini'),
RandomForestClassifier(n_estimators=100, max_depth=5, max_features='sqrt', criterion='gini'),
RandomForestClassifier(n_estimators=100, max_depth=5, max_features=0.2, criterion='gini'),
RandomForestClassifier(n_estimators=1, max_depth=3, max_features='auto', criterion='entropy'),
RandomForestClassifier(n_estimators=1, max_depth=3, max_features='sqrt', criterion='entropy'),
RandomForestClassifier(n_estimators=1, max_depth=3, max_features=0.2, criterion='entropy'),
RandomForestClassifier(n_estimators=16, max_depth=3, max_features='auto', criterion='entropy'),
RandomForestClassifier(n_estimators=16, max_depth=3, max_features='sqrt', criterion='entropy'),
RandomForestClassifier(n_estimators=16, max_depth=3, max_features=0.2, criterion='entropy'),
RandomForestClassifier(n_estimators=32, max_depth=3, max_features='auto', criterion='entropy'),
RandomForestClassifier(n_estimators=32, max_depth=3, max_features='sqrt', criterion='entropy'),
RandomForestClassifier(n_estimators=32, max_depth=3, max_features=0.2, criterion='entropy'),
RandomForestClassifier(n_estimators=100, max_depth=3, max_features='auto', criterion='entropy'),
RandomForestClassifier(n_estimators=100, max_depth=3, max_features='sqrt', criterion='entropy'),
RandomForestClassifier(n_estimators=100, max_depth=3, max_features=0.2, criterion='entropy'),
RandomForestClassifier(n_estimators=1, max_depth=5, max_features='auto', criterion='entropy'),
RandomForestClassifier(n_estimators=1, max_depth=5, max_features='sqrt', criterion='entropy'),
RandomForestClassifier(n_estimators=1, max_depth=5, max_features=0.2, criterion='entropy'),
RandomForestClassifier(n_estimators=16, max_depth=5, max_features='auto', criterion='entropy'),
RandomForestClassifier(n_estimators=16, max_depth=5, max_features='sqrt', criterion='entropy'),
RandomForestClassifier(n_estimators=16, max_depth=5, max_features=0.2, criterion='entropy'),
RandomForestClassifier(n_estimators=32, max_depth=5, max_features='auto', criterion='entropy'),
RandomForestClassifier(n_estimators=32, max_depth=5, max_features='sqrt', criterion='entropy'),
RandomForestClassifier(n_estimators=32, max_depth=5, max_features=0.2, criterion='entropy'),
RandomForestClassifier(n_estimators=100, max_depth=5, max_features='auto', criterion='entropy'),
RandomForestClassifier(n_estimators=100, max_depth=5, max_features='sqrt', criterion='entropy'),
RandomForestClassifier(n_estimators=100, max_depth=5, max_features=0.2, criterion='entropy'),
]
cvds_samples_all=[]
cvds_samples_random_selection=[]
cvd_classifier_acc=[]
acc_all=[]
cases=[]
models=[]
models_conv=[]
read_samples_path ='.../Results-Single_feat_Last/'
path_to_save_roc='.../Diabetes_ML/RF/'
for i in range(len(risk_factors)):
#### Define the set for each risk to analyze
setA= risk_factors[i]
# Find CVDs in UK Biobank data and add 'normal' cases as a new instance in the list, cvds_classify
# print('Analyzing %s in UK Biobank...'%(setA))
# [nor_df, setA_df, rest_cvds_classify] =find_cvds_ukbb(conditions,radiomics_ukbb, rest_cvds_classify, setA)
nor_df =pd.read_csv(read_samples_path+'normal_%s.csv'%setA[0])
setA_df =pd.read_csv(read_samples_path+'setA_df_%s.csv'%setA[0])
nor_conv= | pd.read_csv(read_samples_path+'normal_%s_conv.csv'%setA[0]) | pandas.read_csv |
import os,sys
import pandas as pd
import numpy as np
import subprocess
from tqdm import tqdm
from ras_method import ras_method
import warnings
warnings.filterwarnings('ignore')
def est_trade_value(x,output_new,sector):
"""
Function to estimate the trade value between two regions for a given sector
"""
if (sector != 'other1') and (sector != 'other2'):
sec_output = output_new.sum(axis=1).loc[output_new.sum(axis=1).index.get_level_values(1) == sector].reset_index()
else:
sec_output = output_new.sum(axis=1).loc[output_new.sum(axis=1).index.get_level_values(1) == 'IMP'].reset_index()
x['gdp'] = x.gdp*min(sec_output.loc[sec_output.region==x.reg1].values[0][2],sec_output.loc[sec_output.region==x.reg2].values[0][2])
return x
def estimate(table='INDEC',year=2015,print_output=False,print_progress=True):
"""
Function to create a province-level MRIO table, based on a national IO table. The default is the INDEC table.
"""
data_path = os.path.join('..','data')
# load sector data
sectors = list(pd.read_excel(os.path.join(data_path,'other_sources',
'industry_high_level_classification.xlsx'))['SEC_CODE'].values)
# load provincial mappers
reg_mapper = pd.read_excel(os.path.join(data_path,'INDEC','sh_cou_06_16.xls'),sheet_name='reg_mapper',header=None).iloc[:,:2]
reg_mapper = dict(zip(reg_mapper[0],reg_mapper[1]))
# load provincial data
prov_data = pd.read_excel(os.path.join(data_path,'INDEC','PIB_provincial_06_17.xls'),sheet_name='VBP',
skiprows=3,index_col=[0],header=[0],nrows=71)
prov_data = prov_data.loc[[x.isupper() for x in prov_data.index],:]
prov_data.columns = [x.replace(' ','_') for x in ['Ciudad de Buenos Aires', 'Buenos Aires', 'Catamarca', 'Cordoba',
'Corrientes', 'Chaco', 'Chubut', 'Entre Rios', 'Formosa', 'Jujuy',
'La Pampa', 'La Rioja', 'Mendoza', 'Misiones', 'Neuquen', 'Rio Negro',
'Salta', 'San Juan', 'San Luis', 'Santa Cruz', 'Santa Fe',
'Santiago del Estero', 'Tucuman', 'Tierra del Fuego',
'No distribuido', 'Total']]
region_names = list(prov_data.columns)[:-2]
prov_data.index = sectors+['TOTAL']
prov_data = prov_data.replace(0, 1)
### Create proxy data for first iteration
sectors+['other1','other2']
# proxy level 2
proxy_reg_arg = pd.DataFrame(prov_data.iloc[-1,:24]/prov_data.iloc[-1,:24].sum()).reset_index()
proxy_reg_arg['year'] = 2016
proxy_reg_arg = proxy_reg_arg[['year','index','TOTAL']]
proxy_reg_arg.columns = ['year','id','gdp']
proxy_reg_arg.to_csv(os.path.join('..','mrio_downscaling','proxy_reg_arg.csv'),index=False)
# proxy level 4
for iter_,sector in enumerate(sectors+['other1','other2']):
if (sector != 'other1') and (sector != 'other2'):
proxy_sector = pd.DataFrame(prov_data.iloc[iter_,:24]/prov_data.iloc[iter_,:24].sum()).reset_index()
proxy_sector['year'] = 2016
proxy_sector['sector'] = 'sec{}'.format(sector)
proxy_sector = proxy_sector[['year','sector','index',sector]]
proxy_sector.columns = ['year','sector','region','gdp']
proxy_sector.to_csv(os.path.join('..','mrio_downscaling','proxy_sec{}.csv'.format(sector)),index=False)
else:
proxy_sector = pd.DataFrame(prov_data.iloc[-1,:24]/prov_data.iloc[-1,:24].sum()).reset_index()
proxy_sector['year'] = 2016
proxy_sector['sector'] = sector+'1'
proxy_sector = proxy_sector[['year','sector','index','TOTAL']]
proxy_sector.columns = ['year','sector','region','gdp']
proxy_sector.to_csv(os.path.join('..','mrio_downscaling','proxy_{}.csv'.format(sector)),index=False)
# proxy level 18
def change_name(x):
if x in sectors:
return 'sec'+x
elif x == 'other1':
return 'other11'
else:
return 'other21'
mi_index = pd.MultiIndex.from_product([sectors+['other1','other2'], region_names, sectors+['other1','other2'], region_names],
names=['sec1', 'reg1','sec2','reg2'])
for iter_,sector in enumerate(sectors+['other1','other2']):
if (sector != 'other1') and (sector != 'other2'):
proxy_trade = pd.DataFrame(columns=['year','gdp'],index= mi_index).reset_index()
proxy_trade['year'] = 2016
proxy_trade['gdp'] = 0
proxy_trade = proxy_trade.query("reg1 != reg2")
proxy_trade = proxy_trade.loc[proxy_trade.sec1 == sector]
proxy_trade['sec1'] = proxy_trade.sec1.apply(change_name)
proxy_trade['sec2'] = proxy_trade.sec2.apply(change_name)
proxy_trade = proxy_trade[['year','sec1','reg1','sec2','reg2','gdp']]
proxy_trade.columns = ['year','sector','region','sector','region','gdp']
proxy_trade.to_csv(os.path.join('..','mrio_downscaling','proxy_trade_sec{}.csv'.format(sector)),index=False)
else:
proxy_trade = pd.DataFrame(columns=['year','gdp'],index= mi_index).reset_index()
proxy_trade['year'] = 2016
proxy_trade['gdp'] = 0
proxy_trade = proxy_trade.query("reg1 != reg2")
proxy_trade = proxy_trade.loc[proxy_trade.sec1 == sector]
proxy_trade['sec1'] = proxy_trade.sec1.apply(change_name)
proxy_trade['sec2'] = proxy_trade.sec2.apply(change_name)
proxy_trade = proxy_trade[['year','sec1','reg1','sec2','reg2','gdp']]
proxy_trade.columns = ['year','sector','region','sector','region','gdp']
proxy_trade.to_csv(os.path.join('..','mrio_downscaling','proxy_trade_{}.csv'.format(sector)),index=False)
"""
Create first version of MRIO for Argentina, without trade
"""
### save basetable for disaggregation usin the specific source:
basetable = pd.read_csv(os.path.join(data_path,'national_tables','{}_{}.csv'.format(year,table)),index_col=[0])
basetable.to_csv(os.path.join('..','mrio_downscaling','basetable.csv'),header=False,index=False)
### run libmrio
p = subprocess.Popen([r'..\mrio_downscaling\mrio_disaggregate', 'settings_notrade.yml'],
cwd=os.path.join('..','mrio_downscaling'))
p.wait()
### load data and reorder
region_names_list = [item for sublist in [[x]*(len(sectors)+2) for x in region_names]
for item in sublist]
rows = ([x for x in sectors+['VA','IMP']])*len(region_names)
cols = ([x for x in sectors+['FD','EXP']])*len(region_names)
index_mi = pd.MultiIndex.from_arrays([region_names_list, rows], names=('region', 'row'))
column_mi = pd.MultiIndex.from_arrays([region_names_list, cols], names=('region', 'col'))
MRIO = pd.read_csv(os.path.join('..','mrio_downscaling','output1.csv'),header=None,index_col=None)
MRIO.index = index_mi
MRIO.columns = column_mi
# create predefined index and col, which is easier to read
sector_only = [x for x in sectors]*len(region_names)
col_only = ['FD']*len(region_names)
region_col = [item for sublist in [[x]*len(sectors) for x in region_names] for item in sublist] + \
[item for sublist in [[x]*1 for x in region_names] for item in sublist]
column_mi_reorder = pd.MultiIndex.from_arrays(
[region_col, sector_only+col_only], names=('region', 'col'))
# sum va and imports
valueA = MRIO.xs('VA', level=1, axis=0).sum(axis=0)
valueA.drop('FD', level=1,axis=0,inplace=True)
valueA.drop('EXP', level=1,axis=0,inplace=True)
imports = MRIO.xs('IMP', level=1, axis=0).sum(axis=0)
imports.drop('FD', level=1,axis=0,inplace=True)
imports.drop('EXP', level=1,axis=0,inplace=True)
FinalD = MRIO.xs('FD', level=1, axis=1).sum(axis=1)
FinalD.drop('VA', level=1,axis=0,inplace=True)
FinalD.drop('IMP', level=1,axis=0,inplace=True)
Export = MRIO.xs('EXP', level=1, axis=1).sum(axis=1)
Export.drop('VA', level=1,axis=0,inplace=True)
Export.drop('IMP', level=1,axis=0,inplace=True)
output_new = MRIO.copy()
"""
Balance first MRIO version
"""
# convert to numpy matrix
X0 = MRIO.values
# get sum of rows and columns
u = X0.sum(axis=1)
v = X0.sum(axis=0)
# and only keep T
v[:(len(u)-2)] = u[:-2]
# apply RAS method to rebalance the table
X1 = ras_method(X0, u, v, eps=1e-5,print_out=print_output)
#translate to pandas dataframe
output_new = pd.DataFrame(X1)
output_new.index = index_mi
output_new.columns = column_mi
if print_progress:
print('NOTE : Balanced MRIO table without trade finished using {} data'.format(table))
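# Editor's sketch of the technique (ras_method itself lives in ras_method.py and is not
# shown here): RAS balancing is iterative proportional fitting -- alternately rescale the
# rows towards the row targets u and the columns towards the column targets v until both
# constraints are met. Under that assumption, a bare-bones version would be:
#
#   def ras_sketch(X, u, v, eps=1e-5, max_iter=10000):
#       X = X.astype(float).copy()
#       for _ in range(max_iter):
#           row_sums = X.sum(axis=1)
#           X *= np.divide(u, row_sums, out=np.ones_like(row_sums), where=row_sums != 0)[:, None]
#           col_sums = X.sum(axis=0)
#           X *= np.divide(v, col_sums, out=np.ones_like(col_sums), where=col_sums != 0)[None, :]
#           if max(np.abs(X.sum(axis=1) - u).max(), np.abs(X.sum(axis=0) - v).max()) < eps:
#               break
#       return X
#
# The real ras_method may differ in its convergence check and logging; this only documents
# the rebalancing idea applied to the MRIO table above.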
"""
Create second version of MRIO for Argentina, with trade
"""
### Load OD matrix
od_matrix_total = pd.DataFrame(pd.read_excel(os.path.join(data_path,'OD_data','province_ods.xlsx'),
sheet_name='total',index_col=[0,1],usecols =[0,1,2,3,4,5,6,7])).unstack(1).fillna(0)
od_matrix_total.columns.set_levels(['A','G','C','D','B','I'],level=0,inplace=True)
od_matrix_total.index = od_matrix_total.index.map(reg_mapper)
od_matrix_total = od_matrix_total.stack(0)
od_matrix_total.columns = od_matrix_total.columns.map(reg_mapper)
od_matrix_total = od_matrix_total.swaplevel(i=-2, j=-1, axis=0)
od_matrix_total = od_matrix_total.loc[:, od_matrix_total.columns.notnull()]
### Create proxy data
# proxy level 14
mi_index = pd.MultiIndex.from_product([sectors+['other1','other2'], region_names, region_names],
names=['sec1', 'reg1','reg2'])
for iter_,sector in enumerate((sectors+['other1','other2'])):
if sector in ['A','G','C','D','B','I']:
proxy_trade = (od_matrix_total.sum(level=1).divide(od_matrix_total.sum(level=1).sum(axis=1),axis='rows')).stack(0).reset_index()
proxy_trade.columns = ['reg1','reg2','gdp']
proxy_trade['year'] = 2016
proxy_trade = proxy_trade.apply(lambda x: est_trade_value(x,output_new,sector),axis=1)
proxy_trade['sec1'] = 'sec{}'.format(sector)
proxy_trade = proxy_trade[['year','sec1','reg1','reg2','gdp']]
proxy_trade.columns = ['year','sector','region','region','gdp']
proxy_trade.to_csv(os.path.join('..','mrio_downscaling','proxy_trade14_sec{}.csv'.format(sector)),index=False)
elif (sector != 'other1') and (sector != 'other2') and (sector not in ['A','G','C','D','B','I']): # & (sector not in ['L','M','N','O','P']):
proxy_trade = (od_matrix_total.sum(level=1).divide(od_matrix_total.sum(level=1).sum(axis=1),axis='rows')).stack(0).reset_index()
#proxy_trade[0].loc[(proxy_trade.origin_province == proxy_trade.destination_province)] = 0.9
#proxy_trade[0].loc[~(proxy_trade.origin_province == proxy_trade.destination_province)] = 0.1
proxy_trade.columns = ['reg1','reg2','gdp']
proxy_trade['year'] = 2016
proxy_trade = proxy_trade.apply(lambda x: est_trade_value(x,output_new,sector),axis=1)
proxy_trade['sec1'] = 'sec{}'.format(sector)
proxy_trade = proxy_trade[['year','sec1','reg1','reg2','gdp']]
proxy_trade.columns = ['year','sector','region','region','gdp']
proxy_trade.to_csv(os.path.join('..','mrio_downscaling','proxy_trade14_sec{}.csv'.format(sector)),index=False)
else:
proxy_trade = (od_matrix_total.sum(level=1).divide(od_matrix_total.sum(level=1).sum(axis=1),axis='rows')).stack(0).reset_index()
proxy_trade.columns = ['reg1','reg2','gdp']
proxy_trade['year'] = 2016
proxy_trade = proxy_trade.apply(lambda x: est_trade_value(x,output_new,sector),axis=1)
proxy_trade['sec1'] = sector+'1'
proxy_trade = proxy_trade[['year','sec1','reg1','reg2','gdp']]
proxy_trade.columns = ['year','sector','region','region','gdp']
proxy_trade.to_csv(os.path.join('..','mrio_downscaling','proxy_trade14_{}.csv'.format(sector)),index=False)
# proxy level 18
mi_index = pd.MultiIndex.from_product([sectors+['other1','other2'], region_names, sectors+['other1','other2'], region_names],
names=['sec1', 'reg1','sec2','reg2'])
for iter_,sector in enumerate((sectors+['other1','other2'])):
if (sector != 'other1') and (sector != 'other2'):
proxy_trade = pd.DataFrame(columns=['year','gdp'],index= mi_index).reset_index()
proxy_trade['year'] = 2016
proxy_trade['gdp'] = 0
proxy_trade = proxy_trade.query("reg1 != reg2")
proxy_trade = proxy_trade.loc[proxy_trade.sec1 == sector]
proxy_trade = proxy_trade.loc[proxy_trade.sec2.isin(['L','M','N','O','P'])]
proxy_trade['sec1'] = proxy_trade.sec1.apply(change_name)
proxy_trade['sec2'] = proxy_trade.sec2.apply(change_name)
proxy_trade = proxy_trade.query("reg1 == reg2")
proxy_trade = proxy_trade[['year','sec1','reg1','sec2','reg2','gdp']]
proxy_trade.columns = ['year','sector','region','sector','region','gdp']
proxy_trade.to_csv(os.path.join('..','mrio_downscaling','proxy_trade_sec{}.csv'.format(sector)),index=False)
else:
proxy_trade = pd.DataFrame(columns=['year','gdp'],index= mi_index).reset_index()
proxy_trade['year'] = 2016
proxy_trade['gdp'] = 0
proxy_trade = proxy_trade.query("reg1 != reg2")
proxy_trade = proxy_trade.loc[proxy_trade.sec1 == sector]
proxy_trade = proxy_trade.loc[proxy_trade.sec2.isin(['L','M','N','O','P'])]
proxy_trade['sec1'] = proxy_trade.sec1.apply(change_name)
proxy_trade['sec2'] = proxy_trade.sec2.apply(change_name)
proxy_trade = proxy_trade.query("reg1 == reg2")
proxy_trade = proxy_trade[['year','sec1','reg1','sec2','reg2','gdp']]
proxy_trade.columns = ['year','sector','region','sector','region','gdp']
proxy_trade.to_csv(os.path.join('..','mrio_downscaling','proxy_trade_{}.csv'.format(sector)),index=False)
### run libmrio
p = subprocess.Popen([r'..\mrio_downscaling\mrio_disaggregate', 'settings_trade.yml'],
cwd=os.path.join('..','mrio_downscaling'))
p.wait()
# load data and reorder
region_names_list = [item for sublist in [[x]*(len(sectors)+2) for x in region_names]
for item in sublist]
rows = ([x for x in sectors+['VA','IMP']])*len(region_names)
cols = ([x for x in sectors+['FD','EXP']])*len(region_names)
index_mi = | pd.MultiIndex.from_arrays([region_names_list, rows], names=('region', 'row')) | pandas.MultiIndex.from_arrays |
import dask
import glob
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
import numpy as np
import os
import pandas as pd
from pymicro.view.vol_utils import compute_affine_transform
import scipy
import scipy.optimize
import secrets
matplotlib.use("Agg")
TRACKID = "track"
X = "x"
Y = "y"
Z = "z"
FRAME = "frame"
CELLID = "cell"
def drop_matched(matched: pd.DataFrame, df1: pd.DataFrame, df2: pd.DataFrame):
"""Remove the matched rows from df1 and df2. Matched results from merging df1 with df2.
Important order of df1 and df2 matters."""
# extract df1 and df2 from matched
matched_x = matched[[x for x in matched.columns if "_x" in x]].copy()
matched_y = matched[[y for y in matched.columns if "_y" in y]].copy()
matched_x.columns = [x.replace("_x", "") for x in matched_x.columns]
matched_y.columns = [y.replace("_y", "") for y in matched_y.columns]
# Add frame column and reorder
matched_x[FRAME] = matched[FRAME]
matched_y[FRAME] = matched[FRAME]
matched_x[CELLID] = matched[CELLID]
matched_y[CELLID] = matched[CELLID]
matched_x = matched_x[df1.columns]
matched_y = matched_y[df2.columns]
df1_new = pd.concat([df1, matched_x])
df2_new = pd.concat([df2, matched_y])
df1_new = df1_new.drop_duplicates(keep=False)
df2_new = df2_new.drop_duplicates(keep=False)
return df1_new, df2_new
def filter_tracks(df: pd.DataFrame, min_length: int = 10) -> pd.DataFrame:
"""Filter tracks based on length.
Arg:
df: dataframe containing the tracked data
min_length: integer specifying the min track length
Return:
filtered data frame."""
df = df[[X, Y, Z, FRAME, TRACKID, CELLID]]
df = df[df[CELLID] != 0].copy()
distribution_length = df[TRACKID].value_counts()
selection = distribution_length.index.values[
distribution_length.values > min_length
]
df = df[df[TRACKID].isin(selection)]
return df
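# Illustrative usage added by the editor (the column constants are the ones defined at the
# top of this module; the values are invented):
#
# toy = pd.DataFrame({X: [0.1] * 12, Y: [0.2] * 12, Z: [0.3] * 12, FRAME: list(range(12)),
# TRACKID: [7] * 11 + [8], CELLID: [1] * 12})
# filter_tracks(toy, min_length=10) # keeps track 7 (11 points), drops track 8 (1 point)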
def filter_overlapping(df: pd.DataFrame, max_overlaps: float = 0.5):
"""Return data.frame where tracks with overlaps higher than max_overlaps are filtered out.
Args:
df: dataframe with tracks to stitch
max_overlaps: maximum fraction of track that can overlap.
Tracks with higher overlaps will be filtered out.
Return:
filtered dataframe.
"""
while True:
# count number of duplicated timepoints per track
duplicated = df[df[FRAME].isin(df[df[FRAME].duplicated()][FRAME])][
TRACKID
].value_counts()
if len(duplicated) < 1:
return df
# duplicated track id
duplicated_tracks = duplicated.index.values
# number of duplication
duplicated_values = duplicated.values
# count number of timepoints per track
count_tracks_length = df[TRACKID].value_counts()
# if number of track is 1, by definition there is no overlapping
if len(count_tracks_length) == 1:
return df
# count track length of overlapping tracks
count_tracks_overlapping = count_tracks_length[
count_tracks_length.index.isin(duplicated_tracks)
]
# extract track id of shortest overlapping tracks
shortest_track_overlapping_idx = count_tracks_overlapping.idxmin()
# too long overlaps?
toolong = False
for track, value in zip(duplicated_tracks, duplicated_values):
fraction = value / len(df[df[TRACKID] == track])
if fraction > max_overlaps:
toolong = True
# if we found too many overlaps, remove shortest track and restart
if toolong:
df = df[df[TRACKID] != shortest_track_overlapping_idx].copy()
# if no too long overlaps, remove duplicates and return dataframe
if not toolong:
df = df.drop_duplicates(FRAME)
return df
def stitch(df: pd.DataFrame, max_dist: float = 1.6, max_overlaps: float = 0.5):
"""Stitch tracks with the same cell id. If tracks overlap, filters out
tracks with overlap higher than max_overlaps. Overlapping frames are filtered out randomly.
Arg:
df: dataframe containing the tracked data.
max_dist: maximum distance to match tracks from the same cell.
max_overlaps: maximum overlap allowed for each track.
Return:
dataframe with stitched tracks."""
res = pd.DataFrame()
# loop over cell (stitch only tracks from same cell)
for cell, sub in df.groupby(CELLID):
# if we find any overlapping tracks, filter them out (either whole track or partial tracks)
if np.sum(sub[FRAME].duplicated()) > 0:
sub = filter_overlapping(df=sub, max_overlaps=max_overlaps)
sub = sub.sort_values(FRAME).reset_index(drop=True)
# if we have only 1 track, skip stitching
if len(sub[TRACKID].unique()) == 1:
res = pd.concat([res, sub])
continue
# look for jumps between tracks in time
idx = sub[sub[TRACKID].diff() != 0].index.values[
1:
] # remove first value id df which is always different from none
# all jumping track ids
trackids = sub.loc[np.unique([idx - 1, idx]), TRACKID].values
indexes = np.unique(trackids, return_index=True)[1]
trackids = np.array([trackids[index] for index in sorted(indexes)])
# find prev and after jump data frames (sub2 before, sub1 after jump)
sub1 = sub.loc[idx]
sub2 = sub.loc[idx - 1]
# vector containing all the stitched ids
lastidx = 0
stitched_trackids = [trackids[lastidx]]
for index in np.arange(len(trackids) - 1):
# if no jumping found between current and next track id, go back to previous tracks
back_iteration = 0
# loop until you find jumping between next track and some previous tracks
while True:
if index - back_iteration < 0:
raise ValueError(
"No jumping found between next track and any of previous one. Something is off.."
)
# select all transition between two tracks
selection = (
(sub1[TRACKID] == trackids[index - back_iteration]).values
& (sub2[TRACKID] == trackids[index + 1]).values
) | (
(sub1[TRACKID] == trackids[index + 1]).values
& (sub2[TRACKID] == trackids[index - back_iteration]).values
)
# if jumping between tracks occur multiple times, take the shortest distance
if np.sum(selection) > 0:
dists = np.sqrt(
np.sum(
np.square(
sub1.loc[selection, [X, Y, Z]].values
- sub2.loc[selection, [X, Y, Z]].values
),
axis=1,
)
)
dist = np.min(dists)
break
# if no jumping has been found, take previous track
else:
back_iteration += 1
# if jumping has found, check distance
if dist < max_dist:
sub.loc[
sub[TRACKID] == trackids[index + 1], TRACKID
] = stitched_trackids[-1 - back_iteration]
# if jumping is too big, record the unstitched track id
else:
lastidx = index + 1
stitched_trackids.append(trackids[lastidx])
res = pd.concat([res, sub])
return res
def calculate_distance_merged_channels(merged):
s1 = merged[[x + "_x" for x in [X, Y, Z]]].values
s2 = merged[[x + "_y" for x in [X, Y, Z]]].values
return np.sqrt(np.mean(np.sum((s1 - s2) ** 2, axis=1)))
def calculate_single_dist(sub_df1, sub_df2, cost=True):
"""Distance function for merging tracks. Eucledian distance scaled with sqrt of length.
If defined, cost for no-overlapping part of tracks is added."""
merged = pd.merge(sub_df1, sub_df2, how="inner", on=[FRAME])
if not len(merged):
return 9999999999
s1 = merged[[x + "_x" for x in [X, Y, Z]]].values
s2 = merged[[x + "_y" for x in [X, Y, Z]]].values
dist = np.mean(np.sum((s1 - s2) ** 2, axis=1)) / np.sqrt(len(merged))
if cost:
scaling = np.mean([len(sub_df1), len(sub_df2)]) / len(merged)
dist *= scaling
return dist
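# Editor's note on the metric above (description only, no change to behaviour): for the
# overlapping frames the score is mean(squared 3D distance) / sqrt(n_overlap), so longer
# overlaps are rewarded, and with cost=True it is additionally multiplied by
# mean(len(track1), len(track2)) / n_overlap, penalising pairs that overlap for only a small
# fraction of their lifetimes. Worked example with invented numbers: tracks of length 100
# and 80 overlapping on 60 frames with mean squared distance 0.5 score
# 0.5 / sqrt(60) * (90 / 60) ~= 0.097.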
@dask.delayed
def calculate_dist(sub_df1, df2, cost=True):
dist = []
for _, sub_df2 in df2.groupby(TRACKID):
dist.append(calculate_single_dist(sub_df1, sub_df2, cost=cost))
return dist
def calculate_distance(df1: pd.DataFrame, df2: pd.DataFrame, cost: bool = True):
"""Return the matrix of distances between tracks in true and pred.
Since we want to privilege long overlapping tracks, we will divide the average distance by the
square-root of the number of overlapping points.
Args:
df1: dataframe containing the first dataset (ground truth, channel 1 etc..)
df2: dataframe containing the second dataset (ground truth, channel 2 etc..)
cost: if defined, the distance will be scaled by the fraction of non-overlapping points
Return:
matrix of all pairwise distances.
"""
dist = []
column_length = df1[TRACKID].nunique()
row_length = df2[TRACKID].nunique()
for _, sub_df1 in df1.groupby(TRACKID):
dist.append(calculate_dist(sub_df1, df2, cost=cost))
dist = dask.compute(dist)
dist = np.array(dist)
return dist
def merge_channels(
df1: pd.DataFrame,
df2: pd.DataFrame,
cost: bool = True,
distance_cutoff: int = 2,
recursive: bool = True,
):
"""Return data frame containing the merged channel tracks
Args:
df1: dataframe containing the first dataset (ground truth, channel 1 etc..)
df2: dataframe containing the second dataset (ground truth, channel 2 etc..)
cost: if defined, the distance will be scaled by the fraction of non-overlapping points
distance_cutoff: cutoff on the average distance between tracks to be considered as corresponding track.
recursive: apply recursive matching of leftover of partially matched tracks.
Return:
data frame of merged datasers.
"""
results = pd.DataFrame()
while True:
# calculate distance between tracks
dist = calculate_distance(df1, df2, cost)
dist = dist.squeeze(axis=0)
# match tracks
rows, cols = scipy.optimize.linear_sum_assignment(dist)
remove = 0
for r, c in zip(rows, cols):
if dist[r, c] > distance_cutoff:
rows = rows[rows != r]
cols = cols[cols != c]
remove += 1
if len(rows) == 0 or len(cols) == 0:
break
# extract matched track ids
track_ids_df1 = []
for trackid, _ in df1.groupby(TRACKID):
track_ids_df1.append(trackid)
track_ids_df2 = []
for trackid, _ in df2.groupby(TRACKID):
track_ids_df2.append(trackid)
track_list_df1 = np.array([track_ids_df1[i] for i in rows])
track_list_df2 = np.array([track_ids_df2[i] for i in cols])
# record and drop matched part of tracks
for idx1, idx2 in zip(track_list_df1, track_list_df2):
sub1 = df1[df1[TRACKID] == idx1].copy()
sub2 = df2[df2[TRACKID] == idx2].copy()
tmp = pd.merge(sub1, sub2, on=[FRAME], how="inner").sort_values(FRAME)
tmp[CELLID] = tmp["cell_x"].astype(int)
if not len(tmp):
continue
df1, df2 = drop_matched(tmp, df1, df2)
if calculate_distance_merged_channels(tmp) <= distance_cutoff:
tmp["uniqueid"] = secrets.token_hex(16)
results = pd.concat([results, tmp])
if not recursive:
return results
return results
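# Editor's aside with a toy example: the matching above is the classic assignment-problem
# pattern -- build a track-by-track cost matrix, let scipy.optimize.linear_sum_assignment
# choose the globally cheapest one-to-one pairing, then drop pairs whose cost exceeds
# distance_cutoff (the loop over rows/cols above).
#
# cost = np.array([[0.2, 9.0],
#                  [8.0, 0.4]])
# rows, cols = scipy.optimize.linear_sum_assignment(cost)
# # rows -> [0, 1], cols -> [0, 1]: track 0 pairs with track 0, track 1 with track 1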
def register_points_using_euclidean_distance(
reference_file: str, moving_file: str, distance_cutoff: float = 0.1
):
"""Given file containing reference and moving coordinates, get the two sets of matched points"""
reference = | pd.read_csv(reference_file) | pandas.read_csv |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import sys
from sqlalchemy import create_engine
def load_data(filepath):
"""
A function that loads and returns a dataframe.
INPUT:
filepath: relative or absolute path to the data
RETURNS:
df: Dataframe
"""
df = pd.read_csv(filepath)
return df
def business_understanding(dataframe):
"""
A procedure to understand the business goal of the data.
Prints the columns of the dataframe
INPUT:
dataframe: The data's dataframe to be understood
RETURNS:
None
"""
columns = dataframe.columns
print("The HR has identified and curated data from employees using nine criteria, namely:")
for i, col in enumerate(columns):
print(f"{i+1}. {col} ")
def data_cleaning_and_understanding(dataframe):
"""
This procedure analyses the dataframe to answer the following questions:
1. How many employees have left the company so far?
2. Which group of employees have the tendency to leave most?
3. Find other details about the data
In particular, this function analyzes the percentage change of each sub-group in every column
between the original dataframe and the new dataframe involving employees that have left
INPUT:
dataframe: original dataframe
OUTPUT:
two series: the column identifying the group of employees most likely to leave, taken from
both the original dataframe and from the dataframe containing only the employees who have
already left
"""
df = dataframe
num_employees = df.shape[0]
num_of_valid_employees = df.dropna(how="any").shape[0]
num_of_missing_employees = df.shape[0] - num_of_valid_employees
df_of_left_employees = df.query('LeaveOrNot==1')
num_of_left_employees = df_of_left_employees.shape[0]
# Get all the columns except the "LeaveOrNot"
labels = df.columns[:-1]
# Search for Group with the most tendency to leave their employment
new = []
original = []
for label in labels:
elem = df_of_left_employees[label].value_counts()/num_of_left_employees
indiv = df[label].value_counts()/num_employees
new.append(elem)
original.append(indiv)
percent_change = [(a-b)*100 for a, b in zip(np.array(new, dtype='object'), np.array(original, dtype='object'))]
print('='*60)
if num_of_missing_employees != 0:
print(f"The dataset contains {num_of_valid_employees } valid employees.")
else:
print(f"The dataset contains {num_employees } number of employees.")
print('='*60)
print(f"{num_of_left_employees} employees have so far left the company")
print('='*60)
for x in percent_change:
print(x)
print('='*60)
# The group with the most tendency to leave their employment can be identified from the last printout above
# The group with most percentage change is "Female" employees
df_most_left_new = df_of_left_employees['Gender']
df_most_left_original = df['Gender']
return df_most_left_original, df_most_left_new
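# Worked illustration of the percentage-change logic above, with invented numbers: if 40% of
# all employees are female but 55% of the employees who left are female, the printed change
# for 'Female' is (0.55 - 0.40) * 100 = 15 percentage points, flagging that group as
# over-represented among leavers. The actual figures come from the printout, not from here.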
def create_dummy_df(df, dummy_na):
'''
INPUT:
df - pandas dataframe with categorical variables you want to dummy
dummy_na - Bool holding whether you want to dummy NA vals of categorical columns or not
OUTPUT:
df - a new dataframe that has the following characteristics:
1. contains all columns that were not specified as categorical
2. removes all the original columns that are categorical variables
3. dummy columns for each of the categorical columns
4. if dummy_na is True - it also contains dummy columns for the NaN values
5. Use a prefix of the column name with an underscore (_) for separating
'''
cat_df = df.select_dtypes(include=['object'])
#Create a copy of the dataframe
#Pull a list of the column names of the categorical variables
cat_cols = cat_df.columns
for col in cat_cols:
try:
# for each cat add dummy var, drop original column
df = pd.concat([df.drop(col, axis=1), | pd.get_dummies(df[col], prefix=col, prefix_sep='_', drop_first=True, dummy_na=dummy_na) | pandas.get_dummies |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author : <NAME>
# @Contact : <EMAIL>
from collections import Counter
import category_encoders.utils as util
import numpy as np
import pandas as pd
from pandas.core.common import SettingWithCopyWarning
from sklearn.base import BaseEstimator
from sklearn.base import TransformerMixin
from sklearn.impute import SimpleImputer as SklearnSimpleImputer
from sklearn.utils._testing import ignore_warnings
from .base import BaseImputer
class CategoricalImputer(BaseEstimator, TransformerMixin):
def __init__(
self,
strategy="most_frequent",
fill_value="<NULL>",
):
self.strategy = strategy
self.fill_value = fill_value
assert strategy in ("most_frequent", "constant"), ValueError(f"Invalid strategy {strategy}")
def fit(self, X, y=None):
X = util.convert_input(X)
self.columns = X.columns
self.statistics_ = np.array([self.fill_value] * len(self.columns), dtype='object')
if self.strategy == "most_frequent":
for i, column in enumerate(X.columns):
for value, counts in Counter(X[column]).most_common():
if not pd.isna(value):
self.statistics_[i] = value
break
return self
@ignore_warnings(category=SettingWithCopyWarning)
def transform(self, X):
# note: change inplace
for i, (column, dtype) in enumerate(zip(X.columns, X.dtypes)):
value = self.statistics_[i]
mask = | pd.isna(X[column]) | pandas.isna |
#!/usr/bin/env python3
# various functions and mixins for downstream genomic and epigenomic anlyses
import os
import glob
import re
import random
from datetime import datetime
import time
from pybedtools import BedTool
import pandas as pd
import numpy as np
from tqdm import tqdm_notebook, tqdm
# Get Current Git Commit Hash for version
path = [x.replace(' ', r'\ ') for x in os.popen('echo $PYTHONPATH').read().split(':') if 'dkfunctions' in x.split('/')]
if len(path) > 0:
version = os.popen(f'cd {path[0]}; git rev-parse HEAD').read()[:-1]
__version__ = f'v0.1, Git SHA1: {version}'
else:
__version__ = f'v0.1, {datetime.now():%Y-%m-%d}'
def val_folder(folder):
folder = folder if folder.endswith('/') else f'{folder}/'
folder = f'{os.getcwd()}/' if folder == '/' else folder
os.makedirs(folder, exist_ok=True)
return folder
def image_display(file):
from IPython.display import Image, display
display(Image(file))
def rplot(plot_func, filename, filetype, *args, **kwargs):
from rpy2.robjects.packages import importr
grdevices = importr('grDevices')
filetype = filetype.lower()
plot_types = {'png': grdevices.png,
'svg': grdevices.svg,
'pdf': grdevices.pdf
}
plot_types[filetype](f'{filename}.{filetype}')
return_object = plot_func(*args, **kwargs)
grdevices.dev_off()
if filetype == 'png':
image_display(f'{filename}.{filetype}')
return return_object
def read_pd(file, *args, **kwargs):
if (file.split('.')[-1] == 'txt') or (file.split('.')[-1] == 'tab'):
return pd.read_table(file, header=0, index_col=0, *args, **kwargs)
elif (file.split('.')[-1] == 'xls') or (file.split('.')[-1] == 'xlsx'):
return pd.read_excel(file, *args, **kwargs)
else:
raise IOError("Cannot parse count matrix. Make sure it is .txt, .xls, or .xlsx")
def rout_write(x):
'''
function for setting r_out to print to file instead of jupyter
rpy2.rinterface.set_writeconsole_regular(rout_write)
rpy2.rinterface.set_writeconsole_warnerror(rout_write)
'''
print(x, file=open(f'{os.getcwd()}/R_out_{datetime.now():%Y-%m-%d}.txt', 'a'))
def alert_me(text):
'''
Send me a pop-up alert on macOS.
'''
os.system(f'''osascript -e 'tell Application "System Events" to display dialog "{text}"' ''')
def tq_type():
environ = os.environ
jupyter = False
if '_' in environ.keys():
jupyter = True if os.environ['_'].endswith('jupyter') else False
elif 'MPLBACKEND' in environ.keys():
jupyter = True if 'ipykernel' in os.environ['MPLBACKEND'] else False
return tqdm_notebook if jupyter else tqdm
def peak_overlap_MC(df_dict, background, permutations=1000, seed=42, notebook=True):
'''
Monte Carlo simulation of peak overlaps in a given background
p-value calculated as the likelihood over the empirical random background overlap of shuffled peaks per chromosome.
Inputs
------
df_dict: dictionary of dataframes in bed format
background genome space: pybedtool bed of background genome space
permutations: number of permutations
seed: random seed
Returns
-------
pvalue
'''
np.random.seed(seed)
tq = tq_type()
# generate probability of chosing a chromosome region based on its size
bregions = background.to_dataframe()
bregions.index = range(len(bregions))
bregions['Size'] = bregions.iloc[:, 2] - bregions.iloc[:, 1]
total_size = bregions.Size.sum()
bregions['fraction'] = bregions.Size / total_size
bed_dict = {name: df.copy() for name, df in df_dict.items()}
# determine length of each peak region
for df in bed_dict.values():
df['Length'] = df.iloc[:, 2] - df.iloc[:, 1]
# determine baseline overlap intersect count of preshuffled peaks.
A, B = bed_dict.values()
overlap = len(BedTool.from_dataframe(A).sort().merge() + BedTool.from_dataframe(B).sort().merge())
results = []
for permutation in tq(range(permutations)):
for df in bed_dict.values():
# randomly pick a region in the background based on size distribution of the regions
index_list = bregions.index.tolist()
df_size = len(df)
bregions_fraction = bregions.fraction
first_pick = np.random.choice(index_list, size=df_size, p=bregions_fraction)
lengths = df.Length.tolist()
alternatives = np.random.choice(index_list, size=df_size, p=bregions_fraction)
# repick regions if the peak length is larger than the region size (this part can be optimized)
regions = []
new_pick = 0
for reg, length in zip(first_pick, lengths):
reg_length = bregions.iloc[reg, 2] - bregions.iloc[reg, 1]
if reg_length > length:
regions.append(reg)
else:
while reg_length <= length:
new_reg = alternatives[new_pick]
reg_length = bregions.iloc[new_reg, 2] - bregions.iloc[new_reg, 1]
new_pick += 1
regions.append(new_reg)
# assign the chromosome
df.iloc[:, 0] = [bregions.iloc[x, 0] for x in regions]
# randomly pick a start within the selected background region within the peak size constraints
df.iloc[:, 1] = [np.random.randint(bregions.iloc[reg, 1], bregions.iloc[reg, 2] - length) for length, reg in zip(lengths, regions)]
# assign end based on peak length
df.iloc[:, 2] = df.iloc[:, 1] + df.Length
new_overlap = len(BedTool.from_dataframe(A).sort().merge() + BedTool.from_dataframe(B).sort().merge())
results.append(1 if new_overlap >= overlap else 0)
p = (sum(results) + 1) / (len(results) + 1)
A_name, B_name = df_dict.keys()
print(f'Number of intersected peaks of {A_name} and {B_name}: {overlap}')
print(f'Number of times simulated intersections exceeded or equaled the actual overlap: {sum(results)}')
print(f'Monte Carlo p-value estimate: {p}')
return p
'''
Implementation of an Enrichr API with graphs
Author: <NAME>
'''
def post_genes(gene_list, description):
'''
posts gene list to Enrichr
Returns
-------
dictionary: userListId, shortId
'''
import json
import requests
ENRICHR_URL = 'http://amp.pharm.mssm.edu/Enrichr/addList'
genes_str = '\n'.join([str(x) for x in gene_list])
payload = {'list': (None, genes_str),
'description': (None, description)
}
response = requests.post(ENRICHR_URL, files=payload)
if not response.ok:
raise Exception('Error analyzing gene list')
return json.loads(response.text)
def enrich(userListId, filename, gene_set_library):
'''
Returns
-------
Text file of enrichment results
'''
import requests
ENRICHR_URL = 'http://amp.pharm.mssm.edu/Enrichr/export'
query_string = '?userListId=%s&filename=%s&backgroundType=%s'
url = ENRICHR_URL + query_string % (userListId, filename, gene_set_library)
response = requests.get(url, stream=True)
with open(filename, 'wb') as f:
for chunk in response.iter_content(chunk_size=1024):
if chunk:
f.write(chunk)
return response
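# Hypothetical end-to-end call added for documentation (the gene symbols and filename are
# placeholders; the endpoints and the 'userListId' key are the ones used above):
#
# resp = post_genes(['TP53', 'MYC', 'EGFR'], 'example list')
# enrich(resp['userListId'], 'example_KEGG_2016.enrichr.txt', 'KEGG_2016')
#
# post_genes returns the userListId that the export endpoint needs, and enrich() streams the
# tab-separated enrichment table to the given filename.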
def enrichr_barplot(filename, gene_library, out_dir, description, max_n=20,
q_thresh=0.05, color='slategray', display_image=True):
'''
Saves barplot from Enrichr results
Parameters
----------
filename: enrichr response file
gene_library: gene set library to test
out_dir: result output folder
description: sample or gene set source name
max_n: max number of significant to display
q_thresh: qvalue threshold
color: plot color
display_image: bool
Return
------
None
'''
import seaborn as sns
import matplotlib.pyplot as plt
e_df = pd.read_csv(filename, header=0, sep="\t").sort_values(by=['Adjusted P-value']).head(max_n)
e_df['Clean_term'] = e_df.Term.apply(lambda x: x.split("_")[0])
e_df['log_q'] = -np.log10(e_df['Adjusted P-value'])
plt.clf()
sns.set(context='paper', font='Arial', font_scale=1.2, style='white',
rc={'figure.dpi': 300, 'figure.figsize': (8, 6)}
)
fig, ax = plt.subplots()
fig.suptitle(f'{description} {gene_library.replace("_", " ")} enrichment\n(q<{q_thresh}, max {max_n})')
sig = e_df[e_df['Adjusted P-value'] <= q_thresh].copy()
if len(sig) > 0:
g = sns.barplot(data=sig, x='log_q', y='Clean_term', color=color, ax=ax)
plt.xlabel('q-value (-log$_{10}$)')
plt.ylabel('Enrichment Term')
ymin, ymax = g.get_ylim()
g.vlines(x=-np.log10(q_thresh), ymin=ymin, ymax=ymax, colors='k',
linestyles='dashed', label=f'q = {q_thresh}')
g.legend()
sns.despine()
else:
ax.text(0.5, 0.5, 'No Significant Enrichments.',
horizontalalignment='center',
verticalalignment='center',
transform=ax.transAxes
)
try:
plt.tight_layout(h_pad=1, w_pad=1)
except ValueError:
pass
plt.subplots_adjust(top=0.88)
file = f'{out_dir}{description}_{gene_library}_enrichr.barplot.png'
fig.savefig(file, dpi=300)
plt.close()
image_display(file)
def enrichr(dict_of_genelists, out_dir, dict_of_genelibraries=None, display=True,
q_thresh=0.05, plot_color='slategray', max_n=20 ):
'''
Runs enrichment analysis through Enrichr and plots results
Parameters
----------
dict_of_genelists: dictionary of description to genelists
dict_of_genelibraries: dictionary of enrichr gene libraries to test against
If None, will use default libraries
display: bool whether to display inline
q_thresh: qvalue threshold
plot_color: bar color used in the plots
max_n: max number of significant terms to display
'''
out_dir = out_dir if out_dir.endswith('/') else f'{out_dir}/'
gene_libraries ={'KEGG': 'KEGG_2016',
'GO_Biological_Process': 'GO_Biological_Process_2018',
'ChIP-X_Consensus_TFs': 'ENCODE_and_ChEA_Consensus_TFs_from_ChIP-X',
'ChEA': 'ChEA_2016',
'OMIM_Disease': 'OMIM_Disease'
}
libraries = gene_libraries if dict_of_genelibraries is None else dict_of_genelibraries
generator = ((d,g,l,gl) for d,g in dict_of_genelists.items()
for l, gl in libraries.items()
)
for description, genes, library, gene_library in generator:
filename=f'{out_dir}{description}_{library}.enrichr.txt'
post = post_genes(genes, description)
get = enrich(post['userListId'], filename, gene_library)
if get.ok:
enrichr_barplot(filename=filename, gene_library=library, out_dir=out_dir, description=description,
max_n=max_n,q_thresh=q_thresh, color=plot_color, display_image=display)
else:
print(f'Enrichr error: {library}, {description}')
'''
end enrichr
'''
def gsea_dotplot(df_dict, title='', qthresh=0.05, top_term=None, gene_sets=[], dotsize_factor=4, figsize=(4, 10), out_dir='.'):
'''
Makes a dotplot of GSEA results with the dot size as the percent of genes in the leading edge and the color the NES.
Plots only significant dots at a given FDR threshold
Inputs
------
df_dict: dictionary of named GSEA results for the analysis. pandas df of gsea_report.xls (use pd.concat to combine pos and neg enrichments)
title: name used for title and filename
qthresh: q-value threshold for inclusion
gene_sets: list of gene sets to plot. If empty, will plot all with FDR q value < 0.05
top_term: integer specifying top number of sets to plot (by qvalue). None plots all.
dotsize_factor: scale to increase dot size for leading edge %
out_dir: output directory
Returns
-------
Gene_Sets used for plotting
'''
import matplotlib.pyplot as plt
import seaborn as sns
out_dir = val_folder(out_dir)
index = []
# get leading edge percentages
for df in df_dict.values():
if 'NAME' in df.columns.tolist():
df.index = df.NAME
df['le_tags'] = df['LEADING EDGE'].apply(lambda x: x.split('%')[0].split('=')[-1])
df.sort_values(by='NES', ascending=False, inplace=True)
index += df[df['FDR q-val'] < 0.05].index.tolist()
index = list(set(index))
# use gene_sets if provided
if len(gene_sets) > 0:
index = gene_sets
# make master df
data_df = pd.DataFrame()
for name, df in df_dict.items():
df['sample_name'] = name
data_df = pd.concat([data_df, df.loc[index]])
# extra filters
data_df = data_df[data_df.sample_name.notna()]
if top_term:
index = list(set(data_df.sort_values(by='FDR q-val').head(top_term).index.tolist()))
# reindex
data_df['GS_NAME'] = data_df.index
data_df.index = range(len(data_df))
# make x coordinate
samples = data_df.sample_name.unique()
sample_number = len(samples)
sample_x = {name: (x + .5) for name, x in zip(samples, range(sample_number))}
data_df['x'] = data_df.sample_name.map(sample_x)
# make y coordinate
gene_set = list(index[::-1])
gene_set_number = len(gene_set)
sample_y = {name: y for name, y in zip(gene_set, range(gene_set_number))}
data_df['y'] = data_df.GS_NAME.map(sample_y)
# filter for significance and make dot size from leading edge percentage
data_df['sig_tags'] = data_df[['FDR q-val', 'le_tags']].apply(lambda x: 0 if float(x[0]) > qthresh else float(x[1]), axis=1)
data_df['area'] = data_df['sig_tags'] * dotsize_factor
plot_df = data_df[data_df.GS_NAME.isin(index)].copy()
# plot
plt.clf()
sns.set(context='paper', style='white', font='Arial', rc={'figure.dpi': 300})
fig, ax = plt.subplots(figsize=figsize)
sc = ax.scatter(x=plot_df.x, y=plot_df.y, s=plot_df.area, edgecolors='face', c=plot_df.NES, cmap='RdBu_r')
# format y axis
ax.yaxis.set_major_locator(plt.FixedLocator(plot_df.y))
ax.yaxis.set_major_formatter(plt.FixedFormatter(plot_df.GS_NAME))
ax.set_yticklabels(plot_df.GS_NAME.apply(lambda x: x.replace('_', ' ')), fontsize=16)
# format x axis
ax.set_xlim(0, sample_number)
ax.xaxis.set_major_locator(plt.FixedLocator(plot_df.x))
ax.xaxis.set_major_formatter(plt.FixedFormatter(plot_df.sample_name))
ax.set_xticklabels(plot_df.sample_name, fontsize=16, rotation=45)
# add colorbar
cax = fig.add_axes([0.95, 0.20, 0.03, 0.22])
cbar = fig.colorbar(sc, cax=cax,)
cbar.ax.tick_params(right=True)
cbar.ax.set_title('NES', loc='left', fontsize=12)
cbar.ax.tick_params(labelsize=10)
# add legend
markers = []
min_value = plot_df[plot_df.sig_tags > 0].sig_tags.min()
max_value = plot_df.sig_tags.max()
rounded_min = int(10 * round((min_value - 5) / 10))
rounded_max = int(10 * round((max_value + 5) / 10)) # rounds up to nearest ten (ie 61 --> 70)
sizes = [x for x in range(rounded_min, rounded_max + 1, 10)]
for size in sizes:
markers.append(ax.scatter([], [], s=size * dotsize_factor, c='k'))
legend = ax.legend(markers, sizes, prop={'size': 12})
legend.set_title('Leading Edge (%)', prop={'size': 12})
# offset legend
bb = legend.get_bbox_to_anchor().transformed(ax.transAxes.inverted())
xOffset = .6
yOffset = 0
bb.x0 += xOffset
bb.x1 += xOffset
bb.y0 += yOffset
bb.y1 += yOffset
legend.set_bbox_to_anchor(bb, transform=ax.transAxes)
# set title
ax.set_title(title.replace('_', ' '), fontsize=20)
sns.despine()
fig.savefig(f'{out_dir}{title.replace(" ", "_")}.png', bbox_inches='tight')
fig.savefig(f'{out_dir}{title.replace(" ", "_")}.svg', bbox_inches='tight')
return plot_df
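# Hypothetical usage sketch (added for illustration; not in the original source).
# The GSEA report file names are placeholders for the pos/neg .xls summary tables
# that a standard GSEA run writes out; pos and neg reports are concatenated per
# comparison as the docstring above suggests.
def _example_gsea_dotplot_usage():
    pos = pd.read_table('gsea_report_for_na_pos.xls')
    neg = pd.read_table('gsea_report_for_na_neg.xls')
    df_dict = {'KO_vs_WT': pd.concat([pos, neg])}
    return gsea_dotplot(df_dict,
                        title='Hallmark GSEA',
                        qthresh=0.05,
                        top_term=15,
                        out_dir='gsea_dotplots/')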
def annotate_peaks(dict_of_dfs, folder, genome, db='UCSC', check=False, TSS=[-3000,3000], clean=False):
'''
Annotate a dictionary of dataframes from bed files to the genome using ChIPseeker and Ensembl annotations.
Inputs
------
dict_of_dfs: dictionary of bed-like dataframes (chr, start, end as the first three columns)
folder: output folder
genome: hg38, hg19, mm10
db: default UCSC, but can also accept Ensembl
TSS: list of regions around TSS to annotate as promoter
check: bool. checks whether an annotation file already exists and loads it instead of reannotating
clean: bool. if True, simplifies the annotation column into Promoter/Intron/Exon/Intergenic categories
Returns
-------
dictionary of annotated bed files as dataframe
'''
import rpy2.robjects as ro
import rpy2.rinterface as ri
from rpy2.robjects.packages import importr
from rpy2.robjects import pandas2ri
pandas2ri.activate()
tq = tq_type()
ri.set_writeconsole_regular(rout_write)
ri.set_writeconsole_warnerror(rout_write)
chipseeker = importr('ChIPseeker')
genomicFeatures = importr('GenomicFeatures')
makeGR = ro.r("makeGRangesFromDataFrame")
as_df = ro.r("as.data.frame")
check_df = {key: os.path.isfile(f'{folder}{key.replace(" ","_")}_annotated.txt') for key in dict_of_dfs.keys()}
return_bool = False not in set(check_df.values())
if return_bool & check:
return {f'{key}_annotated': pd.read_csv(f'{folder}{key.replace(" ","_")}_annotated.txt', index_col=0, header=0, sep="\t") for key in dict_of_dfs.keys()}
species = ('Mmusculus' if genome.lower() == 'mm10' else 'Hsapiens')
if db.lower() == 'ucsc':
TxDb = importr(f'TxDb.{species}.UCSC.{genome.lower()}.knownGene')
txdb = ro.r(f'txdb <- TxDb.{species}.UCSC.{genome.lower()}.knownGene')
elif db.lower() == 'ensembl':
TxDb = importr(f'TxDb.{species}.UCSC.{genome.lower()}.ensGene')
txdb = ro.r(f'txdb <- TxDb.{species}.UCSC.{genome.lower()}.ensGene')
else:
raise ValueError('UCSC or Ensembl only.')
os.makedirs(folder, exist_ok=True)
if genome.lower() == 'mm10':
annoDb = importr('org.Mm.eg.db')
anno = 'org.Mm.eg.db'
elif genome.lower() == 'hg38' or genome.lower() == 'hg19':
annoDb = importr('org.Hs.eg.db')
anno = 'org.Hs.eg.db'
return_dict = {}
print('Annotating Peaks...')
for key, df in tq(dict_of_dfs.items()):
if check & check_df[key]:
return_dict[f'{key}_annotated'] = pd.read_csv(f'{folder}{key.replace(" ","_")}_annotated.txt', index_col=0, header=0, sep="\t")
else:
col_len = len(df.columns)
df.columns = ["chr", "start", "end"] + list(range(col_len - 3))
GR = makeGR(df)
GR_anno = chipseeker.annotatePeak(GR, overlap='TSS', TxDb=txdb, annoDb=anno, tssRegion=ro.IntVector(TSS)) #switched to TSS on 10/02/2019
return_dict[f'{key}_annotated'] = ro.pandas2ri.ri2py(chipseeker.as_data_frame_csAnno(GR_anno))
return_dict[f'{key}_annotated'].to_excel(f'{folder}{key.replace(" ","_")}_annotated.xlsx')
if clean:
for k,df in return_dict.items():
df['Anno'] = df.annotation.apply(lambda x: 'Promoter' if x.split(' ')[0] == 'Promoter' else x)
df['Anno'] = df.Anno.apply(lambda x: 'Intergenic' if x.split(' ')[0] in ['Downstream', 'Distal'] else x)
df['Anno'] = df.Anno.apply(lambda x: x.split(' ')[0] if x.split(' ')[0] in ['Intron', 'Exon'] else x)
return return_dict
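# Hypothetical usage sketch (added for illustration; not in the original source).
# Assumes pybedtools is installed, the peak file path is a placeholder, and the R
# side (rpy2, ChIPseeker, the TxDb/org.*.eg.db packages) is available as required
# by annotate_peaks above.
def _example_annotate_peaks_usage():
    from pybedtools import BedTool
    peaks_df = BedTool('H3K27ac_peaks.bed').to_dataframe()
    annotated = annotate_peaks({'H3K27ac': peaks_df},
                               folder='peak_annotation/',
                               genome='hg38',
                               db='UCSC',
                               clean=True)
    return annotated['H3K27ac_annotated']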
def plot_venn2(Series, string_name_of_overlap, folder):
'''
Series with overlap counts ordered as 10, 01, 11 (A only, B only, A&B).
Plots a 2 way venn.
Saves to file.
'''
import matplotlib.pyplot as plt
import seaborn as sns
from matplotlib_venn import venn2, venn2_circles
folder = f'{folder}venn2/' if folder.endswith('/') else f'{folder}/venn2/'
os.makedirs(folder, exist_ok=True)
plt.figure(figsize=(7, 7))
font = {'family': 'sans-serif',
'weight': 'normal',
'size': 16,
}
plt.rc('font', **font)
# make venn
sns.set(style='white', font='Arial')
venn_plot = venn2(subsets=(Series.iloc[0], Series.iloc[1], Series.iloc[2]), set_labels=[name.replace('_', ' ') for name in Series.index.tolist()])
patch = ['10', '01', '11']
colors = ['green', 'blue', 'teal']
for patch, color in zip(patch, colors):
venn_plot.get_patch_by_id(patch).set_color('none')
venn_plot.get_patch_by_id(patch).set_alpha(.4)
venn_plot.get_patch_by_id(patch).set_edgecolor('none')
c = venn2_circles(subsets=(Series.iloc[0], Series.iloc[1], Series.iloc[2]))
colors_test = ['green', 'blue']
for circle, color in zip(c, colors_test):
circle.set_edgecolor(color)
circle.set_alpha(0.8)
circle.set_linewidth(3)
plt.title(string_name_of_overlap.replace('_', ' ') + " overlaps")
plt.tight_layout()
name = string_name_of_overlap.replace('_', ' ').replace('\n', '_')
plt.savefig(f"{folder}{name}-overlap.svg")
plt.savefig(f"{folder}{name}-overlap.png", dpi=300)
plt.close()
image_display(f"{folder}{name}-overlap.png")
def plot_venn2_set(dict_of_sets, string_name_of_overlap, folder, pvalue=False, total_genes=None):
'''
Plots a 2 way venn from a dictionary of sets
Saves to file.
Inputs
------
dict_of_sets: dictionary of sets to overlap
string_name_of_overlap: string with name of overlap
folder: output folder
Returns
-------
None
'''
import matplotlib.pyplot as plt
import seaborn as sns
from matplotlib_venn import venn2, venn2_circles
from scipy import stats
folder = f'{folder}venn2/' if folder.endswith('/') else f'{folder}/venn2/'
os.makedirs(folder, exist_ok=True)
plt.figure(figsize=(7, 7))
font = {'family': 'sans-serif',
'weight': 'normal',
'size': 16,
}
plt.rc('font', **font)
set_list = []
set_names = []
for name, setlist in dict_of_sets.items():
set_list.append(setlist)
set_names.append(name.replace('_', ' '))
# make venn
sns.set(style='white', font='Arial')
venn_plot = venn2(subsets=set_list, set_labels=set_names)
patch = ['10', '01', '11']
colors = ['green', 'blue', 'teal']
for patch, color in zip(patch, colors):
venn_plot.get_patch_by_id(patch).set_color('none')
venn_plot.get_patch_by_id(patch).set_alpha(.4)
venn_plot.get_patch_by_id(patch).set_edgecolor('none')
c = venn2_circles(subsets=set_list)
colors_test = ['green', 'blue']
for circle, color in zip(c, colors_test):
circle.set_edgecolor(color)
circle.set_alpha(0.8)
circle.set_linewidth(3)
if None not in [pvalue, total_genes]:
intersection_N = len(set_list[0] & set_list[1])
pvalue = stats.hypergeom.sf(intersection_N, total_genes, len(set_list[0]), len(set_list[1]))
pvalue_string = f'= {pvalue:.03g}' if pvalue > 1e-5 else '< 1e-5'
plt.text(0, -.05, f'p-value {pvalue_string}', fontsize=10, transform=c[1].axes.transAxes)
plt.title(string_name_of_overlap.replace('_', ' ') + " overlaps")
plt.tight_layout()
plt.savefig(f"{folder}{string_name_of_overlap.replace(' ', '_')}-overlap.svg")
plt.savefig(f"{folder}{string_name_of_overlap.replace(' ', '_')}-overlap.png", dpi=300)
plt.close()
image_display(f"{folder}{string_name_of_overlap.replace(' ', '_')}-overlap.png")
def plot_venn3_set(dict_of_sets, string_name_of_overlap, folder):
'''
Makes 3 way venn from 3 sets.
Saves to file.
Inputs
------
dict_of_sets: dictionary of sets to overlap
string_name_of_overlap: string with name of overlap
folder: output folder
Returns
-------
None
'''
import matplotlib.pyplot as plt
import seaborn as sns
from matplotlib_venn import venn3, venn3_circles
folder = f'{folder}venn3/' if folder.endswith('/') else f'{folder}/venn3/'
os.makedirs(folder, exist_ok=True)
plt.clf()
sns.set(style='white', context='paper', font_scale=2, rc={'figure.figsize': (7, 7)})
# font = {'family': 'sans-serif',
# 'weight': 'normal',
# 'size': 16,
# }
# plt.rc('font', **font)
set_list = []
set_names = []
for name, setlist in dict_of_sets.items():
set_list.append(setlist)
set_names.append(name.replace('_', ' '))
# make venn
venn_plot = venn3(subsets=set_list, set_labels=set_names)
patch = ['100', '110', '101', '010', '011', '001', '111']
for p in patch:
if venn_plot.get_patch_by_id(p):
venn_plot.get_patch_by_id(p).set_color('none')
venn_plot.get_patch_by_id(p).set_alpha(.4)
venn_plot.get_patch_by_id(p).set_edgecolor('none')
# make
c = venn3_circles(subsets=set_list)
colors_list = ['green', 'blue', 'grey']
for circle, color in zip(c, colors_list):
circle.set_edgecolor(color)
circle.set_alpha(0.8)
circle.set_linewidth(3)
plt.title(f"{string_name_of_overlap.replace('_', ' ')} Overlaps")
plt.tight_layout()
plt.savefig(f"{folder}{string_name_of_overlap.replace(' ','_')}-overlap.svg")
plt.savefig(f"{folder}{string_name_of_overlap.replace(' ','_')}-overlap.png", dpi=300)
plt.close()
image_display(f"{folder}{string_name_of_overlap.replace(' ','_')}-overlap.png")
def plot_venn3_counts(element_list, set_labels, string_name_of_overlap, folder):
'''
Plot three way venn based on counts of specific overlapping numbers.
Saves to file.
Inputs
------
element_list: tuple with counts of the overlaps from (Abc, aBc, ABc, abC, AbC, aBC, ABC)
set_labels: list or tuple with names of the overlaps ('A','B','C')
string_name_of_overlap: string with name of overlap
folder: output folder
Returns
-------
None
'''
import matplotlib.pyplot as plt
import seaborn as sns
from matplotlib_venn import venn3, venn3_circles
folder = f'{folder}venn3/' if folder.endswith('/') else f'{folder}/venn3/'
os.makedirs(folder, exist_ok=True)
plt.clf()
sns.set(style='white', context='paper', font_scale=1, rc={'figure.figsize': (7, 7)})
# font = {'family': 'sans-serif',
# 'weight': 'normal',
# 'size': 16,
# }
# plt.rc('font', **font)
# make venn
venn_plot = venn3(subsets=element_list, set_labels=[name.replace('_', ' ') for name in set_labels])
patch = ['100', '110', '101', '010', '011', '001', '111']
for p in patch:
if venn_plot.get_patch_by_id(p):
venn_plot.get_patch_by_id(p).set_color('none')
venn_plot.get_patch_by_id(p).set_alpha(.4)
venn_plot.get_patch_by_id(p).set_edgecolor('none')
# make
c = venn3_circles(subsets=element_list)
colors_list = ['green', 'blue', 'grey']
for circle, color in zip(c, colors_list):
circle.set_edgecolor(color)
circle.set_alpha(0.8)
circle.set_linewidth(3)
plt.title(f"{string_name_of_overlap.replace('_', ' ')} Overlaps")
plt.tight_layout()
plt.savefig(f"{folder}{string_name_of_overlap.replace(' ', '_')}-overlap.svg")
plt.savefig(f"{folder}{string_name_of_overlap.replace(' ', '_')}-overlap.png", dpi=300)
plt.close()
image_display(f"{folder}{string_name_of_overlap.replace(' ', '_')}-overlap.png")
def overlap_two(bed_dict, genome=None):
'''
Takes a dictionary of two bed-like format files.
Merges all overlapping peaks for each bed into a master file.
Intersects beds to merged master file.
Performs annotations with ChIPseeker if genome is specified.
Plots venn diagrams of peak overlaps
If genome is specified, also plots venn diagrams of annotated gene sets.
Inputs
------
bed_dict: dictionary of BedTool files
genome: 'hg38','hg19','mm10'
Returns
-------
Returns a dictionary of dataframes from unique and overlap peaks.
If genome is specified, includes a dictionary of annotated peaks.
'''
names = list(bed_dict.keys())
Folder = f'{os.getcwd()}/'
subfolder = f"{names[0].replace(' ', '_')}_{names[1].replace(' ', '_')}_overlap/"
out = f'{Folder}{subfolder}'
os.makedirs(out, exist_ok=True)
print(f'Output files are found in {out}')
masterfile = bed_dict[names[0]].cat(bed_dict[names[1]]).sort().merge()
sorted_dict = {key: bed.sort().merge() for key, bed in bed_dict.items()}
overlap_dict = {'overlap': masterfile.intersect(sorted_dict[names[0]]).intersect(sorted_dict[names[1]])}
for key, bed in sorted_dict.items():
other = {other_key: other_bed for other_key, other_bed in sorted_dict.items() if other_key != key}
overlap_dict['{}_unique_peak'.format(key)] = masterfile.intersect(sorted_dict[key]).intersect(list(other.values())[0], v=True)
for key, bed in overlap_dict.items():
bed.to_dataframe().to_csv('{}{}{}-unique-peaks-from-mergedPeaks.bed'.format(Folder, subfolder, key.replace(' ', '_')),
header=None, index=None, sep="\t")
overlap_numbers = pd.Series({names[0]: len(overlap_dict['{}_unique_peak'.format(names[0])]),
names[1]: len(overlap_dict['{}_unique_peak'.format(names[1])]),
'overlap': len(overlap_dict['overlap'])
},
index=[names[0], names[1], 'overlap']
)
# Venn
plot_venn2(overlap_numbers,
'{} and\n{} peak'.format(names[0], names[1]),
'{}{}'.format(Folder, subfolder)
)
if bool(genome):
print('Annotating overlapping peaks...')
# Annotate with ChIPseeker
unikey = '{}_unique'
unianno = '{}_unique_annotated'
return_dict = annotate_peaks({unikey.format(key): bed.to_dataframe() for key, bed in overlap_dict.items()}, '{}{}'.format(Folder, subfolder), genome=genome)
Set1_unique = set(return_dict[unianno.format('{}_unique_peak'.format(names[0]))].SYMBOL.unique().tolist())
Set2_unique = set(return_dict[unianno.format('{}_unique_peak'.format(names[1]))].SYMBOL.unique().tolist())
Overlap_Set = set(return_dict[unianno.format('overlap')].SYMBOL.unique().tolist())
venn2_dict = {names[0]: (Set1_unique | Overlap_Set),
names[1]: (Set2_unique | Overlap_Set)
}
plot_venn2_set(venn2_dict,
'{} and {}\nannotated gene'.format(names[0], names[1]),
'{}{}'.format(Folder, subfolder)
)
gene_overlaps = {}
gene_overlaps['{}_unique_genes'.format(names[0])] = Set1_unique - (Set2_unique | Overlap_Set)
gene_overlaps['{}_unique_genes'.format(names[1])] = Set2_unique - (Set1_unique | Overlap_Set)
gene_overlaps['Overlap_Gene_Set'] = (Set1_unique & Set2_unique) | Overlap_Set
for key, gene_set in gene_overlaps.items():
with open(f'{Folder}{subfolder}{key}.txt', 'w') as file:
for gene in gene_set:
file.write(f'{gene}\n')
for key, item in gene_overlaps.items():
return_dict[key] = item
for key, df in overlap_dict.items():
return_dict[key] = df
else:
return_dict = overlap_dict
return return_dict
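# Hypothetical usage sketch (added for illustration; not in the original source).
# The bed file paths are placeholders; overlap_two expects pybedtools.BedTool
# objects and, if a genome is given, the same R annotation stack as annotate_peaks.
def _example_overlap_two_usage():
    from pybedtools import BedTool
    beds = {'H3K27ac': BedTool('H3K27ac_peaks.bed'),
            'H3K4me1': BedTool('H3K4me1_peaks.bed')
            }
    return overlap_two(beds, genome='hg38')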
def overlap_three(bed_dict, genome=None):
'''
Takes a dictionary of three bed-like format files.
Merges all overlapping peaks for each bed into a master file.
Intersects beds to merged master file.
Performs annotations with ChIPseeker if genome is specified.
Plots venn diagrams of peak overlaps
If genome is specified, also plots venn diagrams of annotated gene sets.
Inputs
------
bed_dict: dictionary of BedTool files
genome: 'hg38','hg19','mm10'
Returns
-------
Returns a dictionary of dataframes from unique and overlap peaks.
If genome is specified, includes a dictionary of annotated peaks.
'''
from collections import OrderedDict
names = list(bed_dict.keys())
Folder = f'{os.getcwd()}/'
subfolder = f"{names[0].replace(' ', '_')}-{ names[1].replace(' ', '_')}-{names[2].replace(' ', '_')}-overlap/"
out = f'{Folder}{subfolder}'
os.makedirs(out, exist_ok=True)
print(f'Output files are found in {out}')
print(f'A: {names[0]}, B: {names[1]}, C: {names[2]}')
master = bed_dict[names[0]].cat(bed_dict[names[1]]).cat(bed_dict[names[2]]).sort().merge()
A = bed_dict[names[0]].sort().merge()
B = bed_dict[names[1]].sort().merge()
C = bed_dict[names[2]].sort().merge()
sorted_dict = OrderedDict({'master': master, 'A': A, 'B': B, 'C': C})
sorted_dict['A_bc'] = (master + A - B - C)
sorted_dict['aB_c'] = (master + B - A - C)
sorted_dict['A_B_c'] = (master + A + B - C)
sorted_dict['abC_'] = (master + C - A - B)
sorted_dict['A_bC_'] = (master + A + C - B)
sorted_dict['aB_C_'] = (master + B + C - A)
sorted_dict['A_B_C_'] = (master + A + B + C)
labTup = tuple(key for key in sorted_dict.keys())
lenTup = tuple(len(bed) for bed in sorted_dict.values())
print(f'{labTup}\n{lenTup}')
plot_venn3_counts(lenTup[4:], names, f"{'_'.join(names)}-peak-overlaps", out)
for key, bed in sorted_dict.items():
if len(bed) > 1:
bed.to_dataframe().to_csv(f'{out}{key.replace(" ", "_")}-peaks-from-mergedPeaks.bed', header=None, index=None, sep="\t")
if bool(genome):
print('Annotating overlapped peaks...')
unikey = '{}'
unianno = '{}_annotated'
return_dict = annotate_peaks({unikey.format(key): bed.to_dataframe() for key, bed in sorted_dict.items()}, out, genome=genome)
Set1 = set(return_dict[unianno.format('A')].SYMBOL.unique().tolist())
Set2 = set(return_dict[unianno.format('B')].SYMBOL.unique().tolist())
Set3 = set(return_dict[unianno.format('C')].SYMBOL.unique().tolist())
plot_venn3_set({names[0]: Set1, names[1]: Set2, names[2]: Set3}, f'{names[0]}_{names[1]}_{names[2]}-gene-overlaps', out)
return sorted_dict if genome is None else {**sorted_dict, **return_dict}
def splice_bar(data, title, x, y):
'''
Plots a bar graph of missplicing counts and saves it to file.
Inputs
------
data: dataframe
title: string plot title
x: string of column title for number of events in data
y: string of column title for splicing type in data
Returns
-------
None
'''
import matplotlib.pyplot as plt
import seaborn as sns
sns.set(context='paper', font='Arial', style='white', font_scale=2)
plot = sns.barplot(x=x, y=y, data=data)
plot.set_title(title.replace('_', ' '))
plot.set_ylabel('')
sns.despine()
plt.savefig('{}.png'.format(title.replace(' ', '_')), dpi=300)
plt.close()
image_display('{}.png'.format(title.replace(' ', '_')))
def make_df(dict_of_sets, name):
'''
Make a dataframe from a dictionary of sets.
Inputs
------
dict_of_sets: dictionary of sets
name: string name of file
Returns
-------
dataframe
'''
out_dir = '{pwd}/{name}/'.format(pwd=os.getcwd(), name=name.replace(' ', '_'))
os.makedirs(out_dir, exist_ok=True)
count = 0
for key, genes in dict_of_sets.items():
count = max(count, len(genes))
df = pd.DataFrame(index=range(1, count + 1))
for key, genes in dict_of_sets.items():
df[key] = pd.Series(list(genes) + ['NA'] * (count - len(genes)), index=df.index)
df.to_excel('{}/{}.xls'.format(out_dir, name.replace(' ', '_')), index=False)
return df
def plot_col(df, title, ylabel, out='', xy=(None, None), xticks=[''], plot_type=['violin'], pvalue=False, compare_tags=None):
'''
Two column boxplot from dataframe. Titles x axis based on column names.
Inputs
------
df: dataframe (uses first two columns)
title: string of title
ylabel: string of y label
xy: if specified, x is the label column and y is the data column (default (None, None): data separated into two columns).
xticks: list of xtick names (default is column name)
pvalue: bool to perform ttest (default is False). Will only work if xy=(None, None) or there are only two labels in x.
plot_type: list of one or more: violin, box, swarm (default=violin)
compare_tags: if xy and pvalue are specified and there are more than two tags in x, specify the tags to compare. eg. ['a','b']
out: output parent directory. if none, saves into colplot/
Returns
------
None
'''
import matplotlib.pyplot as plt
import seaborn as sns
from scipy import stats
out = val_folder(out)
plt.clf()
sns.set(context='paper', font='Arial', font_scale=2, style='white', rc={'figure.dpi': 300, 'figure.figsize': (5, 6)})
if type(plot_type) != list:
plot_type = plot_type.split()
lower_plot_type = [x.lower() for x in plot_type]
if len(lower_plot_type) == 0:
raise IOError('Input a plot type.')
elif True not in {x in lower_plot_type for x in ['violin', 'box', 'swarm']}:
raise IOError('Did not recognize plot type.')
if 'swarm' in lower_plot_type:
if xy == (None, None):
fig = sns.swarmplot(data=df, color='black', s=4)
else:
fig = sns.swarmplot(data=df, x=xy[0], y=xy[1], color='black', s=4)
if 'violin' in lower_plot_type:
if xy == (None, None):
fig = sns.violinplot(data=df)
else:
fig = sns.violinplot(data=df, x=xy[0], y=xy[1])
if 'box' in lower_plot_type:
if xy == (None, None):
fig = sns.boxplot(data=df)
else:
fig = sns.boxplot(data=df, x=xy[0], y=xy[1])
fig.yaxis.set_label_text(ylabel)
fig.set_title(title.replace('_', ' '))
if xticks:
fig.xaxis.set_ticklabels(xticks)
fig.xaxis.set_label_text('')
for tick in fig.xaxis.get_ticklabels():
tick.set_fontsize(12)
if pvalue:
if xy == (None, None):
_, pvalue = stats.ttest_ind(a=df.iloc[:, 0], b=df.iloc[:, 1])
compare_tags = df.columns
else:
_, pvalue = stats.ttest_ind(a=df[df[xy[0]] == compare_tags[0]][xy[1]], b=df[df[xy[0]] == compare_tags[1]][xy[1]])
fig.text(s='p-value = {:.03g}, {} v {}'.format(pvalue, compare_tags[0], compare_tags[1]), x=0, y=-.12, transform=fig.axes.transAxes, fontsize=12)
sns.despine()
plt.tight_layout()
plt.savefig('{}{}.svg'.format(out, title.replace(' ', '_')))
plt.subplots_adjust(bottom=0.17, top=0.9)
plt.savefig('{}{}.png'.format(out, title.replace(' ', '_')), dpi=300)
print('{}.png found in {}/'.format(title.replace(' ', '_'), out))
plt.close()
image_display('{}{}.png'.format(out, title.replace(' ', '_')))
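# Hypothetical usage sketch (added for illustration; not in the original source).
# Uses random data to show the two-column mode with a t-test annotation; the
# column names and title are placeholders.
def _example_plot_col_usage():
    example_df = pd.DataFrame({'Control': np.random.normal(0, 1, 50),
                               'Treated': np.random.normal(1, 1, 50)
                               })
    plot_col(example_df,
             title='Expression by condition',
             ylabel='log2 normalized counts',
             xticks=['Control', 'Treated'],
             plot_type=['violin', 'swarm'],
             pvalue=True)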
def scatter_regression(df, s=150, alpha=0.3, line_color='dimgrey', svg=False, reg_stats=True, point_color='steelblue', title=None,
xlabel=None, ylabel=None, IndexA=None, IndexB=None, annotate=None, Alabel='Group A', Blabel='Group B'):
'''
Scatter plot and Regression based on two matched vectors.
Plots r-square and pvalue on .png
Inputs
------
df: dataframe to plot (column1 = x axis, column2= y axis)
kwargs (defaults):
s: point size (150)
alpha: (0.3)
line_color: regression line color (dimgrey)
svg: make svg (False)
stats: print R2 and pvalue on plot (True)
point_color: (steelblue)
title: string
xlabel: string
ylabel: string
IndexA: set or list of genes to highlight red
Alabel: string for IndexA group ('Group A')
IndexB: set or list of genes to highlight blue
annotate: list of genes to annotate on the graph
Returns
-------
None
Prints file name and location
Saves .png plot in scatter_regression/ folder in cwd with dpi=300.
'''
import matplotlib.pyplot as plt
import seaborn as sns
from scipy import stats
sns.set(context='paper', style="white", font_scale=3, font='Arial',
rc={"lines.linewidth": 2,
'figure.figsize': (9, 9),
'font.size': 18, 'figure.dpi': 300})
fig, ax = plt.subplots()
cols = df.columns.tolist()
regplot = sns.regplot(x=cols[0], y=cols[1], data=df, scatter=True,
fit_reg=True, color=line_color,
scatter_kws={'s': s, 'color': point_color, 'alpha': alpha}
)
if xlabel:
plt.xlabel(xlabel, labelpad=10)
if ylabel:
plt.ylabel(ylabel, labelpad=10)
if title:
regplot.set_title(title.replace('_', ' '))
if type(IndexA) in [list, set]:
# A = set(IndexA)
Abool = [True if x in IndexA else False for x in df.index.tolist()]
regplot = ax.scatter(df[Abool].iloc[:, 0], df[Abool].iloc[:, 1], marker='o', alpha=(alpha + .4 if alpha < .6 else 1), color='red', s=s, label=Alabel)
if type(IndexB) in [list, set]:
# B = set(IndexB)
Bbool = [True if x in IndexB else False for x in df.index.tolist()]
regplot = ax.scatter(df[Bbool].iloc[:, 0], df[Bbool].iloc[:, 1], marker='o', alpha=(alpha + .3 if alpha < .7 else 1), color='mediumblue', s=s, label=Blabel)
if type(annotate) in [list, set]:
anno_df = df[[True if x in annotate else False for x in df.index.tolist()]]
offx, offy = (df.iloc[:, :2].max() - df.iloc[:, :2].min()) * .1
for index, (x, y) in anno_df.iterrows():
ax.annotate(index, xy=(x, y), xytext=((x - offx, y + offy) if y >= x else (x + offx, y - offy)), arrowprops={'arrowstyle': '-', 'color': 'black'})
if reg_stats:
r, pvalue = stats.pearsonr(x=df.iloc[:, 0], y=df.iloc[:, 1])
ax.text(0, 0, 'r = {:.03g}; p-value = {:.03g}'.format(r, pvalue), fontsize=25, transform=ax.transAxes)
sns.despine(offset=5)
fig.tight_layout()
os.makedirs('scatter_regression/', exist_ok=True)
if svg:
plt.savefig('scatter_regression/{}.svg'.format(title.replace(' ', '_')))
plt.savefig('scatter_regression/{}.png'.format(title.replace(' ', '_')), dpi=300)
print('{}.png found in {}/scatter_regression/'.format(title.replace(' ', '_'), os.getcwd()))
plt.close()
image_display('scatter_regression/{}.png'.format(title.replace(' ', '_')))
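# Hypothetical usage sketch (added for illustration; not in the original source).
# Correlated random data stands in for matched log2 fold-change vectors; a title is
# required because it is used for the output filename.
def _example_scatter_regression_usage():
    rna = np.random.normal(size=200)
    atac = rna * 0.5 + np.random.normal(scale=0.5, size=200)
    example_df = pd.DataFrame({'RNA_log2FC': rna, 'ATAC_log2FC': atac})
    scatter_regression(example_df,
                       title='RNA vs ATAC log2FC',
                       xlabel='RNA log2FC',
                       ylabel='ATAC log2FC')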
def signature_heatmap(vst, sig, name, cluster_columns=False):
'''
Generate heatmap of differentially expressed genes using
variance stabilized transformed (VST) log2 counts.
Inputs
------
vst = gene name is the index
sig = set or list of signature
name = name of file
cluster_columns = bool (default = False)
Outputs
------
.png and .svg file of heatmap
Returns
-------
None
'''
import matplotlib.pyplot as plt
import seaborn as sns
sns.set(font='Arial', font_scale=2, style='white', context='paper')
vst['gene_name'] = vst.index
CM = sns.clustermap(vst[vst.gene_name.apply(lambda x: x in sig)].drop('gene_name', axis=1),
z_score=0, method='complete', cmap='RdBu_r',
yticklabels=False, col_cluster=cluster_columns)
CM.fig.suptitle(name.replace('_', ' '))
CM.savefig('{}_Heatmap.png'.format(name.replace(' ', '_')), dpi=300)
CM.savefig('{}_Heatmap.svg'.format(name.replace(' ', '_')))
plt.close()
image_display('{}_Heatmap.png'.format(name.replace(' ', '_')))
def ssh_job(command_list, job_name, job_folder, project='nimerlab', threads=1, q='general', mem=3000):
'''
Sends job to LSF pegasus.ccs.miami.edu
Inputs
------
command_list: list of commands with new lines separated by commas
job_name: string of job name (also used for log file)
job_folder: string of folder to save err out and script files
q: pegasus queue, ie. 'bigmem', 'general' (default), 'parallel'
mem: integer memory requirement, default=3000 (3GB RAM)
project: string pegasus project name (default = nimerlab)
threads: integer of number of threads. default = 1
Returns
-------
Tuple(rand_id, job_folder, prejob_files)
'''
job_folder = job_folder if job_folder.endswith('/') else f'{job_folder}/'
os.system(f'ssh pegasus mkdir -p {job_folder}')
rand_id = str(random.randint(0, 100000))
str_comd_list = '\n'.join(command_list)
cmd = '\n'.join(['#!/bin/bash',
'',
f"#BSUB -J ID_{rand_id}_JOB_{job_name.replace(' ','_')}",
f'#BSUB -R "rusage[mem={mem}]"',
f'#BSUB -R "span[ptile={threads}]"',
f"#BSUB -o {job_folder}{job_name.replace(' ','_')}_logs_{rand_id}.stdout.%J",
f"#BSUB -e {job_folder}{job_name.replace(' ','_')}_logs_{rand_id}.stderr.%J",
'#BSUB -W 120:00',
f'#BSUB -n {threads}',
f'#BSUB -q {q}',
f'#BSUB -P {project}',
'',
f'{str_comd_list}'
])
with open(f'{job_name.replace(" ","_")}.sh', 'w') as file:
file.write(cmd)
prejob_files = os.popen(f'ssh pegasus ls {job_folder}').read().split('\n')[:-1]
os.system(f'''ssh pegasus "mkdir -p {job_folder}"''')
os.system(f'scp {job_name.replace(" ", "_")}.sh pegasus:{job_folder}')
os.system(f'''ssh pegasus "cd {job_folder}; bsub < {job_name.replace(' ','_')}.sh"''')
print(f'Submitting {job_name} as ID_{rand_id} from folder {job_folder}: {datetime.now():%Y-%m-%d %H:%M:%S}')
return (rand_id, job_folder, prejob_files, job_name)
def ssh_check(ID, job_folder, prejob_files=None, wait=True, return_filetype=None, load=False, check_IO_logs=None, sleep=10, job_name=''):
'''
Checks for pegasus jobs sent by ssh_job and prints contents of the log file.
Optionally copies and/or loads the results file.
Inputs
------
ID: job ID returned by ssh_job
job_folder: job folder to probe for results (only if return_filetype specified)
prejob_files: list of contents of job folder before execution
wait: wait for processes to finish before returning, default=True
return_filetype: return file type (ex. .png will search for all .png in job_folder and import it), default=None
load: bool. copy and display the new result files of the given filetype
check_IO_logs: read output from .err .out logs
sleep: seconds to sleep between status checks (default 10)
job_name: prepends local ssh folder with job name if provided
Returns
------
None
'''
job_folder = val_folder(job_folder)
jobs_list = os.popen('ssh pegasus bhist -w').read()
job = [j for j in re.findall(r'ID_(\d+)', jobs_list) if j == ID]
if len(job) != 0:
print(f'Job ID_{ID} is not complete: {datetime.now():%Y-%m-%d %H:%M:%S}')
else:
if os.popen('''ssh pegasus "if [ -f {}/*_logs_{}.stderr* ]; then echo 'True' ; fi"'''.format(job_folder, ID)).read() == 'True\n':
print(f'Job ID_{ID} is finished')
else:
print(f'There was likely an error in submission of Job ID_{ID}')
if wait:
running = True
while running:
jobs_list = os.popen('ssh pegasus "bhist -w"').read()
job = [j for j in re.findall(r'ID_(\d+)', jobs_list) if j == ID]
if len(job) == 0:
running = False
else:
print(f'Waiting for jobs to finish... {datetime.now():%Y-%m-%d %H:%M:%S}')
time.sleep(sleep)
print(f'Job ID_{ID} is finished')
if load:
os.makedirs(f'ssh_files/{ID}/', exist_ok=True)
post_files = os.popen(f'ssh pegasus ls {job_folder}*{return_filetype}').read().split("\n")[:-1]
if prejob_files is None:
prejob_files = []
import_files = [file for file in post_files if file not in prejob_files]
for file in import_files:
print('Copying {} to {}/ssh_files/{}{}/'.format(file, os.getcwd(), job_name, ID))
os.system('scp pegasus:{} ssh_files/{}{}/{}'.format(file, job_name, ID, file.split('/')[-1]))
image_display('ssh_files/{}{}/{}'.format(job_name, ID, file.split('/')[-1]))
if check_IO_logs:
logs = {'ErrorFile': '{}/*_logs_{}.stderr*'.format(job_folder, ID),
'OutFile': '{}/*_logs_{}.stdout*'.format(job_folder, ID)
}
os.makedirs('logs/', exist_ok=True)
for key, log in logs.items():
os.system("scp 'pegasus:{}' 'logs/ID_{}_{}.txt'".format(log, ID, key))
if os.path.isfile('logs/ID_{}_{}.txt'.format(ID, key)):
print('logs/ID_{} {}:'.format(ID, key))
with open('logs/ID_{}_{}.txt'.format(ID, key)) as file:
print(file.read())
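# Hypothetical usage sketch (added for illustration; not in the original source).
# Assumes passwordless ssh access to pegasus; the remote job folder path is a
# placeholder. ssh_check blocks until the submitted job disappears from bhist.
def _example_ssh_job_usage():
    cmds = ['echo "hello from pegasus"']
    job_id, job_folder, prejob_files, job_name = ssh_job(cmds,
                                                         job_name='hello_test',
                                                         job_folder='/scratch/projects/example_jobs/',
                                                         threads=1,
                                                         mem=1000)
    ssh_check(job_id,
              job_folder,
              prejob_files=prejob_files,
              wait=True,
              job_name=job_name)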
def deeptools(regions,
signals,
matrix_name,
out_name,
pegasus_folder,
envelope='deeptools',
copy=False,
title='',
bps=(1500, 1500, 4000),
d_type='center',
scaled_names=('TSS', 'TES'),
make=('matrix', 'heatmap', 'heatmap_group', 'profile', 'profile_group'),
missing_values_as_zero=True,
heatmap_kmeans=0,
save_sorted_regions='',
sort_regions='descend',
profile_colors=None):
'''
Inputs
------
regions: dictionary {'region_name':'/path/to/ssh/bedfile'}
signals: dictionary {'signal_name':'/path/to/ssh/bigwigfile'}
matrix_name: string of matrix name or matrix to be named (before .matrix.gz)
out_name: name for output file
title: plot title (optional)
envelope: conda environment to activate (default 'deeptools')
bps: tuple of region width on either side of center or scaled. center ignores last number. default is (1500,1500,4000)
d_type: 'center' or 'scaled'
scaled_names: optional names for scaled start and end (default ('TSS','TES'))
make: tuple of deeptool commands. options: matrix, heatmap, heatmap_group, profile, profile_group
copy: bool. Copy region and signal files to pegasus
missing_values_as_zero: bool, default True. Treat missing data as zero in computeMatrix
heatmap_kmeans: default 0. number of kmeans clusters (int)
save_sorted_regions: '' (default: don't output) else filename for kmeans sorted region file
sort_regions: default 'descend'. options: 'keep', 'no', 'ascend'
profile_colors: default None. list of colors per sample in sample order
Returns
-------
string of commands for ssh_job
'''
pegasus_folder = pegasus_folder if pegasus_folder.endswith('/') else f'{pegasus_folder}/'
os.system(f"ssh pegasus 'mkdir {pegasus_folder}'")
make_lower = [x.lower() for x in make]
if d_type.lower() == 'center':
deepMat = 'reference-point --referencePoint center'
deepHeat = "--refPointLabel 'Peak Center'"
deepProf = "--refPointLabel 'Peak Center'"
else:
deepMat = f'scale-regions --regionBodyLength {bps[2]}'
deepHeat = f'--startLabel {scaled_names[0]} --endLabel {scaled_names[1]}'
deepProf = f'--startLabel {scaled_names[0]} --endLabel {scaled_names[1]}'
cmd_list = ['module rm python share-rpms65', f'source activate {envelope}']
if copy:
print('Copying region files to pegasus...')
for region in regions.values():
if os.popen(f'''ssh pegasus "if [ -f {pegasus_folder}{region.split('/')[-1]} ]; then echo 'True' ; fi"''').read() != 'True\n':
print(f'Copying {region} to pegasus at {pegasus_folder}.')
os.system(f"scp {region} pegasus:{pegasus_folder}")
else:
print(f'{region} found in {pegasus_folder}.')
print('Copying signal files to pegasus...')
for signal in signals.values():
if os.popen(f'''ssh pegasus "if [ -f {pegasus_folder}/{signal.split('/')[-1]} ]; then echo 'True' ; fi"''').read() != 'True\n':
print(f'Copying {signal} to {pegasus_folder}.')
os.system(f"scp {signal} pegasus:{pegasus_folder}")
pegasus_region_path = ' '.join([f"{pegasus_folder}{region_path.split('/')[-1]}" for region_path in regions.values()])
pegasus_signal_path = ' '.join([f"{pegasus_folder}{signal_path.split('/')[-1]}" for signal_path in signals.values()])
else:
pegasus_region_path = ' '.join([f'{region_path}' for region_path in regions.values()])
pegasus_signal_path = ' '.join([f'{signal_path}' for signal_path in signals.values()])
if 'matrix' in make_lower:
signal_name = ' '.join([f'''"{signal_name.replace('_', ' ')}"''' for signal_name in signals.keys()])
computeMatrix = f"computeMatrix {deepMat} -a {bps[0]} -b {bps[1]} -p 4 -R {pegasus_region_path} -S {pegasus_signal_path} --samplesLabel {signal_name} -o {matrix_name}.matrix.gz"
if missing_values_as_zero:
computeMatrix += ' --missingDataAsZero'
cmd_list.append(computeMatrix)
if 'heatmap' in make_lower or 'heatmap_group' in make_lower:
region_name = ' '.join([f'''"{region_name.replace('_', ' ')}"''' for region_name in regions.keys()])
plotHeatmap_base = f"plotHeatmap -m {matrix_name}.matrix.gz --dpi 300 {deepHeat} --plotTitle '{title.replace('_',' ')}' --whatToShow 'heatmap and colorbar' --colorMap Reds"
if sort_regions != 'descend':
plotHeatmap_base += f' --sortRegions {sort_regions}'
if heatmap_kmeans > 0:
plotHeatmap_base += f' --kmeans {heatmap_kmeans}'
else:
plotHeatmap_base += f' --regionsLabel {region_name}'
if save_sorted_regions != '':
plotHeatmap_base += f' --outFileSortedRegions {save_sorted_regions}.txt'
if 'heatmap' in make_lower:
cmd_list.append(f"{plotHeatmap_base} -out {out_name}_heatmap.png")
if 'heatmap_group' in make_lower:
cmd_list.append(f"{plotHeatmap_base} -out {out_name}_heatmap_perGroup.png --perGroup")
if 'profile' in make_lower or 'profile_group' in make_lower:
region_name = ' '.join([f'''"{region_name.replace('_', ' ')}"''' for region_name in regions.keys()])
plotProfile_base = f"plotProfile -m {matrix_name}.matrix.gz --dpi 300 {deepProf} --plotTitle '{title.replace('_',' ')}'"
if heatmap_kmeans > 0:
plotProfile_base += f' --kmeans {heatmap_kmeans}'
else:
plotProfile_base += f' --regionsLabel {region_name}'
if profile_colors:
plotProfile_base += f' --colors {" ".join(profile_colors)}'
if save_sorted_regions != '':
plotProfile_base += f' --outFileSortedRegions {save_sorted_regions}_profile.txt'
if 'profile' in make_lower:
cmd_list.append(f"{plotProfile_base} -out {out_name}_profile.png")
if 'profile_group' in make_lower:
cmd_list.append(f"{plotProfile_base} -out {out_name}_profile_perGroup.png --perGroup")
return cmd_list
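# Hypothetical usage sketch (added for illustration; not in the original source).
# All remote paths are placeholders. deeptools() only builds the command list;
# the commands are then submitted to the cluster with ssh_job as shown.
def _example_deeptools_usage():
    cmds = deeptools(regions={'ChIP_peaks': '/scratch/projects/example/peaks.bed'},
                     signals={'H3K27ac': '/scratch/projects/example/H3K27ac.FCoverInput.bw'},
                     matrix_name='H3K27ac_at_peaks',
                     out_name='H3K27ac_at_peaks',
                     pegasus_folder='/scratch/projects/example/deeptools/',
                     title='H3K27ac at ChIP peaks',
                     d_type='center',
                     make=('matrix', 'heatmap', 'profile'))
    return ssh_job(cmds, 'deeptools_H3K27ac', '/scratch/projects/example/deeptools/', threads=4, mem=8000)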
def order_cluster(dict_set, count_df, gene_column_name, title):
'''
Inputs
------
dict_set: a dictionary with a cluster name and a set of genes in that cluster for plotting (should be non-overlapping).
count_df: a pandas dataframe with the normalized counts for each gene, with samples (or averages of samples) as columns.
should also contain a column with the gene name.
gene_column_name: the pandas column specifying the gene name (used in the dict_set)
title: title for the plot and for saving the file
Returns
------
(Ordered Index List, Ordered Count DataFrame, Clustermap)
'''
import matplotlib.pyplot as plt
import seaborn as sns
from scipy.cluster import hierarchy
import matplotlib.patches as mpatches
out_list = []
df = count_df.copy()
df['group'] = 'NA'
for name, genes in dict_set.items():
if len(genes) == 0:
print(f'There are no genes in {name}. Skipping group.')
continue
reduced_df = df[df[gene_column_name].isin(genes)]
linkage = hierarchy.linkage(reduced_df.drop(columns=[gene_column_name, 'group']), method='ward', metric='euclidean')
order = hierarchy.dendrogram(linkage, no_plot=True, color_threshold=-np.inf)['leaves']
gene_list = reduced_df.iloc[order][gene_column_name].tolist()
gene_index = df[df[gene_column_name].isin(gene_list)].index.tolist()
out_list += gene_index
gene_symbol = [gene.split('_')[-1] for gene in gene_list]
with open(f'{name}_genes.txt', 'w') as file:
for gene in gene_symbol:
file.write(f'{gene}\n')
df.loc[gene_index, 'group'] = name
ordered_df = df.loc[out_list]
color_mapping = dict(zip([name for name, genes in dict_set.items() if len(genes) > 0], sns.hls_palette(len(df.group.unique()), s=.7)))
row_colors = df.group.map(color_mapping)
sns.set(context='notebook', font='Arial', palette='RdBu_r', style='white', rc={'figure.dpi': 300})
clustermap = sns.clustermap(ordered_df.loc[out_list].drop(columns=[gene_column_name, 'group']),
z_score=0,
row_colors=row_colors,
row_cluster=False,
col_cluster=False,
cmap='RdBu_r',
yticklabels=False)
clustermap.fig.suptitle(title)
legend = [mpatches.Patch(color=color, label=label.replace('_', ' ')) for label, color in color_mapping.items() if label != 'NA']
clustermap.ax_heatmap.legend(handles=legend, bbox_to_anchor=(-.1, .9, 0., .102))
clustermap.savefig(f'{title.replace(" ","_")}.png', dpi=300)
plt.close()
image_display(f'{title.replace(" ","_")}.png')
return out_list, ordered_df, clustermap
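# Hypothetical usage sketch (added for illustration; not in the original source).
# Small random count matrix with an id_symbol style gene column; the cluster
# definitions are placeholders and must be non-overlapping, as noted above.
def _example_order_cluster_usage():
    genes = [f'ENSG{i:04d}_Gene{i}' for i in range(12)]
    counts = pd.DataFrame(np.random.rand(12, 4), columns=['WT_1', 'WT_2', 'KO_1', 'KO_2'])
    counts['gene_name'] = genes
    clusters = {'Cluster_A': set(genes[:6]), 'Cluster_B': set(genes[6:])}
    out_index, ordered_df, clustermap = order_cluster(clusters, counts, 'gene_name', 'Example clusters')
    return ordered_df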
def ranked_ordered_cluster(dict_set, in_df,
gene_column_name,
dict_sort_col,
title='ranked_ordered_cluster',
group_name='Group',
figsize=None,
ascending=False):
'''
Inputs
------
dict_set: a dictionary with a cluster name and a set of genes in that cluster for plotting.
in_df: a pandas dataframe with the normalized counts for each gene, with samples (or averages of samples) as columns.
should also contain a column with the gene name.
gene_column_name: the pandas column specifying the gene name (used in the dict_set)
dict_sort_col: dictionary mapping cluster name with column to sort by in that cluster.
group_name: name (string) of the clusters (ie. Group, or Lineage)
title: title for the plot and for saving the file
figsize: tuple of figsize or default none for autogeneration
ascending: bool for sort order
Returns
------
(Ordered Count DataFrame, Clustermap)
'''
import matplotlib.pyplot as plt
import seaborn as sns
from scipy import stats
import matplotlib.patches as mpatches
from dkfunctions import image_display
out_dfs = []
df = in_df.copy()
df[group_name] = 'NA'
df.index = df[gene_column_name]
for name, genes in dict_set.items():
reduced_df = df[df[gene_column_name].isin(genes)].copy()
zscored = reduced_df.drop(columns=[gene_column_name, group_name]).T.apply(stats.zscore).T.copy()
order = zscored.sort_values(by=dict_sort_col[name], ascending=ascending).index.tolist()
gene_list = reduced_df.loc[order, gene_column_name].tolist()
gene_symbol = [gene.split('_')[-1] for gene in gene_list]
with open(f'{name}_genes.txt', 'w') as file:
for gene in gene_symbol:
file.write(f'{gene}\n')
reduced_df[group_name] = name
reduced_df = reduced_df.loc[gene_list]
out_dfs.append(reduced_df)
ordered_df = pd.concat(out_dfs)
groups = ordered_df[group_name].unique()
color_mapping = dict(zip(groups, sns.color_palette("colorblind",len(groups))))
row_colors = ordered_df[group_name].map(color_mapping).tolist()
sns.set(context='paper', font='Arial', palette='pastel', style='white', rc={'figure.dpi': 300}, font_scale=.9)
g = sns.clustermap(ordered_df.drop(columns=[gene_column_name, group_name]),
z_score=0,
row_colors=row_colors,
row_cluster=False,
col_cluster=False,
cmap='RdBu_r',
yticklabels=True,
figsize=figsize)
g.fig.suptitle(title)
legend = [mpatches.Patch(color=color, label=label.replace('_', ' ')) for label, color in color_mapping.items() if label != 'NA']
g.ax_heatmap.legend(handles=legend, bbox_to_anchor=(-.1, .9, 0., .102),fontsize='large')
g.savefig(f'{title.replace(" ","_")}.png', dpi=300)
g.savefig(f'{title.replace(" ","_")}.svg')
plt.close()
image_display(f'{title.replace(" ","_")}.png')
return ordered_df, g
def gsea_barplot(out_dir, pos_file, neg_file, gmt_name, max_number=20):
'''
Inputs
------
out_dir: directory output or '' for current directory
pos_file: GSEA positive enrichment .xls file
neg_file: GSEA negative enrichment .xls file
gmt_name: name of enrichment (ex: Hallmarks)
max_number: max number of significant sets to report (default 20)
Returns
-------
string of save file
'''
import matplotlib.pyplot as plt
import seaborn as sns
out_dir = out_dir if out_dir.endswith('/') else '{}/'.format(out_dir)
out_dir = '' if out_dir == '/' else out_dir
os.makedirs(out_dir, exist_ok=True)
pos = pd.read_table(pos_file).head(max_number) if os.path.isfile(pos_file) else pd.DataFrame(columns=['NAME', 'NES', 'FDR q-val'])
pos[gmt_name] = [' '.join(name.split('_')[1:]) for name in pos.NAME.tolist()]
neg = pd.read_table(neg_file).head(max_number) if os.path.isfile(neg_file) else pd.DataFrame(columns=['NAME', 'NES', 'FDR q-val'])
neg[gmt_name] = [' '.join(name.split('_')[1:]) for name in neg.NAME.tolist()]
sns.set(context='paper', font='Arial', font_scale=.9, style='white', rc={'figure.dpi': 300, 'figure.figsize': (8, 6)})
fig, (ax1, ax2) = plt.subplots(ncols=1, nrows=2)
fig.suptitle('{} GSEA enrichment\n(q<0.05, max {})'.format(gmt_name, max_number))
if len(pos[pos['FDR q-val'] < 0.05]) > 0:
UP = sns.barplot(data=pos[pos['FDR q-val'] < 0.05], x='NES', y=gmt_name, color='firebrick', ax=ax1)
UP.set_title('Positive Enrichment')
sns.despine()
if len(neg[neg['FDR q-val'] < 0.05]) > 0:
DN = sns.barplot(data=neg[neg['FDR q-val'] < 0.05], x='NES', y=gmt_name, color='steelblue', ax=ax2)
DN.set_title('Negative Enrichment')
sns.despine()
try:
plt.tight_layout(h_pad=1, w_pad=1)
except ValueError:
pass
plt.subplots_adjust(top=0.88)
file = f'{out_dir}{gmt_name}_GSEA_NES_plot.png'
fig.savefig(file, dpi=300)
plt.close()
image_display(file)
return file
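# Hypothetical usage sketch (added for illustration; not in the original source).
# The report paths are placeholders for the two .xls summary tables written by a
# GSEA run against a single .gmt collection.
def _example_gsea_barplot_usage():
    return gsea_barplot(out_dir='gsea_barplots/',
                        pos_file='gsea_report_for_na_pos.xls',
                        neg_file='gsea_report_for_na_neg.xls',
                        gmt_name='Hallmarks',
                        max_number=20)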
def hinton(df, filename, folder, max_weight=None):
"""Draw Hinton diagram for visualizing a weight matrix."""
import matplotlib.pyplot as plt
import seaborn as sns
folder = folder if folder.endswith('/') else f'{folder}/'
folder = f'{os.getcwd()}/' if folder == '/' else folder
sns.set(context='paper', rc={'figure.figsize': (8, 8), 'figure.dpi': 200})
matrix = df.values
plt.clf()
plt.figure(figsize=(10, 10), dpi=200)
ax = plt.gca()
if not max_weight:
max_weight = 2 ** np.ceil(np.log(np.abs(matrix).max()) / np.log(2))
ax.patch.set_facecolor('white')
ax.set_aspect('equal', 'box')
ax.xaxis.set_major_locator(plt.NullLocator())
ax.yaxis.set_major_locator(plt.NullLocator())
ax.axis('off')
for (x, y), w in np.ndenumerate(matrix):
color = 'red' if w > 0 else 'blue'
size = np.sqrt(np.abs(w) / max_weight)
rect = plt.Rectangle([y - size / 2, x - size / 2], size, size,
facecolor=color, edgecolor=color)
ax.add_patch(rect)
fraction = len(df.index.tolist())
increment = (.915 / fraction)
y = 0.942
for x in df.index.tolist():
ax.annotate(x, xy=(-.15, y), xycoords='axes fraction')
y -= increment
ax.annotate("Components", xy=(.4, 0), xycoords='axes fraction', size=14)
ax.autoscale_view()
ax.annotate('Hinton Plot of Independent Components', xy=(.14, 1), xycoords='axes fraction', size=20)
ax.invert_yaxis()
ax.figure.savefig(f'{folder}{filename}.png')
plt.close()
image_display(f'{folder}{filename}.png')
def genomic_annotation_plots(dict_of_annotated_dfs, txdb_db,
filename='Genomic_Annotation_Plot',
title='',
bar_width=.75,
figsize=(10, 5),
order=['Promoter (<=1kb)',
'Promoter (1-2kb)',
'Promoter (2-3kb)',
'Intron',
'Exon',
"3' UTR",
"5' UTR",
'Downstream (<1kb)',
'Downstream (1-2kb)',
'Downstream (2-3kb)',
'Distal Intergenic'],
feature_col='annotation',
palette='colorblind',
plot_mode='fraction'
):
'''
Plots stacked genomic feature annotation bars from ChIPseeker annotation output dataframes.
dict_of_annotated_dfs: dictionary of name -> annotated dataframe (e.g. output of annotate_peaks)
txdb_db: 'UCSC' or 'Ensembl' (controls how transcript IDs are stripped from the annotation strings)
plot_mode: 'fraction' (default) plots fractions of peaks; anything else plots raw peak counts
'''
import matplotlib.pyplot as plt
import seaborn as sns
db = '(uc' if txdb_db == 'UCSC' else '(ENS'
Anno_df = pd.DataFrame(index=order)
for name, df in dict_of_annotated_dfs.items():
df[feature_col] = [anno.replace(f' {db}', f'_{db}').split('_')[0] for anno in df[feature_col].tolist()]
df_anno = df.groupby(feature_col).count().iloc[:, 0]
if plot_mode.lower() == 'fraction':
Anno_df[name] = df_anno / df_anno.sum()
else:
Anno_df[name] = df_anno
Anno_df[Anno_df.isna()] = 0
sns.set(style='white', font='Arial', font_scale=1.2)
sns.set_palette(palette, n_colors=len(order))
f = plt.figure(figsize=figsize)
Anno_df.T.plot(kind='barh', stacked=True, ax=f.gca(), width=bar_width, lw=0.1)
plt.title(title)
plt.legend(loc=3, bbox_to_anchor=(1.0, 0))
plt.xlabel('Fraction' if plot_mode.lower() == 'fraction' else 'Peak Number')
sns.despine()
plt.tight_layout()
plt.savefig(f'{filename}.png', dpi=300)
plt.close()
image_display(f'{filename}.png')
def extract_ENCODE_report_data(base_folder, report_type, out_folder='', histone=False, replicate=False):
'''
Inputs
-----
base_folder: AQUAS results folder. Will use subfolders for sample name and look for report in those subfolders.
report_type: 'AQUAS' or 'cromwell'
out_folder: output folder for the summary violin/swarm plots
histone: bool. if True, skips the IDR FrIP and IDR peak number fields
replicate: whether the ChIPseq was performed as a replicate or not.
Returns
-----
DataFrame of results
'''
tq = tq_type()
if report_type.lower() not in ['aquas', 'cromwell']:
raise ValueError('This function only extracts summary info from AQUAS or Cromwell generated qc reports.')
base_folder = val_folder(base_folder)
report_name = f'{base_folder}*/*report.html' if report_type.lower() == 'aquas' else f'{base_folder}*report.html'
reports = glob.glob(report_name)
out_folder = val_folder(out_folder)
if replicate is True:
raise AssertionError('Not set up for replicates yet.')
results_df = pd.DataFrame(index=['Percent_mapped', 'Filtered_Uniquely_Mapped_Reads', 'Fraction_Duplicated', 'S_JS_Distance', 'PBC1', 'RSC', 'Overlap_Optimal_Peak_Number', 'FrIP_IDR', 'IDR_Peak_Number'])
for file in tq(reports):
name = re.findall(r'.*/(.*)_report.html', file)[0] if report_type.lower() == 'aquas' else re.findall(r'.*/(.*)_qc_report.html', file)[0]
report = pd.read_html(file)
series = pd.Series()
series['Percent_mapped'] = report[1].iloc[7, 1] if report_type.lower() == 'aquas' else report[0].iloc[7, 1]
series['Filtered_Uniquely_Mapped_Reads'] = report[2].iloc[5, 1] if report_type.lower() == 'aquas' else report[3].iloc[5, 1]
series['Fraction_Duplicated'] = report[3].iloc[7, 1] if report_type.lower() == 'aquas' else report[1].iloc[7, 1]
series['S_JS_Distance'] = report[4].iloc[7, 1] if report_type.lower() == 'aquas' else report[8].iloc[8, 1]
series['PBC1'] = report[5].iloc[6, 1] if report_type.lower() == 'aquas' else report[2].iloc[6, 1]
series['RSC'] = report[6].iloc[8, 1] if report_type.lower() == 'aquas' else report[5].iloc[9, 1]
series['Overlap_Optimal_Peak_Number'] = report[10].iloc[4, 1] if report_type.lower() == 'aquas' else report[4].iloc[4, 1]
if histone is False:
series['FrIP_IDR'] = report[11].iloc[0, 1] if report_type.lower() == 'aquas' else report[7].iloc[1, 1]
series['IDR_Peak_Number'] = report[12].iloc[4, 1] if report_type.lower() == 'aquas' else report[4].iloc[4, 2]
results_df[name] = series
for index in tq(results_df.index.tolist()):
plot_col(results_df.loc[index], out=out_folder, title=f'{index}', ylabel=index.replace('_', ' '), plot_type=['violin', 'swarm'])
return results_df
def meme_ssh(folder, fasta, bed, meme_db, out_name, markov=None):
folder = folder if folder.endswith('/') else f'{folder}/'
out_fasta = f'{folder}{bed.split("/")[-1].replace(".bed",".fasta")}'
meme_cmd = ['module rm python share-rpms65',
'source activate motif',
f'bedtools getfasta -fi {fasta} -bed {bed} -fo {out_fasta}',
f'meme-chip -oc {out_name} -db {meme_db} -dna {out_fasta}'
]
if markov:
meme_cmd[3] += f' -bfile {markov}'
return ssh_job(meme_cmd, f'{out_name}_meme', folder, mem=3000)
def overlap_four(bed_dict, genome=None):
'''
Takes a dictionary of four pybedtools.BedTool objects.
Merges all overlapping peaks for each bed into a master file.
Intersects beds to merged master file.
Performs annotations with ChIPseeker if genome is specified.
Inputs
------
bed_dict: dictionary of BedTool files
genome: 'hg38','hg19','mm10'
Returns
-------
Returns a dictionary of dataframes from unique and overlap peaks.
If genome is specified, includes a dictionary of annotated genes.
'''
from collections import OrderedDict
import pickle
names = list(bed_dict.keys())
Folder = f'{os.getcwd()}/'
subfolder = f"{names[0].replace(' ', '_')}-{ names[1].replace(' ', '_')}-{names[2].replace(' ', '_')}-{names[3].replace(' ', '_')}overlap/"
out = f'{Folder}{subfolder}'
os.makedirs(out, exist_ok=True)
print(f'Output files are found in {out}')
print(f'A: {names[0]}, B: {names[1]}, C: {names[2]}, D: {names[3]}')
master = bed_dict[names[0]].cat(bed_dict[names[1]]).cat(bed_dict[names[2]]).sort().merge().cat(bed_dict[names[3]]).sort().merge()
A = bed_dict[names[0]].sort().merge()
B = bed_dict[names[1]].sort().merge()
C = bed_dict[names[2]].sort().merge()
D = bed_dict[names[3]].sort().merge()
sorted_dict = OrderedDict({'master': master, 'A': A, 'B': B, 'C': C, 'D': D})
sorted_dict['Abcd'] = (master + A - B - C - D)
sorted_dict['aBcd'] = (master + B - A - C - D)
sorted_dict['abCd'] = (master + C - A - B - D)
sorted_dict['abcD'] = (master + D - A - B - C)
sorted_dict['ABcd'] = (master + A + B - C - D)
sorted_dict['AbCd'] = (master + A + C - B - D)
sorted_dict['AbcD'] = (master + A + D - C - B)
sorted_dict['aBCd'] = (master + B + C - A - D)
sorted_dict['aBcD'] = (master + B + D - A - C)
sorted_dict['abCD'] = (master + C + D - A - B)
sorted_dict['ABCd'] = (master + A + B + C - D)
sorted_dict['ABcD'] = (master + A + B + D - C)
sorted_dict['AbCD'] = (master + A + C + D - B)
sorted_dict['aBCD'] = (master + B + C + D - A)
sorted_dict['ABCD'] = (master + A + B + C + D)
labTup = tuple(key for key in sorted_dict.keys())
lenTup = tuple(len(bed) for bed in sorted_dict.values())
gener = (f'{lab}: {size}' for lab, size in zip(labTup, lenTup))
for x in gener:
print(x)
for key, bed in sorted_dict.items():
if len(bed) > 1:
bed.to_dataframe().to_csv(f"{out}{key.replace(' ', '_')}-peaks-from-mergedPeaks.bed", header=None, index=None, sep="\t")
if bool(genome):
print('Annotating overlapped peaks...')
unikey = '{}'
unianno = '{}_annotated'
return_dict = annotate_peaks({unikey.format(key): bed.to_dataframe() for key, bed in sorted_dict.items() if len(bed) > 0}, out, genome=genome)
gene_dict = {names[0]: return_dict[unianno.format('A')].SYMBOL.unique().tolist(),
names[1]: return_dict[unianno.format('B')].SYMBOL.unique().tolist(),
names[2]: return_dict[unianno.format('C')].SYMBOL.unique().tolist(),
names[3]: return_dict[unianno.format('D')].SYMBOL.unique().tolist()
}
for name, gene_list in gene_dict.items():
with open(f'{out}{name}_all_peaks_annotated.txt', 'w') as fp:
for gene in gene_list:
fp.write(f'{gene}\n')
with open(f'{out}Overlap_annotated_results.pkl', 'wb') as fp:
pickle.dump(return_dict, fp)
return sorted_dict if genome is None else {**sorted_dict, **return_dict}
def extract_clustermap_clusters(clustermap, num_of_clusters):
'''
Input a seaborn clustermap and number of clusters to id
Returns an array of labelled clusters based on the original dataframe used to generate the clustermap.
Usage: df['cluster'] = extract_clustermap_clusters(clustermap, 2)
'''
from scipy.cluster.hierarchy import fcluster
return fcluster(clustermap.dendrogram_row.linkage, num_of_clusters, criterion='maxclust')
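# Hypothetical usage sketch (added for illustration; not in the original source).
# Builds a clustermap from random data and labels each original row with its
# cluster id at the requested cut.
def _example_extract_clustermap_clusters_usage():
    import seaborn as sns
    example_df = pd.DataFrame(np.random.rand(20, 5),
                              columns=[f'sample_{i}' for i in range(5)])
    cm = sns.clustermap(example_df)
    example_df['cluster'] = extract_clustermap_clusters(cm, 3)
    return example_df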
def generate_ROSE_gffs(bed_file, name):
'''
takes a bed_file and returns a gff in the format needed for the ROSE algorithm.
'''
if type(bed_file) is BedTool:
bed_file = bed_file.to_dataframe()
elif type(bed_file) is not pd.DataFrame:
raise IOError('Input must be either a pybedtools object or a pandas dataframe')
gff = pd.DataFrame({'chr': bed_file.chrom,
'number': bed_file.index.tolist(),
'b1': '.',
'start': bed_file.start,
'end': bed_file.end,
'b2': '.',
'b3': '.',
'b4': '.',
'id': bed_file.index.tolist()
},
index=bed_file.index)
gff.to_csv(f'{name}_enhancer.gff', header=False, index=False, sep="\t")
return gff
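# Hypothetical usage sketch (added for illustration; not in the original source).
# The peak file path is a placeholder; the resulting gff is written to the working
# directory as <name>_enhancer.gff for input into the ROSE algorithm.
def _example_generate_rose_gff_usage():
    from pybedtools import BedTool
    h3k27ac_peaks = BedTool('H3K27ac_peaks.bed')
    return generate_ROSE_gffs(h3k27ac_peaks, 'H3K27ac')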
def active_enhancer_determination(H3K4me1_bw, H3K4me3_bw, H3K4me1_bed, H3K27ac_bed, name, TSS_bed=None, gtf_file=None, TSS_region=(2500, 2500), chrom_sizes=None):
'''
Input
----------
TSS_bed: location of a premade TSS window bed file. If None, gtf_file, chrom.sizes, and TSS_region must be specified.
gtf_file: if no TSS_bed is provided, location of gtf_file must be included here.
TSS_region: if no TSS_bed is provided, specify the distances before and after TSS.
chrom_sizes: file of chromosome sizes (no header in file)
H3K4me1_bw: location of H3K4me1 bigwig file (should be normalized ie. FC over input)
H3K4me3_bw: location of H3K4me3 bigwig file (should be normalized ie. FC over input)
H3K4me1_bed: peakfile for H3K4me1
H3K27ac_bed: peakfile for H3K27ac
name: name of the output sample
Output
-----------
Saves enhancer bed file to disk as well as TSS window bed file if TSS_bed is not provided
returns an enhancer bedfile
'''
import gtfparse
import pyBigWig
if TSS_bed is None:
gtf = gtfparse.read_gtf(gtf_file)
TSS = gtf[gtf.feature == 'gene'][['seqname', 'start', 'end', 'strand']]
TSS['TSS'] = TSS[['start', 'end', 'strand']].apply(lambda x: x[0] if x[2] == '+' else x[1], axis=1)
TSS_slop = pd.DataFrame({'chr': TSS.seqname,
'start': TSS.TSS - TSS_region[0],
'end': TSS.TSS + TSS_region[1]
},
index=TSS.index)
low_index = TSS_slop[TSS_slop.start < 0].index
TSS_slop.loc[low_index, 'start'] = 0
chrom = pd.read_csv(chrom_sizes, header=None, index_col=0, sep="\t")
import unittest
import sys
import os
import os.path
from os.path import dirname, abspath, expanduser, exists, join
import shutil
import subprocess
TEST_DIR=abspath(expanduser(dirname(__file__)))
try:
import dataworkspaces
except ImportError:
sys.path.append(os.path.abspath(".."))
from dataworkspaces.utils.subprocess_utils import find_exe
TEMPDIR=os.path.abspath(os.path.expanduser(__file__)).replace('.py', '_data')
WS_DIR=join(TEMPDIR, 'workspace')
CODE_DIR=join(WS_DIR, 'code')
NOTEBOOK='test_jupyter_kit.ipynb'
PYTHONPATH=os.path.abspath("..")
try:
JUPYTER=find_exe('jupyter', 'install Jupyter before running this test')
ERROR = None
except Exception as e:
ERROR = e
JUPYTER=None
try:
import pandas
except ImportError:
pandas = None
try:
import numpy
except ImportError:
numpy = None
@unittest.skipUnless(JUPYTER is not None, "SKIP: No Jupyter install found: %s"%ERROR)
class TestJupyterKit(unittest.TestCase):
def setUp(self):
if exists(TEMPDIR):
shutil.rmtree(TEMPDIR)
os.mkdir(TEMPDIR)
os.mkdir(WS_DIR)
self.dws=find_exe("dws", "Make sure you have enabled your python virtual environment")
self._run_dws(['init', '--hostname', 'test-host',
'--create-resources=code,source-data,intermediate-data,results'],
verbose=False)
shutil.copy(NOTEBOOK, join(CODE_DIR, NOTEBOOK))
def tearDown(self):
if exists(TEMPDIR):
shutil.rmtree(TEMPDIR)
def _run_dws(self, dws_args, cwd=WS_DIR, env=None, verbose=True):
if verbose:
command = self.dws + ' --verbose --batch '+ ' '.join(dws_args)
else:
command = self.dws + ' --batch '+ ' '.join(dws_args)
print(command + (' [%s]' % cwd))
r = subprocess.run(command, cwd=cwd, shell=True, env=env)
r.check_returncode()
def test_jupyter(self):
command = "%s nbconvert --to notebook --execute %s" % (JUPYTER, NOTEBOOK)
print(command)
import copy
env=copy.copy(os.environ)
env['PYTHONPATH']=PYTHONPATH
print("set pythonpath to %s" % PYTHONPATH)
r = subprocess.run(command, cwd=CODE_DIR, shell=True, env=env)
r.check_returncode()
self._run_dws(['snapshot', '-m', "'snapshot of notebook run'", 'S1'],
verbose=False)
@unittest.skipUnless(JUPYTER is not None, "SKIP: No Jupyter install found: %s"%ERROR)
@unittest.skipUnless(pandas is not None, "SKIP: pandas is not installed")
@unittest.skipUnless(numpy is not None, "SKIP: numpy is not installed")
class TestHeatmapBinning(unittest.TestCase):
def test_no_unique(self):
from dataworkspaces.kits.jupyter import _metric_col_to_colormap
from pandas.testing import assert_series_equal
bins = _metric_col_to_colormap(pandas.Series([numpy.nan, numpy.nan]))
assert_series_equal(pandas.Series([-1,-1]), bins)
def test_one_unique(self):
from dataworkspaces.kits.jupyter import _metric_col_to_colormap
from pandas.testing import assert_series_equal
bins = _metric_col_to_colormap(pandas.Series([1.2, numpy.nan, 1.2]))
assert_series_equal(pandas.Series([3,-1,3]), bins)
def test_two_unique(self):
from dataworkspaces.kits.jupyter import _metric_col_to_colormap
from pandas.testing import assert_series_equal
bins = _metric_col_to_colormap(pandas.Series([1.4, numpy.nan, 1.2]))
assert_series_equal(pandas.Series([4,-1,2]), bins)
def test_three_unique(self):
from dataworkspaces.kits.jupyter import _metric_col_to_colormap
from pandas.testing import assert_series_equal
bins = _metric_col_to_colormap(pandas.Series([1.4, numpy.nan, 1.2, 1.0]))
assert_series_equal(pandas.Series([4,-1,3, 2]), bins, check_dtype=False)
def test_four_unique(self):
from dataworkspaces.kits.jupyter import _metric_col_to_colormap
from pandas.testing import assert_series_equal
bins = _metric_col_to_colormap(pandas.Series([1.4, numpy.nan, 1.2, 1.0, 1.0, 0.8]))
assert_series_equal(pandas.Series([4,-1,3, 2,2,2]), bins, check_dtype=False)
def test_five_unique(self):
from dataworkspaces.kits.jupyter import _metric_col_to_colormap
from pandas.testing import assert_series_equal
bins = _metric_col_to_colormap(pandas.Series([1.4, numpy.nan, 1.2, 1.0, 1.0, 0.8, 0.4]))
assert_series_equal(pandas.Series([5,-1,4, 2, 2, 1, 1]), bins, check_dtype=False)
def test_six_unique(self):
from dataworkspaces.kits.jupyter import _metric_col_to_colormap
from pandas.testing import assert_series_equal
bins = _metric_col_to_colormap(pandas.Series([1.4, numpy.nan, 1.2, 1.0, 1.0, 0.8, 0.4, 1.5]))
assert_series_equal(pandas.Series([4,-1,3, 2, 2, 1, 1, 5]), bins, check_dtype=False)
def test_seven_unique(self):
from dataworkspaces.kits.jupyter import _metric_col_to_colormap
from pandas.testing import assert_series_equal
bins = _metric_col_to_colormap(pandas.Series([0.2, 1.4, numpy.nan, 1.2, 1.0, 1.0, 0.8, 0.4, 1.5]))
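# Usage sketch for the function exercised above (values are hypothetical):
# _metric_col_to_colormap maps a numeric metric column to integer colour bins,
# with -1 reserved for NaN entries, as the assertions above demonstrate.
#
#   from dataworkspaces.kits.jupyter import _metric_col_to_colormap
#   bins = _metric_col_to_colormap(pandas.Series([0.1, 0.5, numpy.nan, 0.9]))
#   # bins is a pandas.Series of integer bin indices; the NaN entry maps to -1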
import numpy as np
from nglui import annotation
import pandas as pd
from collections.abc import Collection
from itertools import chain
def _multipoint_transform(row, pt_columns, squeeze_cols):
"""Reshape dataframe to accomodate multiple points in a single row"""
pts = {pcol: np.atleast_2d(row[pcol]) for pcol in pt_columns}
n_pts = pts[pt_columns[0]].shape[0]
rows = [{} for _ in range(n_pts)]
for col in row.index:
if col in pt_columns:
if col in squeeze_cols:
for r, v in zip(rows, pts[col].squeeze().tolist()):
r[col] = v
else:
for r, v in zip(rows, pts[col].tolist()):
r[col] = v
else:
for r in rows:
r[col] = row[col]
return rows
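# Illustrative sketch (hypothetical column names): a row whose point column
# holds an (N, 3) array is expanded into N rows, one per point, with all
# non-point columns copied onto every expanded row.
#
#   row = pd.Series({"pt_position": np.array([[1, 2, 3], [4, 5, 6]]),
#                    "cell_id": 42})
#   _multipoint_transform(row, pt_columns=["pt_position"], squeeze_cols=[])
#   # -> [{"pt_position": [1, 2, 3], "cell_id": 42},
#   #     {"pt_position": [4, 5, 6], "cell_id": 42}]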
class SelectionMapper(object):
"""Class for configuring object selections based on root id
Parameters
----------
data_columns : str or list, optional
Name (or list of names) of the data columns to get ids from. Default is None.
fixed_ids : list, optional
List of ids to select irrespective of data.
"""
def __init__(
self, data_columns=None, fixed_ids=None, fixed_id_colors=None, color_column=None
):
if isinstance(data_columns, str):
data_columns = [data_columns]
self._config = dict(
data_columns=data_columns,
fixed_ids=fixed_ids,
fixed_id_colors=fixed_id_colors,
color_column=color_column,
)
@property
def data_columns(self):
if self._config.get("data_columns", None) is None:
return []
else:
return self._config.get("data_columns")
@property
def fixed_ids(self):
if self._config.get("fixed_ids", None) is None:
return np.array([], dtype=np.uint64)
else:
return np.array(self._config.get("fixed_ids", []), dtype=np.uint64)
@property
def fixed_id_colors(self):
if self._config.get("fixed_id_colors", None) is None:
return []
else:
return list(self._config.get("fixed_id_colors", None))
@property
def color_column(self):
return self._config.get("color_column", None)
def selected_ids(self, data):
"""Uses the rules to generate a list of ids from a dataframe."""
selected_ids = []
if data is not None:
for col in self.data_columns:
selected_ids.append(data[col].values.astype(np.uint64))
selected_ids.append(self.fixed_ids)
return np.concatenate(selected_ids)
def seg_colors(self, data):
colors = {}
if len(self.fixed_id_colors) == len(self.fixed_ids):
for ii, oid in enumerate(self.fixed_ids):
colors[oid] = self.fixed_id_colors[ii]
if self.color_column is not None:
clist = data[self.color_column].to_list()
for col in self.data_columns:
for ii, oid in enumerate(data[col]):
colors[oid] = clist[ii]
return colors
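# Usage sketch (hypothetical column name and ids): select root ids from a
# dataframe column and always include one fixed id.
#
#   mapper = SelectionMapper(data_columns="pt_root_id", fixed_ids=[999])
#   df = pd.DataFrame({"pt_root_id": [101, 102]})
#   mapper.selected_ids(df)
#   # -> uint64 array containing 101, 102 (from the dataframe) followed by 999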
class AnnotationMapperBase(object):
def __init__(
self,
type,
data_columns,
description_column,
linked_segmentation_column,
tag_column,
group_column,
set_position,
gather_linked_segmentations,
share_linked_segmentations,
multipoint,
):
self._config = dict(
type=type,
data_columns=data_columns,
array_data=False,
description_column=description_column,
linked_segmentation_column=linked_segmentation_column,
tag_column=tag_column,
group_column=group_column,
set_position=set_position,
gather_linked_segmentations=gather_linked_segmentations,
share_linked_segmentations=share_linked_segmentations,
multipoint=multipoint,
)
self._tag_map = None
@property
def type(self):
return self._config.get("type", None)
@property
def data_columns(self):
return self._config.get("data_columns", None)
@property
def description_column(self):
return self._config.get("description_column", None)
@property
def linked_segmentation_column(self):
return self._config.get("linked_segmentation_column", None)
@property
def tag_column(self):
return self._config.get("tag_column", None)
@property
def group_column(self):
return self._config.get("group_column", None)
@property
def gather_linked_segmentations(self):
return self._config.get("gather_linked_segmentations", True)
@property
def share_linked_segmentations(self):
return self._config.get("share_linked_segmentations", False)
@property
def set_position(self):
return self._config.get("set_position", False)
@property
def multipoint(self):
return self._config.get("multipoint", False)
def multipoint_reshape(self, data, pt_columns, squeeze_cols=[]):
if data is None or len(data) == 0:
return data
else:
rows = data.apply(
lambda x: _multipoint_transform(
x, pt_columns=pt_columns, squeeze_cols=squeeze_cols
),
axis=1,
).tolist()
return pd.DataFrame.from_records([r for r in chain.from_iterable(rows)])
@property
def tag_map(self):
if self._tag_map is None:
return {}
else:
return self._tag_map
@tag_map.setter
def tag_map(self, tag_list):
if tag_list is None:
self._tag_map = {}
else:
self._tag_map = {tag: ii + 1 for ii, tag in enumerate(tag_list)}
@property
def array_data(self):
return self._config.get("array_data", False)
@array_data.setter
def array_data(self, new_array_data):
self._config["array_data"] = new_array_data
if new_array_data:
self._config["data_columns"] = self._default_array_data_columns()
self._config["description_column"] = None
self._config["linked_segmentation_column"] = None
self._config["tag_column"] = None
def _default_array_data_columns(self):
return []
def _assign_tags(self, data):
if self.tag_column is not None:
anno_tags = []
for row in data[self.tag_column]:
if isinstance(row, Collection) and not isinstance(row, str):
add_annos = [self.tag_map.get(r, None) for r in row]
else:
add_annos = [self.tag_map.get(row, None)]
anno_tags.append(add_annos)
else:
anno_tags = [[None] for x in range(len(data))]
return anno_tags
def _render_data(self, data):
# Set per subclass
return None
def _linked_segmentations(self, data):
if self.linked_segmentation_column is not None:
seg_array = np.vstack(data[self.linked_segmentation_column].values)
linked_segs = [row[~pd.isnull(row)].astype(int) for row in seg_array]
else:
linked_segs = [None for x in range(len(data))]
return linked_segs
def _descriptions(self, data):
if self.description_column is not None:
descriptions = data[self.description_column].values
else:
descriptions = [None for x in range(len(data))]
return descriptions
def _add_groups(self, groups, annos):
vals, inverse = np.unique(groups, return_inverse=True)
inv_inds = np.flatnonzero(~pd.isnull(vals))
import numpy as np
import pandas as pd
from collections import defaultdict
from statsmodels.stats.multitest import multipletests
import pybedtools
from adjustText import adjust_text
from copy import deepcopy
from pysam import FastaFile
import matplotlib.pylab as plt
from darwinian_shift.section import Section, NoMutationsError
from darwinian_shift.transcript import Transcript, NoTranscriptError, CodingTranscriptError
from darwinian_shift.mutation_spectrum import MutationalSpectrum, GlobalKmerSpectrum, read_spectrum, \
EvenMutationalSpectrum
from darwinian_shift.statistics import CDFPermutationTest
from darwinian_shift.reference_data.reference_utils import get_source_genome_reference_file_paths
from darwinian_shift.lookup_classes.errors import MetricLookupException
from darwinian_shift.utils import read_sbs_from_vcf
BED_COLS = ['Chromosome/scaffold name', 'Genomic coding start', 'Genomic coding end',
'Gene name', 'Transcript stable ID']
class DarwinianShift:
"""
Class to process and store data for statistical testing of selection
Calculates the mutation spectrum from the data
Tests of individual genes/transcripts can be run using the run_gene and run_transcript methods.
"""
def __init__(self,
# Input data
data,
# Reference data.
# If supplying the name of the source_genome (e.g. homo_sapiens), it will use pre-downloaded ensembl data from the reference_data directory,
# Specify the ensembl release number to use a specific version, or it will use the most recent release that has been downloaded.
# Alternatively, provide the paths to an exon file and a reference genome fa.gz file.
# This must be compressed with bgzip and with a faidx index file
# the exon file or reference file will take precedence over the source_genome/ensembl_release
# e.g. can specify source_genome and exon file to use the standard genome with a custom set of exons
# Special case, can specify source_genome="GRCh37" to use that human build.
source_genome=None, ensembl_release=None,
exon_file=None, reference_fasta=None,
# Measurements and statistics
lookup=None, # A class which will return metric value. See lookup_classes directory
statistics=None,
# Options
sections=None, # Tab separated file or dataframe.
# PDB file options
pdb_directory=None,
sifts_directory=None,
download_sifts=False,
# MutationalSpectrum options
spectra=None, # Predefined spectra
gene_list=None, transcript_list=None,
deduplicate=False,
# If false, will run over all transcripts in exon data file that have a mutation.
# If true, will pick the longest transcript for each gene.
use_longest_transcript_only=True,
# These will exclude from the tests, not the spectrum.
excluded_positions=None, # dict, key=chrom, val=positions. E.g. {'1': [1, 2, 3]}
excluded_mutation_types=None,
included_mutation_types=None, # This has priority over excluded mutation_types
chunk_size=50000000, # How much of a chromosome will be processed at once.
# Bigger=quicker but more memory
low_mem=True, # Will not store sequence data during spectrum calculation for later reuse.
# Options for testing/reproducibility
random_seed=None, # Int. Set this to return consistent results.
testing_random_seed=None, # Int. Set to get same results, even if changing order or spectra/tests.
verbose=False
):
"""
:param data:
:param source_genome:
:param ensembl_release:
:param exon_file:
:param reference_fasta:
:param lookup:
:param statistics:
:param sections:
:param pdb_directory:
:param sifts_directory:
:param download_sifts:
:param spectra: Spectrum object or list of Spectrum objects. The mutational spectrum is calculated from the data.
To skip this process, use EvenMutationalSpectrum or "Even", although this will impact the statistical results.
Default is a global trinucleotide spectrum.
:param gene_list:
:param transcript_list:
:param deduplicate:
:param use_longest_transcript_only:
:param excluded_positions:
:param excluded_mutation_types:
:param included_mutation_types:
:param chunk_size:
:param low_mem:
:param random_seed:
:param testing_random_seed:
:param verbose:
"""
self.verbose=verbose
self.low_mem=low_mem
if random_seed is not None:
np.random.seed(random_seed)
if isinstance(data, pd.DataFrame):
self.data = data
elif isinstance(data, str): # File path given
if data.endswith('.vcf') or data.endswith('.vcf.gz'):
self.data = read_sbs_from_vcf(data)
else: # Assume the data is in a tab-delimited file including "chr", "pos", "ref" and "mut" columns.
self.data = pd.read_csv(data, sep="\t")
# Filter non-snv mutations.
bases = ['A', 'C', 'G', 'T']
# Take copy here to deal with pandas SettingWithCopyWarning
# From here on, want to be editing views of the following copy of the data and ignore previous unfiltered data.
self.data = self.data[(self.data['ref'].isin(bases)) & (self.data['mut'].isin(bases))].copy()
self.data.loc[:, 'chr'] = self.data['chr'].astype(str)
self.data = self.data.reset_index(drop=True) # Make sure every mutation has a unique index
if self.verbose:
# For tracking which mutations are included in used transcripts.
# Slows process, so only done if verbose=True.
self.data.loc[:, 'included'] = False
self.chromosomes = self.data['chr'].unique()
if deduplicate:
self._deduplicate_data()
self.excluded_positions = excluded_positions
self.excluded_mutation_types = excluded_mutation_types
self.included_mutation_types = included_mutation_types
if pdb_directory is None:
self.pdb_directory = "."
else:
self.pdb_directory = pdb_directory
if sifts_directory is None:
self.sifts_directory = "."
else:
self.sifts_directory = sifts_directory
self.download_sifts = download_sifts
self.lookup = lookup
if hasattr(self.lookup, "setup_project"):
self.lookup.setup_project(self)
exon_file, reference_fasta = self._get_reference_data(source_genome, ensembl_release, exon_file, reference_fasta)
self.exon_data = pd.read_csv(exon_file, sep="\t")
self.exon_data.loc[:, 'Chromosome/scaffold name'] = self.exon_data['Chromosome/scaffold name'].astype(str)
# This filter helps to remove obscure scaffolds so they will not be matched.
self.exon_data = self.exon_data[self.exon_data['Chromosome/scaffold name'].isin(self.chromosomes)]
# Removes exons without any coding bases
self.exon_data = self.exon_data[~pd.isnull(self.exon_data['Genomic coding start'])]
self.exon_data['Genomic coding start'] = self.exon_data['Genomic coding start'].astype(int)
self.exon_data['Genomic coding end'] = self.exon_data['Genomic coding end'].astype(int)
# If transcripts are not specified, may be used to check if excluded mutations are in alternative transcripts
self.unfiltered_exon_data = None
self.use_longest_transcript_only = use_longest_transcript_only
self.gene_list = gene_list
self.transcript_list = transcript_list
self.transcript_objs = {}
# If transcripts not specified, will use longest transcript per gene to calculate signature.
self.signature_transcript_list = None
self.alternative_transcripts = None
self.reference_fasta = reference_fasta
self.transcript_gene_map = {}
self.gene_transcripts_map = defaultdict(set)
self.spectra = None
self._process_spectra(spectra)
if len(set([s.name for s in self.spectra])) < len(self.spectra):
raise ValueError('More than one MutationSpectrum with the same name. Provide unique names to each.')
self.ks = set([getattr(s, 'k', None) for s in self.spectra]) # Kmers to use for the spectra. E.g. 3 for trinucleotides
self.ks.discard(None)
self.chunk_size = chunk_size
self.section_transcripts = None # Transcripts required for the sections.
self.sections = None
additional_results_columns = self._get_sections(sections)
self._set_up_exon_data()
self.checked_included = False
self.total_spectrum_ref_mismatch = 0
if any([(isinstance(s, GlobalKmerSpectrum) and not s.precalculated) for s in self.spectra]):
# Collect data for any signatures that need to be calculated from the global data
if not self.use_longest_transcript_only:
print('WARNING: Using multiple transcripts per gene may double count mutants in the spectrum')
print('Can set use_longest_transcript_only=True and save the spectrum, then run using the pre-calculated spectrum.')
self._calculate_spectra(self.verbose)
if self.verbose:
self._check_non_included_mutations()
if statistics is None:
# Use the default statistics only
# Use the cdf permutation test as the default as it is appropriate for a wide range of null distributions.
self.statistics = [CDFPermutationTest()]
elif not isinstance(statistics, (list, tuple)):
self.statistics = [statistics]
else:
self.statistics = statistics
if testing_random_seed is not None:
for s in self.statistics:
try:
s.set_testing_random_seed(testing_random_seed)
except AttributeError as e:
# Not a test that uses a seed
pass
if len(set([s.name for s in self.statistics])) < len(self.statistics):
raise ValueError('More than one statistic with the same name. Provide unique names to each.')
# Results
self.result_columns = ['gene', 'transcript_id', 'chrom', 'section_id', 'num_mutations']
self.result_columns.extend(additional_results_columns)
self.results = None
self.scored_data = []
def _get_reference_data(self, source_genome, ensembl_release, exon_file, reference_fasta):
if source_genome is None and (exon_file is None or reference_fasta is None):
raise TypeError('Must provide a source_genome or an exon_file and a reference_fasta')
if source_genome is not None and (exon_file is None or reference_fasta is None):
try:
exon_file1, reference_file1 = get_source_genome_reference_file_paths(source_genome, ensembl_release)
except Exception as e:
print('Reference files not found for source_genome:{} and ensembl_release:{}'.format(source_genome, ensembl_release))
print('Try downloading the data using download_reference_data_from_latest_ensembl or download_grch37_reference_data')
print('Or download manually and specify the file paths using exon_file and reference_fasta')
raise e
if exon_file is None:
exon_file = exon_file1
if reference_fasta is None:
reference_fasta = reference_file1
if self.verbose:
print('exon_file:', exon_file)
print('reference_fasta:', reference_fasta)
return exon_file, reference_fasta
def _deduplicate_data(self):
self.data.drop_duplicates(subset=['chr', 'pos', 'ref', 'mut'], inplace=True)
def _process_spectra(self, spectra):
if spectra is None:
self.spectra = [
GlobalKmerSpectrum()
]
elif isinstance(spectra, (list, tuple, set)):
try:
processed_spectra = []
for s in spectra:
if isinstance(s, MutationalSpectrum):
processed_spectra.append(s)
elif isinstance(s, str):
processed_spectra.append(read_spectrum(s))
else:
raise TypeError(
'Each spectrum must be a MutationalSpectrum object or file path to precalculated spectrum, {} given'.format(
type(s)))
self.spectra = processed_spectra
except TypeError as e:
raise TypeError(
'Each spectrum must be a MutationalSpectrum object or file path to precalculated spectrum, {} given'.format(
type(s)))
elif isinstance(spectra, MutationalSpectrum):
self.spectra = [spectra]
elif isinstance(spectra, str):
if spectra.lower() == 'even':
self.spectra = [EvenMutationalSpectrum()]
else:
# File path
self.spectra = [read_spectrum(spectra)]
else:
raise TypeError(
'spectra must be a MutationalSpectrum object or list of MutationalSpectrum objects, {} given'.format(
type(spectra)))
for i, s in enumerate(self.spectra):
s.set_project(self)
s.reset() # Makes sure counts start from zero in case using same spectrum object again.
# Run s.fix_spectrum() for each spectrum to prevent reset.
def _get_sections(self, sections):
additional_results_columns = []
if sections is not None:
if isinstance(sections, str):
self.sections = pd.read_csv(sections, sep="\t")
elif isinstance(sections, pd.DataFrame):
self.sections = sections
else:
raise ValueError('Do not recognize input sections. Should be file path or pandas dataframe')
print(len(self.sections), 'sections')
additional_results_columns = [c for c in self.sections.columns if c != 'transcript_id']
self.section_transcripts = self.sections['transcript_id'].unique()
return additional_results_columns
def make_section(self, section_dict=None, transcript_id=None, gene=None, lookup=None, **kwargs):
"""
:param section_dict: Can be dictionary or pandas Series. Must contain "transcript_id" or "gene" and any
other information required to define a section (e.g. start/end or pdb_id and pdb_chain)
:param transcript_id:
:param gene:
:param lookup: A Lookup object, if given will override the lookup for the DarwinianShift object. Alternatively,
this can be provided in the section_dict under the key "lookup"
:return:
"""
if section_dict is not None:
section_dict_copy = section_dict.copy() # Make sure not to edit the original dictionary/series
if 'transcript_id' in section_dict_copy:
transcript_id = section_dict_copy.pop('transcript_id')
else:
transcript_id = self.get_transcript_id(section_dict_copy['gene'])
if isinstance(transcript_id, set):
transcript_id = list(transcript_id)[0]
print('Multiple transcripts for gene {}. Running {}'.format(section_dict_copy['gene'], transcript_id))
elif transcript_id is not None:
section_dict_copy = {}
elif gene is not None:
transcript_id = self.get_transcript_id(gene)
if isinstance(transcript_id, set):
transcript_id = list(transcript_id)[0]
print('Multiple transcripts for gene {}. Running {}'.format(gene, transcript_id))
elif transcript_id is None:
raise ValueError('No transcript associated with gene {}'.format(gene))
section_dict_copy = {}
else:
raise ValueError('Must provide a section_dict, transcript_id or gene')
transcript_obj = self.get_transcript_obj(transcript_id)
if lookup is not None:
section_dict_copy['lookup'] = lookup
for k, v in kwargs.items():
section_dict_copy[k] = v
sec = Section(transcript_obj, **section_dict_copy)
return sec
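# Usage sketch (hypothetical transcript id and gene; `d` is a DarwinianShift
# instance): a section can be built from a dict/Series holding a transcript_id
# plus extra Section attributes, or directly from a gene name.
#
#   sec = d.make_section({"transcript_id": "ENST00000000001",
#                         "start": 100, "end": 250})
#   sec = d.make_section(gene="KRT5")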
def get_transcript_obj(self, transcript_id):
t = self.transcript_objs.get(transcript_id, None)
if t is None:
t = self.make_transcript(transcript_id=transcript_id)
return t
def get_transcript_id(self, gene):
t = self.gene_transcripts_map.get(gene, None)
if t is not None and len(t) == 1:
return list(t)[0]
else:
return t
def get_gene_name(self, transcript_id):
return self.transcript_gene_map.get(transcript_id, None)
def get_gene_list(self):
"""
If no gene_list or transcript_list has been given for __init__,
this will be a list of the genes that overlap with the data.
:return:
"""
if self.gene_list is not None:
return self.gene_list
else:
return list(self.gene_transcripts_map.keys())
def get_transcript_list(self):
"""
If no gene_list or transcript_list has been given for __init__,
this will be a list of the transcripts that overlap with the data.
:return:
"""
if self.transcript_list is not None:
return self.transcript_list
else:
return list(self.transcript_gene_map.keys())
def make_transcript(self, gene=None, transcript_id=None, genomic_sequence_chunk=None, offset=None,
region_exons=None, region_mutations=None):
"""
Makes a new transcript object and adds to the gene-transcript maps
:param gene:
:param transcript_id:
:return:
"""
if gene is None and transcript_id is None:
raise ValueError('Need to supply gene or transcript_id')
t = Transcript(self, gene=gene, transcript_id=transcript_id,
genomic_sequence_chunk=genomic_sequence_chunk, offset=offset, region_exons=region_exons,
region_mutations=region_mutations)
self.transcript_gene_map[t.transcript_id] = t.gene
t.get_observed_mutations()
self.gene_transcripts_map[t.gene].add(t.transcript_id)
if self.verbose:
self.data.loc[t.transcript_data_locs, 'included'] = True
if not self.low_mem:
self.transcript_objs[t.transcript_id] = t
return t
def get_overlapped_transcripts(self, mut_data, exon_data):
mut_data = mut_data.sort_values(['chr', 'pos'])
mut_bed = pybedtools.BedTool.from_dataframe(mut_data[['chr', 'pos', 'pos']])
exon_bed = pybedtools.BedTool.from_dataframe(exon_data[BED_COLS])
try:
intersection = exon_bed.intersect(mut_bed).to_dataframe()
except pd.errors.EmptyDataError as e:
return None
if not intersection.empty:
intersection.rename({'score': 'Transcript stable ID', 'name': 'Gene name'}, inplace=True, axis=1)
intersection = intersection[['Gene name', 'Transcript stable ID']]
return intersection.drop_duplicates()
else:
return None
def _set_up_exon_data(self):
if self.transcript_list is not None:
if self.section_transcripts:
transcript_list_total = set(self.transcript_list).union(self.section_transcripts)
else:
transcript_list_total = self.transcript_list
self.exon_data = self.exon_data[self.exon_data['Transcript stable ID'].isin(transcript_list_total)]
self.exon_data = self.exon_data.sort_values(['Chromosome/scaffold name', 'Genomic coding start'])
if set(self.transcript_list).difference(self.exon_data['Transcript stable ID'].unique()):
raise ValueError('Not all requested transcripts found in exon data.')
elif self.gene_list is not None: # Given genes. Will use longest transcript for each.
if self.section_transcripts:
self.exon_data = self.exon_data[(self.exon_data['Gene name'].isin(self.gene_list)) |
(self.exon_data['Transcript stable ID'].isin(self.section_transcripts))]
else:
self.exon_data = self.exon_data[self.exon_data['Gene name'].isin(self.gene_list)]
if self.use_longest_transcript_only:
self._remove_unused_transcripts()
self.exon_data = self.exon_data.sort_values(['Chromosome/scaffold name', 'Genomic coding start'])
if set(self.gene_list).difference(self.exon_data['Gene name'].unique()):
raise ValueError('Not all requested genes found in exon data.')
else:
overlapped_transcripts = self.get_overlapped_transcripts(self.data, self.exon_data)
if overlapped_transcripts is None:
raise ValueError('No transcripts found matching the mutations')
self.exon_data = self.exon_data[
self.exon_data['Transcript stable ID'].isin(overlapped_transcripts['Transcript stable ID'])]
self.exon_data = self.exon_data.sort_values(['Chromosome/scaffold name', 'Genomic coding start'])
if self.use_longest_transcript_only:
self._remove_unused_transcripts()
if self.verbose:
print(len(self.exon_data['Gene name'].unique()), 'genes')
# Record the transcripts that are associated with each gene so they can be looked up
ed = self.exon_data[['Gene name', 'Transcript stable ID']].drop_duplicates()
for g, t in zip(ed['Gene name'], ed['Transcript stable ID']):
self.transcript_gene_map[t] = g
self.gene_transcripts_map[g].add(t)
def _remove_unused_transcripts(self):
if self.verbose:
self.unfiltered_exon_data = self.exon_data.copy()
transcripts = set()
for gene, gene_df in self.exon_data.groupby('Gene name'):
longest_cds = gene_df['CDS Length'].max()
transcript_df = gene_df[gene_df['CDS Length'] == longest_cds]
if len(transcript_df) > 0:
transcripts.add(transcript_df.iloc[0]['Transcript stable ID'])
self.signature_transcript_list = list(transcripts)
if self.section_transcripts is not None:
transcripts = transcripts.union(self.section_transcripts)
self.exon_data = self.exon_data[self.exon_data['Transcript stable ID'].isin(transcripts)]
def _check_non_included_mutations(self):
# annotated mutations are those included in one of the given transcripts
if not self.checked_included and self.unfiltered_exon_data is not None:
non_annotated_mutations = self.data[~self.data['included']]
overlapped = self.get_overlapped_transcripts(non_annotated_mutations, self.unfiltered_exon_data)
if overlapped is not None:
genes = overlapped['Gene name'].unique()
print(len(non_annotated_mutations), 'mutations not in used transcripts, of which', len(overlapped),
'are exonic in an alternative transcript')
print('Selected transcripts miss exonic mutations in alternative transcripts for:', genes)
print('Look at self.alternative_transcripts for alternatives')
self.alternative_transcripts = overlapped
else:
print(len(non_annotated_mutations), 'mutations not in used transcripts, of which 0 are exonic '
'in an alternative transcript')
self.checked_included = True
def _get_processing_chunks(self):
# Divide the genes/transcripts into sections which are nearby in chromosomes
chunks = []
for chrom, chrom_df in self.exon_data.groupby('Chromosome/scaffold name'):
chrom_data = self.data[self.data['chr'] == chrom]
while len(chrom_df) > 0:
first_pos = chrom_df['Genomic coding start'].min()
chunk = chrom_df[chrom_df['Genomic coding start'] <= first_pos + self.chunk_size]
chunk_transcripts = chunk['Transcript stable ID'].unique()
chunk = chrom_df[chrom_df['Transcript stable ID'].isin(chunk_transcripts)] # Do not split up transcripts
last_pos = chunk['Genomic coding end'].max()
chunk_data = chrom_data[(chrom_data['pos'] >= first_pos) & (chrom_data['pos'] <= last_pos)]
chunks.append({
'chrom': str(chrom),
'start': int(first_pos), 'end': int(last_pos), 'transcripts': chunk_transcripts,
'exon_data': chunk, 'mut_data': chunk_data
})
# Remove the transcripts in last chunk
chrom_df = chrom_df[~chrom_df['Transcript stable ID'].isin(chunk_transcripts)]
return chunks
def _chunk_iterator(self):
chunks = self._get_processing_chunks()
f = FastaFile(self.reference_fasta)
if len(self.ks) == 0:
max_k = 0
else:
max_k = max(self.ks)
if max_k == 0:
context = 0
else:
context = int((max_k - 1) / 2)
for c in chunks:
offset = c['start'] - context
try:
chunk_seq = f[c['chrom']][
offset - 1:c['end'] + context].upper() # Another -1 to move to zero based coordinates.
except KeyError as e:
print('Did not recognize chromosome', c['chrom'])
continue
for t in c['transcripts']:
yield t, chunk_seq, offset, c['exon_data'], c['mut_data']
def _calculate_spectra(self, verbose=False):
for transcript_id, chunk_seq, offset, region_exons, region_mutations in self._chunk_iterator():
if self.signature_transcript_list is not None and transcript_id not in self.signature_transcript_list:
continue
try:
# Get the trinucleotides from the transcript.
t = self.make_transcript(transcript_id=transcript_id, genomic_sequence_chunk=chunk_seq, offset=offset,
region_exons=region_exons, region_mutations=region_mutations)
for s in self.spectra:
s.add_transcript_muts(t)
if t.mismatches > 0:
self.total_spectrum_ref_mismatch += t.mismatches
elif t.dedup_mismatches > 0:  # If all signatures deduplicate, just count those mismatches.
self.total_spectrum_ref_mismatch += t.dedup_mismatches
if verbose:
print('{}:{}'.format(t.gene, t.transcript_id), end=" ")
except (NoTranscriptError, CodingTranscriptError):
pass
except Exception as e:
print('Failed to collect signature data from {}'.format(transcript_id))
raise e
if verbose:
print()
for sig in self.spectra:
sig.get_spectrum()
if self.total_spectrum_ref_mismatch > 0:
print('Warning: {} mutations do not match reference base'.format(self.total_spectrum_ref_mismatch))
def run_gene(self, gene, plot=False, spectra=None, statistics=None, start=None, end=None,
excluded_mutation_types=None, included_mutation_types=None,
included_residues=None, excluded_residues=None, pdb_id=None, pdb_chain=None, lookup=None,
**additional_kwargs):
"""
Run analysis for a single gene
:param gene: Gene name.
:param plot: Will plot the standard results of the analysis. If False, can still be plotted afterwards from
the returned class object. Plotting afterwards also enables more plotting options.
:param spectra: The mutational spectrum or spectra to use for the analysis. If None, will use the spectra
of the project.
:param statistics: The statistical tests to run. If None, will use the statistics of the project.
:param start: Will exclude residues before this one from the analysis. If None, will start from the first
residue of the protein.
:param end: Will exclude residues after this one from the analysis. If None, will end at the last
residue of the protein.
:param excluded_mutation_types: Can be string or list of strings. Mutation types to exclude from the
analysis. E.g. ['synonymous', 'nonsense']. If None, will use the excluded_mutation_types of the project.
:param included_mutation_types: Can be string or list of strings. Mutation types to include in the
analysis. E.g. ['synonymous', 'nonsense']. If None, will use the included_mutation_types of the project.
:param included_residues: List or array of integers. The residues to analyse. If None, will analyse all
residues (except those excluded by other arguments).
:param excluded_residues: List or array of integers. The residues to exclude from the analysis.
:param pdb_id: For analyses that use a protein structure. Four letter ID of the pdb file to use.
:param pdb_chain: For analyses that use a protein structure. The chain to use for the analysis.
:param lookup: The class object or function used to score the mutations. If None, will use the lookup of
the project.
:param additional_kwargs: Any additional attributes that will be assigned to the Section object created.
These can be used by the lookup class.
:return: Section object
"""
gene_transcripts = self.gene_transcripts_map[gene]
if len(gene_transcripts) == 0:
transcript_obj = self.make_transcript(gene=gene)
transcript_id = transcript_obj.transcript_id
else:
transcript_id = list(gene_transcripts)[0]
if len(gene_transcripts) > 1:
print('Multiple transcripts for gene {}. Running {}'.format(gene, transcript_id))
return self.run_transcript(transcript_id, plot=plot, spectra=spectra, statistics=statistics,
start=start, end=end,
excluded_mutation_types=excluded_mutation_types,
included_mutation_types=included_mutation_types, included_residues=included_residues,
excluded_residues=excluded_residues, pdb_id=pdb_id, pdb_chain=pdb_chain,
lookup=lookup, **additional_kwargs)
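# Example call (hypothetical gene name; `d` is a DarwinianShift instance):
# restrict the analysis to part of the protein and drop some mutation types.
#
#   section = d.run_gene("NOTCH1", start=1, end=500,
#                        excluded_mutation_types=["synonymous", "nonsense"])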
def run_transcript(self, transcript_id, plot=False, spectra=None, statistics=None, start=None, end=None,
excluded_mutation_types=None, included_mutation_types=None,
included_residues=None, excluded_residues=None, pdb_id=None, pdb_chain=None, lookup=None,
**additional_kwargs):
"""
Run analysis for a single transcript.
:param transcript_id: Transcript id.
:param plot: Will plot the standard results of the analysis. If False, can still be plotted afterwards from
the returned class object. Plotting afterwards also enables more plotting options.
:param spectra: The mutational spectrum or spectra to use for the analysis. If None, will use the spectra
of the project.
:param statistics: The statistical tests to run. If None, will use the statistics of the project.
:param start: Will exclude residues before this one from the analysis. If None, will start from the first
residue of the protein.
:param end: Will exclude residues after this one from the analysis. If None, will end at the last
residue of the protein.
:param excluded_mutation_types: Can be string or list of strings. Mutation types to exclude from the
analysis. E.g. ['synonymous', 'nonsense']. If None, will use the excluded_mutation_types of the project.
:param included_mutation_types: Can be string or list of strings. Mutation types to include in the
analysis. E.g. ['synonymous', 'nonsense']. If None, will use the included_mutation_types of the project.
:param included_residues: List or array of integers. The residues to analyse. If None, will analyse all
residues (except those excluded by other arguments).
:param excluded_residues: List or array of integers. The residues to exclude from the analysis.
:param pdb_id: For analyses that use a protein structure. Four letter ID of the pdb file to use.
:param pdb_chain: For analyses that use a protein structure. The chain to use for the analysis.
:param lookup: The class object or function used to score the mutations. If None, will use the lookup of
the project.
:param additional_kwargs: Any additional attributes that will be assigned to the Section object created.
These can be used by the lookup class.
:return: Section object
"""
try:
section = Section(self.get_transcript_obj(transcript_id), start=start, end=end,
pdb_id=pdb_id, pdb_chain=pdb_chain,
excluded_mutation_types=excluded_mutation_types,
included_mutation_types=included_mutation_types, included_residues=included_residues,
excluded_residues=excluded_residues, lookup=lookup, **additional_kwargs)
except (CodingTranscriptError, NoTranscriptError) as e:
print(type(e).__name__, e, '- Unable to run for', transcript_id)
return None
return self.run_section(section, plot=plot, spectra=spectra, statistics=statistics)
def run_section(self, section, plot=False, verbose=False, spectra=None, statistics=None, lookup=None):
"""
Run statistics and optionally plot plots for a section.
The section can be a Section object, or a dictionary that defines the Section object to be made
The spectra and statistics can be passed here, but other options for the Section (such as included/excluded
mutation types) must be defined when the Section object is created or in the dictionary passed to the section arg
:param section: Section object or dictionary with Section.__init__ kwargs.
:param plot: Will plot the standard results of the analysis. If False, can still be plotted afterwards from
the returned class object. Plotting afterwards also enables more plotting options.
:param verbose: Will print section id and gene name when running.
:param spectra: The mutational spectrum or spectra to use for the analysis. If None, will use the spectra of
the project.
:param statistics: The statistical tests to run. If None, will use the statistics of the project.
:param lookup: A Lookup object, if given will override the lookup for the DarwinianShift object. Alternatively,
this can be provided in the section_dict under the key "lookup"
:return: Section object
"""
if self.lookup is None and lookup is None:
# No lookup defined for the project or given as an argument to this function.
# See if one has been defined for the section
if isinstance(section, (dict, pd.Series)):
section_lookup = section.get('lookup', None)
else:
section_lookup = getattr(section, 'lookup', None)
if section_lookup is None:
raise ValueError('No lookup defined. Define one for the whole project using ' \
'self.change_lookup() or provide one to this function.')
try:
if isinstance(section, (dict, pd.Series)):
# Dictionary/series with attributes to define a new section
section = self.make_section(section, lookup=lookup)
elif lookup is not None:
section.change_lookup_inplace(lookup)
if verbose:
print('Running', section.section_id, section.gene)
section.run(plot_permutations=plot, spectra=spectra, statistics=statistics)
if plot:
section.plot()
return section
except (NoMutationsError, AssertionError, CodingTranscriptError, NoTranscriptError, MetricLookupException) as e:
if isinstance(section, Section):
print(type(e).__name__, e, '- Unable to run for', section.section_id)
else:
print(type(e).__name__, e, '- Unable to run for', section)
return None
def run_all(self, verbose=None, spectra=None, statistics=None):
"""
Run analysis over all genes, transcripts or sections defined in the project.
After running, the results can be seen in the 'results' attribute of the DarwinianShift object.
The scores of each mutation can also be seen in the 'scored_data' attribute of the DarwinianShift object.
:param verbose: Will print additional information.
:param spectra: The mutational spectrum or spectra to use for the analysis. If None, will use the spectra of
the project.
:param statistics: The statistical tests to run. If None, will use the statistics of the project.
:return:
"""
if verbose is None:
verbose = self.verbose
results = []
scored_data = []
for t, chunk_seq, offset, region_exons, region_mutations in self._chunk_iterator():
if self.sections is not None: # Sections are defined.
if t not in self.section_transcripts:
continue
transcript_obj = self.transcript_objs.get(t)
if transcript_obj is None:
try:
transcript_obj = self.make_transcript(transcript_id=t, genomic_sequence_chunk=chunk_seq,
offset=offset, region_exons=region_exons,
region_mutations=region_mutations)
except (NoTranscriptError, CodingTranscriptError) as e:
if verbose:
print(e)
continue
if self.sections is None: # If the sections to run are not defined, use the whole transcript as one section
transcript_sections = [Section(transcript_obj)]
else:
transcript_sections_df = self.sections[self.sections['transcript_id'] == transcript_obj.transcript_id]
transcript_sections = []
for i, row in transcript_sections_df.iterrows():
transcript_sections.append(self.make_section(row))
for section in transcript_sections:
res = self.run_section(section, verbose=verbose, spectra=spectra, statistics=statistics)
if res is not None:
scored_data.append(res.observed_mutations)
results.append(res.get_results_dictionary())
if results:
self.results = pd.DataFrame(results)
#!/usr/bin/env python3
"""
Generates all the actual figures. Run like
python3 src/plot.py PLOT_NAME
"""
import argparse
import collections
import pandas as pd
import numpy as np
import humanize
import matplotlib
matplotlib.use('Agg') # NOQA
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1.inset_locator import InsetPosition, inset_axes
import seaborn as sns
import scipy
tgp_region_pop = {
'AMR': ['CLM', 'MXL', 'PUR', 'PEL'],
'AFR': ['LWK', 'ASW', 'GWD', 'MSL', 'YRI', 'ACB', 'ESN'],
'EAS': ['CHS', 'KHV', 'JPT', 'CHB', 'CDX'],
'SAS': ['BEB', 'STU', 'GIH', 'PJL', 'ITU'],
'EUR': ['FIN', 'GBR', 'IBS', 'CEU', 'TSI']
}
# Standard order.
tgp_populations = [
'CHB', 'JPT', 'CHS', 'CDX', 'KHV',
'CEU', 'TSI', 'FIN', 'GBR', 'IBS',
'YRI', 'LWK', 'GWD', 'MSL', 'ESN', 'ASW', 'ACB',
'MXL', 'PUR', 'CLM', 'PEL',
'GIH', 'PJL', 'BEB', 'STU', 'ITU']
def get_tgp_region_colours():
return {
"EAS": sns.color_palette("Greens", 2)[1],
"EUR": sns.color_palette("Blues", 1)[0],
"AFR": sns.color_palette("Wistia", 3)[0],
"AMR": sns.color_palette("Reds", 2)[1],
"SAS": sns.color_palette("Purples", 2)[1],
}
def get_sgdp_region_colours():
cols = get_tgp_region_colours()
return {
'Africa': cols["AFR"],
'America': cols["AMR"],
'EastAsia': cols["EAS"],
'SouthAsia': cols["SAS"],
'Oceania': "brown",
'WestEurasia': cols["EUR"],
'CentralAsiaSiberia': "pink"
}
def get_tgp_colours():
# TODO add option to give shades for the different pops.
region_colours = get_tgp_region_colours()
pop_colour_map = {}
for region, pops in tgp_region_pop.items():
for pop in pops:
pop_colour_map[pop] = region_colours[region]
return pop_colour_map
class Figure(object):
"""
Superclass of figures for the paper. Each figure is a concrete subclass.
"""
name = None
def __init__(self):
datafile_name = "data/{}.csv".format(self.name)
self.data = pd.read_csv(datafile_name)
def save(self, figure_name=None, bbox_inches="tight"):
if figure_name is None:
figure_name = self.name
print("Saving figure '{}'".format(figure_name))
plt.savefig("figures/{}.pdf".format(figure_name), bbox_inches='tight', dpi=400)
plt.savefig("figures/{}.png".format(figure_name), bbox_inches='tight', dpi=400)
plt.close()
def error_label(self, error, label_for_no_error = "No genotyping error"):
"""
Make a nice label for an error parameter
"""
try:
error = float(error)
return "Error rate = {}".format(error) if error else label_for_no_error
except (ValueError, TypeError):
try: # make a simplified label
if "Empirical" in error:
error = "With genotyping"
except:
pass
return "{} error".format(error) if error else label_for_no_error
class StoringEveryone(Figure):
"""
Figure showing how tree sequences can store the entire human population's
worth of variation data.
"""
name = "storing_everyone"
def plot(self):
df = self.data
df = df[df.sample_size > 10]
fig = plt.figure()
ax1 = fig.add_subplot(111)
xytext = (18, 0)
GB = 1024**3
largest_n = np.array(df.sample_size)[-1]
index = df.vcf > 0
line, = ax1.loglog(df.sample_size, df.vcf_fit, "-", color="tab:pink", label="")
ax1.loglog(
df.sample_size[index], df.vcf[index], "d", label="VCF",
color=line.get_color())
largest_value = np.array(df.vcf_fit)[-1]
ax1.annotate(
humanize.naturalsize(largest_value * GB, binary=True, format="%d"),
textcoords="offset points", xytext=xytext,
xy=(largest_n, largest_value), xycoords="data")
line, = ax1.loglog(
df.sample_size, df.vcfz_fit, ":", label="", color=line.get_color())
ax1.loglog(
df.sample_size[index], df.vcfz[index], "d", label="Compressed VCF",
color=line.get_color(), markerfacecolor='w')
largest_value = np.array(df.vcfz_fit)[-1]
ax1.annotate(
humanize.naturalsize(largest_value * GB, binary=True, format="%d"),
textcoords="offset points", xytext=xytext,
xy=(largest_n, largest_value), xycoords="data")
index = df.pbwt > 0
line, = ax1.loglog(
df.sample_size[index], df.pbwt[index], "-", color="tab:orange", label="")
ax1.loglog(
df.sample_size[index], df.pbwt[index], "s", label="pbwt",
color=line.get_color())
line, = ax1.loglog(
df.sample_size[index], df.pbwtz[index], ":", label="", color=line.get_color())
ax1.loglog(
df.sample_size[index], df.pbwtz[index], "s", label="Compressed pbwt",
color=line.get_color(), markerfacecolor='w')
line, = ax1.loglog(
df.sample_size, df.uncompressed, "o", label="Trees", color="b")
ax1.loglog(df.sample_size, df.tsk_fit, "-", color=line.get_color(), label="")
largest_value = np.array(df.tsk_fit)[-1]
ax1.annotate(
humanize.naturalsize(largest_value * GB, binary=True, format="%d"),
textcoords="offset points", xytext=xytext,
xy=(largest_n, largest_value), xycoords="data")
line, = ax1.loglog(
df.sample_size, df.tskz_fit, ":", label="", color=line.get_color())
ax1.loglog(df.sample_size, df.compressed, "o", label="Compressed trees",
color=line.get_color(), markerfacecolor='w')
largest_value = np.array(df.tskz_fit)[-1]
ax1.annotate(
humanize.naturalsize(largest_value * GB, binary=True, format="%d"),
textcoords="offset points", xytext=xytext,
xy=(largest_n, largest_value), xycoords="data")
ax1.set_xlabel("Number of chromosomes")
ax1.set_ylabel("File size (GiB)")
plt.legend()
# plt.tight_layout()
self.save()
plt.clf()
fig = plt.figure()
ax1 = fig.add_subplot(111)
largest_n = 10**7
index = df.sample_size <= largest_n
# Rescale to MiB
uncompressed = np.array(df.uncompressed[index] * 1024)
compressed = np.array(df.compressed[index] * 1024)
pbwt = np.array(df.pbwt[index] * 1024)
pbwtz = np.array(df.pbwtz[index] * 1024)
ax1.loglog(df.sample_size[index], uncompressed, "-o", label="trees")
largest_value = uncompressed[-1]
ax1.annotate(
humanize.naturalsize(largest_value * 1024**2, binary=True, format="%.1f"),
textcoords="offset points", xytext=xytext,
xy=(largest_n, largest_value), xycoords="data")
ax1.loglog(df.sample_size[index], compressed, "-o", label="Compressed trees")
largest_value = compressed[-1]
ax1.annotate(
humanize.naturalsize(largest_value * 1024**2, binary=True, format="%d"),
textcoords="offset points", xytext=xytext,
xy=(largest_n, largest_value - 10), xycoords="data")
ax1.loglog(df.sample_size[index], pbwt, "-s", label="pbwt")
largest_value = pbwt[-1]
ax1.annotate(
humanize.naturalsize(largest_value * 1024**2, binary=True, format="%d"),
textcoords="offset points", xytext=xytext,
xy=(largest_n, largest_value), xycoords="data")
ax1.loglog(df.sample_size[index], pbwtz, "-s", label="Compressed pbwt")
largest_value = pbwtz[-1]
ax1.annotate(
humanize.naturalsize(largest_value * 1024**2, binary=True, format="%d"),
textcoords="offset points", xytext=xytext,
xy=(largest_n, largest_value + 10), xycoords="data")
ax1.set_ylabel("File size (MiB)")
ax1.set_xlabel("Number of chromosomes")
plt.legend()
self.save("storing_everyone_pbwt")
class SampleEdges(Figure):
name = "sample_edges"
def plot_region(self, df, ax, rotate_labels):
ax.plot(df.sample_edges.values, "o")
breakpoints = np.where(df.population.values[1:] != df.population.values[:-1])[0]
breakpoints = np.array([-1] + list(breakpoints) + [len(df)-1])+0.5
x_labels = []
x_pos = []
last = -0.5
for bp in breakpoints[1:]:
x_labels.append(df.population[int(bp - 1.5)])
x_pos.append(last + (bp - last) / 2)
last = bp
# use major ticks for labels, so they are not cut off
ax.tick_params(axis="x", which="major", length=0)
ax.set_xticks(x_pos)
if rotate_labels:
ax.set_xticklabels(x_labels, rotation=30)
else:
ax.set_xticklabels(x_labels)
ax.tick_params(axis="x", which="minor", length=0)
ax.set_xticks(breakpoints, minor=True)
ax.set_xticklabels([], minor=True)
ax.set_xlim(-0.5, len(df) - 0.5)
ax.set_title(df.region.unique()[0])
ax.grid(which="minor", axis="x")
def plot_summary(self):
full_df = self.data
fig, axes = plt.subplots(2, 1, figsize=(14, 6))
plt.subplots_adjust(hspace=0.5)
for ax, dataset in zip(axes, ["1kg", "sgdp"]):
df = full_df[full_df.dataset == dataset]
df = df.sort_values(by=["region", "population", "sample", "strand"])
df = df.reset_index()
ax.plot(df.sample_edges.values)
breakpoints = np.where(df.region.values[1:] != df.region.values[:-1])[0]
for bp in breakpoints:
ax.axvline(x=bp, ls="--", color="black")
last = 0
for j, bp in enumerate(list(breakpoints) + [len(df)]):
x = last + (bp - last) / 2
y = -400
if dataset == "1kg":
y = -200
ax.annotate(
df.region[bp - 1], xy=(x, y), horizontalalignment='center',
annotation_clip=False)
last = bp
breakpoints = np.where(
df.population.values[1:] != df.population.values[:-1])[0]
breakpoints = list(breakpoints) + [len(df)]
ax.set_xticks(breakpoints)
ax.set_xticklabels([])
ax.set_ylabel("Sample Edges")
ax.grid(axis="x")
ax.set_xlim(0, len(df))
ax.xaxis.set_ticks_position('none')
title = "SGDP"
if dataset == "1kg":
title = "TGP"
last = 0
for bp in breakpoints:
x = last + (bp - last) / 2
last = bp
ax.annotate(
df.population[int(x)], xy=(x, 100), horizontalalignment='center',
annotation_clip=False)
outliers = ["NA20289", "HG02789"]
for outlier in outliers:
tmp_df = df[df["sample"] == outlier]
x = tmp_df.index.values[0]
ax.annotate(
outlier, xy=(x, 1550), horizontalalignment='center',
annotation_clip=False, style='italic')
ax.set_title(title + " individuals")
axes[0].set_ylim(0, 1500)
axes[1].set_ylim(0, 3500)
self.save("sample_edges_summary")
def plot(self):
self.plot_summary()
full_df = self.data
for ds in ["1kg", "sgdp"]:
df_ds = full_df[full_df.dataset == ds]
print(ds, "Overall", df_ds.sample_edges.mean(), sep="\t")
fig, axes = plt.subplots(5, 1, figsize=(14, 16))
plt.subplots_adjust(hspace=0.5)
if ds == "1kg":
plt.title("TGP sample edges per population")
else:
plt.title("SGDP sample edges per population")
for ax, region in zip(axes, df_ds.region.unique()):
df = df_ds[df_ds.region == region]
df = df.sort_values(by=["population", "sample", "strand"])
df = df.reset_index()
print(ds, region, df.sample_edges.mean(), sep="\t")
self.plot_region(df, ax, rotate_labels=ds == "sgdp")
self.save("{}_{}".format(self.name, ds))
class FrequencyDistanceAccuracy(Figure):
"""
Plot accuracy of frequency ordering pairs of mutations vs distance between mutations
The csv file is created by running
python3 ./src/freq_dist_simulations.py
or, if you have, say 40 processors available, you can run it in parallel like
python3 -p 40 ./src/freq_dist_simulations.py
"""
name = "frequency_distance_accuracy_singletons"
def plot(self):
df = self.data
plt.plot((df.SeparationDistanceStart + df.SeparationDistanceEnd)/2/1e3,
df.Agree/df.Total,label=self.error_label(None),
color="k", linestyle="-")
plt.plot((df.SeparationDistanceStart + df.SeparationDistanceEnd)/2/1e3,
df.ErrorAgree/df.Total,label=self.error_label("EmpiricalError"),
color="k", linestyle="-.")
plt.xlabel("Distance between variants (kb)")
plt.ylabel("Proportion of mutation pairs correctly ordered")
plt.legend()
self.save()
class AncestorAccuracy(Figure):
"""
Compare lengths of real vs reconstructed ancestors, using 2 csv files generated by
TSINFER_DIR=../tsinfer #set to your tsinfer directory
python3 ${TSINFER_DIR}/evaluation.py aq -l 5 -d data -C -s 321 -e 0
python3 ${TSINFER_DIR}/evaluation.py aq -l 5 -d data -C -s 321 -e data/EmpiricalErrorPlatinum1000G.csv
cd data
cat anc-qual_n=100_Ne=5000_L=5.0_mu=1e-08_rho=1e-08_err=data_EmpiricalErrorPlatinum1000G_error_data.csv > ancestor_accuracy.csv
tail +2 anc-qual_n=100_Ne=5000_L=5.0_mu=1e-08_rho=1e-08_err=0.0_error_data.csv >> ancestor_accuracy.csv
""" # noqa
name = "ancestor_accuracy"
def __init__(self):
super().__init__()
# rescale length to kb
self.data["Real length"] /= 1e3
self.data["Estim length"] /= 1e3
# put high inaccuracy first
self.data = self.data.sort_values("Inaccuracy")
def plot(self):
n_bins=50
max_length = max(np.max(self.data["Real length"]), np.max(self.data["Estim length"]))* 1.1
min_length = min(np.min(self.data["Real length"]), np.min(self.data["Estim length"])) * 0.9
fig = plt.figure(figsize=(20, 8))
gs = matplotlib.gridspec.GridSpec(1, 4, width_ratios=[5,5,0.5,2.5])
ax0 = fig.add_subplot(gs[0])
axes = [ax0, fig.add_subplot(gs[1], sharex=ax0, sharey=ax0, yticklabels=[])]
c_ax = fig.add_subplot(gs[2])
h_ax = fig.add_subplot(gs[3], sharey=c_ax)
for ax, error in zip(axes, sorted(self.data.seq_error.unique())):
df = self.data.query("seq_error == @error")
ls = "-" if ax == axes[0] else "-."
im = ax.scatter(df["Real length"], df["Estim length"], c=1-df["Inaccuracy"],
s=20, cmap=matplotlib.cm.viridis)
ax.plot([0, max_length], [0, max_length], '-',
color='grey', zorder=-1, linestyle=ls)
ax.set_title(self.error_label(error))
ax.set_xscale('log')
ax.set_yscale('log')
n_greater_eq = sum(df["Estim length"]/df["Real length"] >= 1)
n_less = sum(df["Estim length"]/df["Real length"] < 1)
ax.text(min_length*1.1, min_length*2,
"{} haplotypes $\geq$ true length".format(n_greater_eq),
rotation=45, va='bottom', ha='left', color="#2ca02c")
ax.text(min_length*2, min_length*1.1,
"{} haplotypes $<$ true length".format(n_less),
rotation=45, va='bottom', ha='left', color="#d62728")
ax.set_aspect(1)
ax.set_xlim(min_length, max_length)
ax.set_ylim(min_length, max_length)
ax.set_xlabel("True ancestral haplotype length (kb)")
if ax == axes[0]:
ax.set_ylabel("Inferred ancestral haplotype length (kb)")
n, bins, patches = h_ax.hist(1-df["Inaccuracy"],
bins=n_bins, orientation="horizontal", alpha=0.5,
edgecolor='black', linewidth=1, linestyle=ls);
norm = matplotlib.colors.Normalize(bins.min(), bins.max())
# set a color for every bar (patch) according
# to bin value from normalized min-max interval
for bin, patch in zip(bins, patches):
color = matplotlib.cm.viridis(norm(bin))
patch.set_facecolor(color)
c_ax.set_axes_locator(InsetPosition(axes[1], [1.05,0,0.05,1]))
cbar = fig.colorbar(im, cax=c_ax)
cbar.set_label("Accuracy", rotation=270, va="center")
h_ax.set_axes_locator(InsetPosition(c_ax, [3.5,0,7,1]))
h_ax.set_title("Accuracy distribution")
h_ax.axis('off')
self.save()
class ToolsFigure(Figure):
"""
Superclass of all figures where different tools (e.g. ARGweaver, fastarg) are compared
"""
# Colours taken from Matplotlib default color wheel.
# https://matplotlib.org/users/dflt_style_changes.html
tools_format = collections.OrderedDict([
("ARGweaver", {"mark":"*", "col":"#d62728"}),
("RentPlus", {"mark":"d", "col":"#2ca02c"}),
("fastARG", {"mark":"^", "col":"#ff7f0e"}),
("tsinfer", {"mark":"o", "col":"#1f77b4"}),
])
error_bars = True
class CputimeAllToolsBySampleSizeFigure(ToolsFigure):
"""
Compare CPU times for tsinfer vs other tools. We can only really get the CPU times
for all four methods on the same scale for tiny examples.
We can show that ARGweaver and RentPlus are much slower than tsinfer
and fastARG here, and compare tsinfer and fastARG more thoroughly
in a dedicated figure.
"""
name = "cputime_all_tools_by_sample_size"
def plot(self):
df = self.data
# Scale time to hours
time_scale = 3600
df.cputime_mean /= time_scale
df.cputime_se /= time_scale
sample_sizes = df.sample_size.unique()
fig, (ax_hi, ax_lo) = plt.subplots(2, 1, sharex=True)
lengths = df.length.unique()
# check these have fixed lengths
assert len(lengths) == 1
max_non_AW = 0
for tool in df.tool.unique():
line_data = df.query("tool == @tool")
if tool != 'ARGweaver':
max_non_AW = max(max_non_AW, max(line_data.cputime_mean+line_data.cputime_se))
for ax in (ax_lo, ax_hi):
ax.errorbar(
line_data.sample_size,
line_data.cputime_mean,
yerr=line_data.cputime_se,
color=self.tools_format[tool]["col"],
marker=self.tools_format[tool]['mark'],
elinewidth=1,
label=tool)
d = .015 # how big to make the diagonal lines in axes coordinates
# arguments to pass to plot, just so we don't keep repeating them
kwargs = dict(transform=ax_hi.transAxes, color='k', clip_on=False)
ax_hi.plot((-d, +d), (-d, +d), **kwargs) # top-left diagonal
ax_hi.plot((1 - d, 1 + d), (-d, +d), **kwargs) # top-right diagonal
ax_lo.set_xlabel("Sample Size")
ax_hi.set_ylabel("CPU time (hours)")
#ax_lo.set_xlim(sample_sizes.min(), sample_sizes.max())
# zoom-in / limit the view to different portions of the data
ax_hi.set_ylim(bottom = max_non_AW*40) # outliers only
ax_lo.set_ylim(bottom = 0-max_non_AW/20, top=max_non_AW+max_non_AW/20) # most of the data
#ax_hi.set_ylim(0.01, 3) # outliers only
#ax_lo.set_ylim(0, 0.002) # most of the data
# hide the spines between ax and ax2
ax_hi.spines['bottom'].set_visible(False)
ax_lo.spines['top'].set_visible(False)
ax_hi.xaxis.tick_top()
ax_hi.tick_params(labeltop=False) # don't put tick labels at the top
ax_lo.xaxis.tick_bottom()
kwargs.update(transform=ax_lo.transAxes) # switch to the bottom axes
ax_lo.plot((-d, +d), (1 - d, 1 + d), **kwargs) # bottom-left diagonal
ax_lo.plot((1 - d, 1 + d), (1 - d, 1 + d), **kwargs) # bottom-right diagonal
ax_hi.legend(loc="lower right")
self.save()
class MemTimeFastargTsinferFigure(ToolsFigure):
name = "mem_time_fastarg_tsinfer"
def __init__(self):
super().__init__()
# Rescale the length to Mb
length_scale = 10**6
self.data.length /= length_scale
# Scale time to hours
time_scale = 3600
cpu_cols = [c for c in self.data.columns if c.startswith("cputime")]
self.data[cpu_cols] /= time_scale
# Scale memory to GiB
mem_cols = [c for c in self.data.columns if c.startswith("memory")]
self.data[mem_cols] /= 1024 * 1024 * 1024
length_sample_size_combos = self.data[["length", "sample_size"]].drop_duplicates()
self.fixed_length = length_sample_size_combos['length'].value_counts().idxmax()
self.fixed_sample_size = length_sample_size_combos['sample_size'].value_counts().idxmax()
def plot(self):
fig, axes = plt.subplots(2, 2, sharey="row", sharex="col", figsize=(8, 5.5), constrained_layout=True)
xticks = [[0,2,4,6,8,10], [0,5000,10000,15000,20000]] #hardcode a hack here
ylim_inset = [0.32, 0.35] #hardcode a hack here
for i, (plotted_column, y_label) in enumerate(
zip(["cputime", "memory"], ["CPU time (hours)", "Memory (GiB)"])):
df = self.data.query("sample_size == @self.fixed_sample_size")
df_inset = df.query("tool == 'tsinfer'")
for tool in df.tool.unique():
line_data = df.query("tool == @tool")
axes[i][0].errorbar(
line_data.length,
line_data[plotted_column+"_mean"],
yerr=line_data[plotted_column+"_se"],
label=tool,
color=self.tools_format[tool]["col"],
marker=self.tools_format[tool]['mark'],
elinewidth=1)
axes[i][0].set_xlim(0)
axes[i][0].get_yaxis().set_label_coords(-0.08,0.5)
axes[i][0].set_ylabel(y_label)
tool = "tsinfer"
axins1 = inset_axes(axes[i][0], width="50%", height="40%", loc=2, borderpad=1)
axins1.errorbar(
df_inset.length, df_inset[plotted_column+"_mean"],
yerr=df_inset[plotted_column + "_se"], label=tool,
color=self.tools_format[tool]["col"], marker=self.tools_format[tool]['mark'],
linewidth=1, elinewidth=1, markersize=3)
df = self.data.query("length == @self.fixed_length")
df_inset = df.query("tool == 'tsinfer'")
for tool in df.tool.unique():
line_data = df.query("tool == @tool")
axes[i][1].errorbar(
line_data.sample_size,
line_data[plotted_column+"_mean"],
yerr=line_data[plotted_column+"_se"],
label=tool,
color=self.tools_format[tool]["col"],
marker=self.tools_format[tool]['mark'],
elinewidth=1)
tool = "tsinfer"
axins2 = inset_axes(axes[i][1], width="50%", height="40%", loc=2, borderpad=1)
axins2.errorbar(
df_inset.sample_size, df_inset[plotted_column+"_mean"],
yerr=df_inset[plotted_column+"_se"], label=tool,
color=self.tools_format[tool]["col"], marker=self.tools_format[tool]['mark'],
linewidth=1, elinewidth=1, markersize=3)
axins1.tick_params(axis='both', which='major', labelsize=7)
axins1.set_ylim(-0.01, ylim_inset[i])
axins1.set_xticks(xticks[0])
axins1.yaxis.tick_right()
axins2.tick_params(axis='both', which='major', labelsize=7)
axins2.set_ylim(-0.01, ylim_inset[i])
axins2.set_xticks(xticks[1])
axins2.yaxis.tick_right()
#axins2.set_yticklabels(["{:.0f}".format(s) for s in yticks[i]])
axes[0][0].legend(loc="upper right", numpoints=1, fontsize="small")
axes[1][0].set_xlabel("Length (Mb) for fixed sample size of {}".format(
self.fixed_sample_size))
axes[1][1].set_xlabel("Sample size for fixed length of {:g} Mb".format(
self.fixed_length))
axes[1][0].set_xticks(xticks[0])
axes[1][1].set_xticks(xticks[1])
self.save()
class PerformanceLengthSamplesFigure(ToolsFigure):
"""
The performance metrics figures. Each of these figures
has two panels; one for scaling by sequence length and the other
for scaling by sample size. Different lines are given for each
of the different combinations of tsinfer parameters
"""
y_name = "plotted_column"
y_axis_label = None
def __init__(self):
super().__init__()
# Rescale the length to Mb
length_scale = 10**6
self.data.length /= length_scale
length_sample_size_combos = self.data[["length", "sample_size"]].drop_duplicates()
self.fixed_length = length_sample_size_combos['length'].value_counts().idxmax()
self.fixed_sample_size = length_sample_size_combos['sample_size'].value_counts().idxmax()
def plot(self):
df = self.data
recombination_linestyles = [':', '-', '--']
recombination_rates = np.sort(df.recombination_rate.unique())
mutation_rates = df.mutation_rate.unique()
tools = df.tool.unique()
assert len(recombination_linestyles) >= len(recombination_rates)
assert len(mutation_rates) == len(tools) == 1
mu = mutation_rates[0]
tool = tools[0]
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 6), sharey=True)
ax1.set_title("Fixed number of chromosomes ({})".format(self.fixed_sample_size))
ax1.set_xlabel("Sequence length (MB)")
ax1.set_ylabel(self.y_axis_label)
for linestyle, rho in zip(recombination_linestyles, recombination_rates):
line_data = df.query("(sample_size == @self.fixed_sample_size) and (recombination_rate == @rho)")
ax1.errorbar(
line_data.length, line_data[self.plotted_column + "_mean"],
yerr=None, # line_data[self.plotted_column + "_se"],
linestyle=linestyle,
color=self.tools_format[tool]["col"],
#marker=self.tools_format[tool]['mark'],
#elinewidth=1
)
ax2.set_title("Fixed sequence length ({:g} Mb)".format(self.fixed_length))
ax2.set_xlabel("Sample size")
for linestyle, rho in zip(recombination_linestyles, recombination_rates):
line_data = df.query("(length == @self.fixed_length) and (recombination_rate == @rho)")
ax2.errorbar(
line_data.sample_size, line_data[self.plotted_column + "_mean"],
yerr=None, # line_data[self.plotted_column + "_se"],
linestyle=linestyle,
color=self.tools_format[tool]["col"],
#marker=self.tools_format[tool]['mark'],
#elinewidth=1
)
params = [
plt.Line2D((0,0),(0,0), color=self.tools_format[tool]["col"],
linestyle=linestyle, linewidth=2)
for linestyle, rho in zip(recombination_linestyles, recombination_rates)]
ax1.legend(
params,
[r"$\rho$ = {}".format("$\mu$" if rho==mu else
r"{:g}$\mu$ (more recombination)".format(rho/mu) if rho>mu else
r"$\mu$/{:g} (less recombination)".format(mu/rho))
for rho_index, rho in enumerate(recombination_rates)],
loc="upper right", fontsize=10, title="Relative rate of recombination")
self.save()
class TSCompressionFigure(PerformanceLengthSamplesFigure):
name = "tsinfer_ts_filesize_ln"
plotted_column = "ts_relative_filesize"
y_axis_label = "File size relative to simulated tree sequence"
class VCFCompressionFigure(PerformanceLengthSamplesFigure):
name = "tsinfer_vcf_compression_ln"
plotted_column = "vcf_compression_factor"
y_axis_label = "Compression factor relative to vcf.gz"
class NodesWithMutations(PerformanceLengthSamplesFigure):
name = "mutation_ancestors_ln"
plotted_column = "prop_nodes_with_mutation"
y_axis_label = "Proportion of internal nodes associated with at least one mutation"
class TreeMetricsFigure(ToolsFigure):
metric_titles = {
"wRF": "weighted Robinson-Foulds metric",
"RF": "Robinson-Foulds metric",
"SPR": "estimated SPR difference",
"path": "Path difference",
"KC": "Kendall-Colijn metric",
}
polytomy_and_averaging_format = collections.OrderedDict([
("broken", {
"per site": {"linestyle":"--"},
"per variant": {"linestyle":":"}}),
("retained", {
"per site": {"linestyle":"-"},
"per variant": {"linestyle":"-."}})
])
sample_size_format = [
{'fillstyle':'full'}, #smaller ss
{'fillstyle':'none'} #assume only max 2 sample sizes per plot
]
length_format = {'tsinfer':[{'col':'k'}, {'col':'#1f77b4'}, {'col':'#17becf'}]}
def single_metric_plot(self, df, x_variable, ax, av_method,
rho = None, markers = True, x_jitter = None):
"""
A single plot on an ax. This requires plotting separate lines, e.g. for each tool
        If rho is given, plot an x=rho vertical line, assuming x is the mutation_rate.
x_jitter can be None, 'log' or 'linear'
"""
v_cols = ['length', 'sample_size', 'tool', 'polytomies']
v_order = df[v_cols].drop_duplicates() # find unique combinations
# sort for display
v_order = v_order.sort_values(v_cols, ascending=[False, True, True, False])
ss_order = {v:k for k,v in enumerate(v_order.sample_size.unique())}
l_order = {v:k for k,v in enumerate(v_order.length.unique())}
for i, r in enumerate(v_order.itertuples()):
query = []
query.append("length == @r.length")
query.append("sample_size == @r.sample_size")
query.append("tool == @r.tool")
query.append("polytomies == @r.polytomies")
line_data = df.query("(" + ") and (".join(query) + ")")
if not line_data.empty:
if len(v_order.length.unique()) > 1:
# all tsinfer tools: use colours for length for polytomy format
colour = self.length_format[r.tool][l_order[r.length]]["col"]
else:
# no variable lengths: use standard tool colours
colour = self.tools_format[r.tool]["col"]
x = line_data[x_variable]
if x_jitter:
if x_jitter == 'log':
x *= 1 + (2*i/len(v_order)-1) * (max(x)/min(x))/5000
else:
x += (2 * i - 1) * (max(x)-min(x))/400
ax.errorbar(
x, line_data.treedist_mean,
yerr=line_data.treedist_se if self.error_bars else None,
linestyle=self.polytomy_and_averaging_format[r.polytomies][av_method]["linestyle"],
fillstyle=self.sample_size_format[ss_order[r.sample_size]]['fillstyle'],
color=colour,
marker=self.tools_format[r.tool]['mark'] if markers else None,
elinewidth=1)
if rho is not None:
ax.axvline(x=rho, color = 'gray', zorder=-1, linestyle=":", linewidth=1)
ax.text(rho, ax.get_ylim()[1]/40, r'$\mu=\rho$',
va="bottom", ha="right", color='gray', rotation=90)
return v_order
class MetricsAllToolsFigure(TreeMetricsFigure):
"""
Simple figure that shows all the metrics at the same time.
Assumes at most 2 sample sizes
"""
name = "metrics_all_tools"
def plot(self):
averaging_method = self.data.averaging.unique()
eff_sizes = self.data.Ne.unique()
rhos = self.data.recombination_rate.unique()
lengths = self.data.length.unique()
assert len(averaging_method) == len(eff_sizes) == len(rhos) == 1
rho = rhos[0]
method = averaging_method[0]
sample_sizes = self.data.sample_size.unique()
# x-direction is different error rates
seq_error_params = self.data.error_param.unique()
# y-direction is the permutations of metric + whether it is rooted
metric_and_rooting = self.data.groupby(["metric", "rooting"]).groups
metric_and_rooting = collections.OrderedDict( # make a consistent order
sorted(metric_and_rooting.items(), key=lambda x: x[0]))
# sort this so that metrics come out in a set order (TO DO)
fig, axes = plt.subplots(len(metric_and_rooting), len(seq_error_params),
squeeze=False, sharey='row',
figsize=(6*len(seq_error_params), 15))
for j, ((metric, root), rows) in enumerate(metric_and_rooting.items()):
for k, error in enumerate(seq_error_params):
# we are in the j,k th subplot
ax = axes[j][k]
ax.set_xscale('log')
display_order = self.single_metric_plot(
self.data.loc[rows].query("error_param == @error"), "mutation_rate",
ax, method, rho, markers = (len(sample_sizes)!=1))
# Use integers for labels
ax.yaxis.set_major_locator(matplotlib.ticker.MaxNLocator(integer=True))
# Make the labels on each Y axis line up properly
ax.get_yaxis().set_label_coords(-0.08,0.5)
if j == 0:
ax.set_title(self.error_label(error))
if j == len(metric_and_rooting) - 1:
ax.set_xlabel("Mutation rate")
if k == 0:
ax.set_ylim(getattr(self,'ylim', 0))
rooting_suffix = " (unrooted)" if root=="unrooted" else ""
ylab = getattr(self, 'y_axis_label', self.metric_titles[metric] + rooting_suffix)
ax.set_ylabel(ylab)
artists = [
plt.Line2D((0,1),(0,0), linewidth=2,
color=self.tools_format[d.tool]["col"],
linestyle=self.polytomy_and_averaging_format[d.polytomies][method]["linestyle"],
marker = None if len(sample_sizes)==1 else self.tools_format[d.tool]['mark'])
for d in display_order[['tool', 'polytomies']].drop_duplicates().itertuples()]
tool_labels = [d.tool + ("" if d.polytomies == "retained" else (" (polytomies " + d.polytomies + ")"))
for d in display_order[['tool', 'polytomies']].drop_duplicates().itertuples()]
axes[0][0].legend(
artists, tool_labels, numpoints=1, labelspacing=0.1)
fig.tight_layout()
self.save()
class MetricsAllToolsAccuracyFigure(MetricsAllToolsFigure):
"""
Show the metrics tending to 0 as mutation rate increases
"""
name = "metrics_all_tools_accuracy"
class MetricAllToolsFigure(TreeMetricsFigure):
"""
Plot each metric in a different pdf file.
For the publication: make symbols small and solid
"""
plot_height = 4.5
name = "metric_all_tools"
#y_axis_label="Average distance from true trees"
hide_polytomy_breaking = True
output_metrics = [("KC","rooted"), ("RF", "rooted")] #can add extras in here if necessary
def plot(self):
if getattr(self,"hide_polytomy_breaking", None):
df = self.data.query("polytomies != 'broken'")
else:
df = self.data
for metric, rooting in self.output_metrics:
query = ["metric == @metric", "rooting == @rooting"]
averaging_method = df.averaging.unique()
eff_sizes = df.Ne.unique()
rhos = df.recombination_rate.unique()
lengths = df.length.unique()
assert len(averaging_method) == len(eff_sizes) == len(rhos) == 1
rho = rhos[0]
method = averaging_method[0]
# x-direction is different sequencing error rates
try:
seq_error_params = df.error_param.unique()
except AttributeError:
seq_error_params = [0]
# y-direction is different ancestral allele error rates (if present)
try:
aa_error_params = self.data.ancestral_state_error_param.unique()
except AttributeError:
aa_error_params = [0]
fig, axes = plt.subplots(len(aa_error_params), len(seq_error_params),
squeeze=False, sharey=True,
figsize=getattr(self,'figsize',(6*len(seq_error_params), self.plot_height)))
# Used for final version of Figure 3.
# figsize=(7, 3.5))
for j, aa_error in enumerate(aa_error_params):
for k, seq_error in enumerate(seq_error_params):
ax = axes[j][k]
subquery = query
if len(seq_error_params) > 1:
subquery.append("error_param == @seq_error")
if len(aa_error_params) > 1:
subquery.append("ancestral_state_error_param == @aa_error")
display_order = self.single_metric_plot(
df.query("(" + ") and (".join(subquery) + ")"),
"mutation_rate", ax, method, rho)
ax.set_title(self.error_label(seq_error))
ax.set_xlabel("Mutation rate")
ax.set_xscale('log')
if k == 0:
ax.set_ylim(getattr(self,'ylim', 0))
rooting_suffix = " (unrooted)" if rooting=="unrooted" else ""
ylab = getattr(self, 'y_axis_label', None)
if ylab is None:
ylab = metric + rooting_suffix + " metric"
if len(aa_error_params)>1 or aa_error != 0:
ylab += ", {:g}% ancestral state error".format(aa_error*100)
ax.set_ylabel(ylab)
# Create legends from custom artists
artists = [
plt.Line2D((0,1),(0,0),
color=self.tools_format[d.tool]["col"],
linestyle=self.polytomy_and_averaging_format[d.polytomies][method]["linestyle"],
marker = self.tools_format[d.tool]['mark'])
for d in display_order[['tool', 'polytomies']].drop_duplicates().itertuples()]
tool_labels = [d.tool + ("" if d.polytomies == "retained" else (" (polytomies " + d.polytomies + ")"))
for d in display_order[['tool', 'polytomies']].drop_duplicates().itertuples()]
first_legend = axes[0][0].legend(
artists, tool_labels, numpoints=1, labelspacing=0.1, loc="upper right")
fig.tight_layout()
if len(self.output_metrics)==1:
self.save()
else:
self.save("_".join([self.name, metric, rooting]))
class MetricAllToolsAccuracyBadAncestorsSummary(MetricAllToolsFigure):
name = "metric_all_tools_accuracy_bad_ancestors"
output_metrics = [("KC","rooted"), ("RF", "rooted")]
y_axis_label = None
hide_polytomy_breaking = False
plot_height = 10.5
class MetricAllToolsAccuracyDemographyFigure(MetricAllToolsFigure):
"""
Simple figure that shows an ARG metrics for a genome under a more complex demographic
model (the Gutenkunst Out Of Africa model), as mutation rate increases to high values
"""
plot_height = 3.5 # To more-or-less match to other supplementary figures
name = "metric_all_tools_accuracy_demography"
hide_polytomy_breaking = True
class MetricAllToolsAccuracySweepFigure(TreeMetricsFigure):
"""
Figure for simulations with selection.
    Each page should be a single figure for a particular metric, with error bars shown.
"""
name = "metric_all_tools_accuracy_sweep"
error_bars = True
hide_polytomy_breaking = True
output_metrics = [("KC","rooted")] #can add extras in here if necessary
def plot(self):
if getattr(self,"hide_polytomy_breaking", None):
df = self.data.query("polytomies != 'broken'")
else:
df = self.data
for metric, rooting in self.output_metrics:
df = df.query("(metric == @metric) and (rooting == @rooting)")
output_freqs = df[['output_frequency', 'output_after_generations']].drop_duplicates()
averaging_method = df.averaging.unique()
eff_sizes = df.Ne.unique()
rhos = df.recombination_rate.unique()
lengths = df.length.unique()
assert len(averaging_method) == len(eff_sizes) == len(rhos) == 1
rho = rhos[0]
method = averaging_method[0]
# x-direction is different error rates
seq_error_params = df.error_param.unique()
fig, axes = plt.subplots(len(output_freqs), len(seq_error_params),
figsize=getattr(self,'figsize',(6*len(seq_error_params), 2.5*len(output_freqs))),
squeeze=False, sharey=True)
for j, output_data in enumerate(output_freqs.itertuples()):
for k, error in enumerate(seq_error_params):
ax = axes[j][k]
freq = output_data.output_frequency
gens = output_data.output_after_generations
query = ["error_param == @error"]
query.append("output_frequency == @freq")
query.append("output_after_generations == @gens")
display_order = self.single_metric_plot(
df.query("(" + ") and (".join(query) + ")"),
"mutation_rate", ax, method, rho)
ax.set_xscale('log')
if j == 0:
ax.set_title(self.error_label(error))
if j == len(output_freqs) - 1:
ax.set_xlabel("Neutral mutation rate")
if k == 0:
ax.set_ylabel(getattr(self, 'y_axis_label', metric + " metric") +
" @ {}{}".format(
"fixation " if np.isclose(freq, 1.0) else "freq {}".format(freq),
"+{} gens".format(int(gens)) if gens else ""))
if np.isclose(freq, 1.0) and not gens:
# This is *at* fixation - set the plot background colour
ax.set_facecolor('0.9')
# Create legends from custom artists
artists = [
plt.Line2D((0,1),(0,0),
color=self.tools_format[d.tool]["col"],
linestyle=self.polytomy_and_averaging_format[d.polytomies][method]["linestyle"],
marker = self.tools_format[d.tool]['mark'])
for d in display_order[['tool', 'polytomies']].drop_duplicates().itertuples()]
tool_labels = [d.tool + ("" if d.polytomies == "retained" else (" (polytomies " + d.polytomies + ")"))
for d in display_order[['tool', 'polytomies']].drop_duplicates().itertuples()]
first_legend = axes[0][0].legend(
artists, tool_labels, numpoints=1, labelspacing=0.1, loc="upper right")
fig.tight_layout()
if len(self.output_metrics)==1:
self.save()
else:
self.save("_".join([self.name, metric, rooting]))
class MetricSubsamplingFigure(TreeMetricsFigure):
"""
Figure that shows whether increasing sample size helps with the accuracy of
reconstructing the ARG for a fixed subsample. We only use tsinfer for this.
"""
name = "metric_subsampling"
hide_polytomy_breaking = True
output_metrics = [("KC","rooted")] #can add extras in here if necessary
def plot(self):
self.polytomy_and_averaging_format['retained']['per variant']['linestyle'] = "-"
for metric, rooting in self.output_metrics:
query = ["metric == @metric", "rooting == @rooting"]
if getattr(self,"hide_polytomy_breaking", None):
query.append("polytomies != 'broken'")
df = self.data.query("(" + ") and (".join(query) + ")")
subsample_size = df.subsample_size.unique()
            # all should have the same simulation sample size (but inference occurs on
            # different subsample sizes, and tree comparisons are on a fixed small number of tips)
averaging_method = self.data.averaging.unique()
sample_sizes = df.sample_size.unique()
all_tree_tips = df.restrict_sample_size_comparison.unique()
mutation_rates = df.mutation_rate.unique()
assert len(all_tree_tips) == len(mutation_rates) == len(sample_sizes) == len(averaging_method) == 1
tree_tips = all_tree_tips[0]
method = averaging_method[0]
lengths = df.length.unique()
seq_error_params = df.error_param.unique()
fig, axes = plt.subplots(1, len(seq_error_params),
figsize=(12, 6), squeeze=False, sharey=True)
for k, error in enumerate(seq_error_params):
ax = axes[0][k]
display_order = self.single_metric_plot(
df.query("error_param == @error"), "subsample_size",
ax, method, rho = None, markers = False, x_jitter = 'log')
ax.set_title(self.error_label(error))
if k == 0:
ylab = getattr(self, 'y_axis_label', self.metric_titles[metric])
ax.set_ylabel("Average " + ylab + " for trees reduced to first {} samples".format(tree_tips))
ax.set_xlabel("Sample size used for inference")
ax.set_xscale('log')
if len(display_order)>1:
l_order = {v:k for k,v in enumerate(display_order.length.unique())}
artists = [
plt.Line2D((0,1),(0,0),
color=self.length_format[d.tool][l_order[d.length]]["col"],
linestyle=self.polytomy_and_averaging_format[d.polytomies][method]["linestyle"],
marker = False)
for d in display_order[['length', 'tool', 'polytomies']].drop_duplicates().itertuples()]
labels = ["{} kb".format(d.length//1000)
for d in display_order[['length']].drop_duplicates().itertuples()]
first_legend = axes[0][0].legend(
artists, labels, numpoints=1, labelspacing=0.1, loc="upper right")
fig.tight_layout()
if len(self.output_metrics)==1:
self.save()
else:
self.save("_".join([self.name, metric, rooting]))
def rotate_linkage(linkage, index):
    # Swap the two children of the given linkage row, flipping the left/right
    # order in which that clade is drawn in the dendrogram.
    x, y = linkage[index][0:2]
    linkage[index][0] = y
    linkage[index][1] = x
class UkbbStructureFigure(Figure):
"""
Figure showing the structure for UKBB using heatmaps.
"""
name = "ukbb_structure"
def __init__(self):
        # There is no "name.csv" data file for this figure, so skip the default loading.
pass
def plot_ukbb_region_clustermap(self):
df = pd.read_csv("data/ukbb_ukbb_british_centre.csv").set_index("centre")
# Zscore normalise
for col in list(df):
df[col] = scipy.stats.zscore(df[col])
row_linkage = scipy.cluster.hierarchy.linkage(df, method="average", optimal_ordering=True)
# Tweaks to the clade rotation order
# Flip the north vs north-west cities
rotate_linkage(row_linkage, -3)
# Put Welsh towns next to Birmingham
rotate_linkage(row_linkage, -8)
# Do Leeds - Sheffield - Nottingham, not Nottingham - Sheffield - Leeds
        # (this simply helps the top-to-bottom labelling scheme)
rotate_linkage(row_linkage, -9)
rotate_linkage(row_linkage, -11)
# Bristol near Welsh towns
rotate_linkage(row_linkage, -12)
# Swansea then Cardiff (highlights association between Cardiff & Bristol)
rotate_linkage(row_linkage, -15)
# The south/south-east centres are a bit visually messy - try putting east at end
# Oxford near Bristol
rotate_linkage(row_linkage, -16)
# Reading nearer Oxford
rotate_linkage(row_linkage, -20)
# Push Croydon (furthest east) to the end
rotate_linkage(row_linkage, -21)
order = scipy.cluster.hierarchy.leaves_list(row_linkage)
x_pop = df.index.values[order]
cg = sns.clustermap(df[x_pop], row_linkage=row_linkage, col_cluster=False, rasterized=True)
cg.ax_heatmap.set_ylabel("")
for tick in cg.ax_heatmap.get_xticklabels():
tick.set_rotation(-45)
tick.set_ha('left')
tick.set_rotation_mode("anchor")
self.save("ukbb_ukbb_clustermap_british")
def plot_1kg_ukbb_clustermap(self):
df = pd.read_csv("data/1kg_ukbb_ethnicity.csv").set_index("ethnicity")
colour_map = get_tgp_colours()
colours = [colour_map[pop] for pop in tgp_populations]
df = df[tgp_populations]
row_linkage = scipy.cluster.hierarchy.linkage(df, method="average")
cg = sns.clustermap(
df, row_linkage=row_linkage, col_cluster=False, col_colors=colours, rasterized=True)
cg.ax_heatmap.set_ylabel("")
for region, col in get_tgp_region_colours().items():
cg.ax_col_dendrogram.bar(0, 0, color=col, label=region, linewidth=0)
cg.ax_col_dendrogram.legend(bbox_to_anchor=(1.2, 0.8))
self.save("ukbb_1kg_clustermap_ethnicity")
def plot_1kg_ukbb_british_centre_clustermap(self):
df = pd.read_csv("data/1kg_ukbb_british_centre.csv").set_index("centre")
colour_map = get_tgp_colours()
colours = [colour_map[pop] for pop in tgp_populations]
df = df[tgp_populations]
# Zscore normalise
for col in list(df):
df[col] = scipy.stats.zscore(df[col])
row_linkage = scipy.cluster.hierarchy.linkage(df, method="average")
cg = sns.clustermap(
df, row_linkage=row_linkage, col_cluster=False, col_colors=colours, rasterized=True)
cg.ax_heatmap.set_ylabel("")
for region, col in get_tgp_region_colours().items():
cg.ax_col_dendrogram.bar(0, 0, color=col, label=region, linewidth=0)
cg.ax_col_dendrogram.legend(bbox_to_anchor=(1.2, 0.8))
self.save("ukbb_1kg_clustermap_british_centre")
def plot(self):
self.plot_ukbb_region_clustermap()
self.plot_1kg_ukbb_clustermap()
self.plot_1kg_ukbb_british_centre_clustermap()
class GlobalStructureFigure(Figure):
name = "global_structure"
def __init__(self):
        # There is no "name.csv" data file for this figure, so skip the default loading.
pass
def plot_clustermap(self, df, pop_colours, region_colours, figsize):
dfg = df.groupby("population").mean()
# Zscore normalise
for col in list(dfg):
dfg[col] = scipy.stats.zscore(dfg[col])
row_linkage = scipy.cluster.hierarchy.linkage(dfg, method="average")
order = scipy.cluster.hierarchy.leaves_list(row_linkage)
x_pop = dfg.index.values[order]
colours = pd.Series(pop_colours)
cg = sns.clustermap(
dfg[x_pop], row_linkage=row_linkage, col_cluster=False,
row_colors=colours, figsize=figsize, rasterized=True)
cg.ax_heatmap.set_ylabel("")
for region, col in region_colours.items():
cg.ax_col_dendrogram.bar(0, 0, color=col, label=region, linewidth=0)
return cg
def plot_1kg_clustermap(self):
        df = pd.read_csv("data/1kg_gnn.csv")
# -*- coding: utf-8 -*-
'''
@File : preprocess.py
@Author : <NAME>
@Date : 2020/9/9
@Desc :
'''
import pandas as pd
import json
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
DATA_BASE = BASE_DIR + "/data/"
# print(DATA_BASE)
def data_preprocess(corpus_file_name):
    """ Data preprocessing """
    print("===================Start Preprocess======================")
    df = pd.read_csv(DATA_BASE + corpus_file_name + ".csv")  # read the source data
    # df["小时"] = df["time"].map(lambda x: int(x.strftime("%H")))  # extract the hour
    df = df.drop_duplicates()  # remove duplicate rows
    print("Remove duplicate items completed! ")
    df = df.dropna(subset=["内容"])  # drop rows where the comment-content column ("内容") is empty
    # df = df.dropna(subset=["gender"])  # drop rows where the "gender" column is empty
    print("Remove empty contents completed! ")
    # df.to_csv(corpus_file_name+".csv")  # write out the processed data
    print("===================Data cleaning completed======================")
return df
def get_phrases(corpus_file_name):
    """ Extract the corresponding phrase groups from the excel/csv file """
print("===================Start Withdraw======================")
print(DATA_BASE + corpus_file_name + ".csv")
    df = pd.read_csv("../data/" + corpus_file_name + ".csv")  # read the source data
    df = df.fillna(" ")  # replace NaN values with a single space " "
print("Replace NAN completed! ")
# print(list(df["中性"]))
pos = [ph.split(";") for ph in df["正向_细"]]
neu = [ph.split(";") for ph in df["中性_细"]]
neg = [ph.split(";") for ph in df["负向_细"]]
pos_phrases, neu_phrases, neg_phrases = [], [], []
for i in range(len(pos)):
pos_phrases.extend(pos[i])
neu_phrases.extend(neu[i])
neg_phrases.extend(neg[i])
with open(DATA_BASE + "neg_phrases.txt", "w", encoding="utf-8") as f:
for ph in neg_phrases:
if len(ph) > 1:
f.write(ph + "\n")
#
# all_phrases = pos_phrases + neu_phrases + neg_phrases
# special_phrases = [line.strip() for line in open(DATA_BASE + "special_phrases.txt", encoding='utf-8').readlines()]
# all_phrases = list(set(special_phrases + all_phrases))
# # print(all_phrases)
#
# with open(DATA_BASE + "special_phrases.txt", "w", encoding="utf-8") as fw:
# for ph in all_phrases:
# if len(ph) > 1:
# fw.write(ph + "\n")
print("===================Phrases saved in file======================")
def combine_phrases():
    """ Combine the cosmetics and skin-care phrases """
    df1 = pd.read_csv(DATA_BASE + "skin_care_phrases" + ".csv")
import pandas as pd
from zipfile import ZipFile
import os
class CPSPandas():
def __init__(self):
pass
def zipToPandas(self,filePath,startsWith="", endsWith="",contain="",header=None,error_bad_lines=True):
zip_file = ZipFile(filePath)
alldf = []
for textFile in zip_file.infolist():
fileName = textFile.filename
baseName = os.path.basename(fileName)
if baseName.startswith(startsWith) and baseName.endswith(endsWith) and (contain in baseName):
try:
oneFileDF = pd.read_csv(zip_file.open(fileName),header=header,error_bad_lines=error_bad_lines)
alldf.append(oneFileDF)
except Exception as e:
print(e)
        df = pd.concat(alldf)
        return df
import random
import pandas as pd
from functools import partial
from torch.utils.data import DataLoader
from datasets import load_dataset, concatenate_datasets
from datasets import Features, Value, ClassLabel
from datastreams.datasets import dataset_configs
class DataStream:
features = Features({
"context": Value("string"),
"statement": Value("string"),
"label": ClassLabel(2, names=["False", "True"])
})
def __init__(self, dataset_names: list, split: str="train_split"):
self.dataset_names = dataset_names
self.stream = []
for name in dataset_names:
config = dataset_configs[name]
path = config["path"]
name = config.get("name", None)
dataset_split = config[split]
dataset = load_dataset(path, name, split=dataset_split)
filter_column = config.get("filter_column", None)
filter_value = config.get("filter_value", None)
if filter_column and filter_value:
dataset = dataset.filter(lambda batch: batch[filter_column]==filter_value)
transform = config["transform"]
dataset = dataset.map(transform, batched=True, remove_columns=dataset.column_names)
try:
dataset = dataset.cast(self.features)
except:
raise ValueError(f"{transform} didn't transform to datastream features.")
self.stream.append(dataset)
def summary(self):
return pd.DataFrame(
[(name, data.num_rows) for name, data in zip(self.dataset_names, self.stream)],
columns=["dataset", "num_examples"]
)
def save(self, path):
path.mkdir(parents=True, exist_ok=True)
for name, data in zip(self.dataset_names, self.stream):
data.to_pandas().to_csv(path/f"{name}.csv", index=False)
def sample_examples(self, num_per_dataset: int=1) -> pd.DataFrame:
all_sample_data = []
for name, data in zip(self.dataset_names, self.stream):
sample_idxs = random.choices(range(data.num_rows), k=num_per_dataset)
sample_data = data.select(sample_idxs).to_pandas()
sample_data["dataset"] = name
all_sample_data.append(sample_data)
        return pd.concat(all_sample_data)
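# Illustrative usage sketch (not part of the original module): "boolq" below is
# a hypothetical key of dataset_configs and may need replacing with a dataset
# name that is actually configured in this repository.
if __name__ == "__main__":
    demo_stream = DataStream(["boolq"], split="train_split")
    print(demo_stream.summary())
    print(demo_stream.sample_examples(num_per_dataset=2))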
from collections import OrderedDict
from itertools import product
from re import match
from typing import List, Dict
from numpy import nan
from pandas import read_excel, DataFrame, Series, ExcelFile, concat, notnull, \
isnull
from survey import Survey
from survey.groups.question_groups.count_question_group import \
CountQuestionGroup
from survey.groups.question_groups.free_text_question_group import \
FreeTextQuestionGroup
from survey.groups.question_groups.likert_question_group import \
LikertQuestionGroup
from survey.groups.question_groups.multi_choice_question_group import \
MultiChoiceQuestionGroup
from survey.groups.question_groups.positive_measure_question_group import \
PositiveMeasureQuestionGroup
from survey.groups.question_groups.question_group import QuestionGroup
from survey.groups.question_groups.ranked_choice_question_group import \
RankedChoiceQuestionGroup
from survey.groups.question_groups.single_choice_question_group import \
SingleChoiceQuestionGroup
from survey.surveys.metadata.question_metadata import QuestionMetadata
from survey.surveys.survey_creators.base.survey_creator import SurveyCreator
class FocusVisionCreator(SurveyCreator):
def read_survey_data(self):
data = read_excel(self.survey_data_fn)
data.columns = [column.replace(u'\xa0', u' ')
for column in data.columns]
if self.pre_clean is not None:
data = self.pre_clean(data)
self.survey_data = data
def _loop_variable_froms(self, variable_name: str) -> List[str]:
return self.loop_mappings.loc[
self.loop_mappings['loop_variable'] == variable_name, 'map_from'
].tolist()
def _loop_variable_tos(self, variable_name: str) -> List[str]:
return self.loop_mappings.loc[
self.loop_mappings['loop_variable'] == variable_name, 'map_to'
].tolist()
def _loop_variable_mappings(self, variable_name: str) -> Series:
return self.loop_mappings.loc[
self.loop_mappings['loop_variable'] == variable_name
].set_index('map_from')['map_to']
def _create_looped_metadata(self, metadata_dict: dict) -> List[dict]:
new_metadata_dicts: List[dict] = []
expressions: DataFrame = self.loop_expressions.loc[
self.loop_expressions['loop_name'] ==
metadata_dict['loop_variables'], :
]
loop_variables: List[str] = metadata_dict['loop_variables'].split('\n')
# build lists of loop variable values
loop_froms: List[list] = []
for loop_variable in loop_variables:
loop_froms.append(self._loop_variable_froms(loop_variable))
# iterate over potential matching expressions
for _, expression_data in expressions.iterrows():
# iterate over product of loop variable values and check for
# matching column(s)
for loop_vals in product(*loop_froms):
# create a new expression using the loop variable values
loop_expression = expression_data['expression_builder']
for l, loop_val in enumerate(loop_vals):
loop_expression = loop_expression.replace(
'{{' + loop_variables[l] + '}}', str(loop_val)
)
# find columns matching the question expression and
# loop expression
match_cols = [column for column in self.survey_data.columns
if match(loop_expression, column)
and match(metadata_dict['expression'], column)]
if len(match_cols) > 0:
# create new metadata dict
new_metadata = {k: v for k, v in metadata_dict.items()}
# build list of loop variable values to sub into question /
# attribute name
var_vals: List[str] = []
for loop_variable, loop_val in zip(
loop_variables, loop_vals
):
loop_var_mappings = self._loop_variable_mappings(
loop_variable
)
var_vals.append(loop_var_mappings.loc[loop_val])
# create new name for metadata based on loop values
new_metadata['name'] = (
new_metadata['name'] + '__' + '__'.join(var_vals)
)
# assign loop expression to match columns against
new_metadata['loop_expression'] = loop_expression
new_metadata_dicts.append(new_metadata)
return new_metadata_dicts
def read_metadata(self):
metadata = ExcelFile(self.metadata_fn)
# read metadata
questions_metadata = read_excel(metadata, 'questions')
self.questions_metadata_original = questions_metadata
attributes_metadata = read_excel(metadata, 'attributes')
orders_metadata = read_excel(metadata, 'orders')
if 'loop_mappings' in metadata.sheet_names:
self.loop_mappings = read_excel(metadata, 'loop_mappings')
if 'loop_expressions' in metadata.sheet_names:
self.loop_expressions = read_excel(metadata, 'loop_expressions')
# filter to specified survey
if None not in (self.survey_id_col, self.survey_id):
questions_metadata = self._filter_to_survey(questions_metadata)
attributes_metadata = self._filter_to_survey(attributes_metadata)
orders_metadata = self._filter_to_survey(orders_metadata)
# check for clashes in question, attribute and category names
category_names = sorted(orders_metadata['category'].unique())
q_name_errors = []
for q_name in sorted(questions_metadata['name'].unique()):
if q_name in category_names:
q_name_errors.append(q_name)
if q_name_errors:
raise ValueError(
f'The following categories clash with question names. '
f'Rename questions or categories.\n{q_name_errors}'
)
a_name_errors = []
for a_name in sorted(attributes_metadata['name'].unique()):
if a_name in category_names:
a_name_errors.append(a_name)
if a_name_errors:
raise ValueError(
f'The following categories clash with attribute names. '
f'Rename attributes or categories.\n{a_name_errors}'
)
# create ordered choices for questions with shared choices
for meta in (attributes_metadata, questions_metadata):
for idx, row in meta.iterrows():
# add shared orders to metadata
if notnull(row['categories']):
q_name = row['name']
order_value = row['categories']
ordered_choices = orders_metadata[
orders_metadata['category'] == order_value
].copy()
ordered_choices['category'] = q_name
orders_metadata = concat([orders_metadata, ordered_choices])
# create looped questions
if 'loop_variables' in questions_metadata.columns:
questions_metadata_items = []
for _, row in questions_metadata.iterrows():
metadata_dict = row.to_dict()
if notnull(metadata_dict['loop_variables']):
new_metadatas = self._create_looped_metadata(metadata_dict)
questions_metadata_items.extend(new_metadatas)
for new_metadata in new_metadatas:
q_name = new_metadata['name']
order_value = row['categories']
ordered_choices = orders_metadata[
orders_metadata['category'] == order_value
].copy()
ordered_choices['category'] = q_name
orders_metadata = concat([
orders_metadata, ordered_choices
])
else:
questions_metadata_items.append(metadata_dict)
questions_metadata = DataFrame(questions_metadata_items)
# set member variables
self.questions_metadata = questions_metadata
self.attributes_metadata = attributes_metadata
self.orders_metadata = orders_metadata
def _get_single_column_data(
self, question_metadata: QuestionMetadata
) -> Series:
if question_metadata.expression is None:
return self.survey_data[question_metadata.text]
else:
if not question_metadata.loop_expression:
match_cols = [c for c in self.survey_data.columns
if match(question_metadata.expression, c)]
else:
match_cols = [c for c in self.survey_data.columns
if match(question_metadata.expression, c) and
match(question_metadata.loop_expression, c)]
if len(match_cols) != 1:
raise ValueError(
f'Did not match exactly one column for question:'
f' "{question_metadata.name}". '
f'Matched {len(match_cols)}.'
)
data = self.survey_data[match_cols[0]]
data = data.rename(question_metadata.name)
return data
def _get_multi_choice_data(
self, question_metadata: QuestionMetadata
) -> Series:
# merge multi-choice questions to single column
if question_metadata.expression is None:
raise ValueError('Need a regular expression to match '
'MultiChoice question columns.')
match_cols = [c for c in self.survey_data.columns
if match(question_metadata.expression, c)]
if len(match_cols) == 0:
raise ValueError(
f'Could not match expression "{question_metadata.expression}" '
f'for MultiChoice question "question_metadata"'
)
if notnull(question_metadata.loop_expression):
match_cols = [c for c in match_cols
if match(question_metadata.loop_expression, c)]
choices = self.survey_data[match_cols].copy(deep=True)
        def create_cell_data(row: Series):
            # keep only the selected options; values prefixed with "NO TO:"
            # mark choices that the respondent did not select
            selected_values = [val for val in row.to_list()
                               if not val.startswith('NO TO:')]
            return '\n'.join(selected_values)
null_rows = choices.notnull().sum(axis=1) == 0
data = Series(data=nan, index=choices.index)
data.loc[~null_rows] = choices.loc[~null_rows].apply(
create_cell_data, axis=1
)
return Series(data=data, name=question_metadata.name)
def clean_survey_data(self):
new_columns = []
# copy attribute columns to new dataframe
for amd in self.attribute_metadatas:
new_columns.append(self._get_single_column_data(amd))
for qmd in self.question_metadatas:
if qmd.type_name not in ('MultiChoice', 'RankedChoice'):
new_columns.append(self._get_single_column_data(qmd))
elif qmd.type_name == 'MultiChoice':
new_columns.append(self._get_multi_choice_data(qmd))
elif qmd.type_name == 'RankedChoice':
raise NotImplementedError(
'No implementation for FocusVision RankedChoice Questions'
)
# set index of respondent id
new_survey_data = concat(new_columns, axis=1)
new_survey_data.index = self.survey_data['record: Record number']
new_survey_data.index.name = 'Respondent ID'
self.survey_data = new_survey_data
def format_survey_data(self):
pass
def _create_groups(self) -> Dict[str, QuestionGroup]:
# create groups
groups = {}
for _, question_metadata in self.questions_metadata_original.iterrows():
            if isnull(question_metadata['loop_variables']):
import os
import pandas as pd
import numpy as np
import time
from pathlib import Path as P
from multiprocessing import Pool, Manager, cpu_count
from tqdm import tqdm
from .tools import read_peaklists, process,\
check_peaklist, export_to_excel,\
MINT_RESULTS_COLUMNS, PEAKLIST_COLUMNS
from .peak_detection import OpenMSFFMetabo
import ms_mint
class Mint(object):
def __init__(self, verbose:bool=False):
self._verbose = verbose
self._version = ms_mint.__version__
self._progress_callback = None
self.reset()
if self.verbose:
print('Mint Version:', self.version , '\n')
self.peak_detector = OpenMSFFMetabo()
@property
def verbose(self):
return self._verbose
@verbose.setter
def verbose(self, value:bool):
self._verbose = value
@property
def version(self):
return self._version
def reset(self):
self._files = []
self._peaklist_files = []
self._peaklist = pd.DataFrame(columns=PEAKLIST_COLUMNS)
self._results = pd.DataFrame({i: [] for i in MINT_RESULTS_COLUMNS})
self._all_df = None
self._progress = 0
self.runtime = None
self._status = 'waiting'
self._messages = []
def clear_peaklist(self):
        self._peaklist = pd.DataFrame(columns=PEAKLIST_COLUMNS)
""" This module contains functions for loading data. """
from collections import OrderedDict
import csv
from typing import Tuple, Optional, Dict, Union, Type, List
import arff
import numpy as np
import pandas as pd
from pandas.api.types import is_numeric_dtype
from gama.utilities.preprocessing import log
CSV_SNIFF_SIZE = 2 ** 12
def load_csv_header(file_path: str, **kwargs) -> List[str]:
""" Return column names in the header, or 0...N if no header is present. """
with open(file_path, "r") as csv_file:
has_header = csv.Sniffer().has_header(csv_file.read(CSV_SNIFF_SIZE))
csv_file.seek(0)
if "sep" not in kwargs:
dialect = csv.Sniffer().sniff(csv_file.read(CSV_SNIFF_SIZE))
kwargs["sep"] = dialect.delimiter
csv_file.seek(0)
first_line = csv_file.readline()[:-1]
if has_header:
return first_line.split(kwargs["sep"])
else:
return [str(i) for i, _ in enumerate(first_line.split(kwargs["sep"]))]
def csv_to_pandas(file_path: str, **kwargs) -> pd.DataFrame:
""" Load data from the csv file into a pd.DataFrame.
Parameters
----------
file_path: str
Path of the csv file
kwargs:
Additional arguments for pandas.read_csv.
If not specified, the presence of the header and the delimiter token are
both detected automatically.
Returns
-------
pandas.DataFrame
A dataframe of the data in the ARFF file,
with categorical columns having category dtype.
"""
if "header" not in kwargs:
with open(file_path, "r") as csv_file:
has_header = csv.Sniffer().has_header(csv_file.read(CSV_SNIFF_SIZE))
kwargs["header"] = 0 if has_header else None
    df = pd.read_csv(file_path, **kwargs)
    # Minimal completion (assumption): return the frame as read; the
    # category-dtype conversion described in the docstring is not reproduced here.
    return df
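# Illustrative usage sketch (not part of the original module): "example.csv" is
# a placeholder file; the header row and the delimiter are sniffed automatically
# when not passed explicitly.
if __name__ == "__main__":
    print(load_csv_header("example.csv"))
    print(csv_to_pandas("example.csv").head())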
import faiss
from utils import MovieDataApp, RetToHive
spark = MovieDataApp().spark
import numpy as np
spark.sql('use movie_recall')
# id = spark.sql('select movie_id from movie_vector_1969').limit(10000)
user_vector_arr = spark.sql('select * from movie_vector_1969').limit(100).toPandas().values[:, 2].tolist()
user_vector_arr = np.asarray(user_vector_arr).astype('float32')
# print(type(user_vector_arr))
# print(user_vector_arr.shape)
# print(user_vector_arr)
# user_vector_arr.printSchema()
gds_vector_arr = spark.sql('select movievector from movie_vector_1969').limit(100).toPandas().values[:, 0].tolist()
gds_vector_arr = np.asarray(gds_vector_arr).astype('float32')
# print(gds_vector_arr.shape)
# print(gds_vector_arr)
# user_vector_arr # shape(1000, 100)
# gds_vector_arr # shape(100, 100)
dim = 100  # vector dimensionality
k = 10  # number of nearest vectors to recall per query
index = faiss.IndexFlatL2(dim)  # L2 (Euclidean) distance: smaller means more similar
# index = faiss.IndexFlatIP(dim)  # inner product; on normalised vectors this is cosine similarity (larger means more similar)
# print(index.is_trained)
index.add(gds_vector_arr)  # add the candidate vectors to the index
# print(index.ntotal)
D, I = index.search(user_vector_arr, k)  # find similar vectors: I is the matrix of neighbour indices, D the distance matrix
print(D[:5])
print(I[-5:])
import pandas as pd
df = pd.DataFrame(columns=['index'])
df.append(pd.Series([None]), ignore_index=True)
df['index'] = I.tolist()
df2 = pd.DataFrame(columns=['similar'])
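# Hypothetical continuation (not part of the original snippet): pair each row of
# recalled neighbour indices with its L2 distances from the search above.
recall_df = pd.DataFrame({'index': I.tolist(), 'similar': D.tolist()})
print(recall_df.head())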
import numpy as np
import pandas as pd
from sklearn import preprocessing
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
def main():
try:
df_train = pd.read_csv(
'http://archive.ics.uci.edu/ml/'
'machine-learning-databases/adult/adult.data', header=None)
df_test = pd.read_csv(
'http://archive.ics.uci.edu/ml/'
'machine-learning-databases/adult/adult.test',
skiprows=[0], header=None)
except:
df_train = pd.read_csv('adult.data', header=None)
        df_test = pd.read_csv('adult.test', skiprows=[0], header=None)
import numpy as np
import pandas as pd
import pickle
import warnings
from tqdm import tqdm
import time
from collections import defaultdict
import matplotlib.pyplot as plt
warnings.filterwarnings('ignore')
train = pd.read_csv('../IA2-train.csv')
"""
Name : c9_44_equal_weighted_vs_value_weighted.py
Book : Python for Finance (2nd ed.)
Publisher: Packt Publishing Ltd.
Author : <NAME>
Date : 6/6/2017
email : <EMAIL>
<EMAIL>
"""
import pandas as pd
import scipy as sp
x=pd.read_pickle("c:/temp/yanMonthly.pkl")
def ret_f(ticker):
a=x[x.index==ticker]
p=sp.array(a['VALUE'])
ddate=a['DATE'][1:]
ret=p[1:]/p[:-1]-1
out1=pd.DataFrame(p[1:],index=ddate)
out2=pd.DataFrame(ret,index=ddate)
output=pd.merge(out1,out2,left_index=True, right_index=True)
output.columns=['Price_'+ticker,'Ret_'+ticker]
return output
a=ret_f("IBM")
b=ret_f('WMT')
c=pd.merge(a,b,left_index=True, right_index=True)
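# Hypothetical continuation (not in the original script): an equal-weighted
# two-stock portfolio return is the simple average of the monthly returns
# produced by ret_f() above.
ret_equal = 0.5 * c['Ret_IBM'] + 0.5 * c['Ret_WMT']
print(ret_equal.head())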
import numpy as np
from pandas.core.dtypes.dtypes import CategoricalDtype
import pandas as pd
from pandas import Categorical, DataFrame, Series
import pandas._testing as tm
class TestCategoricalConcat:
def test_categorical_concat(self, sort):
# See GH 10177
df1 = DataFrame(
np.arange(18, dtype="int64").reshape(6, 3), columns=["a", "b", "c"]
)
df2 = DataFrame(np.arange(14, dtype="int64").reshape(7, 2), columns=["a", "c"])
cat_values = ["one", "one", "two", "one", "two", "two", "one"]
df2["h"] = Series(Categorical(cat_values))
res = pd.concat((df1, df2), axis=0, ignore_index=True, sort=sort)
exp = DataFrame(
{
"a": [0, 3, 6, 9, 12, 15, 0, 2, 4, 6, 8, 10, 12],
"b": [
1,
4,
7,
10,
13,
16,
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
np.nan,
],
"c": [2, 5, 8, 11, 14, 17, 1, 3, 5, 7, 9, 11, 13],
"h": [None] * 6 + cat_values,
}
)
tm.assert_frame_equal(res, exp)
def test_categorical_concat_dtypes(self):
# GH8143
index = ["cat", "obj", "num"]
cat = Categorical(["a", "b", "c"])
obj = Series(["a", "b", "c"])
num = Series([1, 2, 3])
df = pd.concat([Series(cat), obj, num], axis=1, keys=index)
result = df.dtypes == "object"
expected = Series([False, True, False], index=index)
tm.assert_series_equal(result, expected)
result = df.dtypes == "int64"
expected = Series([False, False, True], index=index)
tm.assert_series_equal(result, expected)
result = df.dtypes == "category"
expected = Series([True, False, False], index=index)
tm.assert_series_equal(result, expected)
def test_concat_categoricalindex(self):
# GH 16111, categories that aren't lexsorted
categories = [9, 0, 1, 2, 3]
a = Series(1, index=pd.CategoricalIndex([9, 0], categories=categories))
b = Series(2, index=pd.CategoricalIndex([0, 1], categories=categories))
c = Series(3, index=pd.CategoricalIndex([1, 2], categories=categories))
result = pd.concat([a, b, c], axis=1)
exp_idx = pd.CategoricalIndex([9, 0, 1, 2], categories=categories)
exp = DataFrame(
{
0: [1, 1, np.nan, np.nan],
1: [np.nan, 2, 2, np.nan],
2: [np.nan, np.nan, 3, 3],
},
columns=[0, 1, 2],
index=exp_idx,
)
tm.assert_frame_equal(result, exp)
def test_categorical_concat_preserve(self):
# GH 8641 series concat not preserving category dtype
# GH 13524 can concat different categories
s = Series(list("abc"), dtype="category")
s2 = Series(list("abd"), dtype="category")
exp = Series(list("abcabd"))
res = pd.concat([s, s2], ignore_index=True)
tm.assert_series_equal(res, exp)
exp = Series(list("abcabc"), dtype="category")
res = pd.concat([s, s], ignore_index=True)
tm.assert_series_equal(res, exp)
exp = Series(list("abcabc"), index=[0, 1, 2, 0, 1, 2], dtype="category")
res = pd.concat([s, s])
tm.assert_series_equal(res, exp)
a = Series(np.arange(6, dtype="int64"))
b = Series(list("aabbca"))
df2 = DataFrame({"A": a, "B": b.astype(CategoricalDtype(list("cab")))})
res = pd.concat([df2, df2])
exp = DataFrame(
{
"A": pd.concat([a, a]),
"B": pd.concat([b, b]).astype(CategoricalDtype(list("cab"))),
}
)
tm.assert_frame_equal(res, exp)
def test_categorical_index_preserver(self):
a = Series(np.arange(6, dtype="int64"))
b = Series(list("aabbca"))
df2 = DataFrame(
{"A": a, "B": b.astype(CategoricalDtype(list("cab")))}
).set_index("B")
result = pd.concat([df2, df2])
expected = DataFrame(
{
"A": pd.concat([a, a]),
"B": pd.concat([b, b]).astype(CategoricalDtype(list("cab"))),
}
).set_index("B")
tm.assert_frame_equal(result, expected)
# wrong categories -> uses concat_compat, which casts to object
df3 = DataFrame(
{"A": a, "B": Categorical(b, categories=list("abe"))}
).set_index("B")
result = pd.concat([df2, df3])
expected = pd.concat(
[
df2.set_axis(df2.index.astype(object), 0),
df3.set_axis(df3.index.astype(object), 0),
]
)
tm.assert_frame_equal(result, expected)
def test_concat_categorical_tz(self):
# GH-23816
a = Series(pd.date_range("2017-01-01", periods=2, tz="US/Pacific"))
b = Series(["a", "b"], dtype="category")
        result = pd.concat([a, b], ignore_index=True)
# -*- coding: utf-8 -*-
from collections import OrderedDict
import pandas as pd
try:
from pandas.testing import assert_frame_equal
except ImportError:
from pandas.util.testing import assert_frame_equal
from jparse import JParser
jp = JParser()
TEST_CASE1 = [OrderedDict([('A1', 1), ('A2', 2), ('A3', 3)]),
OrderedDict([('A1', [4, 5, 6]), ('A2', 7), ('A3', 'x')])]
TEST_CASE2 = OrderedDict([('A1', [OrderedDict([('B1', 4), ('B2', 5)]),
OrderedDict([('B1', 6), ('B3', 7)])]),
('A2', OrderedDict([('C1', [8, 9]), ('C2', [10, 11])])),
('A3', OrderedDict([('A1', OrderedDict([('B4', 12)])),
('A4', 10)]))])
def test_to_df_default():
result1 = jp.to_df(TEST_CASE1)
result2 = jp.to_df(TEST_CASE2)
expected1 = pd.DataFrame([{'0_A1': 1, '0_A2': 2, '0_A3': 3,
'1_A1_0': 4, '1_A1_1': 5,
'1_A1_2': 6, '1_A2': 7,
'1_A3': 'x'}])
expected2 = pd.DataFrame([{'A1_0_B1': 4, 'A1_0_B2': 5,
'A1_1_B1': 6, 'A1_1_B3': 7,
'A2_C1_0': 8, 'A2_C1_1': 9,
'A2_C2_0': 10, 'A2_C2_1': 11,
'A3_A1_B4': 12, 'A3_A4': 10}])
assert_frame_equal(result1, expected1)
    assert_frame_equal(result2, expected2)
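# Additional sanity check (assumption, consistent with the expected frames
# above: to_df flattens each of these inputs into a single-row DataFrame).
def test_to_df_single_row():
    assert len(jp.to_df(TEST_CASE1)) == 1
    assert len(jp.to_df(TEST_CASE2)) == 1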
import logging
from collections import OrderedDict
import pandas as pd
import pyprind
import six
import dask
from dask import delayed
from dask.diagnostics import ProgressBar
from py_stringmatching.tokenizer.qgram_tokenizer import QgramTokenizer
from py_stringmatching.tokenizer.whitespace_tokenizer import WhitespaceTokenizer
import cloudpickle as cp
import pickle
import py_entitymatching.catalog.catalog_manager as cm
from py_entitymatching.blocker.blocker import Blocker
import py_stringsimjoin as ssj
from py_entitymatching.utils.catalog_helper import log_info, get_name_for_key, \
add_key_column
from py_entitymatching.utils.generic_helper import parse_conjunct
from py_entitymatching.utils.validation_helper import validate_object_type
from py_entitymatching.dask.utils import validate_chunks, get_num_partitions, \
get_num_cores, wrap
logger = logging.getLogger(__name__)
class DaskRuleBasedBlocker(Blocker):
"""
WARNING THIS BLOCKER IS EXPERIMENTAL AND NOT TESTED. USE AT YOUR OWN RISK.
Blocks based on a sequence of blocking rules supplied by the user.
"""
def __init__(self, *args, **kwargs):
feature_table = kwargs.pop('feature_table', None)
self.feature_table = feature_table
self.rules = OrderedDict()
self.rule_str = OrderedDict()
self.rule_ft = OrderedDict()
self.filterable_sim_fns = {'jaccard', 'cosine', 'dice', 'overlap_coeff'}
self.allowed_ops = {'<', '<='}
self.rule_source = OrderedDict()
self.rule_cnt = 0
logger.warning("WARNING THIS BLOCKER IS EXPERIMENTAL AND NOT TESTED. USE AT YOUR OWN "
"RISK.")
super(Blocker, self).__init__(*args, **kwargs)
def _create_rule(self, conjunct_list, feature_table, rule_name):
if rule_name is None:
# set the rule name automatically
name = '_rule_' + str(self.rule_cnt)
self.rule_cnt += 1
else:
# use the rule name supplied by the user
name = rule_name
# create function string
fn_str = 'def ' + name + '(ltuple, rtuple):\n'
# add 4 tabs
fn_str += ' '
fn_str += 'return ' + ' and '.join(conjunct_list)
if feature_table is not None:
feat_dict = dict(
zip(feature_table['feature_name'], feature_table['function']))
else:
feat_dict = dict(zip(self.feature_table['feature_name'],
self.feature_table['function']))
six.exec_(fn_str, feat_dict)
return feat_dict[name], name, fn_str
def add_rule(self, conjunct_list, feature_table=None, rule_name=None):
"""Adds a rule to the rule-based blocker.
Args:
conjunct_list (list): A list of conjuncts specifying the rule.
feature_table (DataFrame): A DataFrame containing all the
features that are being referenced by
the rule (defaults to None). If the
feature_table is not supplied here,
then it must have been specified
during the creation of the rule-based
blocker or using set_feature_table
function. Otherwise an AssertionError
will be raised and the rule will not
be added to the rule-based blocker.
rule_name (string): A string specifying the name of the rule to
be added (defaults to None). If the
rule_name is not specified then a name will
be automatically chosen. If there is already
a rule with the specified rule_name, then
an AssertionError will be raised and the
rule will not be added to the rule-based
blocker.
Returns:
The name of the rule added (string).
Raises:
AssertionError: If `rule_name` already exists.
AssertionError: If `feature_table` is not a valid value
parameter.
Examples:
>>> import py_entitymatching
>>> from py_entitymatching.dask.dask_rule_based_blocker import DaskRuleBasedBlocker
>>> rb = DaskRuleBasedBlocker()
>>> A = em.read_csv_metadata('path_to_csv_dir/table_A.csv', key='id')
>>> B = em.read_csv_metadata('path_to_csv_dir/table_B.csv', key='id')
>>> block_f = em.get_features_for_blocking(A, B)
>>> rule = ['name_name_lev(ltuple, rtuple) > 3']
>>> rb.add_rule(rule, rule_name='rule1')
"""
if rule_name is not None and rule_name in self.rules.keys():
logger.error('A rule with the specified rule_name already exists.')
raise AssertionError('A rule with the specified rule_name already exists.')
if feature_table is None and self.feature_table is None:
logger.error('Either feature table should be given as parameter ' +
'or use set_feature_table to set the feature table.')
raise AssertionError('Either feature table should be given as ' +
'parameter or use set_feature_table to set ' +
'the feature table.')
if not isinstance(conjunct_list, list):
conjunct_list = [conjunct_list]
fn, name, fn_str = self._create_rule(conjunct_list, feature_table, rule_name)
self.rules[name] = fn
self.rule_source[name] = fn_str
self.rule_str[name] = conjunct_list
if feature_table is not None:
self.rule_ft[name] = feature_table
else:
self.rule_ft[name] = self.feature_table
return name
def delete_rule(self, rule_name):
"""Deletes a rule from the rule-based blocker.
Args:
rule_name (string): Name of the rule to be deleted.
Examples:
>>> import py_entitymatching as em
>>> from py_entitymatching.dask.dask_rule_based_blocker import DaskRuleBasedBlocker
>>> rb = DaskRuleBasedBlocker()
>>> A = em.read_csv_metadata('path_to_csv_dir/table_A.csv', key='id')
>>> B = em.read_csv_metadata('path_to_csv_dir/table_B.csv', key='id')
>>> block_f = em.get_features_for_blocking(A, B)
>>> rule = ['name_name_lev(ltuple, rtuple) > 3']
>>> rb.add_rule(rule, block_f, rule_name='rule_1')
>>> rb.delete_rule('rule_1')
"""
assert rule_name in self.rules.keys(), 'Rule name not in current set of rules'
del self.rules[rule_name]
del self.rule_source[rule_name]
del self.rule_str[rule_name]
del self.rule_ft[rule_name]
return True
def view_rule(self, rule_name):
"""Prints the source code of the function corresponding to a rule.
Args:
rule_name (string): Name of the rule to be viewed.
Examples:
>>> import py_entitymatching as em
>>> rb = em.DaskRuleBasedBlocker()
>>> A = em.read_csv_metadata('path_to_csv_dir/table_A.csv', key='id')
>>> B = em.read_csv_metadata('path_to_csv_dir/table_B.csv', key='id')
>>> block_f = em.get_features_for_blocking(A, B)
>>> rule = ['name_name_lev(ltuple, rtuple) > 3']
>>> rb.add_rule(rule, block_f, rule_name='rule_1')
>>> rb.view_rule('rule_1')
"""
assert rule_name in self.rules.keys(), 'Rule name not in current set of rules'
print(self.rule_source[rule_name])
def get_rule_names(self):
"""Returns the names of all the rules in the rule-based blocker.
Returns:
A list of names of all the rules in the rule-based blocker (list).
Examples:
>>> import py_entitymatching as em
>>> rb = em.DaskRuleBasedBlocker()
>>> A = em.read_csv_metadata('path_to_csv_dir/table_A.csv', key='id')
>>> B = em.read_csv_metadata('path_to_csv_dir/table_B.csv', key='id')
>>> block_f = em.get_features_for_blocking(A, B)
>>> rule = ['name_name_lev(ltuple, rtuple) > 3']
>>> rb.add_rule(rule, block_f, rule_name='rule_1')
>>> rb.get_rule_names()
"""
return self.rules.keys()
def get_rule(self, rule_name):
"""Returns the function corresponding to a rule.
Args:
rule_name (string): Name of the rule.
Returns:
A function object corresponding to the specified rule.
Examples:
>>> import py_entitymatching as em
>>> rb = em.DaskRuleBasedBlocker()
>>> A = em.read_csv_metadata('path_to_csv_dir/table_A.csv', key='id')
>>> B = em.read_csv_metadata('path_to_csv_dir/table_B.csv', key='id')
>>> block_f = em.get_features_for_blocking(A, B)
>>> rule = ['name_name_lev(ltuple, rtuple) > 3']
>>> rb.add_rule(rule, feature_table=block_f, rule_name='rule_1')
            >>> rb.get_rule('rule_1')
"""
assert rule_name in self.rules.keys(), 'Rule name not in current set of rules'
return self.rules[rule_name]
def set_feature_table(self, feature_table):
"""Sets feature table for the rule-based blocker.
Args:
feature_table (DataFrame): A DataFrame containing features.
Examples:
>>> import py_entitymatching as em
>>> rb = em.DaskRuleBasedBlocker()
>>> A = em.read_csv_metadata('path_to_csv_dir/table_A.csv', key='id')
>>> B = em.read_csv_metadata('path_to_csv_dir/table_B.csv', key='id')
>>> block_f = em.get_features_for_blocking(A, B)
>>> rb.set_feature_table(block_f)
"""
if self.feature_table is not None:
logger.warning(
'Feature table is already set, changing it now will not recompile '
'existing rules')
self.feature_table = feature_table
def block_tables(self, ltable, rtable, l_output_attrs=None,
r_output_attrs=None,
l_output_prefix='ltable_', r_output_prefix='rtable_',
verbose=False, show_progress=True, n_ltable_chunks=1,
n_rtable_chunks=1):
"""
WARNING THIS COMMAND IS EXPERIMENTAL AND NOT TESTED. USE AT YOUR OWN RISK
Blocks two tables based on the sequence of rules supplied by the user.
Finds tuple pairs from left and right tables that survive the sequence
of blocking rules. A tuple pair survives the sequence of blocking rules
if none of the rules in the sequence returns True for that pair. If any
of the rules returns True, then the pair is blocked.
Args:
ltable (DataFrame): The left input table.
rtable (DataFrame): The right input table.
l_output_attrs (list): A list of attribute names from the left
table to be included in the
output candidate set (defaults to None).
r_output_attrs (list): A list of attribute names from the right
table to be included in the
output candidate set (defaults to None).
l_output_prefix (string): The prefix to be used for the attribute names
coming from the left table in the output
candidate set (defaults to 'ltable\_').
r_output_prefix (string): The prefix to be used for the attribute names
coming from the right table in the output
candidate set (defaults to 'rtable\_').
verbose (boolean): A flag to indicate whether the debug
information should be logged (defaults to False).
show_progress (boolean): A flag to indicate whether progress should
be displayed to the user (defaults to True).
n_ltable_chunks (int): The number of partitions to split the left table (
defaults to 1). If it is set to -1, then the number of
partitions is set to the number of cores in the
machine.
n_rtable_chunks (int): The number of partitions to split the right table (
defaults to 1). If it is set to -1, then the number of
partitions is set to the number of cores in the
machine.
Returns:
A candidate set of tuple pairs that survived the sequence of
blocking rules (DataFrame).
Raises:
AssertionError: If `ltable` is not of type pandas
DataFrame.
AssertionError: If `rtable` is not of type pandas
DataFrame.
AssertionError: If `l_output_attrs` is not of type of
list.
AssertionError: If `r_output_attrs` is not of type of
list.
AssertionError: If the values in `l_output_attrs` is not of type
string.
AssertionError: If the values in `r_output_attrs` is not of type
string.
AssertionError: If the input `l_output_prefix` is not of type
string.
AssertionError: If the input `r_output_prefix` is not of type
string.
AssertionError: If `verbose` is not of type
boolean.
AssertionError: If `show_progress` is not of type
boolean.
AssertionError: If `n_ltable_chunks` is not of type
int.
AssertionError: If `n_rtable_chunks` is not of type
int.
            AssertionError: If `l_output_attrs` are not in the ltable.
            AssertionError: If `r_output_attrs` are not in the rtable.
AssertionError: If there are no rules to apply.
Examples:
>>> import py_entitymatching as em
>>> from py_entitymatching.dask.dask_rule_based_blocker import DaskRuleBasedBlocker
>>> rb = DaskRuleBasedBlocker()
>>> A = em.read_csv_metadata('path_to_csv_dir/table_A.csv', key='id')
>>> B = em.read_csv_metadata('path_to_csv_dir/table_B.csv', key='id')
>>> block_f = em.get_features_for_blocking(A, B)
>>> rule = ['name_name_lev(ltuple, rtuple) > 3']
>>> rb.add_rule(rule, feature_table=block_f)
>>> C = rb.block_tables(A, B)
"""
logger.warning(
"WARNING THIS COMMAND IS EXPERIMENTAL AND NOT TESTED. USE AT YOUR OWN RISK.")
# validate data types of input parameters
self.validate_types_params_tables(ltable, rtable,
l_output_attrs, r_output_attrs,
l_output_prefix,
r_output_prefix, verbose, 1)
# validate data type of show_progress
self.validate_show_progress(show_progress)
# validate input parameters
self.validate_output_attrs(ltable, rtable, l_output_attrs,
r_output_attrs)
# get and validate metadata
log_info(logger, 'Required metadata: ltable key, rtable key', verbose)
# # get metadata
l_key, r_key = cm.get_keys_for_ltable_rtable(ltable, rtable, logger,
verbose)
# # validate metadata
cm._validate_metadata_for_table(ltable, l_key, 'ltable', logger,
verbose)
cm._validate_metadata_for_table(rtable, r_key, 'rtable', logger,
verbose)
# validate rules
assert len(self.rules.keys()) > 0, 'There are no rules to apply'
# validate number of ltable and rtable chunks
validate_object_type(n_ltable_chunks, int, 'Parameter n_ltable_chunks')
validate_object_type(n_rtable_chunks, int, 'Parameter n_rtable_chunks')
validate_chunks(n_ltable_chunks)
validate_chunks(n_rtable_chunks)
# # determine the number of chunks
n_ltable_chunks = get_num_partitions(n_ltable_chunks, len(ltable))
n_rtable_chunks = get_num_partitions(n_rtable_chunks, len(rtable))
# # set index for convenience
l_df = ltable.set_index(l_key, drop=False)
r_df = rtable.set_index(r_key, drop=False)
# # remove l_key from l_output_attrs and r_key from r_output_attrs
l_output_attrs_1 = []
if l_output_attrs:
l_output_attrs_1 = [x for x in l_output_attrs if x != l_key]
r_output_attrs_1 = []
if r_output_attrs:
r_output_attrs_1 = [x for x in r_output_attrs if x != r_key]
# # get attributes to project
l_proj_attrs, r_proj_attrs = self.get_attrs_to_project(l_key, r_key,
l_output_attrs_1,
r_output_attrs_1)
l_df, r_df = l_df[l_proj_attrs], r_df[r_proj_attrs]
candset, rule_applied = self.block_tables_with_filters(l_df, r_df,
l_key, r_key,
l_output_attrs_1,
r_output_attrs_1,
l_output_prefix,
r_output_prefix,
verbose,
show_progress,
get_num_cores())
# pass number of splits as
# the number of cores in the machine
if candset is None:
# no filterable rule was applied
candset = self.block_tables_without_filters(l_df, r_df, l_key,
r_key, l_output_attrs_1,
r_output_attrs_1,
l_output_prefix,
r_output_prefix,
verbose, show_progress,
n_ltable_chunks, n_rtable_chunks)
elif len(self.rules) > 1:
# one filterable rule was applied but other rules are left
# block candset by applying other rules and excluding the applied rule
candset = self.block_candset_excluding_rule(candset, l_df, r_df,
l_key, r_key,
l_output_prefix + l_key,
r_output_prefix + r_key,
rule_applied,
show_progress, get_num_cores())
retain_cols = self.get_attrs_to_retain(l_key, r_key, l_output_attrs_1,
r_output_attrs_1,
l_output_prefix, r_output_prefix)
if len(candset) > 0:
candset = candset[retain_cols]
else:
candset = pd.DataFrame(columns=retain_cols)
# update catalog
key = get_name_for_key(candset.columns)
candset = add_key_column(candset, key)
cm.set_candset_properties(candset, key, l_output_prefix + l_key,
r_output_prefix + r_key, ltable, rtable)
# return candidate set
return candset
def block_candset_excluding_rule(self, c_df, l_df, r_df, l_key, r_key,
fk_ltable, fk_rtable, rule_to_exclude,
show_progress, n_chunks):
# # list to keep track of valid ids
valid = []
apply_rules_excluding_rule_pkl = cp.dumps(self.apply_rules_excluding_rule)
if n_chunks == 1:
# single process
valid = _block_candset_excluding_rule_split(c_df, l_df, r_df,
l_key, r_key,
fk_ltable, fk_rtable,
rule_to_exclude,
apply_rules_excluding_rule_pkl,
show_progress)
else:
# multiprocessing
c_splits = pd.np.array_split(c_df, n_chunks)
valid_splits = []
for i in range(len(c_splits)):
partial_result = delayed(_block_candset_excluding_rule_split)(c_splits[i],
l_df, r_df,
l_key, r_key,
fk_ltable,
fk_rtable,
rule_to_exclude,
apply_rules_excluding_rule_pkl, False)
                # show_progress is passed to the helper as False above because
                # progress is reported via the dask.diagnostics ProgressBar below.
valid_splits.append(partial_result)
valid_splits = delayed(wrap)(valid_splits)
if show_progress:
with ProgressBar():
valid_splits = valid_splits.compute(scheduler="processes",
num_workers=get_num_cores())
else:
valid_splits = valid_splits.compute(scheduler="processes",
num_workers=get_num_cores())
valid = sum(valid_splits, [])
# construct output candset
if len(c_df) > 0:
candset = c_df[valid]
else:
            candset = pd.DataFrame(columns=c_df.columns)
        return candset
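
# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the blocker module above or the Flask app
# below): the chunked evaluation pattern used in block_candset_excluding_rule,
# reduced to a self-contained example. A DataFrame is split into chunks, a
# per-chunk function is wrapped in dask.delayed, and all chunks are computed
# under a dask.diagnostics ProgressBar. The DataFrame and the keep_even()
# predicate are invented for this sketch; the blocker itself uses the
# "processes" scheduler and splits the real candidate set instead.
import numpy as np
import pandas as pd
from dask import delayed, compute
from dask.diagnostics import ProgressBar


def keep_even(chunk):
    # Return a boolean list marking the rows of this chunk that "survive".
    return [int(v) % 2 == 0 for v in chunk['id']]


if __name__ == '__main__':
    df = pd.DataFrame({'id': range(10)})
    positions = np.array_split(np.arange(len(df)), 4)      # 4 chunks of row positions
    tasks = [delayed(keep_even)(df.iloc[pos]) for pos in positions]
    with ProgressBar():
        per_chunk = compute(*tasks, scheduler="threads")    # one boolean list per chunk
    surviving = df[sum(per_chunk, [])]                      # concatenate masks, then filter
# ---------------------------------------------------------------------------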
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 31 15:16:47 2017
@author: wasifaahmed
"""
from flask import Flask, flash, render_template, request, Response, redirect, url_for, send_from_directory, jsonify, session
import json
from datetime import datetime, timedelta, date
from sklearn.cluster import KMeans
import numpy as np
from PIL import Image
from flask_sqlalchemy import SQLAlchemy
import matplotlib.image as mpimg
from io import StringIO
from skimage import data, exposure, img_as_float, io, color
import scipy
from scipy import ndimage
import time
import tensorflow as tf
import os, sys
import shutil
import pandas as pd
from model import *
from sqlalchemy.sql import text
from sqlalchemy import *
from forms import *
import math
import csv
from sqlalchemy.orm import load_only, sessionmaker, scoped_session
from numpy import genfromtxt
from sqlalchemy.ext.serializer import loads, dumps
from flask_bootstrap import Bootstrap
graph = tf.Graph()
with graph.as_default():
sess = tf.Session(graph=graph)
init_op = tf.global_variables_initializer()
pointsarray=[]
def load_model():
sess.run(init_op)
saver = tf.train.import_meta_graph('E:/FRAS Windows/FRAS_production/Simulation/FRAS_20170726/FRAS_20170727.meta')
#saver = tf.train.import_meta_graph('/Users/wasifaahmed/Documents/FRAS/Fras_production_v.0.1/FRAS Windows/FRAS Windows/FRAS_production/Simulation/FRAS_20170726/FRAS_20170727.meta')
print('The model is loading...')
#saver.restore(sess, "/Users/wasifaahmed/Documents/FRAS/Fras_production_v.0.1/FRAS Windows/FRAS Windows/FRAS_production/Simulation/FRAS_20170726/FRAS_20170727")
saver.restore(sess, 'E:/FRAS Windows/FRAS_production/Simulation/FRAS_20170726/FRAS_20170727')
print('loaded...')
pass
engine =create_engine('postgresql://postgres:user@localhost/postgres')
Session = scoped_session(sessionmaker(bind=engine))
mysession = Session()
app = Flask(__name__)
app.config.update(
DEBUG=True,
SECRET_KEY='\<KEY>')
app.config['SQLALCHEMY_DATABASE_URI'] = 'postgresql://postgres:user@localhost/fras_production'
db.init_app(app)
Bootstrap(app)
@app.after_request
def add_header(response):
response.headers['X-UA-Compatible'] = 'IE=Edge,chrome=1'
response.headers['Cache-Control'] = 'public, max-age=0'
return response
@app.route('/',methods=['GET', 'POST'])
def login():
form = LoginForm()
return render_template('forms/login.html', form=form)
@app.route('/home',methods=['GET', 'POST'])
def index():
return render_template('pages/home.html')
@app.route('/detail_setup/')
def Detail_Setup():
curdate=time.strftime("%Y-%m-%d")
selection=Shooting_Session.query.filter(Shooting_Session.date>=curdate).order_by(Shooting_Session.datetimestamp.desc()).all()
firer_1 = [row.service_id for row in Shooter.query.all()]
return render_template('pages/detail_setup.html',
data=selection,
firer_1=firer_1)
@app.route('/auto_setup/')
def auto_setup():
drop=[]
curdate=time.strftime("%Y-%m-%d")
form=BulkRegistrationForm()
selection_2=Shooting_Session.query.filter(Shooting_Session.date>=curdate).order_by(Shooting_Session.datetimestamp.desc()).all()
selection=TGroup.query.distinct(TGroup.group_no).filter(TGroup.date==curdate).all()
return render_template('pages/auto_setup.html',
data=selection, data_2=selection_2,form=form)
@app.route('/auto_setup_1/')
def auto_setup_1():
drop=[]
curdate=time.strftime("%Y-%m-%d")
form=BulkRegistrationForm()
selection_2=Shooting_Session.query.filter(Shooting_Session.date>=curdate).order_by(Shooting_Session.datetimestamp.desc()).all()
selection=TGroup.query.distinct(TGroup.group_no).all()
return render_template('pages/auto_setup_1.html',
data=selection, data_2=selection_2,form=form)
@app.route('/group_gen/',methods=['GET', 'POST'])
def group_gen():
da_1=None
da_2=None
da_3=None
da_4=None
da_5=None
da_6=None
da_7=None
da_8=None
if request.method == "POST":
data = request.get_json()
group=data['data']
session['group']=group
data=TGroup.query.filter(TGroup.group_no==group).scalar()
da_1=data.target_1_no
da_2=data.target_2_no
da_3=data.target_3_no
da_4=data.target_4_no
da_5=data.target_5_no
da_6=data.target_6_no
da_7=data.target_7_no
da_8=data.target_8_no
return jsonify(data1=da_1,
data2=da_2,
data3=da_3,
data4=da_4,
data5=da_5,
data6=da_6,
data7=da_7,
data8=da_8
)
@app.route('/detail_exitence_1/',methods=['GET', 'POST'])
def detail_exitence_1():
ra_1=None
da_1=None
detail=None
service_id_1=None
session=None
paper=None
set_no=None
cant=None
if request.method == "POST":
data = request.get_json()
detail=data['data']
dt=time.strftime("%Y-%m-%d")
data=db.session.query(Session_Detail).filter(Session_Detail.detail_no==detail).scalar()
db.session.query(TShooting).delete()
db.session.commit()
Tdetail_shots =TShooting(
date=datetime.now(),
datetimestamp=time.strftime("%Y-%m-%d %H:%M"),
session_id=data.session_id,
detail_no=data.detail_no,
target_1_id=data.target_1_id,
target_2_id=data.target_2_id,
target_3_id=data.target_3_id,
target_4_id=data.target_4_id,
target_5_id=data.target_5_id,
target_6_id=data.target_6_id,
target_7_id=data.target_7_id,
target_8_id=data.target_8_id,
paper_ref=data.paper_ref,
set_no=data.set_no,
save_flag=0
)
db.session.add(Tdetail_shots)
db.session.commit()
res=[]
ten=[]
gp_len=[]
tten=db.session.query(MPI.tendency_code).filter(MPI.firer_id==data.target_1_id).order_by(MPI.datetimestamp.desc()).limit(5).all()[::-1]
tres = db.session.query(Grouping.result).filter(Grouping.firer_id==data.target_1_id).order_by(Grouping.datetimestamp.desc()).limit(5).all()[::-1]
tgp = db.session.query(Grouping.grouping_length_f).filter(Grouping.firer_id==data.target_1_id).order_by(Grouping.datetimestamp.desc()).limit(5).all()[::-1]
for ele in tres:
for ele2 in ele:
res.append(ele2)
for ele3 in tten:
for ele4 in ele3:
ten.append(ele4)
for ele5 in tgp:
for ele6 in ele5:
gp_len.append(ele6)
da_1=db.session.query(Shooter.name).filter(Shooter.id==data.target_1_id).scalar()
cant_id=db.session.query(Shooter.cantonment_id).filter(Shooter.id==data.target_1_id).scalar()
cant=db.session.query(Cantonment.cantonment).filter(Cantonment.id==cant_id).scalar()
ra_1_id=db.session.query(Shooter.rank_id).filter(Shooter.id==data.target_1_id).scalar()
ra_1 = db.session.query(Rank.name).filter(Rank.id==ra_1_id).scalar()
session=db.session.query(TShooting.session_id).scalar()
paper=db.session.query(TShooting.paper_ref).scalar()
set_no=db.session.query(TShooting.set_no).scalar()
service_id_1 = db.session.query(Shooter.service_id).filter(Shooter.id==data.target_1_id).scalar()
return jsonify(
data1=da_1,
ra_1=ra_1,
detail=detail,
service_id_1=service_id_1,
session=session,
paper=paper,
set_no=set_no,
cant=cant,
res=res,
ten=ten,
gp_len=gp_len
)
@app.route('/generate_ref/' ,methods=['GET', 'POST'])
def generate_ref():
g=None
if request.method == "POST":
data = request.get_json()
paper_ref =data['data']
if (paper_ref == 'New'):
g=0
else:
obj=TPaper_ref.query.scalar()
g= obj.paper_ref
return jsonify(gen=int(g))
@app.route('/create_detail_target_2/', methods=['GET', 'POST'])
def create_detail_target_2():
curdate=time.strftime("%Y-%m-%d")
firer_1 = [row.service_id for row in Shooter.query.all()]
detail_data=TShooting.query.scalar()
return render_template('pages/create_detail_target_2.html',
detail_data=detail_data,
firer_1=firer_1
)
@app.route('/save_target_2/', methods=['GET', 'POST'])
def save_target_2():
r=request.form['tag']
r_object=Shooter.query.filter(Shooter.service_id==r).scalar()
r_id=r_object.id
ses=Session_Detail.query.first()
ses.target_2_id=r_id
db.session.commit()
temp =TShooting.query.first()
temp.target_2_id=r_id
db.session.commit()
return redirect(url_for('individual_score_target_2'))
@app.route('/create_detail_target_1/', methods=['GET', 'POST'])
def create_detail_target_1():
curdate=time.strftime("%Y-%m-%d")
selection=Shooting_Session.query.filter(Shooting_Session.date==curdate).all()
firer_1 = [row.service_id for row in Shooter.query.all()]
return render_template('pages/create_detail_target_1.html',
data=selection,
firer_1=firer_1
)
@app.route('/create_session/', methods=['GET', 'POST'])
def create_session():
try:
data = Shooter.query.all()
rang= Range.query.all()
firearms = Firearms.query.all()
ammunation = Ammunation.query.all()
rang_name = request.form.get('comp_select_4')
fire_name = request.form.get('comp_select_5')
ammu_name = request.form.get('comp_select_6')
form=SessionForm()
if(rang_name is None):
range_id=999
fire_id=999
ammu_id=999
else:
range_id = db.session.query(Range.id).filter(Range.name==rang_name).scalar()
fire_id = db.session.query(Firearms.id).filter(Firearms.name==fire_name).scalar()
ammu_id = db.session.query(Ammunation.id).filter(Ammunation.name==ammu_name).scalar()
if form.validate_on_submit():
shooting=Shooting_Session(
date=form.date.data.strftime('%Y-%m-%d'),
datetimestamp=time.strftime("%Y-%m-%d %H:%M"),
shooting_range_id=range_id,
firearms_id=fire_id,
ammunation_id=ammu_id,
target_distance = form.target_distance.data,
weather_notes = form.weather_notes.data,
comments = form.comments.data,
session_no=form.session_no.data,
occasion=form.occ.data
)
db.session.add(shooting)
db.session.commit()
return redirect(url_for('create_detail_target_1'))
except Exception as e:
return redirect(url_for('error5_505.html'))
return render_template('forms/shooting_form.html', form=form, data =data ,rang=rang , firearmns=firearms, ammunation = ammunation)
@app.route('/monthly_report/',methods=['GET','POST'])
def monthly_report():
    month=None
    try:
        if request.method=='POST':
            month=request.form.get('comp_select')
            year = datetime.now().year
            # Map the selected month name to its number; unknown values fall
            # back to December, mirroring the original catch-all branch.
            month_numbers = {
                'January': 1, 'February': 2, 'March': 3, 'April': 4,
                'May': 5, 'June': 6, 'July': 7, 'August': 8,
                'September': 9, 'October': 10, 'November': 11, 'December': 12
            }
            month_no = month_numbers.get(month, 12)
            date_start = datetime(year, month_no, 1)
            # Last day of the month: first day of the next month minus one day.
            if month_no == 12:
                date_end = datetime(year, 12, 31)
            else:
                date_end = datetime(year, month_no + 1, 1) - timedelta(days=1)
            # All conditions are passed as separate filter() arguments so that
            # SQLAlchemy ANDs them (the original chained one with a Python `and`).
            dat1 = db.session.query(
                Grouping.date, Shooter.service_id, Rank.name,
                Shooter.name.label('firer'), Shooter.unit, Shooter.brigade,
                Grouping.detail_no, Grouping.result,
                Grouping.grouping_length_f, MPI.tendency_text
            ).filter(
                Grouping.date.between(date_start, date_end),
                Grouping.firer_id == Shooter.id,
                Shooter.rank_id == Rank.id,
                Grouping.date == MPI.date,
                Grouping.session_id == MPI.session_id,
                Grouping.firer_id == MPI.firer_id,
                Grouping.detail_no == MPI.detail_no,
                Grouping.target_no == MPI.target_no,
                Grouping.spell_no == MPI.spell_no,
                Grouping.paper_ref == MPI.paper_ref
            ).all()
            return render_template('pages/monthly_report.html', dat1=dat1, month=month)
    except Exception as e:
        return render_template('errors/month_session.html')
    return render_template('pages/monthly_report.html')
@app.route('/save_target_1/', methods=['GET', 'POST'])
def save_target_1():
ref_1=None
try:
if request.method == 'POST':
detail_no = request.form['game_id_1']
r=request.form['tag']
r_object=Shooter.query.filter(Shooter.service_id==r).scalar()
r_id=r_object.id
r2_id=999
r3_id=999
r4_id=999
r5_id=999
r6_id=999
r7_id=999
r8_id=999
ref=request.form['business']
set_no = request.form.get('comp_select_6')
shots = request.form['tag_8']
sess=request.form.get('comp_select')
ref_1 = None
paper=db.session.query(TPaper_ref).scalar()
if(ref == ""):
ref_1=paper.paper_ref
else:
ref_1=ref
temp_shooting=db.session.query(TShooting).scalar()
if(temp_shooting is None):
detail_shots =Session_Detail(
date=datetime.now(),
datetimestamp=time.strftime("%Y-%m-%d %H:%M"),
session_id=sess,
detail_no=detail_no,
target_1_id=r_id,
target_2_id=r2_id,
target_3_id=r3_id,
target_4_id=r4_id,
target_5_id=r5_id,
target_6_id=r6_id,
target_7_id=r7_id,
target_8_id=r8_id,
paper_ref=ref_1,
set_no=set_no,
save_flag=0
)
db.session.add(detail_shots)
db.session.commit()
db.session.query(TPaper_ref).delete()
db.session.commit()
ref_db = TPaper_ref(
paper_ref=ref_1,
detail_no=detail_no,
session_no=sess
)
db.session.add(ref_db)
db.session.commit()
Tdetail_shots =TShooting(
date=datetime.now(),
datetimestamp=time.strftime("%Y-%m-%d %H:%M"),
session_id=sess,
detail_no=detail_no,
target_1_id=r_id,
target_2_id=r2_id,
target_3_id=r3_id,
target_4_id=r4_id,
target_5_id=r5_id,
target_6_id=r6_id,
target_7_id=r7_id,
target_8_id=r8_id,
paper_ref=ref_1,
set_no=set_no,
save_flag=0
)
db.session.add(Tdetail_shots)
db.session.commit()
else:
db.session.query(TShooting).delete()
db.session.commit()
db.session.query(TPaper_ref).delete()
db.session.commit()
ref_db = TPaper_ref(
paper_ref=ref_1,
detail_no=detail_no,
session_no=sess
)
db.session.add(ref_db)
db.session.commit()
detail_shots =Session_Detail(
date=datetime.now(),
datetimestamp=time.strftime("%Y-%m-%d %H:%M"),
session_id=sess,
detail_no=detail_no,
target_1_id=r_id,
target_2_id=r2_id,
target_3_id=r3_id,
target_4_id=r4_id,
target_5_id=r5_id,
target_6_id=r6_id,
target_7_id=r7_id,
target_8_id=r8_id,
paper_ref=ref_1,
set_no=set_no,
save_flag=0
)
db.session.add(detail_shots)
db.session.commit()
Tdetail_shots =TShooting(
date=datetime.now(),
datetimestamp=time.strftime("%Y-%m-%d %H:%M"),
session_id=sess,
detail_no=detail_no,
target_1_id=r_id,
target_2_id=r2_id,
target_3_id=r3_id,
target_4_id=r4_id,
target_5_id=r5_id,
target_6_id=r6_id,
target_7_id=r7_id,
target_8_id=r8_id,
paper_ref=ref_1,
set_no=set_no,
save_flag=0
)
db.session.add(Tdetail_shots)
db.session.commit()
except Exception as e:
return redirect(url_for('error_target_1'))
return redirect(url_for('individual_score_target_1'))
@app.route('/FRAS/', methods=['GET', 'POST'])
def load ():
try:
ref_1=None
if request.method == 'POST':
detail_no = request.form['game_id_1']
tmp_list = []
duplicate = False
r=request.form['tag']
if (r== ""):
r_id = 999
else:
r_object=Shooter.query.filter(Shooter.service_id==r).scalar()
r_id=r_object.id
r1=request.form['tag_1']
if(r1== ""):
r1_id=999
else:
r1_object=Shooter.query.filter(Shooter.service_id==r1).scalar()
r1_id=r1_object.id
r2=request.form['tag_2']
if (r2==""):
r2_id=999
else:
r2_object=Shooter.query.filter(Shooter.service_id==r2).scalar()
r2_id=r2_object.id
r3=request.form['tag_3']
if(r3==""):
r3_id=999
else:
r3_object=Shooter.query.filter(Shooter.service_id==r3).scalar()
r3_id=r3_object.id
r4=request.form['tag_4']
if(r4==""):
r4_id=999
else:
r4_object=Shooter.query.filter(Shooter.service_id==r4).scalar()
r4_id=r4_object.id
r5=request.form['tag_5']
if(r5==""):
r5_id=999
else:
r5_object=Shooter.query.filter(Shooter.service_id==r5).scalar()
r5_id=r5_object.id
r6=request.form['tag_6']
if(r6==""):
r6_id=999
else:
r6_object=Shooter.query.filter(Shooter.service_id==r6).scalar()
r6_id=r6_object.id
r7=request.form['tag_7']
if(r7== ""):
r7_id=999
else:
r7_object=Shooter.query.filter(Shooter.service_id==r7).scalar()
r7_id=r7_object.id
ref=request.form['business']
set_no = request.form.get('comp_select_6')
shots = request.form['tag_8']
sess=request.form.get('comp_select')
tmp_list.append(r_id)
tmp_list.append(r1_id)
tmp_list.append(r2_id)
tmp_list.append(r3_id)
tmp_list.append(r4_id)
tmp_list.append(r5_id)
tmp_list.append(r6_id)
tmp_list.append(r7_id)
if ref == None or ref =="":
ref_obj=TPaper_ref.query.scalar()
ref_1=ref_obj.paper_ref
else :
print("Inside ref _4 else")
ref_1=ref
print(ref_1)
print("Inside ref _4 else 1")
if(int(set_no)>5):
print("Inside ref _5 else")
return redirect(url_for('paper_duplicate_error'))
else:
print("Inside TPaper_ref")
db.session.query(TPaper_ref).delete()
print("Inside TPaper_ref")
db.session.commit()
ref_db = TPaper_ref(
paper_ref=ref_1,
detail_no=detail_no,
session_no=sess
)
db.session.add(ref_db)
db.session.commit()
print("Inside load 3")
for i in range(len(tmp_list)):
for j in range(len(tmp_list)):
if(tmp_list[i]== 999 and tmp_list[j]==999):
duplicate = False
elif(i!=j and tmp_list[i]==tmp_list[j]):
duplicate = True
print("temp1")
if(duplicate):
return redirect(url_for('duplicate_firer_error'))
else:
print("temp")
temp=db.session.query(TShooting.save_flag).scalar()
print(temp)
if(temp is None):
print("Inside the temp if")
print(sess)
print(detail_no)
Tdetail_shots =TShooting(
date=datetime.now(),
datetimestamp=time.strftime("%Y-%m-%d %H:%M"),
session_id=sess,
detail_no=detail_no,
target_1_id=r_id,
target_2_id=r1_id,
target_3_id=r2_id,
target_4_id=r3_id,
target_5_id=r4_id,
target_6_id=r5_id,
target_7_id=r6_id,
target_8_id=r7_id,
paper_ref=ref_1,
set_no=set_no,
save_flag=0
)
print(Tdetail_shots)
print("Tdetail_shots")
db.session.add(Tdetail_shots)
db.session.commit()
print(""
)
detail_shots =Session_Detail(
date=datetime.now(),
datetimestamp=time.strftime("%Y-%m-%d %H:%M"),
session_id=sess,
detail_no=detail_no,
target_1_id=r_id,
target_2_id=r1_id,
target_3_id=r2_id,
target_4_id=r3_id,
target_5_id=r4_id,
target_6_id=r5_id,
target_7_id=r6_id,
target_8_id=r7_id,
paper_ref=ref_1,
set_no=set_no,
save_flag=0
)
db.session.add(detail_shots)
db.session.commit()
else:
db.session.query(TShooting).filter(TShooting.id != 999).delete()
db.session.commit()
Tdetail_shots =TShooting(
date=datetime.now(),
datetimestamp=time.strftime("%Y-%m-%d %H:%M"),
session_id=sess,
detail_no=detail_no,
target_1_id=r_id,
target_2_id=r1_id,
target_3_id=r2_id,
target_4_id=r3_id,
target_5_id=r4_id,
target_6_id=r5_id,
target_7_id=r6_id,
target_8_id=r7_id,
paper_ref=ref_1,
set_no=set_no,
save_flag=0
)
db.session.add(Tdetail_shots)
db.session.commit()
detail_shots =Session_Detail(
date=datetime.now(),
datetimestamp=time.strftime("%Y-%m-%d %H:%M"),
session_id=sess,
detail_no=detail_no,
target_1_id=r_id,
target_2_id=r1_id,
target_3_id=r2_id,
target_4_id=r3_id,
target_5_id=r4_id,
target_6_id=r5_id,
target_7_id=r6_id,
target_8_id=r7_id,
paper_ref=ref_1,
set_no=set_no,
save_flag=0
)
db.session.add(detail_shots)
db.session.commit()
except Exception as e:
print(e)
return redirect(url_for('error_2'))
return redirect(url_for('image_process'))
@app.route('/FRAS_1/', methods=['GET', 'POST'])
def load_1 ():
ref_1=None
try:
if request.method == 'POST':
print("This is inside Post")
detail_no = request.form['game_id_1']
print("this is detail_no")
print(detail_no)
tmp_list = []
duplicate = False
gr=session.get('group',None)
data=TGroup.query.filter(TGroup.group_no==gr).scalar()
da_1=data.target_1_no
da_2=data.target_2_no
da_3=data.target_3_no
da_4=data.target_4_no
da_5=data.target_5_no
da_6=data.target_6_no
da_7=data.target_7_no
da_8=data.target_8_no
if(da_1==""):
r_id=999
else:
r=Shooter.query.filter(Shooter.service_id==da_1).scalar()
r_id=r.id
if(da_2==""):
r1_id=999
else:
r1=Shooter.query.filter(Shooter.service_id==da_2).scalar()
r1_id=r1.id
if(da_3==""):
r2_id=999
else:
r2=Shooter.query.filter(Shooter.service_id==da_3).scalar()
r2_id=r2.id
if(da_4==""):
r3_id=999
else:
r3=Shooter.query.filter(Shooter.service_id==da_4).scalar()
r3_id=r3.id
if(da_5==""):
r4_id=999
else:
r4=Shooter.query.filter(Shooter.service_id==da_5).scalar()
r4_id=r4.id
if(da_6==""):
r5_id=999
else:
r5=Shooter.query.filter(Shooter.service_id==da_6).scalar()
r5_id=r5.id
if(da_7==""):
r6_id=999
else:
r6=Shooter.query.filter(Shooter.service_id==da_7).scalar()
r6_id=r6.id
if(da_8==""):
r7_id=999
else:
r7=Shooter.query.filter(Shooter.service_id==da_8).scalar()
r7_id=r7.id
ref=request.form['business']
set_no = request.form.get('comp_select_6')
shots = request.form['tag_8']
sess=request.form.get('comp_select')
tmp_list.append(r_id)
tmp_list.append(r1_id)
tmp_list.append(r2_id)
tmp_list.append(r3_id)
tmp_list.append(r4_id)
tmp_list.append(r5_id)
tmp_list.append(r6_id)
tmp_list.append(r7_id)
print(tmp_list)
if ref == None or ref =="":
ref_obj=TPaper_ref.query.scalar()
ref_1=ref_obj.paper_ref
else :
ref_1=ref
check=TPaper_ref.query.scalar()
cses=check.session_no
det=check.detail_no
if(int(set_no)>5):
return redirect(url_for('paper_duplicate_error'))
else:
db.session.query(TPaper_ref).delete()
db.session.commit()
ref_db = TPaper_ref(
paper_ref=ref_1,
detail_no=detail_no,
session_no=sess
)
db.session.add(ref_db)
db.session.commit()
for i in range(len(tmp_list)):
for j in range(len(tmp_list)):
if(tmp_list[i]== 999 and tmp_list[j]==999):
duplicate = False
elif(i!=j and tmp_list[i]==tmp_list[j]):
duplicate = True
if(duplicate):
return redirect(url_for('duplicate_firer_error'))
else:
temp_shooting=db.session.query(TShooting).scalar()
if(temp_shooting is None):
detail_shots =Session_Detail(
date=datetime.now(),
datetimestamp=time.strftime("%Y-%m-%d %H:%M"),
session_id=sess,
detail_no=detail_no,
target_1_id=r_id,
target_2_id=r1_id,
target_3_id=r2_id,
target_4_id=r3_id,
target_5_id=r4_id,
target_6_id=r5_id,
target_7_id=r6_id,
target_8_id=r7_id,
paper_ref=ref_1,
set_no=set_no,
save_flag=0
)
db.session.add(detail_shots)
db.session.commit()
Tdetail_shots =TShooting(
date=datetime.now(),
datetimestamp=time.strftime("%Y-%m-%d %H:%M"),
session_id=sess,
detail_no=detail_no,
target_1_id=r_id,
target_2_id=r1_id,
target_3_id=r2_id,
target_4_id=r3_id,
target_5_id=r4_id,
target_6_id=r5_id,
target_7_id=r6_id,
target_8_id=r7_id,
paper_ref=ref_1,
set_no=set_no,
save_flag=0
)
db.session.add(Tdetail_shots)
db.session.commit()
else:
db.session.query(TShooting).filter(TShooting.id != 999).delete()
db.session.commit()
Tdetail_shots =TShooting(
date=datetime.now(),
datetimestamp=time.strftime("%Y-%m-%d %H:%M"),
session_id=sess,
detail_no=detail_no,
target_1_id=r_id,
target_2_id=r1_id,
target_3_id=r2_id,
target_4_id=r3_id,
target_5_id=r4_id,
target_6_id=r5_id,
target_7_id=r6_id,
target_8_id=r7_id,
paper_ref=ref_1,
set_no=set_no,
save_flag=0
)
db.session.add(Tdetail_shots)
db.session.commit()
detail_shots =Session_Detail(
date=datetime.now(),
datetimestamp=time.strftime("%Y-%m-%d %H:%M"),
session_id=sess,
detail_no=detail_no,
target_1_id=r_id,
target_2_id=r1_id,
target_3_id=r2_id,
target_4_id=r3_id,
target_5_id=r4_id,
target_6_id=r5_id,
target_7_id=r6_id,
target_8_id=r7_id,
paper_ref=ref_1,
set_no=set_no,
save_flag=0
)
db.session.add(detail_shots)
db.session.commit()
except Exception as e:
return redirect(url_for('error_102'))
return redirect(url_for('detail_view'))
@app.route('/FRAS_2/', methods=['GET', 'POST'])
def load_2 ():
ref_1=None
try:
if request.method == 'POST':
print("This is inside Post")
detail_no = request.form['game_id_1']
print("this is detail_no")
print(detail_no)
tmp_list = []
duplicate = False
gr=session.get('group',None)
data=TGroup.query.filter(TGroup.group_no==gr).scalar()
da_1=data.target_1_no
da_2=data.target_2_no
da_3=data.target_3_no
da_4=data.target_4_no
da_5=data.target_5_no
da_6=data.target_6_no
da_7=data.target_7_no
da_8=data.target_8_no
if(da_1==""):
r_id=999
else:
r=Shooter.query.filter(Shooter.service_id==da_1).scalar()
r_id=r.id
if(da_2==""):
r1_id=999
else:
r1=Shooter.query.filter(Shooter.service_id==da_2).scalar()
r1_id=r1.id
if(da_3==""):
r2_id=999
else:
r2=Shooter.query.filter(Shooter.service_id==da_3).scalar()
r2_id=r2.id
if(da_4==""):
r3_id=999
else:
r3=Shooter.query.filter(Shooter.service_id==da_4).scalar()
r3_id=r3.id
if(da_5==""):
r4_id=999
else:
r4=Shooter.query.filter(Shooter.service_id==da_5).scalar()
r4_id=r4.id
if(da_6==""):
r5_id=999
else:
r5=Shooter.query.filter(Shooter.service_id==da_6).scalar()
r5_id=r5.id
if(da_7==""):
r6_id=999
else:
r6=Shooter.query.filter(Shooter.service_id==da_7).scalar()
r6_id=r6.id
if(da_8==""):
r7_id=999
else:
r7=Shooter.query.filter(Shooter.service_id==da_8).scalar()
r7_id=r7.id
ref=request.form['business']
set_no = request.form.get('comp_select_6')
shots = request.form['tag_8']
sess=request.form.get('comp_select')
tmp_list.append(r_id)
tmp_list.append(r1_id)
tmp_list.append(r2_id)
tmp_list.append(r3_id)
tmp_list.append(r4_id)
tmp_list.append(r5_id)
tmp_list.append(r6_id)
tmp_list.append(r7_id)
print(tmp_list)
if ref == None or ref =="":
ref_obj=TPaper_ref.query.scalar()
ref_1=ref_obj.paper_ref
else :
ref_1=ref
check=TPaper_ref.query.scalar()
cses=check.session_no
det=check.detail_no
if(int(set_no)>5):
return redirect(url_for('paper_duplicate_error'))
else:
db.session.query(TPaper_ref).delete()
db.session.commit()
ref_db = TPaper_ref(
paper_ref=ref_1,
detail_no=detail_no,
session_no=sess
)
db.session.add(ref_db)
db.session.commit()
for i in range(len(tmp_list)):
for j in range(len(tmp_list)):
if(tmp_list[i]== 999 and tmp_list[j]==999):
duplicate = False
elif(i!=j and tmp_list[i]==tmp_list[j]):
duplicate = True
if(duplicate):
return redirect(url_for('duplicate_firer_error'))
else:
temp_shooting=db.session.query(TShooting).scalar()
if(temp_shooting is None):
detail_shots =Session_Detail(
date=datetime.now(),
datetimestamp=time.strftime("%Y-%m-%d %H:%M"),
session_id=sess,
detail_no=detail_no,
target_1_id=r_id,
target_2_id=r1_id,
target_3_id=r2_id,
target_4_id=r3_id,
target_5_id=r4_id,
target_6_id=r5_id,
target_7_id=r6_id,
target_8_id=r7_id,
paper_ref=ref_1,
set_no=set_no,
save_flag=0
)
db.session.add(detail_shots)
db.session.commit()
Tdetail_shots =TShooting(
date=datetime.now(),
datetimestamp=time.strftime("%Y-%m-%d %H:%M"),
session_id=sess,
detail_no=detail_no,
target_1_id=r_id,
target_2_id=r1_id,
target_3_id=r2_id,
target_4_id=r3_id,
target_5_id=r4_id,
target_6_id=r5_id,
target_7_id=r6_id,
target_8_id=r7_id,
paper_ref=ref_1,
set_no=set_no,
save_flag=0
)
db.session.add(Tdetail_shots)
db.session.commit()
else:
db.session.query(TShooting).filter(TShooting.id != 999).delete()
db.session.commit()
Tdetail_shots =TShooting(
date=datetime.now(),
datetimestamp=time.strftime("%Y-%m-%d %H:%M"),
session_id=sess,
detail_no=detail_no,
target_1_id=r_id,
target_2_id=r1_id,
target_3_id=r2_id,
target_4_id=r3_id,
target_5_id=r4_id,
target_6_id=r5_id,
target_7_id=r6_id,
target_8_id=r7_id,
paper_ref=ref_1,
set_no=set_no,
save_flag=0
)
db.session.add(Tdetail_shots)
db.session.commit()
detail_shots =Session_Detail(
date=datetime.now(),
datetimestamp=time.strftime("%Y-%m-%d %H:%M"),
session_id=sess,
detail_no=detail_no,
target_1_id=r_id,
target_2_id=r1_id,
target_3_id=r2_id,
target_4_id=r3_id,
target_5_id=r4_id,
target_6_id=r5_id,
target_7_id=r6_id,
target_8_id=r7_id,
paper_ref=ref_1,
set_no=set_no,
save_flag=0
)
db.session.add(detail_shots)
db.session.commit()
except Exception as e:
print(e)
return redirect(url_for('error'))
return redirect(url_for('image_process'))
@app.route('/detail_view/', methods=['GET', 'POST'])
def detail_view():
detail = Session_Detail.query.all()
for details in detail:
details.target_1=Shooter.query.filter(Shooter.id==details.target_1_id).scalar()
details.target_2=Shooter.query.filter(Shooter.id==details.target_2_id).scalar()
details.target_3=Shooter.query.filter(Shooter.id==details.target_3_id).scalar()
details.target_4=Shooter.query.filter(Shooter.id==details.target_4_id).scalar()
details.target_5=Shooter.query.filter(Shooter.id==details.target_5_id).scalar()
details.target_6=Shooter.query.filter(Shooter.id==details.target_6_id).scalar()
details.target_7=Shooter.query.filter(Shooter.id==details.target_7_id).scalar()
details.target_8=Shooter.query.filter(Shooter.id==details.target_8_id).scalar()
return render_template('pages/detail_view.html',detail=detail)
@app.route('/detail_view/detail/<id>', methods=['GET', 'POST'])
def view_detail(id):
detail=Session_Detail.query.filter(Session_Detail.id == id)
for details in detail:
details.target_1=Shooter.query.filter(Shooter.id==details.target_1_id).scalar()
details.target_2=Shooter.query.filter(Shooter.id==details.target_2_id).scalar()
details.target_3=Shooter.query.filter(Shooter.id==details.target_3_id).scalar()
details.target_4=Shooter.query.filter(Shooter.id==details.target_4_id).scalar()
details.target_5=Shooter.query.filter(Shooter.id==details.target_5_id).scalar()
details.target_6=Shooter.query.filter(Shooter.id==details.target_6_id).scalar()
details.target_7=Shooter.query.filter(Shooter.id==details.target_7_id).scalar()
details.target_8=Shooter.query.filter(Shooter.id==details.target_8_id).scalar()
return render_template('pages/detail_view_id.html',data=detail)
@app.route('/detail_view/edit/<id>', methods=['GET', 'POST'])
def view_detail_edit(id):
try:
detail=Session_Detail.query.filter(Session_Detail.id == id).first()
form=DetailEditForm(obj=detail)
if form.validate_on_submit():
tmp_list = []
target_1=Shooter.query.filter(Shooter.service_id == form.target_1_service.data).scalar()
tmp_list.append(target_1.id)
target_2=Shooter.query.filter(Shooter.service_id == form.target_2_service.data).scalar()
tmp_list.append(target_2.id)
target_3=Shooter.query.filter(Shooter.service_id == form.target_3_service.data).scalar()
tmp_list.append(target_3.id)
target_4=Shooter.query.filter(Shooter.service_id == form.target_4_service.data).scalar()
tmp_list.append(target_4.id)
target_5=Shooter.query.filter(Shooter.service_id == form.target_5_service.data).scalar()
tmp_list.append(target_5.id)
target_6=Shooter.query.filter(Shooter.service_id == form.target_6_service.data).scalar()
tmp_list.append(target_6.id)
target_7=Shooter.query.filter(Shooter.service_id == form.target_7_service.data).scalar()
tmp_list.append(target_7.id)
target_8=Shooter.query.filter(Shooter.service_id == form.target_8_service.data).scalar()
tmp_list.append(target_8.id)
duplicate = False
for i in range(len(tmp_list)):
for j in range(len(tmp_list)):
if(tmp_list[i]== 999 and tmp_list[j]==999):
duplicate = False
elif(i!=j and tmp_list[i]==tmp_list[j]):
duplicate = True
if(duplicate):
return redirect(url_for('duplicate_firer_error'))
else:
detail.date=form.date.data
detail.session_id=form.session_id.data
detail.detail_no=form.detail_no.data
detail.paper_ref=form.paper_ref.data
detail.set_no=form.set_no.data
target_1_obj=Shooter.query.filter(Shooter.service_id == form.target_1_service.data).scalar()
detail.target_1_id=target_1_obj.id
target_2_obj=Shooter.query.filter(Shooter.service_id == form.target_2_service.data).scalar()
detail.target_2_id=target_2_obj.id
target_3_obj=Shooter.query.filter(Shooter.service_id == form.target_3_service.data).scalar()
detail.target_3_id=target_3_obj.id
target_4_obj=Shooter.query.filter(Shooter.service_id == form.target_4_service.data).scalar()
detail.target_4_id=target_4_obj.id
target_5_obj=Shooter.query.filter(Shooter.service_id == form.target_5_service.data).scalar()
detail.target_5_id=target_5_obj.id
target_6_obj=Shooter.query.filter(Shooter.service_id == form.target_6_service.data).scalar()
detail.target_6_id=target_6_obj.id
target_7_obj=Shooter.query.filter(Shooter.service_id == form.target_7_service.data).scalar()
detail.target_7_id=target_7_obj.id
target_8_obj=Shooter.query.filter(Shooter.service_id == form.target_8_service.data).scalar()
detail.target_8_id=target_8_obj.id
db.session.commit()
db.session.query(TPaper_ref).delete()
db.session.commit()
ref_edit = TPaper_ref(
paper_ref=form.paper_ref.data,
detail_no=form.detail_no.data,
session_no=form.session_id.data
)
db.session.add(ref_edit)
db.session.commit()
target_1_obj=Shooter.query.filter(Shooter.service_id == form.target_1_service.data).scalar()
target_2_obj=Shooter.query.filter(Shooter.service_id == form.target_2_service.data).scalar()
target_3_obj=Shooter.query.filter(Shooter.service_id == form.target_3_service.data).scalar()
target_4_obj=Shooter.query.filter(Shooter.service_id == form.target_4_service.data).scalar()
target_5_obj=Shooter.query.filter(Shooter.service_id == form.target_5_service.data).scalar()
target_6_obj=Shooter.query.filter(Shooter.service_id == form.target_6_service.data).scalar()
target_7_obj=Shooter.query.filter(Shooter.service_id == form.target_7_service.data).scalar()
target_8_obj=Shooter.query.filter(Shooter.service_id == form.target_8_service.data).scalar()
temp_shooting=db.session.query(TShooting).scalar()
if(temp_shooting.save_flag==1):
return redirect(url_for('data_save'))
else:
db.session.query(TShooting).filter(TShooting.id != 999).delete()
db.session.commit()
Tdetail_edit =TShooting(
date=form.date.data,
datetimestamp=time.strftime("%Y-%m-%d %H:%M"),
session_id=form.session_id.data,
detail_no=form.detail_no.data,
target_1_id=target_1_obj.id,
target_2_id=target_2_obj.id,
target_3_id=target_3_obj.id,
target_4_id=target_4_obj.id,
target_5_id=target_5_obj.id,
target_6_id=target_6_obj.id,
target_7_id=target_7_obj.id,
target_8_id=target_8_obj.id,
paper_ref=form.paper_ref.data,
set_no=form.set_no.data,
save_flag=0
)
db.session.add(Tdetail_edit)
db.session.commit()
return redirect(url_for('detail_view'))
form.date.data=detail.date
form.session_id.data=detail.session_id
form.detail_no.data=detail.detail_no
form.paper_ref.data=detail.paper_ref
form.set_no.data=detail.set_no
        name_1 = Shooter.query.filter(Shooter.id==detail.target_1_id).scalar()
        form.target_1_service.data = name_1.service_id
        name_2 = Shooter.query.filter(Shooter.id==detail.target_2_id).scalar()
        form.target_2_service.data = name_2.service_id
        name_3 = Shooter.query.filter(Shooter.id==detail.target_3_id).scalar()
        form.target_3_service.data = name_3.service_id
        name_4 = Shooter.query.filter(Shooter.id==detail.target_4_id).scalar()
        form.target_4_service.data = name_4.service_id
        name_5 = Shooter.query.filter(Shooter.id==detail.target_5_id).scalar()
        form.target_5_service.data = name_5.service_id
        name_6 = Shooter.query.filter(Shooter.id==detail.target_6_id).scalar()
        form.target_6_service.data = name_6.service_id
        name_7 = Shooter.query.filter(Shooter.id==detail.target_7_id).scalar()
        form.target_7_service.data = name_7.service_id
        name_8 = Shooter.query.filter(Shooter.id==detail.target_8_id).scalar()
        form.target_8_service.data = name_8.service_id
except Exception as e:
return render_template('errors/detail_view.html')
return render_template('pages/detail_view_edit.html' , detail=detail,form=form)
@app.route('/data_save', methods=['GET', 'POST'])
def data_save():
return render_template('pages/data_save.html')
@app.route('/target_registration/', methods=['GET', 'POST'])
def target_registration():
result=None
if request.method=="POST":
data1 = request.get_json()
print(data1)
cant=data1['cant']
div=data1['div']
rank=data1['rank']
gen=data1['gender']
dt=data1['date']
name=data1['name']
army_no=data1['service']
unit=data1['unit']
brigade=data1['brig']
gender_id=db.session.query(Gender.id).filter(Gender.name==gen).scalar()
rank_id=db.session.query(Rank.id).filter(Rank.name==rank).scalar()
cant_id=db.session.query(Cantonment.id).filter(Cantonment.cantonment==cant ,Cantonment.division==div).scalar()
print("cant_id")
print(cant_id)
shooter = Shooter(
name=name,
service_id = army_no,
registration_date = dt,
gender_id=gender_id,
cantonment_id = cant_id,
rank_id =rank_id,
unit=unit,
brigade=brigade
)
db.session.add(shooter)
db.session.commit()
result="Data Saved Sucessfully"
return jsonify(result=result)
@app.route('/shooter_registration/', methods=['GET', 'POST'])
def registration():
try:
cantonment=Cantonment.query.distinct(Cantonment.cantonment)
gender =Gender.query.all()
rank = Rank.query.all()
ran = request.form.get('comp_select4')
cant = request.form.get('comp_select')
gen = request.form.get('comp_select5')
brig = request.form.get('comp_select1')
form = RegistrationForm(request.form)
if(ran is None):
pass
else:
ran_object=Rank.query.filter(Rank.name==ran).scalar()
rank_id = ran_object.id
cant_object = Cantonment.query.filter(Cantonment.cantonment==cant,Cantonment.division==brig).scalar()
cant_id = cant_object.id
gen_obj=Gender.query.filter(Gender.name==gen).scalar()
gender_id = gen_obj.id
if form.validate_on_submit():
shooter = Shooter(
name=form.name.data,
service_id = form.service_id.data,
registration_date = form.dt.data.strftime('%Y-%m-%d'),
gender_id=gender_id,
cantonment_id = cant_id,
rank_id =rank_id,
unit=form.unit.data,
brigade=form.brig.data
)
db.session.add(shooter)
db.session.commit()
new_form = RegistrationForm(request.form)
return redirect(url_for('firer_details'))
except Exception as e:
return redirect(url_for('error_4'))
return render_template('forms/registration.html',
cantonment = cantonment ,
form=form ,
rank = rank,
gender=gender)
@app.route('/get_brigade/')
def get_brigade():
cant = request.args.get('customer')
    da = Cantonment.query.filter(Cantonment.cantonment==cant).distinct(Cantonment.division)
data = [{"name": x.division} for x in da]
return jsonify(data)
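# Example response shape for /get_brigade/?customer=<cantonment> (values come
# from the Cantonment table): [{"name": "<division>"}, {"name": "<division>"}]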
@app.route('/firer_details/', methods=['GET', 'POST'])
def firer_details():
firer = Shooter.query.all()
for firers in firer:
firers.cantonment_name= Cantonment.query.filter(Cantonment.id==firers.cantonment_id).scalar()
firers.division = Cantonment.query.filter(Cantonment.id==firers.cantonment_id).scalar()
firers.rank = Rank.query.filter(Rank.id==firers.rank_id).scalar()
firers.gender_name = Gender.query.filter(Gender.id==firers.gender_id).scalar()
return render_template('pages/firer_details.html' , firer = firer)
@app.route('/bulk_registration_group')
def bulk_registration_group():
form=BulkRegistrationForm(request.form)
return render_template('pages/bulk_registration_group.html',form=form)
@app.route('/bulk_registration')
def bulk_registration():
cantonment=db.session.query(Cantonment).distinct(Cantonment.cantonment)
form=RegistrationForm(request.form)
return render_template('pages/bulk_registration.html',cantonment=cantonment,form=form)
@app.route('/upload', methods=['POST'])
def upload():
try:
f = request.files['data_file']
cant = request.form.get('comp_select')
div = request.form.get('comp_select1')
form=RegistrationForm(request.form)
unit = request.form['game_id_1']
brig = request.form['game_id_2']
cant_id = db.session.query(Cantonment.id).filter(Cantonment.cantonment==cant,
Cantonment.division==div
).scalar()
if form.is_submitted():
stream = StringIO(f.stream.read().decode("UTF8"))
csv_input = csv.reader(stream)
lis =list(csv_input)
for i in range(len(lis)):
if (i==0):
pass
else:
shooters = Shooter(
name = lis[i][0],
service_id=lis[i][3],
registration_date=datetime.now(),
gender_id=db.session.query(Gender.id).filter(Gender.name==lis[i][2]).scalar(),
cantonment_id = cant_id,
rank_id = db.session.query(Rank.id).filter(Rank.name==lis[i][1]).scalar(),
unit=unit,
brigade=brig
)
db.session.add(shooters)
db.session.commit()
except Exception as e:
return redirect(url_for('error_3'))
return redirect(url_for('firer_details'))
@app.route('/uploadgroup', methods=['POST'])
def uploadgroup():
try:
f = request.files['data_file']
form=BulkRegistrationForm(request.form)
if form.is_submitted():
curdate_p=(date.today())- timedelta(1)
if(db.session.query(db.exists().where(TGroup.date <= curdate_p)).scalar()):
db.session.query(TGroup).delete()
db.session.commit()
stream = StringIO(f.stream.read().decode("UTF8"))
csv_input = csv.reader(stream)
lis =list(csv_input)
for i in range(len(lis)):
if (i==0):
pass
else:
group = TGroup(
date=datetime.now(),
group_no=lis[i][0],
target_1_no=lis[i][1],
target_2_no=lis[i][2],
target_3_no=lis[i][3],
target_4_no=lis[i][4],
target_5_no=lis[i][5],
target_6_no=lis[i][6],
target_7_no=lis[i][7],
target_8_no=lis[i][8]
)
db.session.add(group)
db.session.commit()
else:
stream = StringIO(f.stream.read().decode("UTF8"))
csv_input = csv.reader(stream)
lis =list(csv_input)
for i in range(len(lis)):
if (i==0):
pass
else:
group = TGroup(
date=datetime.now(),
group_no=lis[i][0],
target_1_no=lis[i][1],
target_2_no=lis[i][2],
target_3_no=lis[i][3],
target_4_no=lis[i][4],
target_5_no=lis[i][5],
target_6_no=lis[i][6],
target_7_no=lis[i][7],
target_8_no=lis[i][8]
)
db.session.add(group)
db.session.commit()
except Exception as e:
return redirect(url_for('error_duplicate'))
return redirect(url_for('group_view'))
@app.route('/new_group')
def new_group():
firer = [row.service_id for row in Shooter.query.all()]
return render_template('pages/new_group.html',firer_1=firer)
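# Manual group entry for up to 8 targets; blank lanes map to the placeholder id 999 for the duplicate check, duplicate firers are rejected, and groups dated yesterday are cleared before saving.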
@app.route('/individual_group/', methods=['GET', 'POST'])
def individual_group():
try:
curdate_p=(date.today())- timedelta(1)
#check=mysession.query(TGroup).filter(date==curdate_p).all()
if request.method=="POST":
grp = request.form['game_id_1']
tmp_list = []
duplicate = False
r=request.form['tag']
if (r== ""):
r_id = 999
else:
r_object=Shooter.query.filter(Shooter.service_id==r).scalar()
r_id=r_object.id
r1=request.form['tag_1']
if(r1== ""):
r1_id=999
else:
r1_object=Shooter.query.filter(Shooter.service_id==r1).scalar()
r1_id=r1_object.id
r2=request.form['tag_2']
if (r2==""):
r2_id=999
else:
r2_object=Shooter.query.filter(Shooter.service_id==r2).scalar()
r2_id=r2_object.id
r3=request.form['tag_3']
if(r3==""):
r3_id=999
else:
r3_object=Shooter.query.filter(Shooter.service_id==r3).scalar()
r3_id=r3_object.id
r4=request.form['tag_4']
if(r4==""):
r4_id=999
else:
r4_object=Shooter.query.filter(Shooter.service_id==r4).scalar()
r4_id=r4_object.id
r5=request.form['tag_5']
if(r5==""):
r5_id=999
else:
r5_object=Shooter.query.filter(Shooter.service_id==r5).scalar()
r5_id=r5_object.id
r6=request.form['tag_6']
if(r6==""):
r6_id=999
else:
r6_object=Shooter.query.filter(Shooter.service_id==r6).scalar()
r6_id=r6_object.id
r7=request.form['tag_7']
if(r7== ""):
r7_id=999
else:
r7_object=Shooter.query.filter(Shooter.service_id==r7).scalar()
r7_id=r7_object.id
tmp_list.append(r_id)
tmp_list.append(r1_id)
tmp_list.append(r2_id)
tmp_list.append(r3_id)
tmp_list.append(r4_id)
tmp_list.append(r5_id)
tmp_list.append(r6_id)
tmp_list.append(r7_id)
for i in range(len(tmp_list)):
for j in range(len(tmp_list)):
# 999 marks an empty lane, so matching 999 placeholders are not duplicates;
# do not reset a previously detected duplicate on later pairs
if(i!=j and tmp_list[i]==tmp_list[j] and tmp_list[i]!=999):
duplicate = True
if(db.session.query(db.exists().where(TGroup.date == curdate_p)).scalar()):
db.session.query(TGroup).delete()
db.session.commit()
if(duplicate):
return redirect(url_for('duplicate_firer_error'))
else:
gr=TGroup(
date=datetime.now(),
group_no=grp,
target_1_no=r,
target_2_no=r1,
target_3_no=r2,
target_4_no=r3,
target_5_no=r4,
target_6_no=r5,
target_7_no=r6,
target_8_no=r7
)
db.session.add(gr)
db.session.commit()
else:
if(duplicate):
return redirect(url_for('duplicate_firer_error'))
else:
gr=TGroup(
date=datetime.now(),
group_no=grp,
target_1_no=r,
target_2_no=r1,
target_3_no=r2,
target_4_no=r3,
target_5_no=r4,
target_6_no=r5,
target_7_no=r6,
target_8_no=r7
)
db.session.add(gr)
db.session.commit()
except Exception as e:
return render_template('errors/group_view_error.html')
return redirect(url_for('group_view'))
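# Group listing, per-group detail view and edit form for the TGroup table.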
@app.route('/group_view/', methods=['GET', 'POST'])
def group_view():
detail = TGroup.query.all()
return render_template('pages/group_detail_view.html',detail=detail)
@app.route('/group_view/detail/<id>', methods=['GET', 'POST'])
def group_detail_view(id):
view = TGroup.query.filter(TGroup.group_no == id)
return render_template('pages/group_detail_view_id.html' , data = view)
@app.route('/group_details/edit/<id>', methods=['GET', 'POST'])
def group_detail_edit(id):
firer = TGroup.query.filter(TGroup.group_no == id).first()
form=GroupEditForm(obj=firer)
if form.validate_on_submit():
firer.date=form.date.data
firer.target_1_no=form.target_1_army.data
firer.target_2_no=form.target_2_army.data
firer.target_3_no=form.target_3_army.data
firer.target_4_no=form.target_4_army.data
firer.target_5_no=form.target_5_army.data
firer.target_6_no=form.target_6_army.data
firer.target_7_no=form.target_7_army.data
firer.target_8_no=form.target_8_army.data
firer.group_no=form.group_no.data
db.session.commit()
return redirect(url_for('group_view'))
form.group_no.data=firer.group_no
form.target_1_army.data=firer.target_1_no
form.target_2_army.data=firer.target_2_no
form.target_3_army.data=firer.target_3_no
form.target_4_army.data=firer.target_4_no
form.target_5_army.data=firer.target_5_no
form.target_6_army.data=firer.target_6_no
form.target_7_army.data=firer.target_7_no
form.target_8_army.data=firer.target_8_no
return render_template('pages/group_edit.html' , firer = firer , form=form)
@app.route('/firer_details/detail/<id>', methods=['GET', 'POST'])
def firer_detail_view(id):
firer = Shooter.query.filter(Shooter.service_id == id)
for firers in firer:
firers.cantonment_name= Cantonment.query.filter(Cantonment.id==firers.cantonment_id).scalar()
firers.division = Cantonment.query.filter(Cantonment.id==firers.cantonment_id).scalar()
firers.rank = Rank.query.filter(Rank.id==firers.rank_id).scalar()
firers.gender_name = Gender.query.filter(Gender.id==firers.gender_id).scalar()
return render_template('pages/firer_detail_view.html' , data = firer)
@app.route('/firer_details/edit/<id>', methods=['GET', 'POST'])
def firer_detail_edit(id):
firer = Shooter.query.filter(Shooter.service_id == id).first()
form=RegistrationEditForm(obj=firer)
try:
if form.validate_on_submit():
firer.name = form.name.data
firer.service_id=form.service_id.data
firer.registration_date=form.date.data
gender_obj=Gender.query.filter(Gender.name==form.gender.data).scalar()
firer.gender_id=gender_obj.id
cantonment_obj=Cantonment.query.filter(Cantonment.cantonment==form.cantonment.data ,Cantonment.division==form.div.data).scalar()
firer.cantonment_id=cantonment_obj.id
rank_obj=Rank.query.filter(Rank.name==form.rank.data).distinct(Rank.id).scalar()
firer.rank_id=rank_obj.id
firer.unit=form.unit.data
firer.brigade=form.brigade.data
db.session.commit()
return redirect(url_for('firer_details'))
form.name.data=firer.name
form.service_id.data=firer.service_id
form.date.data=firer.registration_date
gender_name=Gender.query.filter(Gender.id==firer.gender_id).scalar()
form.gender.data=gender_name.name
cantonment_name=Cantonment.query.filter(Cantonment.id==firer.cantonment_id).scalar()
form.cantonment.data=cantonment_name.cantonment
form.div.data=cantonment_name.division
unit_data=Shooter.query.filter(Shooter.service_id==firer.service_id).scalar()
form.unit.data=unit_data.unit
form.brigade.data=unit_data.brigade
rank_name=Rank.query.filter(Rank.id==firer.rank_id).distinct(Rank.name).scalar()
form.rank.data=rank_name.name
except Exception as e:
return redirect(url_for('error_7'))
return render_template('pages/firer_detail_edit.html' , firer = firer , form=form)
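# Live view: looks up name, service id and rank for the 8 targets of the current TShooting detail.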
@app.route('/live/')
def live():
T1_name = mysession.query(Shooter.name).filter(Shooter.id==TShooting.target_1_id).scalar()
T1_service = mysession.query(Shooter.service_id).filter(Shooter.id==TShooting.target_1_id).scalar()
T1_r_id = mysession.query(Shooter.rank_id).filter(Shooter.id==TShooting.target_1_id).scalar()
T1_rank = mysession.query(Rank.name).filter(Rank.id==T1_r_id).scalar()
T2_name = mysession.query(Shooter.name).filter(Shooter.id==TShooting.target_2_id).scalar()
T2_service = mysession.query(Shooter.service_id).filter(Shooter.id==TShooting.target_2_id).scalar()
T2_r_id = mysession.query(Shooter.rank_id).filter(Shooter.id==TShooting.target_2_id).scalar()
T2_rank = mysession.query(Rank.name).filter(Rank.id==T2_r_id).scalar()
T3_name = mysession.query(Shooter.name).filter(Shooter.id==TShooting.target_3_id).scalar()
T3_service = mysession.query(Shooter.service_id).filter(Shooter.id==TShooting.target_3_id).scalar()
T3_r_id = mysession.query(Shooter.rank_id).filter(Shooter.id==TShooting.target_3_id).scalar()
T3_rank = mysession.query(Rank.name).filter(Rank.id==T3_r_id).scalar()
T4_name = mysession.query(Shooter.name).filter(Shooter.id==TShooting.target_4_id).scalar()
T4_service = mysession.query(Shooter.service_id).filter(Shooter.id==TShooting.target_4_id).scalar()
T4_r_id = mysession.query(Shooter.rank_id).filter(Shooter.id==TShooting.target_4_id).scalar()
T4_rank = mysession.query(Rank.name).filter(Rank.id==T4_r_id).scalar()
T5_name = mysession.query(Shooter.name).filter(Shooter.id==TShooting.target_5_id).scalar()
T5_service = mysession.query(Shooter.service_id).filter(Shooter.id==TShooting.target_5_id).scalar()
T5_r_id = mysession.query(Shooter.rank_id).filter(Shooter.id==TShooting.target_5_id).scalar()
T5_rank = mysession.query(Rank.name).filter(Rank.id==T5_r_id).scalar()
T6_name = mysession.query(Shooter.name).filter(Shooter.id==TShooting.target_6_id).scalar()
T6_service = mysession.query(Shooter.service_id).filter(Shooter.id==TShooting.target_6_id).scalar()
T6_r_id = mysession.query(Shooter.rank_id).filter(Shooter.id==TShooting.target_6_id).scalar()
T6_rank = mysession.query(Rank.name).filter(Rank.id==T6_r_id).scalar()
T7_name = mysession.query(Shooter.name).filter(Shooter.id==TShooting.target_7_id).scalar()
T7_service = mysession.query(Shooter.service_id).filter(Shooter.id==TShooting.target_7_id).scalar()
T7_r_id = mysession.query(Shooter.rank_id).filter(Shooter.id==TShooting.target_7_id).scalar()
T7_rank = mysession.query(Rank.name).filter(Rank.id==T7_r_id).scalar()
T8_name = mysession.query(Shooter.name).filter(Shooter.id==TShooting.target_8_id).scalar()
T8_service = mysession.query(Shooter.service_id).filter(Shooter.id==TShooting.target_8_id).scalar()
T8_r_id = mysession.query(Shooter.rank_id).filter(Shooter.id==TShooting.target_8_id).scalar()
T8_rank = mysession.query(Rank.name).filter(Rank.id==T8_r_id).scalar()
return render_template('pages/live.html' ,
T1_name=T1_name,
T1_service=T1_service,
T2_name=T2_name,
T2_service=T2_service,
T3_name=T3_name,
T3_service=T3_service,
T4_name=T4_name,
T4_service=T4_service,
T5_name=T5_name,
T5_service=T5_service,
T6_name=T6_name,
T6_service=T6_service,
T7_name=T7_name,
T7_service=T7_service,
T8_name=T8_name,
T8_service=T8_service,
T1_rank=T1_rank,
T2_rank=T2_rank,
T3_rank=T3_rank,
T4_rank=T4_rank,
T5_rank=T5_rank,
T6_rank=T6_rank,
T7_rank=T7_rank,
T8_rank=T8_rank
)
@app.route('/cam_detail_2/', methods=['GET', 'POST'])
def cam_detail_2():
return render_template('pages/cam_detail_1.html')
@app.route('/cam_detail_4/', methods=['GET', 'POST'])
def cam_detail_4():
return render_template('pages/cam_detail_2.html')
@app.route('/cam_detail_1/', methods=['GET', 'POST'])
def cam_detail_1():
return render_template('pages/cam_detail_3.html')
@app.route('/cam_detail_3/', methods=['GET', 'POST'])
def cam_detail_3():
return render_template('pages/cam_detail_4.html')
@app.route('/cam_detail_6/', methods=['GET', 'POST'])
def cam_detail_6():
return render_template('pages/cam_detail_5.html')
@app.route('/cam_detail_8/', methods=['GET', 'POST'])
def cam_detail_8():
return render_template('pages/cam_detail_6.html')
@app.route('/cam_detail_7/', methods=['GET', 'POST'])
def cam_detail_7():
return render_template('pages/cam_detail_7.html')
@app.route('/cam_detail_5/', methods=['GET', 'POST'])
def cam_detail_5():
return render_template('pages/cam_detail_8.html')
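# Creates a Shooting_Session from the form, resolving range, firearm and ammunition names to their ids (999 when nothing is selected).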
@app.route('/session_setup/', methods=['GET', 'POST'])
def session_setup():
try:
data = Shooter.query.all()
rang= Range.query.all()
firearms = Firearms.query.all()
ammunation = Ammunation.query.all()
rang_name = request.form.get('comp_select_4')
fire_name = request.form.get('comp_select_5')
ammu_name = request.form.get('comp_select_6')
form=SessionForm()
if(rang_name is None):
range_id=999
fire_id=999
ammu_id=999
else:
range_id = db.session.query(Range.id).filter(Range.name==rang_name).scalar()
fire_id = db.session.query(Firearms.id).filter(Firearms.name==fire_name).scalar()
ammu_id = db.session.query(Ammunation.id).filter(Ammunation.name==ammu_name).scalar()
if form.validate_on_submit():
shooting=Shooting_Session(
date=form.date.data.strftime('%Y-%m-%d'),
datetimestamp=time.strftime("%Y-%m-%d %H:%M"),
shooting_range_id=range_id,
firearms_id=fire_id,
ammunation_id=ammu_id,
target_distance = form.target_distance.data,
weather_notes = form.weather_notes.data,
comments = form.comments.data,
session_no=form.session_no.data,
occasion=form.occ.data
)
db.session.add(shooting)
db.session.commit()
return redirect(url_for('session_config'))
except Exception as e:
return redirect(url_for('error5_505.html'))  # NOTE: url_for() expects an endpoint name; 'error5_505.html' looks like a template filename
return render_template('forms/shooting_form.html', form=form, data =data ,rang=rang , firearmns=firearms, ammunation = ammunation)
@app.route('/configuration/', methods=['GET', 'POST'])
def session_config():
config = Shooting_Session.query.all()
for con in config:
con.range_name = Range.query.filter(Range.id==con.shooting_range_id).scalar()
con.firerarms_name = Firearms.query.filter(Firearms.id==con.firearms_id).scalar()
con.ammunation_name = Ammunation.query.filter(Ammunation.id==con.ammunation_id).scalar()
return render_template('pages/shooting_configuration_detail.html',con=config)
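# Image processing dashboard: shows the current TShooting detail; missing or already-saved targets are rendered as "NA".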
@app.route('/image_process/')
def image_process():
dt=time.strftime("%Y-%m-%d")
detail_data=db.session.query(Session_Detail).filter(Session_Detail.date==dt,Session_Detail.save_flag==0).all()
data =TShooting.query.scalar()
if(data is None):
T1_name ="NA"
T1_service ="NA"
T1_rank="NA"
T2_name ="NA"
T2_service ="NA"
T2_rank="NA"
T3_name ="NA"
T3_service ="NA"
T3_rank="NA"
T4_name ="NA"
T4_service ="NA"
T4_rank="NA"
T5_name ="NA"
T5_service ="NA"
T5_rank="NA"
T6_name ="NA"
T6_service ="NA"
T6_rank="NA"
T7_name ="NA"
T7_service ="NA"
T7_rank="NA"
T8_name ="NA"
T8_service ="NA"
T8_rank="NA"
elif(data.save_flag == 1 ):
db.session.query(TShooting).delete()
db.session.commit()
T1_name ="NA"
T1_service ="NA"
T1_rank="NA"
T2_name ="NA"
T2_service ="NA"
T2_rank="NA"
T3_name ="NA"
T3_service ="NA"
T3_rank="NA"
T4_name ="NA"
T4_service ="NA"
T4_rank="NA"
T5_name ="NA"
T5_service ="NA"
T5_rank="NA"
T6_name ="NA"
T6_service ="NA"
T6_rank="NA"
T7_name ="NA"
T7_service ="NA"
T7_rank="NA"
T8_name ="NA"
T8_service ="NA"
T8_rank="NA"
else:
T1=Shooter.query.filter(Shooter.id==TShooting.target_1_id).scalar()
if(T1 is None):
T1_name ="NA"
T1_service ="NA"
T1_rank="NA"
else:
T1_name = T1.name
T1_service = T1.service_id
T1_r_id = T1.rank_id
T1_rank_id = Rank.query.filter(Rank.id==T1_r_id).scalar()
T1_rank=T1_rank_id.name
T2=Shooter.query.filter(Shooter.id==TShooting.target_2_id).scalar()
if(T2 is None):
T2_name ="NA"
T2_service ="NA"
T2_rank="NA"
else:
T2_name = T2.name
T2_service = T2.service_id
T2_r_id = T2.rank_id
T2_rank_id = Rank.query.filter(Rank.id==T2_r_id).scalar()
T2_rank=T2_rank_id.name
T3=Shooter.query.filter(Shooter.id==TShooting.target_3_id,TShooting.target_3_id!=999).scalar()
if(T3 is None):
T3_name ="NA"
T3_service ="NA"
T3_rank="NA"
else:
T3_name = T3.name
T3_service = T3.service_id
T3_r_id = T3.rank_id
T3_rank_id = Rank.query.filter(Rank.id==T3_r_id).scalar()
T3_rank=T3_rank_id.name
T4=Shooter.query.filter(Shooter.id==TShooting.target_4_id,TShooting.target_4_id!=999).scalar()
if(T4 is None):
T4_name ="NA"
T4_service ="NA"
T4_rank="NA"
else:
T4_name = T4.name
T4_service = T4.service_id
T4_r_id = T4.rank_id
T4_rank_id = Rank.query.filter(Rank.id==T4_r_id).scalar()
T4_rank=T4_rank_id.name
T5=Shooter.query.filter(Shooter.id==TShooting.target_5_id).scalar()
if(T5 is None):
T5_name ="NA"
T5_service ="NA"
T5_rank="NA"
else:
T5_name = T5.name
T5_service = T5.service_id
T5_r_id = T5.rank_id
T5_rank_id = Rank.query.filter(Rank.id==T5_r_id).scalar()
T5_rank=T5_rank_id.name
T6=Shooter.query.filter(Shooter.id==TShooting.target_6_id).scalar()
if(T6 is None):
T6_name ="NA"
T6_service ="NA"
T6_rank="NA"
else:
T6_name = T6.name
T6_service = T6.service_id
T6_r_id = T6.rank_id
T6_rank_id = Rank.query.filter(Rank.id==T6_r_id).scalar()
T6_rank=T6_rank_id.name
T7=Shooter.query.filter(Shooter.id==TShooting.target_7_id).scalar()
if(T7 is None):
T7_name ="NA"
T7_service ="NA"
T7_rank="NA"
else:
T7_name = T7.name
T7_service = T7.service_id
T7_r_id = T7.rank_id
T7_rank_id = Rank.query.filter(Rank.id==T7_r_id).scalar()
T7_rank=T7_rank_id.name
T8=Shooter.query.filter(Shooter.id==TShooting.target_8_id).scalar()
if(T8 is None):
T8_name ="NA"
T8_service ="NA"
T8_rank="NA"
else:
T8_name = T8.name
T8_service = T8.service_id
T8_r_id = T8.rank_id
T8_rank_id = Rank.query.filter(Rank.id==T8_r_id).scalar()
T8_rank=T8_rank_id.name
return render_template('pages/image_process.html' ,
T1_name=T1_name,
detail_data=detail_data,
T1_service=T1_service,
T2_name=T2_name,
T2_service=T2_service,
T3_name=T3_name,
T3_service=T3_service,
T4_name=T4_name,
T4_service=T4_service,
T5_name=T5_name,
T5_service=T5_service,
T6_name=T6_name,
T6_service=T6_service,
T7_name=T7_name,
T7_service=T7_service,
T8_name=T8_name,
T8_service=T8_service,
T1_rank=T1_rank,
T2_rank=T2_rank,
T3_rank=T3_rank,
T4_rank=T4_rank,
T5_rank=T5_rank,
T6_rank=T6_rank,
T7_rank=T7_rank,
T8_rank=T8_rank
)
@app.route('/image_edit_1/', methods=['GET', 'POST'])
def image_edit_1():
return render_template('pages/image_edit_1.html')
@app.route('/image_edit_2/', methods=['GET', 'POST'])
def image_edit_2():
return render_template('pages/image_edit_2.html')
@app.route('/image_edit_3/', methods=['GET', 'POST'])
def image_edit_3():
return render_template('pages/image_edit_3.html')
@app.route('/image_edit_4/', methods=['GET', 'POST'])
def image_edit_4():
return render_template('pages/image_edit_4.html')
@app.route('/image_edit_5/', methods=['GET', 'POST'])
def image_edit_5():
return render_template('pages/image_edit_5.html')
@app.route('/image_edit_6/', methods=['GET', 'POST'])
def image_edit_6():
return render_template('pages/image_edit_6.html')
@app.route('/image_edit_7/', methods=['GET', 'POST'])
def image_edit_7():
return render_template('pages/image_edit_7.html')
@app.route('/image_edit_8/', methods=['GET', 'POST'])
def image_edit_8():
return render_template('pages/image_edit_8.html')
@app.route('/configuration/detail/<id>', methods=['GET', 'POST'])
def session_config_detail(id):
config = Shooting_Session.query.filter(Shooting_Session.id == id)
for con in config:
con.range_name = Range.query.filter(Range.id==con.shooting_range_id).scalar()
con.firerarms_name = Firearms.query.filter(Firearms.id==con.firearms_id).scalar()
con.ammunation_name = Ammunation.query.filter(Ammunation.id==con.ammunation_id).scalar()
return render_template('pages/shooting_configuration_detail_view.html',con=config)
@app.route('/configuration/edit/<id>', methods=['GET', 'POST'])
def shooting_config_edit(id):
edit = Shooting_Session.query.get_or_404(id)
form = SessionEditForm(obj=edit)
if form.validate_on_submit():
edit.session_no = form.session_no.data
edit.date = form.date.data
edit.occasion=form.occ.data
edit.target_distance = form.target_distance.data
ammunation_id=Ammunation.query.filter(Ammunation.name==form.ammunation_name.data).scalar()
edit.ammunation_id=ammunation_id.id
firearms_id=Firearms.query.filter(Firearms.name==form.firerarms_name.data).scalar()
edit.firearms_id=firearms_id.id
range_id=Range.query.filter(Range.name==form.range_name.data).scalar()
edit.shooting_range_id=range_id.id
edit.weather_notes=form.weather_notes.data
edit.comments=form.comments.data
db.session.commit()
return redirect(url_for('session_config'))
form.session_no.data=edit.session_no
form.date.data=edit.date
form.occ.data=edit.occasion
ammunation_name=Ammunation.query.filter(Ammunation.id==edit.ammunation_id).scalar()
form.ammunation_name.data=ammunation_name.name
firerarms_name=Firearms.query.filter(Firearms.id==edit.firearms_id).scalar()
form.firerarms_name.data=firerarms_name.name
range_name=Range.query.filter(Range.id==edit.shooting_range_id).scalar()
form.range_name.data=range_name.name
form.weather_notes.data=edit.weather_notes
form.comments.data=edit.comments
return render_template('pages/shooting_configuration_edit.html',form=form,edit=edit)
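# Detail dashboard: resolves the 8 targets of the current TShooting row, or shows "NA" for every lane when none exists.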
@app.route('/detail_dashboard/')
def detail_dashboard():
tshoot=db.session.query(TShooting).scalar()
if(tshoot is None):
T1_name = "NA"
T1_service="NA"
T1_rank ="NA"
T2_name = "NA"
T2_service="NA"
T2_rank ="NA"
T3_name = "NA"
T3_service="NA"
T3_rank ="NA"
T4_name = "NA"
T4_service="NA"
T4_rank ="NA"
T5_name = "NA"
T5_service="NA"
T5_rank ="NA"
T6_name = "NA"
T6_service="NA"
T6_rank ="NA"
T7_name = "NA"
T7_service="NA"
T7_rank ="NA"
T8_name = "NA"
T8_service="NA"
T8_rank ="NA"
else:
T1=Shooter.query.filter(Shooter.id==TShooting.target_1_id).scalar()
T1_name = T1.name
T1_service = T1.service_id
T1_r_id = T1.rank_id
T1_rank_id = Rank.query.filter(Rank.id==T1_r_id).scalar()
T1_rank=T1_rank_id.name
T2=Shooter.query.filter(Shooter.id==TShooting.target_2_id).scalar()
T2_name = T2.name
T2_service = T2.service_id
T2_r_id = T2.rank_id
T2_rank_id = Rank.query.filter(Rank.id==T2_r_id).scalar()
T2_rank=T2_rank_id.name
T3=Shooter.query.filter(Shooter.id==TShooting.target_3_id).scalar()
T3_name = T3.name
T3_service = T3.service_id
T3_r_id = T3.rank_id
T3_rank_id = Rank.query.filter(Rank.id==T3_r_id).scalar()
T3_rank=T3_rank_id.name
T4=Shooter.query.filter(Shooter.id==TShooting.target_4_id).scalar()
T4_name = T4.name
T4_service = T4.service_id
T4_r_id = T4.rank_id
T4_rank_id = Rank.query.filter(Rank.id==T4_r_id).scalar()
T4_rank=T4_rank_id.name
T5=Shooter.query.filter(Shooter.id==TShooting.target_5_id).scalar()
T5_name = T5.name
T5_service = T5.service_id
T5_r_id = T5.rank_id
T5_rank_id = Rank.query.filter(Rank.id==T5_r_id).scalar()
T5_rank=T5_rank_id.name
T6=Shooter.query.filter(Shooter.id==TShooting.target_6_id).scalar()
T6_name = T6.name
T6_service = T6.service_id
T6_r_id = T6.rank_id
T6_rank_id = Rank.query.filter(Rank.id==T6_r_id).scalar()
T6_rank=T6_rank_id.name
T7=Shooter.query.filter(Shooter.id==TShooting.target_7_id).scalar()
T7_name = T7.name
T7_service = T7.service_id
T7_r_id = T7.rank_id
T7_rank_id = Rank.query.filter(Rank.id==T7_r_id).scalar()
T7_rank=T7_rank_id.name
T8=Shooter.query.filter(Shooter.id==TShooting.target_8_id).scalar()
T8_name = T8.name
T8_service = T8.service_id
T8_r_id = T8.rank_id
T8_rank_id = Rank.query.filter(Rank.id==T8_r_id).scalar()
T8_rank=T8_rank_id.name
return render_template('pages/detail_dashboard.html' ,
T1_name=T1_name,
T1_service=T1_service,
T2_name=T2_name,
T2_service=T2_service,
T3_name=T3_name,
T3_service=T3_service,
T4_name=T4_name,
T4_service=T4_service,
T5_name=T5_name,
T5_service=T5_service,
T6_name=T6_name,
T6_service=T6_service,
T7_name=T7_name,
T7_service=T7_service,
T8_name=T8_name,
T8_service=T8_service,
T1_rank=T1_rank,
T2_rank=T2_rank,
T3_rank=T3_rank,
T4_rank=T4_rank,
T5_rank=T5_rank,
T6_rank=T6_rank,
T7_rank=T7_rank,
T8_rank=T8_rank
)
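# AJAX lookup by army/service number: returns the firer's name, rank, cantonment, the last five results/tendencies/grouping lengths, and the firers currently assigned to sets 1-4.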
@app.route('/adhoc_detail_1/', methods=['GET', 'POST'])
def adhoc_detail_1():
name_1=None
army=None
rank=None
cant=None
set_1_name=None
set_1_army=None
set_2_name=None
set_2_army=None
set_3_name=None
set_3_army=None
set_4_name=None
set_4_army=None
res=[]
ten=[]
gp_len=[]
if request.method == "POST":
data1 = request.get_json()
army=data1['usr']
curdate=time.strftime("%Y-%m-%d")
name_1=db.session.query(Shooter.name).filter(Shooter.service_id==army).scalar()
target_1_id=db.session.query(Shooter.id).filter(Shooter.service_id==army).scalar()
rank_id=db.session.query(Shooter.rank_id).filter(Shooter.service_id==army).scalar()
cant_id=db.session.query(Shooter.cantonment_id).filter(Shooter.service_id==army).scalar()
rank=db.session.query(Rank.name).filter(Rank.id==rank_id).scalar()
cant=db.session.query(Cantonment.cantonment).filter(Cantonment.id==cant_id).scalar()
tten=db.session.query(MPI.tendency_code).filter(MPI.firer_id==target_1_id).order_by(MPI.datetimestamp.desc()).limit(5).all()[::-1]
tres = db.session.query(Grouping.result).filter(Grouping.firer_id==target_1_id).order_by(Grouping.datetimestamp.desc()).limit(5).all()[::-1]
tgp = db.session.query(Grouping.grouping_length_f).filter(Grouping.firer_id==target_1_id).order_by(Grouping.datetimestamp.desc()).limit(5).all()[::-1]
for ele in tres:
for ele2 in ele:
res.append(ele2)
for ele3 in tten:
for ele4 in ele3:
ten.append(ele4)
for ele5 in tgp:
for ele6 in ele5:
gp_len.append(ele6)
set_1_id = db.session.query(Firer_Details.firer_id).filter(Firer_Details.date==curdate,
Firer_Details.target_no==1,
Firer_Details.set_no==1
).distinct().scalar()
set_1_name=db.session.query(Shooter.name).filter(Shooter.id==set_1_id).scalar()
set_1_army=db.session.query(Shooter.service_id).filter(Shooter.id==set_1_id).scalar()
set_2_id = db.session.query(Firer_Details.firer_id).filter(Firer_Details.date==curdate,
Firer_Details.target_no==2,
Firer_Details.set_no==2
).distinct().scalar()
set_2_name=db.session.query(Shooter.name).filter(Shooter.id==set_2_id).scalar()
set_2_army=db.session.query(Shooter.service_id).filter(Shooter.id==set_2_id).scalar()
set_3_id = db.session.query(Firer_Details.firer_id).filter(Firer_Details.date==curdate,
Firer_Details.target_no==3,
Firer_Details.set_no==3
).distinct().scalar()
set_3_name=db.session.query(Shooter.name).filter(Shooter.id==set_3_id).scalar()
set_3_army=db.session.query(Shooter.service_id).filter(Shooter.id==set_3_id).scalar()
set_4_id = db.session.query(Firer_Details.firer_id).filter(Firer_Details.date==curdate,
Firer_Details.target_no==4,
Firer_Details.set_no==4
).distinct().scalar()
set_4_name=db.session.query(Shooter.name).filter(Shooter.id==set_4_id).scalar()
set_4_army=db.session.query(Shooter.service_id).filter(Shooter.id==set_4_id).scalar()
return jsonify(name_1=name_1,army=army,rank=rank,cant=cant,
set_1_name=set_1_name,
set_2_name=set_2_name,
set_3_name=set_3_name,
set_4_name=set_4_name,
set_1_army=set_1_army,
set_2_army=set_2_army,
set_3_army=set_3_army,
set_4_army=set_4_army,
gp_len=gp_len,
res=res,
ten=ten
)
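# Target 1 scoring page: clears the Flask session, resets Firer_Details for target 1 once set 5 is reached, and renders the prediction page with session and firer pick lists.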
@app.route('/individual_score/target_1', methods=['GET', 'POST'])
def individual_score_target_1():
session.clear()
data=TShooting.query.scalar()
firing_set_arr=[]
cantonment=Cantonment.query.distinct(Cantonment.cantonment)
curdate=time.strftime("%Y-%m-%d")
selection=Shooting_Session.query.filter(Shooting_Session.date>=curdate).order_by(Shooting_Session.datetimestamp.desc()).all()
gender =Gender.query.all()
rank_s = Rank.query.all()
firing_set=db.session.query(Firer_Details.set_no).filter(Firer_Details.target_no==1).distinct().all()
for ele in firing_set:
for ele2 in ele:
firing_set_arr.append(ele2)
if(len(firing_set_arr)<1):
pass
else:
i=len(firing_set_arr)-1
if(firing_set_arr[i]==5):
db.session.query(Firer_Details).filter(Firer_Details.target_no==1).delete()
db.session.commit()
else:
pass
dt=time.strftime("%Y-%m-%d")
curdatetime=datetime.now()
firer_1 = [row.service_id for row in Shooter.query.all()]
detail_data=db.session.query(Session_Detail).filter(Session_Detail.date==dt,Session_Detail.save_flag==0).all()
name = "NA"
detail_no ="NA"
rank ="NA"
target_no = 1
service_id ="NA"
ten = []
res = []
selection=Shooting_Session.query.filter(Shooting_Session.date>=dt).order_by(Shooting_Session.datetimestamp.desc()).all()
firearms = Firearms.query.all()
rang= Range.query.all()
ammunation = Ammunation.query.all()
return render_template('pages/prediction_target_1.html',
curdatetime=curdatetime,
name = name,
firer_1=firer_1,
rank=rank,
detail_data=detail_data,
detail_no=detail_no,
target_no=target_no,
service_id=service_id,
firearms=firearms,
ammunation=ammunation,
data=selection,
rang=rang,
res=res,
date=dt,
ten=ten,
cantonment=cantonment,
gender=gender,
rank_s=rank_s)
@app.route('/session_target_1/', methods=['GET', 'POST'])
def session_target_1():
if request.method == "POST":
data1 = request.get_json()
session=data1["session"]
ran=data1["range"]
arms=data1["arms"]
distance=data1["dis"]
occ=data1["occ"]
ammu=data1["ammu"]
weather=data1["weather"]
comment=data1["comment"]
range_id=db.session.query(Range.id).filter(Range.name==ran).scalar()
arms_id=db.session.query(Firearms.id).filter(Firearms.name==arms).scalar()
ammu_id=db.session.query(Ammunation.id).filter(Ammunation.name==ammu).scalar()
shooting=Shooting_Session(
date=time.strftime("%Y-%m-%d"),
datetimestamp=time.strftime("%Y-%m-%d %H:%M"),
shooting_range_id=range_id,
firearms_id=arms_id,
ammunation_id=ammu_id,
target_distance=distance,
weather_notes =weather,
comments =comment,
session_no=session,
occasion=occ
)
db.session.add(shooting)
db.session.commit()
result="This is Successfully Saved"
return jsonify(result=result ,session=session)
@app.route('/target_1_populate/', methods=['GET', 'POST'])
def target_1_populate():
if request.method == 'POST':
session_id=db.session.query(TShooting.session_id).scalar()
return jsonify(session_id=session_id)
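# Saves the detail posted from the target 1 page: refreshes TPaper_ref and TShooting, appends a Session_Detail row, rejects duplicate firers, and returns a summary built by get_information().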
@app.route('/load_detail_1/', methods=['GET', 'POST'])
def load_detail_1():
result_1="Done"
if request.method == 'POST':
curdate=time.strftime("%Y-%m-%d")
r8=None
data=request.get_json()
tmp_list = []
duplicate = False
detail =data["detail"]
sess=data["session"]
paper=data["paper"]
shot=data["shot"]
set=data["set"]
if(data["r1"]==""):
r1_id=999
else:
r1=data["r1"]
r1_id=db.session.query(Shooter.id).filter(Shooter.service_id==r1).scalar()
if(data["r2"]==""):
r2_id=999
else:
r2=data["r2"]
r2_id=db.session.query(Shooter.id).filter(Shooter.service_id==r2).scalar()
if(data["r3"]==""):
r3_id=999
else:
r3=data["r3"]
r3_id=db.session.query(Shooter.id).filter(Shooter.service_id==r3).scalar()
if(data["r4"]==""):
r4_id=999
else:
r4=data["r4"]
r4_id=db.session.query(Shooter.id).filter(Shooter.service_id==r4).scalar()
if(data["r5"]==""):
r5_id=999
else:
r5=data["r5"]
r5_id=db.session.query(Shooter.id).filter(Shooter.service_id==r5).scalar()
if(data["r6"]==""):
r6_id=999
else:
r6=data["r6"]
r6_id=db.session.query(Shooter.id).filter(Shooter.service_id==r6).scalar()
if(data["r7"]==""):
r7_id=999
else:
r7=data["r7"]
r7_id=db.session.query(Shooter.id).filter(Shooter.service_id==r7).scalar()
if(data["r8"]==""):
r8_id=999
else:
r8=data["r8"]
r8_id=db.session.query(Shooter.id).filter(Shooter.service_id==r8).scalar()
tmp_list.append(r1_id)
tmp_list.append(r2_id)
tmp_list.append(r3_id)
tmp_list.append(r4_id)
tmp_list.append(r5_id)
tmp_list.append(r6_id)
tmp_list.append(r7_id)
tmp_list.append(r8_id)
db.session.query(TPaper_ref).delete()
db.session.commit()
ref_db = TPaper_ref(
date=time.strftime("%Y-%m-%d"),
paper_ref=paper,
detail_no=detail,
session_no=sess
)
db.session.add(ref_db)
db.session.commit()
for i in range(len(tmp_list)):
for j in range(len(tmp_list)):
# 999 marks an empty lane, so matching 999 placeholders are not duplicates;
# do not reset a previously detected duplicate on later non-matching pairs
if(i!=j and tmp_list[i]==tmp_list[j] and tmp_list[i]!=999):
duplicate = True
if(duplicate):
print("inside dup")
error="dup"
else:
db.session.query(TShooting).delete()
db.session.commit()
tshoot=TShooting(
date=datetime.now(),
datetimestamp=time.strftime("%Y-%m-%d %H:%M"),
session_id=sess,
detail_no=detail,
target_1_id=r1_id,
target_2_id=r2_id,
target_3_id=r3_id,
target_4_id=r4_id,
target_5_id=r5_id,
target_6_id=r6_id,
target_7_id=r7_id,
target_8_id=r8_id,
paper_ref=paper,
set_no=set,
save_flag=0
)
db.session.add(tshoot)
db.session.commit()
detail_shots =Session_Detail(
date=datetime.now(),
datetimestamp=time.strftime("%Y-%m-%d %H:%M"),
session_id=sess,
detail_no=detail,
target_1_id=r1_id,
target_2_id=r2_id,
target_3_id=r3_id,
target_4_id=r4_id,
target_5_id=r5_id,
target_6_id=r6_id,
target_7_id=r7_id,
target_8_id=r8_id,
paper_ref=paper,
set_no=set,
save_flag=0
)
db.session.add(detail_shots)
db.session.commit()
error="ok"
firer_name,cant,rank,service_id,res,tenden,gp_len,set_4_name,set_4_army,set_4_session_no,set_4_detail_no,set_3_name,set_3_army,set_3_session_no,set_3_detail_no,set_2_name,set_2_army,set_2_session_no,set_2_detail_no,set_1_name,set_1_army,set_1_session_no,set_1_detail_no,current_firer_name,current_army_no,current_session_no,current_detail_no=get_information(r1_id,sess,paper)
result="The Detail is Saved Successfully"
return jsonify(result=result,data1=firer_name,ra_1=rank,detail=detail,
service_id_1=service_id,
session=sess,
paper=paper,
set_no=set,
cant=cant,
gp_len=gp_len,
res=res,
ten=tenden,
set_4_name=set_4_name,
set_3_name=set_3_name,
set_2_name=set_2_name,
set_1_name=set_1_name,
current_firer_name=current_firer_name,
set_4_army=set_4_army,
set_3_army=set_3_army,
set_2_army=set_2_army,
set_1_army=set_1_army,
current_army_no=current_army_no,
set_4_session_no=set_4_session_no,
set_3_session_no=set_3_session_no,
set_2_session_no=set_2_session_no,
set_1_session_no=set_1_session_no,
current_session_no=current_session_no,
set_4_detail_no=set_4_detail_no,
set_3_detail_no=set_3_detail_no,
set_2_detail_no=set_2_detail_no,
set_1_detail_no=set_1_detail_no,
current_detail_no=current_detail_no
)
return jsonify(result_1=result_1)
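# Helper: gathers a firer's last five grouping results, tendencies and grouping lengths, plus the name, army no, session and detail of the firers recorded for sets 1-4 of target 1 on the current date.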
def get_information(target_1_id,sess,paper_ref):
res=[]
ten=[]
gp_len=[]
curdate=time.strftime("%Y-%m-%d")
tten=db.session.query(MPI.tendency_code).filter(MPI.firer_id==target_1_id).order_by(MPI.datetimestamp.desc()).limit(5).all()[::-1]
tres = db.session.query(Grouping.result).filter(Grouping.firer_id==target_1_id).order_by(Grouping.datetimestamp.desc()).limit(5).all()[::-1]
tgp = db.session.query(Grouping.grouping_length_f).filter(Grouping.firer_id==target_1_id).order_by(Grouping.datetimestamp.desc()).limit(5).all()[::-1]
for ele in tres:
for ele2 in ele:
res.append(ele2)
for ele3 in tten:
for ele4 in ele3:
ten.append(ele4)
for ele5 in tgp:
for ele6 in ele5:
gp_len.append(int(ele6))
da_1=db.session.query(Shooter.name).filter(Shooter.id==target_1_id).scalar()
cant_id=db.session.query(Shooter.cantonment_id).filter(Shooter.id==target_1_id).scalar()
cant=db.session.query(Cantonment.cantonment).filter(Cantonment.id==cant_id).scalar()
ra_1_id=db.session.query(Shooter.rank_id).filter(Shooter.id==target_1_id).scalar()
ra_1 = db.session.query(Rank.name).filter(Rank.id==ra_1_id).scalar()
service_id_1 = db.session.query(Shooter.service_id).filter(Shooter.id==target_1_id).scalar()
set_1_id = db.session.query(Firer_Details.firer_id).filter(
Firer_Details.date==curdate,
Firer_Details.target_no==1,
Firer_Details.set_no==1
).distinct().scalar()
set_1_session_no=db.session.query(Firer_Details.session_id).filter(
Firer_Details.date==curdate,
Firer_Details.target_no==1,
Firer_Details.set_no==1
).distinct().scalar()
set_1_detail_no=db.session.query(Firer_Details.detail_id).filter(
Firer_Details.date==curdate,
Firer_Details.target_no==1,
Firer_Details.set_no==1
).distinct().scalar()
set_1_name=db.session.query(Shooter.name).filter(
Shooter.id==set_1_id
).scalar()
set_1_army=db.session.query(Shooter.service_id).filter(Shooter.id==set_1_id).scalar()
set_2_id = db.session.query(Firer_Details.firer_id).filter(
Firer_Details.date==curdate,
Firer_Details.target_no==1,
Firer_Details.set_no==2
).distinct().scalar()
set_2_session_no=db.session.query(Firer_Details.session_id).filter(
Firer_Details.date==curdate,
Firer_Details.target_no==1,
Firer_Details.set_no==2
).distinct().scalar()
set_2_detail_no=db.session.query(Firer_Details.detail_id).filter(
Firer_Details.date==curdate,
Firer_Details.target_no==1,
Firer_Details.set_no==2
).distinct().scalar()
set_2_name=db.session.query(Shooter.name).filter(
Shooter.id==set_2_id
).scalar()
set_2_army=db.session.query(Shooter.service_id).filter(Shooter.id==set_2_id).scalar()
set_3_id = db.session.query(Firer_Details.firer_id).filter(
Firer_Details.date==curdate,
Firer_Details.target_no==1,
Firer_Details.set_no==3
).distinct().scalar()
set_3_session_no=db.session.query(Firer_Details.session_id).filter(
Firer_Details.date==curdate,
Firer_Details.target_no==1,
Firer_Details.set_no==3
).distinct().scalar()
set_3_detail_no=db.session.query(Firer_Details.detail_id).filter(
Firer_Details.date==curdate,
Firer_Details.target_no==1,
Firer_Details.set_no==3
).distinct().scalar()
set_3_name=db.session.query(Shooter.name).filter(
Shooter.id==set_3_id
).scalar()
set_3_army=db.session.query(Shooter.service_id).filter(Shooter.id==set_3_id).scalar()
set_4_id = db.session.query(Firer_Details.firer_id).filter(
Firer_Details.date==curdate,
Firer_Details.target_no==1,
Firer_Details.set_no==4
).distinct().scalar()
set_4_session_no=db.session.query(Firer_Details.session_id).filter(
Firer_Details.date==curdate,
Firer_Details.target_no==1,
Firer_Details.set_no==4
).distinct().scalar()
set_4_detail_no=db.session.query(Firer_Details.detail_id).filter(
Firer_Details.date==curdate,
Firer_Details.target_no==1,
Firer_Details.set_no==4
).distinct().scalar()
set_4_name=db.session.query(Shooter.name).filter(
Shooter.id==set_4_id
).scalar()
set_4_army=db.session.query(Shooter.service_id).filter(Shooter.id==set_4_id).scalar()
current_firer_name = db.session.query(Shooter.name).filter(Shooter.id==target_1_id).scalar()
current_army_no = db.session.query(Shooter.service_id).filter(Shooter.id==target_1_id).scalar()
current_session_no=db.session.query(TShooting.session_id).filter(TShooting.target_1_id==target_1_id).scalar()
current_detail_no=db.session.query(TShooting.detail_no).filter(TShooting.target_1_id==target_1_id).scalar()
return(da_1,cant,ra_1,service_id_1,res,ten,gp_len,
set_4_name,set_4_army,set_4_session_no,set_4_detail_no,
set_3_name,set_3_army,set_3_session_no,set_3_detail_no,
set_2_name,set_2_army,set_2_session_no,set_2_detail_no,
set_1_name,set_1_army,set_1_session_no,set_1_detail_no,
current_firer_name,current_army_no,current_session_no,current_detail_no
)
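# Scoring pages for targets 2-8: each resolves the firer assigned to its lane in TShooting and shows that firer's last five results and tendencies.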
@app.route('/individual_score/target_2', methods=['GET', 'POST'])
def individual_score_target_2():
firer_id =db.session.query(TShooting.target_2_id).scalar()
detail_no =db.session.query(TShooting.detail_no).scalar()
session_no =db.session.query(TShooting.session_id).scalar()
target_no = 2
tres = db.session.query(Grouping.result).filter(Grouping.firer_id==firer_id).order_by(Grouping.datetimestamp.desc()).limit(5).all()[::-1]
res=[]
ten=[]
tten=db.session.query(MPI.tendency_code).filter(MPI.firer_id==firer_id).order_by(MPI.datetimestamp.desc()).limit(5).all()[::-1]
print(tres,)
for ele in tres:
for ele2 in ele:
print(type(ele2))
res.append(ele2)
for ele3 in tten:
for ele4 in ele3:
print(type(ele4))
ten.append(ele4)
service_id = db.session.query(Shooter.service_id).filter(Shooter.id==firer_id).scalar()
rank_id=db.session.query(Shooter.rank_id).filter(Shooter.id==firer_id).scalar()
rank=db.session.query(Rank.name).filter(Rank.id==rank_id).scalar()
name = db.session.query(Shooter.name).filter(Shooter.id==firer_id).scalar()
firer_id,sess,o,p,u,q,t1_x,t1_y,xmpi,ympi,f,gp,Tfirt_x,Tfirt_y,fin_x_1,fin_y_1,result_1,fir_tendency_1=prediction_calculation_2()
if request.method == 'POST':
paper_ref=db.session.query(TPaper_ref.paper_ref).scalar()
print("paper_ref")
print(paper_ref)
return render_template('pages/prediction_target_2.html',
name = name,
detail_no=detail_no,
session_no=session_no,
target_no=target_no,
service_id=service_id,
rank=rank,
res=res,
ten=ten)
@app.route('/individual_score/target_3', methods=['GET', 'POST'])
def individual_score_target_3():
firer_id =db.session.query(TShooting.target_3_id).scalar()
detail_no =db.session.query(TShooting.detail_no).scalar()
session_no =db.session.query(TShooting.session_id).scalar()
target_no = 3
tres = db.session.query(Grouping.result).filter(Grouping.firer_id==firer_id).order_by(Grouping.datetimestamp.desc()).limit(5).all()[::-1]
res=[]
ten=[]
tten=db.session.query(MPI.tendency_code).filter(MPI.firer_id==firer_id).order_by(MPI.datetimestamp.desc()).limit(5).all()[::-1]
print(tres)
for ele in tres:
for ele2 in ele:
print(type(ele2))
res.append(ele2)
for ele3 in tten:
for ele4 in ele3:
print(type(ele4))
ten.append(ele4)
service_id = db.session.query(Shooter.service_id).filter(Shooter.id==firer_id).scalar()
rank_id=db.session.query(Shooter.rank_id).filter(Shooter.id==firer_id).scalar()
rank=db.session.query(Rank.name).filter(Rank.id==rank_id).scalar()
name = db.session.query(Shooter.name).filter(Shooter.id==firer_id).scalar()
return render_template('pages/prediction_target_3.html',
name = name,
detail_no=detail_no,
session_no=session_no,
target_no=target_no,
service_id=service_id,
rank=rank,
res=res,
ten=ten)
@app.route('/individual_score/target_4', methods=['GET', 'POST'])
def individual_score_target_4():
firer_id =db.session.query(TShooting.target_4_id).scalar()
detail_no =db.session.query(TShooting.detail_no).scalar()
session_no =db.session.query(TShooting.session_id).scalar()
target_no = 4
tres = db.session.query(Grouping.result).filter(Grouping.firer_id==firer_id).order_by(Grouping.datetimestamp.desc()).limit(5).all()[::-1]
res=[]
ten=[]
tten=db.session.query(MPI.tendency_code).filter(MPI.firer_id==firer_id).order_by(MPI.datetimestamp.desc()).limit(5).all()[::-1]
print(tres)
for ele in tres:
for ele2 in ele:
print(type(ele2))
res.append(ele2)
for ele3 in tten:
for ele4 in ele3:
print(type(ele4))
ten.append(ele4)
service_id = db.session.query(Shooter.service_id).filter(Shooter.id==firer_id).scalar()
rank_id=db.session.query(Shooter.rank_id).filter(Shooter.id==firer_id).scalar()
rank=db.session.query(Rank.name).filter(Rank.id==rank_id).scalar()
name = db.session.query(Shooter.name).filter(Shooter.id==firer_id).scalar()
return render_template('pages/prediction_target_4.html',
name = name,
detail_no=detail_no,
session_no=session_no,
target_no=target_no,
service_id=service_id,
rank=rank,
res=res,
ten=ten)
@app.route('/individual_score/target_5', methods=['GET', 'POST'])
def individual_score_target_5():
firer_id =db.session.query(TShooting.target_5_id).scalar()
detail_no =db.session.query(TShooting.detail_no).scalar()
session_no =db.session.query(TShooting.session_id).scalar()
target_no = 5
tres = db.session.query(Grouping.result).filter(Grouping.firer_id==firer_id).order_by(Grouping.datetimestamp.desc()).limit(5).all()[::-1]
res=[]
ten=[]
tten=db.session.query(MPI.tendency_code).filter(MPI.firer_id==firer_id).order_by(MPI.datetimestamp.desc()).limit(5).all()[::-1]
print(tres)
for ele in tres:
for ele2 in ele:
print(type(ele2))
res.append(ele2)
for ele3 in tten:
for ele4 in ele3:
print(type(ele4))
ten.append(ele4)
service_id = db.session.query(Shooter.service_id).filter(Shooter.id==firer_id).scalar()
rank_id=db.session.query(Shooter.rank_id).filter(Shooter.id==firer_id).scalar()
rank=db.session.query(Rank.name).filter(Rank.id==rank_id).scalar()
name = db.session.query(Shooter.name).filter(Shooter.id==firer_id).scalar()
return render_template('pages/prediction_target_5.html',
name = name,
detail_no=detail_no,
session_no=session_no,
target_no=target_no,
service_id=service_id,
rank=rank,
res=res,
ten=ten)
@app.route('/individual_score/target_6', methods=['GET', 'POST'])
def individual_score_target_6():
firer_id =db.session.query(TShooting.target_6_id).scalar()
detail_no =db.session.query(TShooting.detail_no).scalar()
session_no =db.session.query(TShooting.session_id).scalar()
target_no = 6
tres = db.session.query(Grouping.result).filter(Grouping.firer_id==firer_id).order_by(Grouping.datetimestamp.desc()).limit(5).all()[::-1]
res=[]
ten=[]
tten=db.session.query(MPI.tendency_code).filter(MPI.firer_id==firer_id).order_by(MPI.datetimestamp.desc()).limit(5).all()[::-1]
print(tres)
for ele in tres:
for ele2 in ele:
print(type(ele2))
res.append(ele2)
for ele3 in tten:
for ele4 in ele3:
print(type(ele4))
ten.append(ele4)
service_id = db.session.query(Shooter.service_id).filter(Shooter.id==firer_id).scalar()
rank_id=db.session.query(Shooter.rank_id).filter(Shooter.id==firer_id).scalar()
rank=db.session.query(Rank.name).filter(Rank.id==rank_id).scalar()
name = db.session.query(Shooter.name).filter(Shooter.id==firer_id).scalar()
return render_template('pages/prediction_target_6.html',
name = name,
detail_no=detail_no,
session_no=session_no,
target_no=target_no,
service_id=service_id,
rank=rank,
res=res,
ten=ten)
@app.route('/individual_score/target_7', methods=['GET', 'POST'])
def individual_score_target_7():
firer_id =db.session.query(TShooting.target_7_id).scalar()
detail_no =db.session.query(TShooting.detail_no).scalar()
session_no =db.session.query(TShooting.session_id).scalar()
target_no = 7
tres = db.session.query(Grouping.result).filter(Grouping.firer_id==firer_id).order_by(Grouping.datetimestamp.desc()).limit(5).all()[::-1]
res=[]
ten=[]
tten=db.session.query(MPI.tendency_code).filter(MPI.firer_id==firer_id).order_by(MPI.datetimestamp.desc()).limit(5).all()[::-1]
print(tres)
for ele in tres:
for ele2 in ele:
print(type(ele2))
res.append(ele2)
for ele3 in tten:
for ele4 in ele3:
print(type(ele4))
ten.append(ele4)
service_id = db.session.query(Shooter.service_id).filter(Shooter.id==firer_id).scalar()
rank_id=db.session.query(Shooter.rank_id).filter(Shooter.id==firer_id).scalar()
rank=db.session.query(Rank.name).filter(Rank.id==rank_id).scalar()
name = db.session.query(Shooter.name).filter(Shooter.id==firer_id).scalar()
return render_template('pages/prediction_target_7.html',
name = name,
detail_no=detail_no,
session_no=session_no,
target_no=target_no,
service_id=service_id,
rank=rank,
res=res,
ten=ten)
@app.route('/individual_score/target_8', methods=['GET', 'POST'])
def individual_score_target_8():
firer_id =db.session.query(TShooting.target_8_id).scalar()
detail_no =db.session.query(TShooting.detail_no).scalar()
session_no =db.session.query(TShooting.session_id).scalar()
target_no = 8
tres = db.session.query(Grouping.result).filter(Grouping.firer_id==firer_id).order_by(Grouping.datetimestamp.desc()).limit(5).all()[::-1]
res=[]
ten=[]
tten=db.session.query(MPI.tendency_code).filter(MPI.firer_id==firer_id).order_by(MPI.datetimestamp.desc()).limit(5).all()[::-1]
print(tres)
for ele in tres:
for ele2 in ele:
print(type(ele2))
res.append(ele2)
for ele3 in tten:
for ele4 in ele3:
print(type(ele4))
ten.append(ele4)
service_id = db.session.query(Shooter.service_id).filter(Shooter.id==firer_id).scalar()
rank_id=db.session.query(Shooter.rank_id).filter(Shooter.id==firer_id).scalar()
rank=db.session.query(Rank.name).filter(Rank.id==rank_id).scalar()
name = db.session.query(Shooter.name).filter(Shooter.id==firer_id).scalar()
return render_template('pages/prediction_target_8.html',
name = name,
detail_no=detail_no,
session_no=session_no,
target_no=target_no,
service_id=service_id,
rank=rank,
res=res,
ten=ten)
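# AJAX prediction for target 1: runs prediction_calculation_1(), converts the MPI to inches, and returns shot coordinates, grouping and per-set firer info as JSON for plotting.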
@app.route('/prediction_target_1/', methods=['GET', 'POST'])
def prediction_target_1():
t1_x=0
t1_y=0
xmpi_j=0
ympi_j=0
gp=0
Tfirt_x_j=0
Tfirt_y_j=0
fin_x_1=0
fin_y_1=0
xmpi_inch = 0
ympi_inch = 0
result_1=None
fir_tendency=None
set_1_name = None
set_1_army =None
set_1_session_no = None
set_1_detail_no=None
set_1_id =None
set_2_name = None
set_2_army =None
set_2_session_no = None
set_2_detail_no=None
set_2_id =None
set_3_name = None
set_3_army =None
set_3_session_no = None
set_3_detail_no=None
set_3_id =None
set_4_name = None
set_4_army =None
set_4_session_no = None
set_4_detail_no=None
set_4_id =None
fir_tendency_1=None
firer_id=None
current_army_no=None
current_firer_name=None
current_session_no=None
session_detail_no=None
current_detail_no=None
set_2_x=None
set_2_y=None
set_3_x=None
set_3_y=None
set_4_x=None
set_4_y=None
paper_ref=None
sess=None
res=None
set_2_x_arr=[]
set_2_y_arr=[]
set_3_x_arr=[]
set_3_y_arr=[]
set_4_x_arr=[]
set_4_y_arr=[]
fin_x_arr_1=[]
fin_y_arr_1=[]
curdate=time.strftime("%Y-%m-%d")
if request.method == 'POST':
firer_id,sess,detail,p,u,q,t1_x,t1_y,xmpi,ympi,f,gp,Tfirt_x,Tfirt_y,fin_x_1,fin_y_1,result_1,fir_tendency_1=prediction_calculation_1()
paper_ref=db.session.query(TPaper_ref.paper_ref).scalar()
set_2_x=db.session.query(Firer_Details).filter(Firer_Details.date==curdate , Firer_Details.target_no==1 ,Firer_Details.set_no==2 , Firer_Details.session_id==sess).all()
set_2_y=db.session.query(Firer_Details).filter(Firer_Details.date==curdate , Firer_Details.target_no==1 , Firer_Details.set_no==2 , Firer_Details.session_id==sess).all()
for x_2 in set_2_x:
set_2_x_arr.append(int(x_2.final_x))
for y_2 in set_2_y:
set_2_y_arr.append(int(y_2.final_y))
set_3_x=db.session.query(Firer_Details).filter(Firer_Details.date==curdate , Firer_Details.target_no==1 , Firer_Details.set_no==3 , Firer_Details.session_id==sess).all()
set_3_y=db.session.query(Firer_Details).filter(Firer_Details.date==curdate , Firer_Details.target_no==1 , Firer_Details.set_no==3 , Firer_Details.session_id==sess).all()
for x_3 in set_3_x:
set_3_x_arr.append(int(x_3.final_x))
for y_3 in set_3_y:
set_3_y_arr.append(int(y_3.final_y))
print(set_3_x_arr)
set_4_x=db.session.query(Firer_Details).filter(Firer_Details.date==curdate , Firer_Details.target_no==1 , Firer_Details.set_no==4 , Firer_Details.session_id==sess).all()
set_4_y=db.session.query(Firer_Details).filter(Firer_Details.date==curdate , Firer_Details.target_no==1 , Firer_Details.set_no==4 , Firer_Details.session_id==sess).all()
for x_4 in set_4_x:
set_4_x_arr.append(int(x_4.final_x))
for y_4 in set_4_y:
set_4_y_arr.append(int(y_4.final_y))
set_1_id = db.session.query(Firer_Details.firer_id).filter(
Firer_Details.date==curdate,
Firer_Details.target_no==1,
Firer_Details.set_no==1
).distinct().scalar()
set_1_session_no=db.session.query(Firer_Details.session_id).filter(
Firer_Details.date==curdate,
Firer_Details.target_no==1,
Firer_Details.set_no==1
).distinct().scalar()
set_1_detail_no=db.session.query(Firer_Details.detail_id).filter(
Firer_Details.date==curdate,
Firer_Details.target_no==1,
Firer_Details.set_no==1
).distinct().scalar()
set_1_name=db.session.query(Shooter.name).filter(
Shooter.id==set_1_id
).scalar()
set_1_army=db.session.query(Shooter.service_id).filter(Shooter.id==set_1_id).scalar()
set_2_id = db.session.query(Firer_Details.firer_id).filter(
Firer_Details.date==curdate,
Firer_Details.target_no==1,
Firer_Details.set_no==2
).distinct().scalar()
set_2_session_no=db.session.query(Firer_Details.session_id).filter(
Firer_Details.date==curdate,
Firer_Details.target_no==1,
Firer_Details.set_no==2
).distinct().scalar()
set_2_detail_no=db.session.query(Firer_Details.detail_id).filter(
Firer_Details.date==curdate,
Firer_Details.target_no==1,
Firer_Details.set_no==2
).distinct().scalar()
print("set_2_detail_no")
print(set_2_detail_no)
set_2_name=db.session.query(Shooter.name).filter(
Shooter.id==set_2_id
).scalar()
set_2_army=db.session.query(Shooter.service_id).filter(Shooter.id==set_2_id).scalar()
set_3_id = db.session.query(Firer_Details.firer_id).filter(
Firer_Details.date==curdate,
Firer_Details.target_no==1,
Firer_Details.set_no==3
).distinct().scalar()
set_3_session_no=db.session.query(Firer_Details.session_id).filter(
Firer_Details.date==curdate,
Firer_Details.target_no==1,
Firer_Details.set_no==3
).distinct().scalar()
set_3_detail_no=db.session.query(Firer_Details.detail_id).filter(
Firer_Details.date==curdate,
Firer_Details.target_no==1,
Firer_Details.set_no==3
).distinct().scalar()
set_3_name=db.session.query(Shooter.name).filter(
Shooter.id==set_3_id
).scalar()
set_3_army=db.session.query(Shooter.service_id).filter(Shooter.id==set_3_id).scalar()
set_4_id = db.session.query(Firer_Details.firer_id).filter(
Firer_Details.date==curdate,
Firer_Details.target_no==1,
Firer_Details.set_no==4
).distinct().scalar()
set_4_session_no=db.session.query(Firer_Details.session_id).filter(
Firer_Details.date==curdate,
Firer_Details.target_no==1,
Firer_Details.set_no==4
).distinct().scalar()
set_4_detail_no=db.session.query(Firer_Details.detail_id).filter(
Firer_Details.date==curdate,
Firer_Details.target_no==1,
Firer_Details.set_no==4
).distinct().scalar()
set_4_name=db.session.query(Shooter.name).filter(
Shooter.id==set_4_id
).scalar()
set_4_army=db.session.query(Shooter.service_id).filter(Shooter.id==set_4_id).scalar()
current_firer_name = db.session.query(Shooter.name).filter(Shooter.id==firer_id).scalar()
current_army_no = db.session.query(Shooter.service_id).filter(Shooter.id==firer_id).scalar()
current_session_no=db.session.query(TShooting.session_id).filter(TShooting.target_1_id==firer_id).scalar()
current_detail_no=db.session.query(TShooting.detail_no).filter(TShooting.target_1_id==firer_id).scalar()
xmpi_inch = pixeltoinch(xmpi)
ympi_inch = pixeltoinch(ympi)
xmpi_j =pd.Series(xmpi_inch).to_json(orient='values')
ympi_j =pd.Series(ympi_inch).to_json(orient='values')
Tfirt_x_j =pd.Series(Tfirt_x).to_json(orient='values')
Tfirt_y_j =pd.Series(Tfirt_y).to_json(orient='values')
for x_1 in fin_x_1:
fin_x_arr_1.append(int(x_1.final_x))
for y_1 in fin_y_1 :
fin_y_arr_1.append(int(y_1.final_y))
return jsonify(x1=t1_x ,
y1=t1_y ,
xmpi1=Tfirt_x_j ,
ympi1=Tfirt_y_j,
gp=gp,
txf1=Tfirt_x_j,
tyf1=Tfirt_y_j,
fx1=fin_x_arr_1,
fy1=fin_y_arr_1,
result_1=result_1,
fir_tendency_1=fir_tendency_1,
set_1_name=set_1_name,
current_firer_name=current_firer_name,
set_1_army=set_1_army,
current_army_no=current_army_no,
set_1_session_no=set_1_session_no,
current_session_no=current_session_no,
set_1_detail_no=set_1_detail_no,
current_detail_no=current_detail_no,
set_2_x=set_2_x_arr,
set_2_y=set_2_y_arr,
set_2_name=set_2_name,
set_2_army=set_2_army,
set_2_detail_no=set_2_detail_no,
set_2_session_no=set_2_session_no,
set_3_x=set_3_x_arr,
set_3_y=set_3_y_arr,
set_3_name=set_3_name,
set_3_army=set_3_army,
set_3_session_no=set_3_session_no,
set_3_detail_no=set_3_detail_no,
set_4_x=set_4_x_arr,
set_4_y=set_4_y_arr,
set_4_name=set_4_name,
set_4_army=set_4_army,
set_4_session_no=set_4_session_no,
set_4_detail_no=set_4_detail_no
)
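# AJAX prediction for target 2: same flow as target 1, but per-set data is read from T_Firer_Details filtered by paper reference and session.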
@app.route('/prediction_target_2/', methods=['GET', 'POST'])
def prediction_target_2():
t1_x=0
t1_y=0
xmpi_j=0
ympi_j=0
gp=0
Tfirt_x_j=0
Tfirt_y_j=0
fin_x_1=0
fin_y_1=0
xmpi_inch = 0
ympi_inch = 0
result_1=None
fir_tendency=None
set_1_name = None
set_1_army =None
set_1_session_no = None
set_1_detail_no=None
set_1_id =None
set_2_name = None
set_2_army =None
set_2_session_no = None
set_2_detail_no=None
set_2_id =None
set_3_name = None
set_3_army =None
set_3_session_no = None
set_3_detail_no=None
set_3_id =None
set_4_name = None
set_4_army =None
set_4_session_no = None
set_4_detail_no=None
set_4_id =None
fir_tendency_1=None
firer_id=None
current_army_no=None
current_firer_name=None
current_session_no=None
session_detail_no=None
current_detail_no=None
set_2_x=None
set_2_y=None
set_3_x=None
set_3_y=None
set_4_x=None
set_4_y=None
paper_ref=None
sess=None
res=None
set_2_x_arr=[]
set_2_y_arr=[]
set_3_x_arr=[]
set_3_y_arr=[]
set_4_x_arr=[]
set_4_y_arr=[]
fin_x_arr_1=[]
fin_y_arr_1=[]
curdate=time.strftime("%Y-%m-%d")
if request.method == 'POST':
firer_id,sess,o,p,u,q,t1_x,t1_y,xmpi,ympi,f,gp,Tfirt_x,Tfirt_y,fin_x_1,fin_y_1,result_1,fir_tendency_1=prediction_calculation_2()
paper_ref=db.session.query(TPaper_ref.paper_ref).scalar()
set_2_x=db.session.query(T_Firer_Details).filter(T_Firer_Details.date==curdate , T_Firer_Details.target_no==2 ,T_Firer_Details.set_no==2 , T_Firer_Details.paper_ref==paper_ref , T_Firer_Details.session_id==sess).all()
set_2_y=db.session.query(T_Firer_Details).filter(T_Firer_Details.date==curdate , T_Firer_Details.target_no==2 , T_Firer_Details.set_no==2 , T_Firer_Details.paper_ref==paper_ref , T_Firer_Details.session_id==sess).all()
for x_2 in set_2_x:
set_2_x_arr.append(int(x_2.final_x))
for y_2 in set_2_y:
set_2_y_arr.append(int(y_2.final_y))
set_3_x=db.session.query(T_Firer_Details).filter(T_Firer_Details.date==curdate , T_Firer_Details.target_no==2 , T_Firer_Details.set_no==3 , T_Firer_Details.paper_ref==paper_ref , T_Firer_Details.session_id==sess).all()
set_3_y=db.session.query(T_Firer_Details).filter(T_Firer_Details.date==curdate , T_Firer_Details.target_no==2 , T_Firer_Details.set_no==3 , T_Firer_Details.paper_ref==paper_ref , T_Firer_Details.session_id==sess).all()
for x_3 in set_3_x:
set_3_x_arr.append(int(x_3.final_x))
for y_3 in set_3_y:
set_3_y_arr.append(int(y_3.final_y))
set_4_x=db.session.query(T_Firer_Details).filter(T_Firer_Details.date==curdate , T_Firer_Details.target_no==2 , T_Firer_Details.set_no==4 , T_Firer_Details.paper_ref==paper_ref , T_Firer_Details.session_id==sess).all()
set_4_y=db.session.query(T_Firer_Details).filter(T_Firer_Details.date==curdate , T_Firer_Details.target_no==2 , T_Firer_Details.set_no==4 ,T_Firer_Details.paper_ref==paper_ref , T_Firer_Details.session_id==sess).all()
for x_4 in set_4_x:
set_4_x_arr.append(int(x_4.final_x))
for y_4 in set_4_y:
set_4_y_arr.append(int(y_4.final_y))
set_1_id = db.session.query(T_Firer_Details.firer_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==2,
T_Firer_Details.set_no==1,
T_Firer_Details.paper_ref==paper_ref,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_1_session_no=db.session.query(T_Firer_Details.session_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==2,
T_Firer_Details.set_no==1,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_1_detail_no=db.session.query(T_Firer_Details.detail_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==2,
T_Firer_Details.set_no==1,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_1_name=db.session.query(Shooter.name).filter(
Shooter.id==set_1_id
).scalar()
set_1_army=db.session.query(Shooter.service_id).filter(Shooter.id==set_1_id).scalar()
set_2_id = db.session.query(T_Firer_Details.firer_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==2,
T_Firer_Details.set_no==2,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_2_session_no=db.session.query(T_Firer_Details.session_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==2,
T_Firer_Details.set_no==2,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_2_detail_no=db.session.query(T_Firer_Details.detail_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==2,
T_Firer_Details.set_no==2,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_2_name=db.session.query(Shooter.name).filter(
Shooter.id==set_2_id
).scalar()
set_2_army=db.session.query(Shooter.service_id).filter(Shooter.id==set_2_id).scalar()
set_3_id = db.session.query(T_Firer_Details.firer_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==2,
T_Firer_Details.set_no==3,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_3_session_no=db.session.query(T_Firer_Details.session_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==2,
T_Firer_Details.set_no==3,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_3_detail_no=db.session.query(T_Firer_Details.detail_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==2,
T_Firer_Details.set_no==3,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_3_name=db.session.query(Shooter.name).filter(
Shooter.id==set_3_id
).scalar()
set_3_army=db.session.query(Shooter.service_id).filter(Shooter.id==set_3_id).scalar()
set_4_id = db.session.query(T_Firer_Details.firer_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==2,
T_Firer_Details.set_no==4,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_4_session_no=db.session.query(T_Firer_Details.session_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==2,
T_Firer_Details.set_no==4,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_4_detail_no=db.session.query(T_Firer_Details.detail_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==2,
T_Firer_Details.set_no==4,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_4_name=db.session.query(Shooter.name).filter(
Shooter.id==set_4_id
).scalar()
set_4_army=db.session.query(Shooter.service_id).filter(Shooter.id==set_4_id).scalar()
current_firer_name = db.session.query(Shooter.name).filter(Shooter.id==firer_id).scalar()
current_army_no = db.session.query(Shooter.service_id).filter(Shooter.id==firer_id).scalar()
current_session_no=db.session.query(TShooting.session_id).filter(TShooting.target_1_id==firer_id).scalar()
current_detail_no=db.session.query(TShooting.detail_no).filter(TShooting.target_1_id==firer_id).scalar()
xmpi_inch = pixeltoinch(xmpi)
ympi_inch = pixeltoinch(ympi)
xmpi_j =pd.Series(xmpi_inch).to_json(orient='values')
ympi_j =pd.Series(ympi_inch).to_json(orient='values')
Tfirt_x_j =pd.Series(Tfirt_x).to_json(orient='values')
Tfirt_y_j =pd.Series(Tfirt_y).to_json(orient='values')
fin_x_arr_1=[]
fin_y_arr_1=[]
for x_1 in fin_x_1:
fin_x_arr_1.append(int(x_1.final_x))
for y_1 in fin_y_1 :
fin_y_arr_1.append(int(y_1.final_y))
return jsonify(x1=t1_x ,
y1=t1_y ,
xmpi1=Tfirt_x_j ,
ympi1=Tfirt_y_j,
gp=gp,
txf1=Tfirt_x_j,
tyf1=Tfirt_y_j,
fx1=fin_x_arr_1,
fy1=fin_y_arr_1,
result_1=result_1,
fir_tendency_1=fir_tendency_1,
set_1_name=set_1_name,
current_firer_name=current_firer_name,
set_1_army=set_1_army,
current_army_no=current_army_no,
set_1_session_no=set_1_session_no,
current_session_no=current_session_no,
set_1_detail_no=set_1_detail_no,
current_detail_no=current_detail_no,
set_2_x=set_2_x_arr,
set_2_y=set_2_y_arr,
set_2_name=set_2_name,
set_2_army=set_2_army,
set_2_detail_no=set_2_detail_no,
set_2_session_no=set_2_session_no,
set_3_x=set_3_x_arr,
set_3_y=set_3_y_arr,
set_3_name=set_3_name,
set_3_army=set_3_army,
set_3_session_no=set_3_session_no,
set_3_detail_no=set_3_detail_no,
set_4_x=set_4_x_arr,
set_4_y=set_4_y_arr,
set_4_name=set_4_name,
set_4_army=set_4_army,
set_4_session_no=set_4_session_no,
set_4_detail_no=set_4_detail_no
)
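# Prediction endpoint for target lane 3. On POST it runs
# prediction_calculation_3(), collects the stored final_x/final_y points for
# sets 2-4 of target_no == 3 from T_Firer_Details, resolves each set's
# firer / session / detail metadata via the Shooter table, and returns
# everything as a single JSON response. The lane 4-8 handlers below follow
# the same template with only the target number changed.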
@app.route('/prediction_target_3/', methods=['GET', 'POST'])
def prediction_target_3():
t1_x=0
t1_y=0
xmpi_j=0
ympi_j=0
gp=0
Tfirt_x_j=0
Tfirt_y_j=0
fin_x_1=0
fin_y_1=0
xmpi_inch = 0
ympi_inch = 0
result_1=None
fir_tendency=None
set_1_name = None
set_1_army =None
set_1_session_no = None
set_1_detail_no=None
set_1_id =None
set_2_name = None
set_2_army =None
set_2_session_no = None
set_2_detail_no=None
set_2_id =None
set_3_name = None
set_3_army =None
set_3_session_no = None
set_3_detail_no=None
set_3_id =None
set_4_name = None
set_4_army =None
set_4_session_no = None
set_4_detail_no=None
set_4_id =None
fir_tendency_1=None
firer_id=None
current_army_no=None
current_firer_name=None
current_session_no=None
session_detail_no=None
current_detail_no=None
set_2_x=None
set_2_y=None
set_3_x=None
set_3_y=None
set_4_x=None
set_4_y=None
paper_ref=None
sess=None
res=None
set_2_x_arr=[]
set_2_y_arr=[]
set_3_x_arr=[]
set_3_y_arr=[]
set_4_x_arr=[]
set_4_y_arr=[]
fin_x_arr_1=[]
fin_y_arr_1=[]
curdate=time.strftime("%Y-%m-%d")
if request.method == 'POST':
firer_id,sess,o,p,u,q,t1_x,t1_y,xmpi,ympi,f,gp,Tfirt_x,Tfirt_y,fin_x_1,fin_y_1,result_1,fir_tendency_1=prediction_calculation_3()
paper_ref=db.session.query(TPaper_ref.paper_ref).scalar()
set_2_x=db.session.query(T_Firer_Details).filter(T_Firer_Details.date==curdate , T_Firer_Details.target_no==3 ,T_Firer_Details.set_no==2 , T_Firer_Details.paper_ref==paper_ref , T_Firer_Details.session_id==sess).all()
set_2_y=db.session.query(T_Firer_Details).filter(T_Firer_Details.date==curdate , T_Firer_Details.target_no==3 , T_Firer_Details.set_no==2 , T_Firer_Details.paper_ref==paper_ref , T_Firer_Details.session_id==sess).all()
for x_2 in set_2_x:
set_2_x_arr.append(int(x_2.final_x))
for y_2 in set_2_y:
set_2_y_arr.append(int(y_2.final_y))
set_3_x=db.session.query(T_Firer_Details).filter(T_Firer_Details.date==curdate , T_Firer_Details.target_no==3 , T_Firer_Details.set_no==3 , T_Firer_Details.paper_ref==paper_ref , T_Firer_Details.session_id==sess).all()
set_3_y=db.session.query(T_Firer_Details).filter(T_Firer_Details.date==curdate , T_Firer_Details.target_no==3 , T_Firer_Details.set_no==3 , T_Firer_Details.paper_ref==paper_ref , T_Firer_Details.session_id==sess).all()
for x_3 in set_3_x:
set_3_x_arr.append(int(x_3.final_x))
for y_3 in set_3_y:
set_3_y_arr.append(int(y_3.final_y))
set_4_x=db.session.query(T_Firer_Details).filter(T_Firer_Details.date==curdate , T_Firer_Details.target_no==3 , T_Firer_Details.set_no==4 , T_Firer_Details.paper_ref==paper_ref , T_Firer_Details.session_id==sess).all()
set_4_y=db.session.query(T_Firer_Details).filter(T_Firer_Details.date==curdate , T_Firer_Details.target_no==3 , T_Firer_Details.set_no==4 ,T_Firer_Details.paper_ref==paper_ref , T_Firer_Details.session_id==sess).all()
for x_4 in set_4_x:
set_4_x_arr.append(int(x_4.final_x))
for y_4 in set_4_y:
set_4_y_arr.append(int(y_4.final_y))
set_1_id = db.session.query(T_Firer_Details.firer_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==3,
T_Firer_Details.set_no==1,
T_Firer_Details.paper_ref==paper_ref,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_1_session_no=db.session.query(T_Firer_Details.session_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==3,
T_Firer_Details.set_no==1,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_1_detail_no=db.session.query(T_Firer_Details.detail_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==3,
T_Firer_Details.set_no==1,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_1_name=db.session.query(Shooter.name).filter(
Shooter.id==set_1_id
).scalar()
set_1_army=db.session.query(Shooter.service_id).filter(Shooter.id==set_1_id).scalar()
set_2_id = db.session.query(T_Firer_Details.firer_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==3,
T_Firer_Details.set_no==2,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_2_session_no=db.session.query(T_Firer_Details.session_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==3,
T_Firer_Details.set_no==2,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_2_detail_no=db.session.query(T_Firer_Details.detail_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==3,
T_Firer_Details.set_no==2,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_2_name=db.session.query(Shooter.name).filter(
Shooter.id==set_2_id
).scalar()
set_2_army=db.session.query(Shooter.service_id).filter(Shooter.id==set_2_id).scalar()
set_3_id = db.session.query(T_Firer_Details.firer_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==3,
T_Firer_Details.set_no==3,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_3_session_no=db.session.query(T_Firer_Details.session_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==3,
T_Firer_Details.set_no==3,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_3_detail_no=db.session.query(T_Firer_Details.detail_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==3,
T_Firer_Details.set_no==3,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_3_name=db.session.query(Shooter.name).filter(
Shooter.id==set_3_id
).scalar()
set_3_army=db.session.query(Shooter.service_id).filter(Shooter.id==set_3_id).scalar()
set_4_id = db.session.query(T_Firer_Details.firer_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==3,
T_Firer_Details.set_no==4,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_4_session_no=db.session.query(T_Firer_Details.session_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==3,
T_Firer_Details.set_no==4,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_4_detail_no=db.session.query(T_Firer_Details.detail_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==3,
T_Firer_Details.set_no==4,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_4_name=db.session.query(Shooter.name).filter(
Shooter.id==set_4_id
).scalar()
set_4_army=db.session.query(Shooter.service_id).filter(Shooter.id==set_4_id).scalar()
current_firer_name = db.session.query(Shooter.name).filter(Shooter.id==firer_id).scalar()
current_army_no = db.session.query(Shooter.service_id).filter(Shooter.id==firer_id).scalar()
current_session_no=db.session.query(TShooting.session_id).filter(TShooting.target_1_id==firer_id).scalar()
current_detail_no=db.session.query(TShooting.detail_no).filter(TShooting.target_1_id==firer_id).scalar()
xmpi_inch = pixeltoinch(xmpi)
ympi_inch = pixeltoinch(ympi)
xmpi_j =pd.Series(xmpi_inch).to_json(orient='values')
ympi_j =pd.Series(ympi_inch).to_json(orient='values')
Tfirt_x_j =pd.Series(Tfirt_x).to_json(orient='values')
Tfirt_y_j =pd.Series(Tfirt_y).to_json(orient='values')
fin_x_arr_1=[]
fin_y_arr_1=[]
for x_1 in fin_x_1:
fin_x_arr_1.append(int(x_1.final_x))
for y_1 in fin_y_1 :
fin_y_arr_1.append(int(y_1.final_y))
return jsonify(x1=t1_x ,
y1=t1_y ,
xmpi1=Tfirt_x_j ,
ympi1=Tfirt_y_j,
gp=gp,
txf1=Tfirt_x_j,
tyf1=Tfirt_y_j,
fx1=fin_x_arr_1,
fy1=fin_y_arr_1,
result_1=result_1,
fir_tendency_1=fir_tendency_1,
set_1_name=set_1_name,
current_firer_name=current_firer_name,
set_1_army=set_1_army,
current_army_no=current_army_no,
set_1_session_no=set_1_session_no,
current_session_no=current_session_no,
set_1_detail_no=set_1_detail_no,
current_detail_no=current_detail_no,
set_2_x=set_2_x_arr,
set_2_y=set_2_y_arr,
set_2_name=set_2_name,
set_2_army=set_2_army,
set_2_detail_no=set_2_detail_no,
set_2_session_no=set_2_session_no,
set_3_x=set_3_x_arr,
set_3_y=set_3_y_arr,
set_3_name=set_3_name,
set_3_army=set_3_army,
set_3_session_no=set_3_session_no,
set_3_detail_no=set_3_detail_no,
set_4_x=set_4_x_arr,
set_4_y=set_4_y_arr,
set_4_name=set_4_name,
set_4_army=set_4_army,
set_4_session_no=set_4_session_no,
set_4_detail_no=set_4_detail_no
)
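# Lane-4 variant: identical flow to /prediction_target_3/, using
# prediction_calculation_4() and target_no == 4.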
@app.route('/prediction_target_4/', methods=['GET', 'POST'])
def prediction_target_4():
t1_x=0
t1_y=0
xmpi_j=0
ympi_j=0
gp=0
Tfirt_x_j=0
Tfirt_y_j=0
fin_x_1=0
fin_y_1=0
xmpi_inch = 0
ympi_inch = 0
result_1=None
fir_tendency=None
set_1_name = None
set_1_army =None
set_1_session_no = None
set_1_detail_no=None
set_1_id =None
set_2_name = None
set_2_army =None
set_2_session_no = None
set_2_detail_no=None
set_2_id =None
set_3_name = None
set_3_army =None
set_3_session_no = None
set_3_detail_no=None
set_3_id =None
set_4_name = None
set_4_army =None
set_4_session_no = None
set_4_detail_no=None
set_4_id =None
fir_tendency_1=None
firer_id=None
current_army_no=None
current_firer_name=None
current_session_no=None
session_detail_no=None
current_detail_no=None
set_2_x=None
set_2_y=None
set_3_x=None
set_3_y=None
set_4_x=None
set_4_y=None
paper_ref=None
sess=None
res=None
set_2_x_arr=[]
set_2_y_arr=[]
set_3_x_arr=[]
set_3_y_arr=[]
set_4_x_arr=[]
set_4_y_arr=[]
fin_x_arr_1=[]
fin_y_arr_1=[]
curdate=time.strftime("%Y-%m-%d")
if request.method == 'POST':
firer_id,sess,o,p,u,q,t1_x,t1_y,xmpi,ympi,f,gp,Tfirt_x,Tfirt_y,fin_x_1,fin_y_1,result_1,fir_tendency_1=prediction_calculation_4()
paper_ref=db.session.query(TPaper_ref.paper_ref).scalar()
set_2_x=db.session.query(T_Firer_Details).filter(T_Firer_Details.date==curdate , T_Firer_Details.target_no==4 ,T_Firer_Details.set_no==2 , T_Firer_Details.paper_ref==paper_ref , T_Firer_Details.session_id==sess).all()
set_2_y=db.session.query(T_Firer_Details).filter(T_Firer_Details.date==curdate , T_Firer_Details.target_no==4 , T_Firer_Details.set_no==2 , T_Firer_Details.paper_ref==paper_ref , T_Firer_Details.session_id==sess).all()
for x_2 in set_2_x:
set_2_x_arr.append(int(x_2.final_x))
for y_2 in set_2_y:
set_2_y_arr.append(int(y_2.final_y))
set_3_x=db.session.query(T_Firer_Details).filter(T_Firer_Details.date==curdate , T_Firer_Details.target_no==4 , T_Firer_Details.set_no==3 , T_Firer_Details.paper_ref==paper_ref , T_Firer_Details.session_id==sess).all()
set_3_y=db.session.query(T_Firer_Details).filter(T_Firer_Details.date==curdate , T_Firer_Details.target_no==4 , T_Firer_Details.set_no==3 , T_Firer_Details.paper_ref==paper_ref , T_Firer_Details.session_id==sess).all()
for x_3 in set_3_x:
set_3_x_arr.append(int(x_3.final_x))
for y_3 in set_3_y:
set_3_y_arr.append(int(y_3.final_y))
set_4_x=db.session.query(T_Firer_Details).filter(T_Firer_Details.date==curdate , T_Firer_Details.target_no==4 , T_Firer_Details.set_no==4 , T_Firer_Details.paper_ref==paper_ref , T_Firer_Details.session_id==sess).all()
set_4_y=db.session.query(T_Firer_Details).filter(T_Firer_Details.date==curdate , T_Firer_Details.target_no==4 , T_Firer_Details.set_no==4 ,T_Firer_Details.paper_ref==paper_ref , T_Firer_Details.session_id==sess).all()
for x_4 in set_4_x:
set_4_x_arr.append(int(x_4.final_x))
for y_4 in set_4_y:
set_4_y_arr.append(int(y_4.final_y))
set_1_id = db.session.query(T_Firer_Details.firer_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==4,
T_Firer_Details.set_no==1,
T_Firer_Details.paper_ref==paper_ref,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_1_session_no=db.session.query(T_Firer_Details.session_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==4,
T_Firer_Details.set_no==1,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_1_detail_no=db.session.query(T_Firer_Details.detail_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==4,
T_Firer_Details.set_no==1,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_1_name=db.session.query(Shooter.name).filter(
Shooter.id==set_1_id
).scalar()
set_1_army=db.session.query(Shooter.service_id).filter(Shooter.id==set_1_id).scalar()
set_2_id = db.session.query(T_Firer_Details.firer_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==4,
T_Firer_Details.set_no==2,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_2_session_no=db.session.query(T_Firer_Details.session_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==4,
T_Firer_Details.set_no==2,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_2_detail_no=db.session.query(T_Firer_Details.detail_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==4,
T_Firer_Details.set_no==2,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_2_name=db.session.query(Shooter.name).filter(
Shooter.id==set_2_id
).scalar()
set_2_army=db.session.query(Shooter.service_id).filter(Shooter.id==set_2_id).scalar()
set_3_id = db.session.query(T_Firer_Details.firer_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==4,
T_Firer_Details.set_no==3,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_3_session_no=db.session.query(T_Firer_Details.session_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==4,
T_Firer_Details.set_no==3,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_3_detail_no=db.session.query(T_Firer_Details.detail_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==4,
T_Firer_Details.set_no==3,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_3_name=db.session.query(Shooter.name).filter(
Shooter.id==set_3_id
).scalar()
set_3_army=db.session.query(Shooter.service_id).filter(Shooter.id==set_3_id).scalar()
set_4_id = db.session.query(T_Firer_Details.firer_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==4,
T_Firer_Details.set_no==4,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_4_session_no=db.session.query(T_Firer_Details.session_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==4,
T_Firer_Details.set_no==4,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_4_detail_no=db.session.query(T_Firer_Details.detail_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==4,
T_Firer_Details.set_no==4,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_4_name=db.session.query(Shooter.name).filter(
Shooter.id==set_4_id
).scalar()
set_4_army=db.session.query(Shooter.service_id).filter(Shooter.id==set_4_id).scalar()
current_firer_name = db.session.query(Shooter.name).filter(Shooter.id==firer_id).scalar()
current_army_no = db.session.query(Shooter.service_id).filter(Shooter.id==firer_id).scalar()
current_session_no=db.session.query(TShooting.session_id).filter(TShooting.target_1_id==firer_id).scalar()
current_detail_no=db.session.query(TShooting.detail_no).filter(TShooting.target_1_id==firer_id).scalar()
xmpi_inch = pixeltoinch(xmpi)
ympi_inch = pixeltoinch(ympi)
xmpi_j =pd.Series(xmpi_inch).to_json(orient='values')
ympi_j =pd.Series(ympi_inch).to_json(orient='values')
Tfirt_x_j =pd.Series(Tfirt_x).to_json(orient='values')
Tfirt_y_j =pd.Series(Tfirt_y).to_json(orient='values')
for x_1 in fin_x_1:
fin_x_arr_1.append(int(x_1.final_x))
for y_1 in fin_y_1 :
fin_y_arr_1.append(int(y_1.final_y))
return jsonify(x1=t1_x ,
y1=t1_y ,
xmpi1=Tfirt_x_j ,
ympi1=Tfirt_y_j,
gp=gp,
txf1=Tfirt_x_j,
tyf1=Tfirt_y_j,
fx1=fin_x_arr_1,
fy1=fin_y_arr_1,
result_1=result_1,
fir_tendency_1=fir_tendency_1,
set_1_name=set_1_name,
current_firer_name=current_firer_name,
set_1_army=set_1_army,
current_army_no=current_army_no,
set_1_session_no=set_1_session_no,
current_session_no=current_session_no,
set_1_detail_no=set_1_detail_no,
current_detail_no=current_detail_no,
set_2_x=set_2_x_arr,
set_2_y=set_2_y_arr,
set_2_name=set_2_name,
set_2_army=set_2_army,
set_2_detail_no=set_2_detail_no,
set_2_session_no=set_2_session_no,
set_3_x=set_3_x_arr,
set_3_y=set_3_y_arr,
set_3_name=set_3_name,
set_3_army=set_3_army,
set_3_session_no=set_3_session_no,
set_3_detail_no=set_3_detail_no,
set_4_x=set_4_x_arr,
set_4_y=set_4_y_arr,
set_4_name=set_4_name,
set_4_army=set_4_army,
set_4_session_no=set_4_session_no,
set_4_detail_no=set_4_detail_no
)
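# Lane-5 variant of the prediction endpoint (prediction_calculation_5(), target_no == 5).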
@app.route('/prediction_target_5/', methods=['GET', 'POST'])
def prediction_target_5():
t1_x=0
t1_y=0
xmpi_j=0
ympi_j=0
gp=0
Tfirt_x_j=0
Tfirt_y_j=0
fin_x_1=0
fin_y_1=0
xmpi_inch = 0
ympi_inch = 0
result_1=None
fir_tendency=None
set_1_name = None
set_1_army =None
set_1_session_no = None
set_1_detail_no=None
set_1_id =None
set_2_name = None
set_2_army =None
set_2_session_no = None
set_2_detail_no=None
set_2_id =None
set_3_name = None
set_3_army =None
set_3_session_no = None
set_3_detail_no=None
set_3_id =None
set_4_name = None
set_4_army =None
set_4_session_no = None
set_4_detail_no=None
set_4_id =None
fir_tendency_1=None
firer_id=None
current_army_no=None
current_firer_name=None
current_session_no=None
session_detail_no=None
current_detail_no=None
set_2_x=None
set_2_y=None
set_3_x=None
set_3_y=None
set_4_x=None
set_4_y=None
paper_ref=None
sess=None
res=None
set_2_x_arr=[]
set_2_y_arr=[]
set_3_x_arr=[]
set_3_y_arr=[]
set_4_x_arr=[]
set_4_y_arr=[]
fin_x_arr_1=[]
fin_y_arr_1=[]
curdate=time.strftime("%Y-%m-%d")
if request.method == 'POST':
firer_id,sess,o,p,u,q,t1_x,t1_y,xmpi,ympi,f,gp,Tfirt_x,Tfirt_y,fin_x_1,fin_y_1,result_1,fir_tendency_1=prediction_calculation_5()
paper_ref=db.session.query(TPaper_ref.paper_ref).scalar()
set_2_x=db.session.query(T_Firer_Details).filter(T_Firer_Details.date==curdate , T_Firer_Details.target_no==5 ,T_Firer_Details.set_no==2 , T_Firer_Details.paper_ref==paper_ref , T_Firer_Details.session_id==sess).all()
set_2_y=db.session.query(T_Firer_Details).filter(T_Firer_Details.date==curdate , T_Firer_Details.target_no==5 , T_Firer_Details.set_no==2 , T_Firer_Details.paper_ref==paper_ref , T_Firer_Details.session_id==sess).all()
for x_2 in set_2_x:
set_2_x_arr.append(int(x_2.final_x))
for y_2 in set_2_y:
set_2_y_arr.append(int(y_2.final_y))
set_3_x=db.session.query(T_Firer_Details).filter(T_Firer_Details.date==curdate , T_Firer_Details.target_no==5 , T_Firer_Details.set_no==3 , T_Firer_Details.paper_ref==paper_ref , T_Firer_Details.session_id==sess).all()
set_3_y=db.session.query(T_Firer_Details).filter(T_Firer_Details.date==curdate , T_Firer_Details.target_no==5 , T_Firer_Details.set_no==3 , T_Firer_Details.paper_ref==paper_ref , T_Firer_Details.session_id==sess).all()
for x_3 in set_3_x:
set_3_x_arr.append(int(x_3.final_x))
for y_3 in set_3_y:
set_3_y_arr.append(int(y_3.final_y))
set_4_x=db.session.query(T_Firer_Details).filter(T_Firer_Details.date==curdate , T_Firer_Details.target_no==5 , T_Firer_Details.set_no==4 , T_Firer_Details.paper_ref==paper_ref , T_Firer_Details.session_id==sess).all()
set_4_y=db.session.query(T_Firer_Details).filter(T_Firer_Details.date==curdate , T_Firer_Details.target_no==5 , T_Firer_Details.set_no==4 ,T_Firer_Details.paper_ref==paper_ref , T_Firer_Details.session_id==sess).all()
for x_4 in set_4_x:
set_4_x_arr.append(int(x_4.final_x))
for y_4 in set_4_y:
set_4_y_arr.append(int(y_4.final_y))
set_1_id = db.session.query(T_Firer_Details.firer_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==5,
T_Firer_Details.set_no==1,
T_Firer_Details.paper_ref==paper_ref,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_1_session_no=db.session.query(T_Firer_Details.session_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==5,
T_Firer_Details.set_no==1,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_1_detail_no=db.session.query(T_Firer_Details.detail_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==5,
T_Firer_Details.set_no==1,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_1_name=db.session.query(Shooter.name).filter(
Shooter.id==set_1_id
).scalar()
set_1_army=db.session.query(Shooter.service_id).filter(Shooter.id==set_1_id).scalar()
set_2_id = db.session.query(T_Firer_Details.firer_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==5,
T_Firer_Details.set_no==2,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_2_session_no=db.session.query(T_Firer_Details.session_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==5,
T_Firer_Details.set_no==2,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_2_detail_no=db.session.query(T_Firer_Details.detail_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==5,
T_Firer_Details.set_no==2,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_2_name=db.session.query(Shooter.name).filter(
Shooter.id==set_2_id
).scalar()
set_2_army=db.session.query(Shooter.service_id).filter(Shooter.id==set_2_id).scalar()
set_3_id = db.session.query(T_Firer_Details.firer_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==5,
T_Firer_Details.set_no==3,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_3_session_no=db.session.query(T_Firer_Details.session_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==5,
T_Firer_Details.set_no==3,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_3_detail_no=db.session.query(T_Firer_Details.detail_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==5,
T_Firer_Details.set_no==3,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_3_name=db.session.query(Shooter.name).filter(
Shooter.id==set_3_id
).scalar()
set_3_army=db.session.query(Shooter.service_id).filter(Shooter.id==set_3_id).scalar()
set_4_id = db.session.query(T_Firer_Details.firer_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==5,
T_Firer_Details.set_no==4,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_4_session_no=db.session.query(T_Firer_Details.session_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==5,
T_Firer_Details.set_no==4,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_4_detail_no=db.session.query(T_Firer_Details.detail_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==5,
T_Firer_Details.set_no==4,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_4_name=db.session.query(Shooter.name).filter(
Shooter.id==set_4_id
).scalar()
set_4_army=db.session.query(Shooter.service_id).filter(Shooter.id==set_4_id).scalar()
current_firer_name = db.session.query(Shooter.name).filter(Shooter.id==firer_id).scalar()
current_army_no = db.session.query(Shooter.service_id).filter(Shooter.id==firer_id).scalar()
current_session_no=db.session.query(TShooting.session_id).filter(TShooting.target_1_id==firer_id).scalar()
current_detail_no=db.session.query(TShooting.detail_no).filter(TShooting.target_1_id==firer_id).scalar()
xmpi_inch = pixeltoinch(xmpi)
ympi_inch = pixeltoinch(ympi)
xmpi_j =pd.Series(xmpi_inch).to_json(orient='values')
ympi_j =pd.Series(ympi_inch).to_json(orient='values')
Tfirt_x_j =pd.Series(Tfirt_x).to_json(orient='values')
Tfirt_y_j =pd.Series(Tfirt_y).to_json(orient='values')
for x_1 in fin_x_1:
fin_x_arr_1.append(int(x_1.final_x))
for y_1 in fin_y_1 :
fin_y_arr_1.append(int(y_1.final_y))
return jsonify(x1=t1_x ,
y1=t1_y ,
xmpi1=Tfirt_x_j ,
ympi1=Tfirt_y_j,
gp=gp,
txf1=Tfirt_x_j,
tyf1=Tfirt_y_j,
fx1=fin_x_arr_1,
fy1=fin_y_arr_1,
result_1=result_1,
fir_tendency_1=fir_tendency_1,
set_1_name=set_1_name,
current_firer_name=current_firer_name,
set_1_army=set_1_army,
current_army_no=current_army_no,
set_1_session_no=set_1_session_no,
current_session_no=current_session_no,
set_1_detail_no=set_1_detail_no,
current_detail_no=current_detail_no,
set_2_x=set_2_x_arr,
set_2_y=set_2_y_arr,
set_2_name=set_2_name,
set_2_army=set_2_army,
set_2_detail_no=set_2_detail_no,
set_2_session_no=set_2_session_no,
set_3_x=set_3_x_arr,
set_3_y=set_3_y_arr,
set_3_name=set_3_name,
set_3_army=set_3_army,
set_3_session_no=set_3_session_no,
set_3_detail_no=set_3_detail_no,
set_4_x=set_4_x_arr,
set_4_y=set_4_y_arr,
set_4_name=set_4_name,
set_4_army=set_4_army,
set_4_session_no=set_4_session_no,
set_4_detail_no=set_4_detail_no
)
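# Lane-6 variant of the prediction endpoint (prediction_calculation_6(), target_no == 6).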
@app.route('/prediction_target_6/', methods=['GET', 'POST'])
def prediction_target_6():
t1_x=0
t1_y=0
xmpi_j=0
ympi_j=0
gp=0
Tfirt_x_j=0
Tfirt_y_j=0
fin_x_1=0
fin_y_1=0
xmpi_inch = 0
ympi_inch = 0
result_1=None
fir_tendency=None
set_1_name = None
set_1_army =None
set_1_session_no = None
set_1_detail_no=None
set_1_id =None
set_2_name = None
set_2_army =None
set_2_session_no = None
set_2_detail_no=None
set_2_id =None
set_3_name = None
set_3_army =None
set_3_session_no = None
set_3_detail_no=None
set_3_id =None
set_4_name = None
set_4_army =None
set_4_session_no = None
set_4_detail_no=None
set_4_id =None
fir_tendency_1=None
firer_id=None
current_army_no=None
current_firer_name=None
current_session_no=None
session_detail_no=None
current_detail_no=None
set_2_x=None
set_2_y=None
set_3_x=None
set_3_y=None
set_4_x=None
set_4_y=None
paper_ref=None
sess=None
res=None
set_2_x_arr=[]
set_2_y_arr=[]
set_3_x_arr=[]
set_3_y_arr=[]
set_4_x_arr=[]
set_4_y_arr=[]
fin_x_arr_1=[]
fin_y_arr_1=[]
curdate=time.strftime("%Y-%m-%d")
if request.method == 'POST':
firer_id,sess,o,p,u,q,t1_x,t1_y,xmpi,ympi,f,gp,Tfirt_x,Tfirt_y,fin_x_1,fin_y_1,result_1,fir_tendency_1=prediction_calculation_6()
paper_ref=db.session.query(TPaper_ref.paper_ref).scalar()
set_2_x=db.session.query(T_Firer_Details).filter(T_Firer_Details.date==curdate , T_Firer_Details.target_no==6 ,T_Firer_Details.set_no==2 , T_Firer_Details.paper_ref==paper_ref , T_Firer_Details.session_id==sess).all()
set_2_y=db.session.query(T_Firer_Details).filter(T_Firer_Details.date==curdate , T_Firer_Details.target_no==6 , T_Firer_Details.set_no==2 , T_Firer_Details.paper_ref==paper_ref , T_Firer_Details.session_id==sess).all()
for x_2 in set_2_x:
set_2_x_arr.append(int(x_2.final_x))
for y_2 in set_2_y:
set_2_y_arr.append(int(y_2.final_y))
set_3_x=db.session.query(T_Firer_Details).filter(T_Firer_Details.date==curdate , T_Firer_Details.target_no==6 , T_Firer_Details.set_no==3 , T_Firer_Details.paper_ref==paper_ref , T_Firer_Details.session_id==sess).all()
set_3_y=db.session.query(T_Firer_Details).filter(T_Firer_Details.date==curdate , T_Firer_Details.target_no==6 , T_Firer_Details.set_no==3 , T_Firer_Details.paper_ref==paper_ref , T_Firer_Details.session_id==sess).all()
for x_3 in set_3_x:
set_3_x_arr.append(int(x_3.final_x))
for y_3 in set_3_y:
set_3_y_arr.append(int(y_3.final_y))
set_4_x=db.session.query(T_Firer_Details).filter(T_Firer_Details.date==curdate , T_Firer_Details.target_no==6 , T_Firer_Details.set_no==4 , T_Firer_Details.paper_ref==paper_ref , T_Firer_Details.session_id==sess).all()
set_4_y=db.session.query(T_Firer_Details).filter(T_Firer_Details.date==curdate , T_Firer_Details.target_no==6 , T_Firer_Details.set_no==4 ,T_Firer_Details.paper_ref==paper_ref , T_Firer_Details.session_id==sess).all()
for x_4 in set_4_x:
set_4_x_arr.append(int(x_4.final_x))
for y_4 in set_4_y:
set_4_y_arr.append(int(y_4.final_y))
set_1_id = db.session.query(T_Firer_Details.firer_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==6,
T_Firer_Details.set_no==1,
T_Firer_Details.paper_ref==paper_ref,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_1_session_no=db.session.query(T_Firer_Details.session_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==6,
T_Firer_Details.set_no==1,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_1_detail_no=db.session.query(T_Firer_Details.detail_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==6,
T_Firer_Details.set_no==1,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_1_name=db.session.query(Shooter.name).filter(
Shooter.id==set_1_id
).scalar()
set_1_army=db.session.query(Shooter.service_id).filter(Shooter.id==set_1_id).scalar()
set_2_id = db.session.query(T_Firer_Details.firer_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==6,
T_Firer_Details.set_no==2,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_2_session_no=db.session.query(T_Firer_Details.session_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==6,
T_Firer_Details.set_no==2,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_2_detail_no=db.session.query(T_Firer_Details.detail_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==6,
T_Firer_Details.set_no==2,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_2_name=db.session.query(Shooter.name).filter(
Shooter.id==set_2_id
).scalar()
set_2_army=db.session.query(Shooter.service_id).filter(Shooter.id==set_2_id).scalar()
set_3_id = db.session.query(T_Firer_Details.firer_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==6,
T_Firer_Details.set_no==3,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_3_session_no=db.session.query(T_Firer_Details.session_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==6,
T_Firer_Details.set_no==3,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_3_detail_no=db.session.query(T_Firer_Details.detail_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==6,
T_Firer_Details.set_no==3,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_3_name=db.session.query(Shooter.name).filter(
Shooter.id==set_3_id
).scalar()
set_3_army=db.session.query(Shooter.service_id).filter(Shooter.id==set_3_id).scalar()
set_4_id = db.session.query(T_Firer_Details.firer_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==6,
T_Firer_Details.set_no==4,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_4_session_no=db.session.query(T_Firer_Details.session_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==6,
T_Firer_Details.set_no==4,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_4_detail_no=db.session.query(T_Firer_Details.detail_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==6,
T_Firer_Details.set_no==4,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_4_name=db.session.query(Shooter.name).filter(
Shooter.id==set_4_id
).scalar()
set_4_army=db.session.query(Shooter.service_id).filter(Shooter.id==set_4_id).scalar()
current_firer_name = db.session.query(Shooter.name).filter(Shooter.id==firer_id).scalar()
current_army_no = db.session.query(Shooter.service_id).filter(Shooter.id==firer_id).scalar()
current_session_no=db.session.query(TShooting.session_id).filter(TShooting.target_1_id==firer_id).scalar()
current_detail_no=db.session.query(TShooting.detail_no).filter(TShooting.target_1_id==firer_id).scalar()
xmpi_inch = pixeltoinch(xmpi)
ympi_inch = pixeltoinch(ympi)
xmpi_j =pd.Series(xmpi_inch).to_json(orient='values')
ympi_j =pd.Series(ympi_inch).to_json(orient='values')
Tfirt_x_j =pd.Series(Tfirt_x).to_json(orient='values')
Tfirt_y_j =pd.Series(Tfirt_y).to_json(orient='values')
for x_1 in fin_x_1:
fin_x_arr_1.append(int(x_1.final_x))
for y_1 in fin_y_1 :
fin_y_arr_1.append(int(y_1.final_y))
return jsonify(x1=t1_x ,
y1=t1_y ,
xmpi1=Tfirt_x_j ,
ympi1=Tfirt_y_j,
gp=gp,
txf1=Tfirt_x_j,
tyf1=Tfirt_y_j,
fx1=fin_x_arr_1,
fy1=fin_y_arr_1,
result_1=result_1,
fir_tendency_1=fir_tendency_1,
set_1_name=set_1_name,
current_firer_name=current_firer_name,
set_1_army=set_1_army,
current_army_no=current_army_no,
set_1_session_no=set_1_session_no,
current_session_no=current_session_no,
set_1_detail_no=set_1_detail_no,
current_detail_no=current_detail_no,
set_2_x=set_2_x_arr,
set_2_y=set_2_y_arr,
set_2_name=set_2_name,
set_2_army=set_2_army,
set_2_detail_no=set_2_detail_no,
set_2_session_no=set_2_session_no,
set_3_x=set_3_x_arr,
set_3_y=set_3_y_arr,
set_3_name=set_3_name,
set_3_army=set_3_army,
set_3_session_no=set_3_session_no,
set_3_detail_no=set_3_detail_no,
set_4_x=set_4_x_arr,
set_4_y=set_4_y_arr,
set_4_name=set_4_name,
set_4_army=set_4_army,
set_4_session_no=set_4_session_no,
set_4_detail_no=set_4_detail_no
)
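# Lane-7 variant of the prediction endpoint (prediction_calculation_7(), target_no == 7).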
@app.route('/prediction_target_7/', methods=['GET', 'POST'])
def prediction_target_7():
t1_x=0
t1_y=0
xmpi_j=0
ympi_j=0
gp=0
Tfirt_x_j=0
Tfirt_y_j=0
fin_x_1=0
fin_y_1=0
xmpi_inch = 0
ympi_inch = 0
result_1=None
fir_tendency=None
set_1_name = None
set_1_army =None
set_1_session_no = None
set_1_detail_no=None
set_1_id =None
set_2_name = None
set_2_army =None
set_2_session_no = None
set_2_detail_no=None
set_2_id =None
set_3_name = None
set_3_army =None
set_3_session_no = None
set_3_detail_no=None
set_3_id =None
set_4_name = None
set_4_army =None
set_4_session_no = None
set_4_detail_no=None
set_4_id =None
fir_tendency_1=None
firer_id=None
current_army_no=None
current_firer_name=None
current_session_no=None
session_detail_no=None
current_detail_no=None
set_2_x=None
set_2_y=None
set_3_x=None
set_3_y=None
set_4_x=None
set_4_y=None
paper_ref=None
sess=None
res=None
set_2_x_arr=[]
set_2_y_arr=[]
set_3_x_arr=[]
set_3_y_arr=[]
set_4_x_arr=[]
set_4_y_arr=[]
fin_x_arr_1=[]
fin_y_arr_1=[]
curdate=time.strftime("%Y-%m-%d")
if request.method == 'POST':
firer_id,sess,o,p,u,q,t1_x,t1_y,xmpi,ympi,f,gp,Tfirt_x,Tfirt_y,fin_x_1,fin_y_1,result_1,fir_tendency_1=prediction_calculation_7()
paper_ref=db.session.query(TPaper_ref.paper_ref).scalar()
set_2_x=db.session.query(T_Firer_Details).filter(T_Firer_Details.date==curdate , T_Firer_Details.target_no==7,T_Firer_Details.set_no==2 , T_Firer_Details.paper_ref==paper_ref , T_Firer_Details.session_id==sess).all()
set_2_y=db.session.query(T_Firer_Details).filter(T_Firer_Details.date==curdate , T_Firer_Details.target_no==7 , T_Firer_Details.set_no==2 , T_Firer_Details.paper_ref==paper_ref , T_Firer_Details.session_id==sess).all()
for x_2 in set_2_x:
set_2_x_arr.append(int(x_2.final_x))
for y_2 in set_2_y:
set_2_y_arr.append(int(y_2.final_y))
set_3_x=db.session.query(T_Firer_Details).filter(T_Firer_Details.date==curdate , T_Firer_Details.target_no==7 , T_Firer_Details.set_no==3 , T_Firer_Details.paper_ref==paper_ref , T_Firer_Details.session_id==sess).all()
set_3_y=db.session.query(T_Firer_Details).filter(T_Firer_Details.date==curdate , T_Firer_Details.target_no==7 , T_Firer_Details.set_no==3 , T_Firer_Details.paper_ref==paper_ref , T_Firer_Details.session_id==sess).all()
for x_3 in set_3_x:
set_3_x_arr.append(int(x_3.final_x))
for y_3 in set_3_y:
set_3_y_arr.append(int(y_3.final_y))
set_4_x=db.session.query(T_Firer_Details).filter(T_Firer_Details.date==curdate , T_Firer_Details.target_no==7 , T_Firer_Details.set_no==4 , T_Firer_Details.paper_ref==paper_ref , T_Firer_Details.session_id==sess).all()
set_4_y=db.session.query(T_Firer_Details).filter(T_Firer_Details.date==curdate , T_Firer_Details.target_no==7 , T_Firer_Details.set_no==4 ,T_Firer_Details.paper_ref==paper_ref , T_Firer_Details.session_id==sess).all()
for x_4 in set_4_x:
set_4_x_arr.append(int(x_4.final_x))
for y_4 in set_4_y:
set_4_y_arr.append(int(y_4.final_y))
set_1_id = db.session.query(T_Firer_Details.firer_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==7,
T_Firer_Details.set_no==1,
T_Firer_Details.paper_ref==paper_ref,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_1_session_no=db.session.query(T_Firer_Details.session_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==7,
T_Firer_Details.set_no==1,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_1_detail_no=db.session.query(T_Firer_Details.detail_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==7,
T_Firer_Details.set_no==1,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_1_name=db.session.query(Shooter.name).filter(
Shooter.id==set_1_id
).scalar()
set_1_army=db.session.query(Shooter.service_id).filter(Shooter.id==set_1_id).scalar()
set_2_id = db.session.query(T_Firer_Details.firer_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==7,
T_Firer_Details.set_no==2,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_2_session_no=db.session.query(T_Firer_Details.session_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==7,
T_Firer_Details.set_no==2,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_2_detail_no=db.session.query(T_Firer_Details.detail_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==7,
T_Firer_Details.set_no==2,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_2_name=db.session.query(Shooter.name).filter(
Shooter.id==set_2_id
).scalar()
set_2_army=db.session.query(Shooter.service_id).filter(Shooter.id==set_2_id).scalar()
set_3_id = db.session.query(T_Firer_Details.firer_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==7,
T_Firer_Details.set_no==3,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_3_session_no=db.session.query(T_Firer_Details.session_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==7,
T_Firer_Details.set_no==3,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_3_detail_no=db.session.query(T_Firer_Details.detail_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==7,
T_Firer_Details.set_no==3,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_3_name=db.session.query(Shooter.name).filter(
Shooter.id==set_3_id
).scalar()
set_3_army=db.session.query(Shooter.service_id).filter(Shooter.id==set_3_id).scalar()
set_4_id = db.session.query(T_Firer_Details.firer_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==7,
T_Firer_Details.set_no==4,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_4_session_no=db.session.query(T_Firer_Details.session_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==7,
T_Firer_Details.set_no==4,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_4_detail_no=db.session.query(T_Firer_Details.detail_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==7,
T_Firer_Details.set_no==4,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_4_name=db.session.query(Shooter.name).filter(
Shooter.id==set_4_id
).scalar()
set_4_army=db.session.query(Shooter.service_id).filter(Shooter.id==set_4_id).scalar()
current_firer_name = db.session.query(Shooter.name).filter(Shooter.id==firer_id).scalar()
current_army_no = db.session.query(Shooter.service_id).filter(Shooter.id==firer_id).scalar()
current_session_no=db.session.query(TShooting.session_id).filter(TShooting.target_1_id==firer_id).scalar()
current_detail_no=db.session.query(TShooting.detail_no).filter(TShooting.target_1_id==firer_id).scalar()
xmpi_inch = pixeltoinch(xmpi)
ympi_inch = pixeltoinch(ympi)
xmpi_j =pd.Series(xmpi_inch).to_json(orient='values')
ympi_j =pd.Series(ympi_inch).to_json(orient='values')
Tfirt_x_j =pd.Series(Tfirt_x).to_json(orient='values')
Tfirt_y_j =pd.Series(Tfirt_y).to_json(orient='values')
for x_1 in fin_x_1:
fin_x_arr_1.append(int(x_1.final_x))
for y_1 in fin_y_1 :
fin_y_arr_1.append(int(y_1.final_y))
return jsonify(x1=t1_x ,
y1=t1_y ,
xmpi1=Tfirt_x_j ,
ympi1=Tfirt_y_j,
gp=gp,
txf1=Tfirt_x_j,
tyf1=Tfirt_y_j,
fx1=fin_x_arr_1,
fy1=fin_y_arr_1,
result_1=result_1,
fir_tendency_1=fir_tendency_1,
set_1_name=set_1_name,
current_firer_name=current_firer_name,
set_1_army=set_1_army,
current_army_no=current_army_no,
set_1_session_no=set_1_session_no,
current_session_no=current_session_no,
set_1_detail_no=set_1_detail_no,
current_detail_no=current_detail_no,
set_2_x=set_2_x_arr,
set_2_y=set_2_y_arr,
set_2_name=set_2_name,
set_2_army=set_2_army,
set_2_detail_no=set_2_detail_no,
set_2_session_no=set_2_session_no,
set_3_x=set_3_x_arr,
set_3_y=set_3_y_arr,
set_3_name=set_3_name,
set_3_army=set_3_army,
set_3_session_no=set_3_session_no,
set_3_detail_no=set_3_detail_no,
set_4_x=set_4_x_arr,
set_4_y=set_4_y_arr,
set_4_name=set_4_name,
set_4_army=set_4_army,
set_4_session_no=set_4_session_no,
set_4_detail_no=set_4_detail_no
)
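# Lane-8 variant of the prediction endpoint (prediction_calculation_8(), target_no == 8).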
@app.route('/prediction_target_8/', methods=['GET', 'POST'])
def prediction_target_8():
t1_x=0
t1_y=0
xmpi_j=0
ympi_j=0
gp=0
Tfirt_x_j=0
Tfirt_y_j=0
fin_x_1=0
fin_y_1=0
xmpi_inch = 0
ympi_inch = 0
result_1=None
fir_tendency=None
set_1_name = None
set_1_army =None
set_1_session_no = None
set_1_detail_no=None
set_1_id =None
set_2_name = None
set_2_army =None
set_2_session_no = None
set_2_detail_no=None
set_2_id =None
set_3_name = None
set_3_army =None
set_3_session_no = None
set_3_detail_no=None
set_3_id =None
set_4_name = None
set_4_army =None
set_4_session_no = None
set_4_detail_no=None
set_4_id =None
fir_tendency_1=None
firer_id=None
current_army_no=None
current_firer_name=None
current_session_no=None
session_detail_no=None
current_detail_no=None
set_2_x=None
set_2_y=None
set_3_x=None
set_3_y=None
set_4_x=None
set_4_y=None
paper_ref=None
sess=None
res=None
set_2_x_arr=[]
set_2_y_arr=[]
set_3_x_arr=[]
set_3_y_arr=[]
set_4_x_arr=[]
set_4_y_arr=[]
fin_x_arr_1=[]
fin_y_arr_1=[]
curdate=time.strftime("%Y-%m-%d")
if request.method == 'POST':
firer_id,sess,o,p,u,q,t1_x,t1_y,xmpi,ympi,f,gp,Tfirt_x,Tfirt_y,fin_x_1,fin_y_1,result_1,fir_tendency_1=prediction_calculation_8()
paper_ref=db.session.query(TPaper_ref.paper_ref).scalar()
set_2_x=db.session.query(T_Firer_Details).filter(T_Firer_Details.date==curdate , T_Firer_Details.target_no==8,T_Firer_Details.set_no==2 , T_Firer_Details.paper_ref==paper_ref , T_Firer_Details.session_id==sess).all()
set_2_y=db.session.query(T_Firer_Details).filter(T_Firer_Details.date==curdate , T_Firer_Details.target_no==8 , T_Firer_Details.set_no==2 , T_Firer_Details.paper_ref==paper_ref , T_Firer_Details.session_id==sess).all()
for x_2 in set_2_x:
set_2_x_arr.append(int(x_2.final_x))
for y_2 in set_2_y:
set_2_y_arr.append(int(y_2.final_y))
set_3_x=db.session.query(T_Firer_Details).filter(T_Firer_Details.date==curdate , T_Firer_Details.target_no==8 , T_Firer_Details.set_no==3 , T_Firer_Details.paper_ref==paper_ref , T_Firer_Details.session_id==sess).all()
set_3_y=db.session.query(T_Firer_Details).filter(T_Firer_Details.date==curdate , T_Firer_Details.target_no==8 , T_Firer_Details.set_no==3 , T_Firer_Details.paper_ref==paper_ref , T_Firer_Details.session_id==sess).all()
for x_3 in set_3_x:
set_3_x_arr.append(int(x_3.final_x))
for y_3 in set_3_y:
set_3_y_arr.append(int(y_3.final_y))
set_4_x=db.session.query(T_Firer_Details).filter(T_Firer_Details.date==curdate , T_Firer_Details.target_no==8 , T_Firer_Details.set_no==4 , T_Firer_Details.paper_ref==paper_ref , T_Firer_Details.session_id==sess).all()
set_4_y=db.session.query(T_Firer_Details).filter(T_Firer_Details.date==curdate , T_Firer_Details.target_no==8 , T_Firer_Details.set_no==4 ,T_Firer_Details.paper_ref==paper_ref , T_Firer_Details.session_id==sess).all()
for x_4 in set_4_x:
set_4_x_arr.append(int(x_4.final_x))
for y_4 in set_4_y:
set_4_y_arr.append(int(y_4.final_y))
set_1_id = db.session.query(T_Firer_Details.firer_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==8,
T_Firer_Details.set_no==1,
T_Firer_Details.paper_ref==paper_ref,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_1_session_no=db.session.query(T_Firer_Details.session_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==8,
T_Firer_Details.set_no==1,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_1_detail_no=db.session.query(T_Firer_Details.detail_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==8,
T_Firer_Details.set_no==1,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_1_name=db.session.query(Shooter.name).filter(
Shooter.id==set_1_id
).scalar()
set_1_army=db.session.query(Shooter.service_id).filter(Shooter.id==set_1_id).scalar()
set_2_id = db.session.query(T_Firer_Details.firer_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==8,
T_Firer_Details.set_no==2,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_2_session_no=db.session.query(T_Firer_Details.session_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==8,
T_Firer_Details.set_no==2,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_2_detail_no=db.session.query(T_Firer_Details.detail_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==8,
T_Firer_Details.set_no==2,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_2_name=db.session.query(Shooter.name).filter(
Shooter.id==set_2_id
).scalar()
set_2_army=db.session.query(Shooter.service_id).filter(Shooter.id==set_2_id).scalar()
set_3_id = db.session.query(T_Firer_Details.firer_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==8,
T_Firer_Details.set_no==3,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_3_session_no=db.session.query(T_Firer_Details.session_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==8,
T_Firer_Details.set_no==3,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_3_detail_no=db.session.query(T_Firer_Details.detail_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==8,
T_Firer_Details.set_no==3,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_3_name=db.session.query(Shooter.name).filter(
Shooter.id==set_3_id
).scalar()
set_3_army=db.session.query(Shooter.service_id).filter(Shooter.id==set_3_id).scalar()
set_4_id = db.session.query(T_Firer_Details.firer_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==8,
T_Firer_Details.set_no==4,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_4_session_no=db.session.query(T_Firer_Details.session_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==8,
T_Firer_Details.set_no==4,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_4_detail_no=db.session.query(T_Firer_Details.detail_id).filter(T_Firer_Details.date==curdate,
T_Firer_Details.target_no==8,
T_Firer_Details.set_no==4,
T_Firer_Details.paper_ref==paper_ref ,
T_Firer_Details.session_id==sess
).distinct().scalar()
set_4_name=db.session.query(Shooter.name).filter(
Shooter.id==set_4_id
).scalar()
set_4_army=db.session.query(Shooter.service_id).filter(Shooter.id==set_4_id).scalar()
current_firer_name = db.session.query(Shooter.name).filter(Shooter.id==firer_id).scalar()
current_army_no = db.session.query(Shooter.service_id).filter(Shooter.id==firer_id).scalar()
current_session_no=db.session.query(TShooting.session_id).filter(TShooting.target_1_id==firer_id).scalar()
current_detail_no=db.session.query(TShooting.detail_no).filter(TShooting.target_1_id==firer_id).scalar()
xmpi_inch = pixeltoinch(xmpi)
ympi_inch = pixeltoinch(ympi)
xmpi_j =pd.Series(xmpi_inch).to_json(orient='values')
ympi_j =pd.Series(ympi_inch).to_json(orient='values')
Tfirt_x_j =pd.Series(Tfirt_x).to_json(orient='values')
Tfirt_y_j =pd.Series(Tfirt_y).to_json(orient='values')
for x_1 in fin_x_1:
fin_x_arr_1.append(int(x_1.final_x))
for y_1 in fin_y_1 :
fin_y_arr_1.append(int(y_1.final_y))
return jsonify(x1=t1_x ,
y1=t1_y ,
xmpi1=Tfirt_x_j ,
ympi1=Tfirt_y_j,
gp=gp,
txf1=Tfirt_x_j,
tyf1=Tfirt_y_j,
fx1=fin_x_arr_1,
fy1=fin_y_arr_1,
result_1=result_1,
fir_tendency_1=fir_tendency_1,
set_1_name=set_1_name,
current_firer_name=current_firer_name,
set_1_army=set_1_army,
current_army_no=current_army_no,
set_1_session_no=set_1_session_no,
current_session_no=current_session_no,
set_1_detail_no=set_1_detail_no,
current_detail_no=current_detail_no,
set_2_x=set_2_x_arr,
set_2_y=set_2_y_arr,
set_2_name=set_2_name,
set_2_army=set_2_army,
set_2_detail_no=set_2_detail_no,
set_2_session_no=set_2_session_no,
set_3_x=set_3_x_arr,
set_3_y=set_3_y_arr,
set_3_name=set_3_name,
set_3_army=set_3_army,
set_3_session_no=set_3_session_no,
set_3_detail_no=set_3_detail_no,
set_4_x=set_4_x_arr,
set_4_y=set_4_y_arr,
set_4_name=set_4_name,
set_4_army=set_4_army,
set_4_session_no=set_4_session_no,
set_4_detail_no=set_4_detail_no
)
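# NOTE: the prediction_target_* handlers above repeat the same per-set
# lookups (firer_id, session_id, detail_id, shooter name, service number)
# for every set. A minimal sketch of a shared helper is given below for
# reference only; the name fetch_set_meta is hypothetical, the handlers
# above do not call it, and it assumes the same T_Firer_Details / Shooter
# models already used in this module.
def fetch_set_meta(curdate, target_no, set_no, paper_ref, sess):
    """Return (firer_id, session_no, detail_no, name, army_no) for one set."""
    base = db.session.query(T_Firer_Details).filter(
        T_Firer_Details.date == curdate,
        T_Firer_Details.target_no == target_no,
        T_Firer_Details.set_no == set_no,
        T_Firer_Details.paper_ref == paper_ref,
        T_Firer_Details.session_id == sess)
    firer_id = base.with_entities(T_Firer_Details.firer_id).distinct().scalar()
    session_no = base.with_entities(T_Firer_Details.session_id).distinct().scalar()
    detail_no = base.with_entities(T_Firer_Details.detail_id).distinct().scalar()
    name = db.session.query(Shooter.name).filter(Shooter.id == firer_id).scalar()
    army_no = db.session.query(Shooter.service_id).filter(Shooter.id == firer_id).scalar()
    return firer_id, session_no, detail_no, name, army_no

# Previous-detail summary page for target lanes 1-4: looks up each assigned
# shooter's name, service number and rank from the live TShooting record.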
@app.route('/previous_page_target_1/', methods=['GET', 'POST'])
def previous_page_target_1():
T1_name = db.session.query(Shooter.name).filter(Shooter.id==TShooting.target_1_id).scalar()
T1_service = db.session.query(Shooter.service_id).filter(Shooter.id==TShooting.target_1_id).scalar()
T1_r_id = db.session.query(Shooter.rank_id).filter(Shooter.id==TShooting.target_1_id).scalar()
T1_rank = db.session.query(Rank.name).filter(Rank.id==T1_r_id).scalar()
T2_name = db.session.query(Shooter.name).filter(Shooter.id==TShooting.target_2_id).scalar()
T2_service = db.session.query(Shooter.service_id).filter(Shooter.id==TShooting.target_2_id).scalar()
T2_r_id = db.session.query(Shooter.rank_id).filter(Shooter.id==TShooting.target_2_id).scalar()
T2_rank = db.session.query(Rank.name).filter(Rank.id==T2_r_id).scalar()
T3_name = db.session.query(Shooter.name).filter(Shooter.id==TShooting.target_3_id).scalar()
T3_service = db.session.query(Shooter.service_id).filter(Shooter.id==TShooting.target_3_id).scalar()
T3_r_id = db.session.query(Shooter.rank_id).filter(Shooter.id==TShooting.target_3_id).scalar()
T3_rank = db.session.query(Rank.name).filter(Rank.id==T3_r_id).scalar()
T4_name = db.session.query(Shooter.name).filter(Shooter.id==TShooting.target_4_id).scalar()
T4_service = db.session.query(Shooter.service_id).filter(Shooter.id==TShooting.target_4_id).scalar()
T4_r_id = db.session.query(Shooter.rank_id).filter(Shooter.id==TShooting.target_4_id).scalar()
T4_rank = db.session.query(Rank.name).filter(Rank.id==T4_r_id).scalar()
print(T1_rank)
print(T2_rank)
print(T3_rank)
print(T4_rank)
return render_template('pages/previous_page_target_1.html' ,
T1_name=T1_name,
T1_service=T1_service,
T2_name=T2_name,
T2_service=T2_service,
T3_name=T3_name,
T3_service=T3_service,
T4_name=T4_name,
T4_service=T4_service,
T4_rank=T4_rank,
T1_rank=T1_rank,
T2_rank=T2_rank,
T3_rank=T3_rank
)
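# Previous-detail summary page for target lanes 5-8 (same lookups as
# previous_page_target_1, but for target_5_id..target_8_id).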
@app.route('/previous_page_target_5/', methods=['GET', 'POST'])
def previous_page_target_5():
T5_name = db.session.query(Shooter.name).filter(Shooter.id==TShooting.target_5_id).scalar()
T5_service = db.session.query(Shooter.service_id).filter(Shooter.id==TShooting.target_5_id).scalar()
T5_r_id = db.session.query(Shooter.rank_id).filter(Shooter.id==TShooting.target_5_id).scalar()
T5_rank = db.session.query(Rank.name).filter(Rank.id==T5_r_id).scalar()
T6_name = db.session.query(Shooter.name).filter(Shooter.id==TShooting.target_6_id).scalar()
T6_service = db.session.query(Shooter.service_id).filter(Shooter.id==TShooting.target_6_id).scalar()
T6_r_id = db.session.query(Shooter.rank_id).filter(Shooter.id==TShooting.target_6_id).scalar()
T6_rank = db.session.query(Rank.name).filter(Rank.id==T6_r_id).scalar()
T7_name = db.session.query(Shooter.name).filter(Shooter.id==TShooting.target_7_id).scalar()
T7_service = db.session.query(Shooter.service_id).filter(Shooter.id==TShooting.target_7_id).scalar()
T7_r_id = db.session.query(Shooter.rank_id).filter(Shooter.id==TShooting.target_7_id).scalar()
T7_rank = db.session.query(Rank.name).filter(Rank.id==T7_r_id).scalar()
T8_name = db.session.query(Shooter.name).filter(Shooter.id==TShooting.target_8_id).scalar()
T8_service = db.session.query(Shooter.service_id).filter(Shooter.id==TShooting.target_8_id).scalar()
T8_r_id = db.session.query(Shooter.rank_id).filter(Shooter.id==TShooting.target_8_id).scalar()
T8_rank = db.session.query(Rank.name).filter(Rank.id==T8_r_id).scalar()
return render_template('pages/previous_page_target_5.html' ,
T5_name=T5_name,
T5_service=T5_service,
T6_name=T6_name,
T6_service=T6_service,
T7_name=T7_name,
T7_service=T7_service,
T8_name=T8_name,
T8_service=T8_service,
T5_rank=T5_rank,
T6_rank=T6_rank,
T7_rank=T7_rank,
T8_rank=T8_rank
)
def prediction_calculation_1():
curdate=time.strftime("%Y-%m-%d")
X_json=0
Y_json=0
firer_id =db.session.query(TShooting.target_1_id).scalar()
sess_id = db.session.query(TShooting.session_id).scalar()
detail_id = db.session.query(TShooting.detail_no).scalar()
target_no=1
paper_ref=db.session.query(TPaper_ref.paper_ref).scalar()
print(paper_ref )
data_x_1=db.session.query(Firer_Details).filter(Firer_Details.date==curdate , Firer_Details.target_no==1 , Firer_Details.set_no==1 , Firer_Details.paper_ref==paper_ref , Firer_Details.session_id==sess_id).all()
data_y_1=db.session.query(Firer_Details).filter(Firer_Details.date==curdate , Firer_Details.target_no==1 , Firer_Details.set_no==1 , Firer_Details.paper_ref==paper_ref , Firer_Details.session_id==sess_id).all()
print(data_x_1)
set_no=db.session.query(TShooting.set_no).scalar()
paper_ref=db.session.query(TShooting.paper_ref).scalar()
print('Old x')
print(data_x_1)
image=Image.open('E:/FRAS Windows/FRAS_production/static/img_dump/1.png')
#image=Image.open('/Users/wasifaahmed/Documents/FRAS/Fras_production_v.0.1/FRAS Windows/FRAS Windows/FRAS_production/static/img_dump/1.png')
w,h = image.size
predictedMatrix = predictAsMatrix(image,w,h)
g= Graph(80, 80, predictedMatrix)
N=g.countIslands()
points(predictedMatrix,h=80,w=80)
centroids=kmean(N,pointsarray)
print(centroids)
if(centroids is None):
x=0,
y=0,
mpit=0
xmpi1=0
ympi1=0
f1=0,
firt_x=0
firt_y=0
fir_tendency_code=""
fir_tendency_txt=""
gp_1=""
result_1=""
else:
x= centroids [:, 1]
y= 2000-centroids [:, 0]
X_json=pd.Series(x).to_json(orient='values')
Y_json = pd.Series(y).to_json(orient='values')
mpit=mpi(1,centroids)
xmpi1 = mpit [:, 1]
ympi1 = 2000-mpit [:, 0]
f1 ,firt_x,firt_y= firing_tendancy(1000, 1000 , xmpi1, ympi1)
fir_tendency_txt,fir_tendency_code = getfiringtendencytext(f1 ,firt_x,firt_y)
gp_1 = grouping_length(0 , 0 , x , y)
result_1 =getresulttext(gp_1)
return (firer_id,
sess_id,
detail_id,
target_no,
set_no,
paper_ref,
X_json,
Y_json,
xmpi1,
ympi1,
f1,
gp_1,
firt_x,
firt_y,
data_x_1,
data_y_1,
result_1,
fir_tendency_txt
)
def prediction_calculation_2():
curdate=time.strftime("%Y-%m-%d")
X_json=0
Y_json=0
firer_id =db.session.query(TShooting.target_2_id).scalar()
sess_id = db.session.query(TShooting.session_id).scalar()
detail_id = db.session.query(TShooting.detail_no).scalar()
target_no=2
paper_ref=db.session.query(TPaper_ref.paper_ref).scalar()
print(paper_ref )
data_x_1=db.session.query(T_Firer_Details).filter(T_Firer_Details.date==curdate , T_Firer_Details.target_no==2 , T_Firer_Details.set_no==1 , T_Firer_Details.paper_ref==paper_ref , T_Firer_Details.session_id==sess_id).all()
data_y_1=db.session.query(T_Firer_Details).filter(T_Firer_Details.date==curdate , T_Firer_Details.target_no==2 , T_Firer_Details.set_no==1 , T_Firer_Details.paper_ref==paper_ref , T_Firer_Details.session_id==sess_id).all()
print(data_x_1)
set_no=db.session.query(TShooting.set_no).scalar()
paper_ref=db.session.query(TShooting.paper_ref).scalar()
print('Old x' )
print(data_x_1)
image=Image.open('E:/FRAS Windows/FRAS_production/static/img_dump/2.png')
w,h = image.size
predictedMatrix = predictAsMatrix(image,w,h)
g= Graph(80, 80, predictedMatrix)
N=g.countIslands()
points(predictedMatrix,h=80,w=80)
centroids=kmean(N,pointsarray)
if(centroids is None):
x=0,
y=0,
mpit=0
xmpi1=0
ympi1=0
f1=0,
firt_x=0
firt_y=0
fir_tendency_code=""
fir_tendency_txt=""
gp_1=""
result_1=""
else:
x= centroids [:, 1]
y= 2000-centroids [:, 0]
X_json=pd.Series(x).to_json(orient='values')
Y_json = pd.Series(y).to_json(orient='values')
mpit=mpi(1,centroids)
xmpi1 = mpit [:, 1]
ympi1 = 2000-mpit [:, 0]
f1 ,firt_x,firt_y= firing_tendancy(1000, 1000 , xmpi1, ympi1)
fir_tendency_txt,fir_tendency_code = getfiringtendencytext(f1 ,firt_x,firt_y)
gp_1 = grouping_length(0 , 0 , x , y)
result_1 =getresulttext(gp_1)
return (firer_id,
sess_id,
detail_id,
target_no,
set_no,
paper_ref,
X_json,
Y_json,
xmpi1,
ympi1,
f1,
gp_1,
firt_x,
firt_y,
data_x_1,
data_y_1,
result_1,
fir_tendency_txt
)
def prediction_calculation_3():
X_json=0
Y_json=0
curdate=time.strftime("%Y-%m-%d")
firer_id =db.session.query(TShooting.target_3_id).scalar()
sess_id = db.session.query(TShooting.session_id).scalar()
detail_id = db.session.query(TShooting.detail_no).scalar()
target_no=3
paper_ref=db.session.query(TPaper_ref.paper_ref).scalar()
print(paper_ref)
data_x_1=db.session.query(T_Firer_Details).filter(T_Firer_Details.date==curdate , T_Firer_Details.target_no==3 , T_Firer_Details.set_no==1 , T_Firer_Details.paper_ref==paper_ref , T_Firer_Details.session_id==sess_id).all()
data_y_1=db.session.query(T_Firer_Details).filter(T_Firer_Details.date==curdate , T_Firer_Details.target_no==3 , T_Firer_Details.set_no==1 , T_Firer_Details.paper_ref==paper_ref , T_Firer_Details.session_id==sess_id).all()
print(data_x_1)
set_no=db.session.query(TShooting.set_no).scalar()
paper_ref=db.session.query(TShooting.paper_ref).scalar()
print('Old x' )
print(data_x_1)
image=Image.open('E:/FRAS Windows/FRAS_production/static/img_dump/3.png')
w,h = image.size
predictedMatrix = predictAsMatrix(image,w,h)
g= Graph(80, 80, predictedMatrix)
N=g.countIslands()
points(predictedMatrix,h=80,w=80)
centroids=kmean(N,pointsarray)
if(centroids is None):
x=0,
y=0,
mpit=0
xmpi1=0
ympi1=0
f1=0,
firt_x=0
firt_y=0
fir_tendency_code=""
fir_tendency_txt=""
gp_1=""
result_1=""
else:
x= centroids [:, 1]
y= 2000-centroids [:, 0]
X_json=pd.Series(x).to_json(orient='values')
Y_json = pd.Series(y).to_json(orient='values')
mpit=mpi(1,centroids)
xmpi1 = mpit [:, 1]
ympi1 = 2000-mpit [:, 0]
f1 ,firt_x,firt_y= firing_tendancy(1000, 1000 , xmpi1, ympi1)
fir_tendency_txt,fir_tendency_code = getfiringtendencytext(f1 ,firt_x,firt_y)
print("calling from prediction_calculation_1" )
gp_1 = grouping_length(0 , 0 , x , y)
result_1 =getresulttext(gp_1)
return (firer_id,
sess_id,
detail_id,
target_no,
set_no,
paper_ref,
X_json,
Y_json,
xmpi1,
ympi1,
f1,
gp_1,
firt_x,
firt_y,
data_x_1,
data_y_1,
result_1,
fir_tendency_txt
)
def prediction_calculation_4():
curdate=time.strftime("%Y-%m-%d")
firer_id =db.session.query(TShooting.target_4_id).scalar()
sess_id = db.session.query(TShooting.session_id).scalar()
detail_id = db.session.query(TShooting.detail_no).scalar()
target_no=4
paper_ref=db.session.query(TPaper_ref.paper_ref).scalar()
print(paper_ref )
data_x_1=db.session.query(T_Firer_Details).filter(T_Firer_Details.date==curdate , T_Firer_Details.target_no==4 , T_Firer_Details.set_no==1 , T_Firer_Details.paper_ref==paper_ref , T_Firer_Details.session_id==sess_id).all()
data_y_1=db.session.query(T_Firer_Details.final_y).filter(T_Firer_Details.date==curdate , T_Firer_Details.target_no==4 , T_Firer_Details.set_no==1 , T_Firer_Details.paper_ref==paper_ref , T_Firer_Details.session_id==sess_id).all()
print(data_x_1)
set_no=db.session.query(TShooting.set_no).scalar()
paper_ref=db.session.query(TShooting.paper_ref).scalar()
print('Old x' )
print(data_x_1)
image=Image.open('E:/FRAS Windows/FRAS_production/static/img_dump/4.png')
w,h = image.size
predictedMatrix = predictAsMatrix(image,w,h)
g= Graph(80, 80, predictedMatrix)
N=g.countIslands()
points(predictedMatrix,h=80,w=80)
centroids=kmean(N,pointsarray)
if(centroids is None):
x=0,
y=0,
mpit=0
xmpi1=0
ympi1=0
f1=0,
firt_x=0
firt_y=0
fir_tendency_code=""
fir_tendency_txt=""
gp_1=""
result_1=""
else:
x= centroids [:, 1]
y= 2000-centroids [:, 0]
X_json=pd.Series(x).to_json(orient='values')
Y_json = pd.Series(y).to_json(orient='values')
mpit=mpi(1,centroids)
xmpi1 = mpit [:, 1]
ympi1 = 2000-mpit [:, 0]
f1 ,firt_x,firt_y= firing_tendancy(1000, 1000 , xmpi1, ympi1)
fir_tendency_txt,fir_tendency_code = getfiringtendencytext(f1 ,firt_x,firt_y)
print("calling from prediction_calculation_1" )
gp_1 = grouping_length(0 , 0 , x , y)
result_1 =getresulttext(gp_1)
return (firer_id,
sess_id,
detail_id,
target_no,
set_no,
paper_ref,
X_json,
Y_json,
xmpi1,
ympi1,
f1,
gp_1,
firt_x,
firt_y,
data_x_1,
data_y_1,
result_1,
fir_tendency_txt
)
def prediction_calculation_5():
curdate=time.strftime("%Y-%m-%d")
firer_id =db.session.query(TShooting.target_5_id).scalar()
sess_id = db.session.query(TShooting.session_id).scalar()
detail_id = db.session.query(TShooting.detail_no).scalar()
target_no=5
paper_ref=db.session.query(TPaper_ref.paper_ref).scalar()
print(paper_ref)
data_x_1=db.session.query(T_Firer_Details).filter(T_Firer_Details.date==curdate , T_Firer_Details.target_no==5 , T_Firer_Details.set_no==1 , T_Firer_Details.paper_ref==paper_ref , T_Firer_Details.session_id==sess_id).all()
data_y_1=db.session.query(T_Firer_Details).filter(T_Firer_Details.date==curdate , T_Firer_Details.target_no==5 , T_Firer_Details.set_no==1 , T_Firer_Details.paper_ref==paper_ref , T_Firer_Details.session_id==sess_id).all()
print(data_x_1)
set_no=db.session.query(TShooting.set_no).scalar()
paper_ref=db.session.query(TShooting.paper_ref).scalar()
print('Old x' )
print(data_x_1)
image=Image.open('E:/FRAS Windows/FRAS_production/static/img_dump/5.png')
w,h = image.size
predictedMatrix = predictAsMatrix(image,w,h)
g= Graph(80, 80, predictedMatrix)
N=g.countIslands()
points(predictedMatrix,h=80,w=80)
centroids=kmean(N,pointsarray)
if(centroids is None):
x=0,
y=0,
mpit=0
xmpi1=0
ympi1=0
f1=0,
firt_x=0
firt_y=0
fir_tendency_code=""
fir_tendency_txt=""
gp_1=""
result_1=""
else:
x= centroids [:, 1]
y= 2000-centroids [:, 0]
X_json=pd.Series(x).to_json(orient='values')
Y_json = pd.Series(y).to_json(orient='values')
mpit=mpi(1,centroids)
xmpi1 = mpit [:, 1]
ympi1 = 2000-mpit [:, 0]
f1 ,firt_x,firt_y= firing_tendancy(1000, 1000 , xmpi1, ympi1)
fir_tendency_txt,fir_tendency_code = getfiringtendencytext(f1 ,firt_x,firt_y)
print("calling from prediction_calculation_1" )
gp_1 = grouping_length(0 , 0 , x , y)
result_1 =getresulttext(gp_1)
return (firer_id,
sess_id,
detail_id,
target_no,
set_no,
paper_ref,
X_json,
Y_json,
xmpi1,
ympi1,
f1,
gp_1,
firt_x,
firt_y,
data_x_1,
data_y_1,
result_1,
fir_tendency_txt
)
def prediction_calculation_6():
curdate=time.strftime("%Y-%m-%d")
firer_id =db.session.query(TShooting.target_6_id).scalar()
sess_id = db.session.query(TShooting.session_id).scalar()
detail_id = db.session.query(TShooting.detail_no).scalar()
target_no=6
paper_ref=db.session.query(TPaper_ref.paper_ref).scalar()
print(paper_ref)
data_x_1=db.session.query(T_Firer_Details).filter(T_Firer_Details.date==curdate , T_Firer_Details.target_no==6 , T_Firer_Details.set_no==1 , T_Firer_Details.paper_ref==paper_ref , T_Firer_Details.session_id==sess_id).all()
data_y_1=db.session.query(T_Firer_Details).filter(T_Firer_Details.date==curdate , T_Firer_Details.target_no==6 , T_Firer_Details.set_no==1 , T_Firer_Details.paper_ref==paper_ref , T_Firer_Details.session_id==sess_id).all()
print(data_x_1)
set_no=db.session.query(TShooting.set_no).scalar()
paper_ref=db.session.query(TShooting.paper_ref).scalar()
print('Old x' )
print(data_x_1)
image=Image.open('E:/FRAS Windows/FRAS_production/static/img_dump/6.png')
w,h = image.size
predictedMatrix = predictAsMatrix(image,w,h)
g= Graph(80, 80, predictedMatrix)
N=g.countIslands()
points(predictedMatrix,h=80,w=80)
centroids=kmean(N,pointsarray)
if(centroids is None):
x=0,
y=0,
mpit=0
xmpi1=0
ympi1=0
f1=0,
firt_x=0
firt_y=0
fir_tendency_code=""
fir_tendency_txt=""
gp_1=""
result_1=""
else:
x= centroids [:, 1]
y= 2000-centroids [:, 0]
X_json=pd.Series(x).to_json(orient='values')
Y_json = pd.Series(y).to_json(orient='values')
mpit=mpi(1,centroids)
xmpi1 = mpit [:, 1]
ympi1 = 2000-mpit [:, 0]
f1 ,firt_x,firt_y= firing_tendancy(1000, 1000 , xmpi1, ympi1)
fir_tendency_txt,fir_tendency_code = getfiringtendencytext(f1 ,firt_x,firt_y)
print("calling from prediction_calculation_1" )
gp_1 = grouping_length(0 , 0 , x , y)
result_1 =getresulttext(gp_1)
return (firer_id,
sess_id,
detail_id,
target_no,
set_no,
paper_ref,
X_json,
Y_json,
xmpi1,
ympi1,
f1,
gp_1,
firt_x,
firt_y,
data_x_1,
data_y_1,
result_1,
fir_tendency_txt
)
def prediction_calculation_7():
curdate=time.strftime("%Y-%m-%d")
firer_id =db.session.query(TShooting.target_7_id).scalar()
sess_id = db.session.query(TShooting.session_id).scalar()
detail_id = db.session.query(TShooting.detail_no).scalar()
target_no=7
paper_ref=db.session.query(TPaper_ref.paper_ref).scalar()
print(paper_ref)
data_x_1=db.session.query(T_Firer_Details).filter(T_Firer_Details.date==curdate , T_Firer_Details.target_no==7 , T_Firer_Details.set_no==1 , T_Firer_Details.paper_ref==paper_ref , T_Firer_Details.session_id==sess_id).all()
data_y_1=db.session.query(T_Firer_Details.final_y).filter(T_Firer_Details.date==curdate , T_Firer_Details.target_no==7 , T_Firer_Details.set_no==1 , T_Firer_Details.paper_ref==paper_ref , T_Firer_Details.session_id==sess_id).all()
print(data_x_1)
set_no=db.session.query(TShooting.set_no).scalar()
paper_ref=db.session.query(TShooting.paper_ref).scalar()
print('Old x' )
print(data_x_1)
image=Image.open('E:/FRAS Windows/FRAS_production/static/img_dump/7.png')
w,h = image.size
predictedMatrix = predictAsMatrix(image,w,h)
g= Graph(80, 80, predictedMatrix)
N=g.countIslands()
points(predictedMatrix,h=80,w=80)
centroids=kmean(N,pointsarray)
if(centroids is None):
x=0,
y=0,
mpit=0
xmpi1=0
ympi1=0
f1=0,
firt_x=0
firt_y=0
fir_tendency_code=""
fir_tendency_txt=""
gp_1=""
result_1=""
else:
x= centroids [:, 1]
y= 2000-centroids [:, 0]
X_json= | pd.Series(x) | pandas.Series |
#!/usr/bin/env python
# coding: utf-8
# # Experiments @Fischer in Montebelluna 28.02.20
# We had the oppurtunity to use the Flexometer for ski boots of Fischer with their help at Montebelluna. The idea is to validate our system acquiring simultaneously data by our sensor setup and the one from their machine. With the machine of Fischer it's possible to apply exact loads.
# We used booth our sensorized ski boots (Dynafit Hoji Pro Tour W and Dynafit TLT Speedfit). The Hoji we already used in the past for our experiments in the lab @Bz with our selfbuild experiment test bench. For the TLT Speedfit this was the first experiment.
#
# Strain gauge setup:
# - Dynafit Hoji Pro Tour: 4 pairs of strain gauges 1-4 (a=0°, b=90°)
# - Dynafit TLT Speedfit: 4 triples of strain gauges 1-4 (a=0°,b=45°,c=90°)
# As we had only a restricted time, we tested all 4 strain gauges pairs of the Hoji and only strain gauge triple 3 for TLT Speedfit. For the first time the new prototype of datalogger was running in an experiment. In addition also the first time in battery mode and not at room temperature. Unfortunately the connection of the strains to the logging system was not the best as in battery mode we don't have any possibility to control the connection to the channels yet. We'll get a solution for this the next days.
#
# Experiments (ambient temperature: 4°C):
# - #1: Hoji Pro Tour, 4a&b
# - #2: Hoji Pro Tour, 3a&b
# - #3: Hoji Pro Tour, 2a&b
# - #4: Hoji Pro Tour, 1a&b
# - #5: TLT Speedfit, 3a&b&c
#
# ATTENTION: The Hoji boot was not closed as much as the TLT. Take in consideration this when looking at force/angular displacement graph.
# In[50]:
# Importing libraries
import pandas as pd
import numpy as np
import datetime
import time
import matplotlib.pyplot as plt
from matplotlib.pyplot import figure
import csv
import matplotlib.patches as mpatches #needed for plot legend
from matplotlib.pyplot import *
get_ipython().run_line_magic('matplotlib', 'inline')
from IPython.display import set_matplotlib_formats
set_matplotlib_formats('png', 'pdf')
# # Machine Data: load and plot
# The boot was loaded cyclical by the machine with a maximum of F = 150N. In each single experiment 1-5 we exported the data of the last 5 cycles.
#
# In[51]:
#Loading data in df[expnr]: exprnr-> experiment 1-5 with cycle 1-5
expnr=5 #number of exp
cyclenr = 5 #number of cycle per experiment
colnr = 2*cyclenr #
dfm={}
for expnr in range(expnr):
d = {}
for i in range(cyclenr): #load data from cycle 1-5
d[expnr,i] = pd.DataFrame()
d[expnr,i] = pd.read_csv('ESP'+ str(expnr+1) + 'ciclo'+ str(i+1) +'.csv', sep='\t',header=None)
dfm[expnr]=pd.concat([d[expnr,0], d[expnr,1], d[expnr,2], d[expnr,3], d[expnr,4]], axis=1, join='inner')
dfm[expnr] = np.array(dfm[expnr]) #transform in np.array
for i in range(len(dfm[expnr])): #replace , with . and change format to float
for j in range(colnr):
dfm[expnr][i,j]=float(dfm[expnr][i,j].replace(',', '.'))
#print(dfm[1][:,0])
# In[52]:
figm, axm = plt.subplots(5, 5, figsize=(13, 11), sharex='col') #define plot settings
col_title = ['Experiment {}'.format(col) for col in range(1, 5)]
for i in range(expnr+1):
for j in range(cyclenr):
axm[j,i].plot(dfm[i][:,2*j+1],dfm[i][:,2*j])
axm[0,i].set_title('Experiment '+ str(i+1))
axm[j,0].set(ylabel='F[N] Cycle'+ str(j+1))
axm[4,i].set(xlabel='angle [°]')
plt.tight_layout()
figm.suptitle('Machine Data Plot (Hoji Pro Tour: 1-4, TLT Speedfit: 5)',fontsize=16)
figm.subplots_adjust(top=0.88)
# On the x-axis the force F is shown (max 150N) and on the y-axis the displacement angle alpha.
# In the plot above the columns are showing the experiment and the rows the single cycles. The cycles within the same experiment are quite similar (qualitative). It's cool how clear is the difference between the two different ski boot models we used. Experiment 1-4 is showing Dynafit Hoji Pro Tour and experiment 5 the Dynafit TLT Speedfit.
# # Calculate surface under curve
# To compare the energy release between Hoji and TLT we are going to calculate the surface in the closed curve.
# We can calculate an area under a curve (curve to x-axis) by integration (E = \int{M dphi}). Via interpolation of extracted points on the curve we generate a function which is integrated afterwards by trapezian rule to get the surface. By subtracting the surface of unloading from the one of loading the area between can be calculated, which corresponds the energy release.
# In[53]:
from scipy.interpolate import interp1d
from numpy import trapz
# Experiment data
x1=dfm[1][:,1] # Exp1 cycle 1 Hoji
y1=dfm[1][:,0] # Exp1 cycle 1 Hoji
x2=dfm[4][:,1] # Exp5 cycle 1 Hoji
y2=dfm[4][:,0] # Exp5 cycle 1 Hoji
ym1=np.array([-29,17,41.14,63,96,147.8]) # x points loading Hoji
xm1=np.array([-1.5,2.9,7.312,11,13.7,13.94]) # y points loading Hoji
ym2=np.array([-29,3.741,25,43.08,63,72,106,147.8]) # x points unloading Hoji
xm2=np.array([-1.5,-0.646,1.2,3.127,6.6,8.37,13.28,13.94]) # y points unloading Hoji
ym3=np.array([-28.5,-12.27,4.841,18.01,31.92,39.46,87.48,145.6]) # x points loading TLT
xm3=np.array([-2.752,-0.989,1.022,3.23,5.387,6.012,6.521,6.915]) # y point loading TLT
ym4=np.array([-28.5,2.042,26.35,41.36,51.86,56.33,93.87,145.6]) # x points unloading TLT
xm4=np.array([-2.752,-1.94,-0.43,1.524,3.76,5.625,6.24,6.915]) # y points unloading TLt
# Interpolation
f1 = interp1d(xm1, ym1)
f2 = interp1d(xm2, ym2)
f3 = interp1d(xm3, ym3)
f4 = interp1d(xm4, ym4)
# Plot of original data and interpolation
fig0, ax0 = plt.subplots(1, 2, figsize=(15, 8))
fig0.suptitle('Ski boot testing machine', fontsize=16)
#fig0.suptitle('Interpolation of experiment data 1&5 cycle 1 (left: Hoji, right: TLT)', fontsize=16)
ax0[0].plot(x1,y1) # loading Hoji
ax0[0].set_title('Hoji Pro Tour W')
#ax0[0].plot(xm2,ym2, 'o', xm2, f2(xm2), '-', xm2, f2(xm2), '--') # unloading Hoji
#ax0[0].plot(x1,y1,xm1,ym1, 'o', xm1, f1(xm1), '-') # loading Hoji
#ax0[0].plot(xm2,ym2, 'o', xm2, f2(xm2), '-', xm2, f2(xm2), '--') # unloading Hoji
ax0[0].set(xlabel='angle [°]')
ax0[0].set(ylabel='Force [N]')
ax0[1].plot(x2,y2) # loading Hoji
ax0[1].set_title('TLT Speedfit')
#ax0[1].plot(x2,y2,xm3,ym3, 'o', xm3, f3(xm3), '-') # loading Hoji
#ax0[1].plot(xm4,ym4, 'o', xm4, f4(xm4), '-', xm4, f4(xm4), '--') # unloading Hoji
ax0[1].set(xlabel='angle [°]')
ax0[1].set(ylabel='Force [N]')
plt.show()
# Calculation of area between loading and unloading curve -> Energy
area1_hoji=np.trapz(f1(xm1), xm1)
area2_hoji=np.trapz(f2(xm2), xm2)
area1_TLT=np.trapz(f3(xm3), xm3)
area2_TLT=np.trapz(f4(xm4), xm4)
energy_hoji=abs(area1_hoji-area2_hoji)
energy_TLT=abs(area1_TLT-area2_TLT)
#print('Energy release Hoji = ', energy_hoji, '[J]')
#print('Energy release TLT = ', energy_TLT, '[J]')
# # Bootsensing: load and plot
# We created a datalogger which is saving the experiment data in a .txt file on a SD card. After the experiments we took them from the SD card to our PC.
# <NAME> did an excellent work with his file reader (https://github.com/raphaFanti/multiSensor/blob/master/analysis/03.%20Experiments_200220/Analysis%20v02/datanalysis_200220-v02.ipynb) which I'm using here to load this data. I modified the col_names as we used adapted column names the last time and updated the experiment date. He implemented also a good way to store all in a big dataframe. I'll copy also this code from Raphael.
# In[54]:
# transforms a time string into a datetime element
def toDate(timeString):
hh, mm, ss = timeString.split(":")
return datetime.datetime(2020, 2, 28, int(hh), int(mm), int(ss)) # date of experiment: 28.02.20
# returns a dataframe for each sub experient
col_names = ["ID","strain1","strain2","strain3","temp","millis"] # column names from file
cols_ordered = ["time","strain1","strain2","strain3"] # order wished
cols_int = ["strain1","strain2","strain3"] # to be transformed to int columns
def getDf(fl, startTime):
# ! note that we remove the first data line for each measurement since the timestamp remains zero for two first lines
fl.readline() # line removed
line = fl.readline()
lines = []
while "Time" not in line:
cleanLine = line.rstrip()
# trick for int since parsing entire column was not working
intsLine = cleanLine.replace(".00", "")
splitedLine = intsLine.split(",")
lines.append(splitedLine)
line = fl.readline()
# create dataframe
df = pd.DataFrame(lines, columns = col_names)
# create time colum
df["time"] = df["millis"].apply(lambda x: startTime + datetime.timedelta(milliseconds = int(x)))
# drop ID, millis and temperature, and order columns
df = df.drop(["ID", "temp", "millis"], axis = 1)
df = df[cols_ordered]
# adjust types
df[cols_int] = df[cols_int].astype(int)
return df
# Load data to dataframe. As we were not working with our usually experiment protocol, I had to skip phase = bs2.
# In[55]:
filenames = ["2022823_exp1","2022848_exp2","2022857_exp3", "202285_exp4", "2022829_exp5"]
nExp = len(filenames) # we simply calculate the number of experiments
# big data frame
df = pd.DataFrame()
for i, this_file in enumerate(filenames):
# experiment counter
exp = i + 1
# open file
with open(this_file + ".TXT", 'r') as fl:
# throw away first 3 lines and get baseline 1 start time
for i in range(3):
fl.readline()
# get start time for first baseline
bl1_time = fl.readline().replace("BASELINE Time: ", "")
startTime = toDate(bl1_time)
# get data for first baseline
df_bl1 = getDf(fl, startTime)
df_bl1["phase"] = "bl1"
# get start time for experiment
exp_time = fl.readline().replace("RECORDING Time: ", "")
startTime = toDate(exp_time)
# get data for experiment
df_exp = getDf(fl, startTime)
df_exp["phase"] = "exp"
# get start time for second baseline
#bl2_time = fl.readline().replace("BASELINE Time: ", "")
#startTime = toDate(bl2_time)
# get data for second baseline
#df_bl2 = getDf(fl, startTime)
#df_bl2["phase"] = "bl2"
# create full panda
df_exp_full = pd.concat([df_bl1, df_exp])
# create experiment column
df_exp_full["exp"] = exp
df = pd.concat([df, df_exp_full])
# shift columns exp and phase to begining
cols = list(df.columns)
cols = [cols[0]] + [cols[-1]] + [cols[-2]] + cols[1:-2]
df = df[cols]
#print(df)
# In[56]:
def plotExpLines(df, exp):
fig, ax = plt.subplots(3, 1, figsize=(15, 8), sharex='col')
fig.suptitle('Experiment ' + str(exp), fontsize=16)
# fig.subplots_adjust(top=0.88)
ax[0].plot(dfExp["time"], dfExp["strain3"], 'tab:green')
ax[0].set(ylabel='strain3')
ax[1].plot(dfExp["time"], dfExp["strain1"], 'tab:red')
ax[1].set(ylabel='strain1')
ax[2].plot(dfExp["time"], dfExp["strain2"], 'tab:blue')
ax[2].set(ylabel='strain2')
ax[2].set(xlabel='time [ms]')
plt.show()
# ### Experiment 1
# In[57]:
figure(num=None, figsize=(10, 8), dpi=80, facecolor='w', edgecolor='k')
plt.plot(df[df["exp"] == 1]['time'],df[df["exp"] == 1]['strain3'])
plt.xlabel('daytime')
plt.ylabel('4A')
plt.title('Experiment 1: 4A ')
plt.show()
# We applied 34 cycles.
# ### Experiment 2
# In[58]:
figure(num=None, figsize=(10, 8), dpi=80, facecolor='w', edgecolor='k')
plt.plot(df[df["exp"] == 2]['time'],df[df["exp"] == 2]['strain3'])
plt.xlabel('daytime')
plt.ylabel('3A')
plt.title('Experiment 2: 3A ')
plt.show()
# # Experiment 3
# In[59]:
figure(num=None, figsize=(10, 8), dpi=80, facecolor='w', edgecolor='k')
plt.plot(df[df["exp"] == 3]['time'],df[df["exp"] == 3]['strain3'])
plt.xlabel('daytime')
plt.ylabel('2B')
plt.title('Experiment 3: 2B ')
plt.show()
# ### Experiment 4
# In[60]:
figure(num=None, figsize=(12, 8), dpi=80, facecolor='w', edgecolor='k')
plt.plot(df[df["exp"] == 4]['time'],df[df["exp"] == 4]['strain3'])
plt.xlabel('daytime')
plt.ylabel('1A')
plt.title('Experiment 4: 1A ')
plt.show()
# ### Experiment 5
# In[61]:
fig, ax = plt.subplots(2, 1, figsize=(15, 8), sharex='col')
fig.suptitle('Experiment 5: 3B & 3C ', fontsize=16)
# fig.subplots_adjust(top=0.88)
ax[0].plot(df[df["exp"] == 5]['time'], df[df["exp"] == 5]['strain3'], 'tab:green')
ax[0].set(ylabel='3C')
ax[1].plot(df[df["exp"] == 5]['time'], df[df["exp"] == 5]['strain2'], 'tab:red')
ax[1].set(ylabel='3B')
ax[1].set(xlabel='daytime')
plt.show()
# In[62]:
#dfExp = df[df["exp"] == 3]
#plotExpLines(dfExp, 3)
# # Analysis
# Now we try to compare the data from the Flexometer of Fischer and from our Bootsensing.
# - Fischer: force F over displacement angle alpha
# - Bootsensing: deformation measured by strain gauge (resistance change) in at the moment unknown unit over time (daytime in plot shown)
# The idea now is to identify the last 5 cycles in Bootsensing data automatically and to exstract time information (t0,t). Afterwards this delta t can be applied on Fischers data to plot force F over the extracted time.
# ### Bootsensing: Cycle identification
# For Experiment 1-5 we will identfy the last 5 cycles of strain3. As the data of Fischer starts at a peak (maximum load), we will identify them also in our bootsensing data and extract the last 6 peak indexes. Applying these indices on strain3/time data we get the last 5 cycles.
#
# Find peaks: find_peaks function https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.find_peaks.html
# Find valley: with Inverse of find peaks
#
#
# In[63]:
from scipy.signal import find_peaks
import numpy as np
# Load data of Experiments 1-5
ds={} # dict for strain data -> dataformat will be changed
dt={} # time data
peaks={} # peaks
valleys={} # valleys
inv_ds={} # inverse for valleys calculation
ds_peaks={} # index of peak (used for 5-2)
ds_peaks_end={} # index of last peaks
ds_valleys_end = {} # index of last valley
ds_valleys={} # index of valley (used for 5-2)
len_valley={} # valley lenght
for i in range(1,6): # i = Experiment number
ds[i]=df[df["exp"] == i]['strain3'] #data for strain3
dt[i]=df[df["exp"] == i]['time'] # time data
ds[i]=ds[i].dropna() # drop NaN
dt[i]=dt[i].dropna()
ds[i]=ds[i].reset_index(drop=True) #reset index
dt[i]=dt[i].reset_index(drop=True)
peaks[i],_=find_peaks(ds[i],prominence=100000) # find peaks
inv_ds[i]=ds[i]*(-1) # inverse of ds
valleys[i],_=find_peaks(inv_ds[i],prominence=10000) # find valleys
for j in range(1,6): # j = cycle number
ds_valleys[j,i]=valleys[i][-1-j:-j] # selecting last 5 valleys
ds_valleys_end[j,i]=valleys[i][-1:] # select last valley
ds_valleys[j,i]=ds_valleys[j,i][0] # assign index
ds_valleys_end[j,i]=ds_valleys_end[j,i][0]
ds_peaks[j,i]=peaks[i][-1-j:-j] # selecting last 5 peaks
ds_peaks_end[j,i]=peaks[i][-1:] # select last peak
ds_peaks[j,i]=ds_peaks[j,i][0] # assign index
ds_peaks_end[j,i]=ds_peaks_end[j,i][0]
#print(ds1[1][ds_valleys[1,1]])
#Calculate cycle lengths
#for i in range(1,6):
#len_valley[e] = dt1[e][ds_valleys[1,1]] - dt1[e][ds_valleys[2,1]] #1th
#len_valley1_2[i] = dt1[ds_valley_3[i]] - dt1[ds_valley_4[i]] #2th
#len_valley2_3[i] = dt1[ds_valley_2[i]] - dt1[ds_valley_3[i]] #3th
#len_valley3_4[i] = dt1[ds_valley_1[i]] - dt1[ds_valley_2[i]] #4th
#len_valley4_5[i] = dt1[ds_valley_last_end[i]] - dt1[ds_valley_1[i]] #5th
# EXPERIMENT 1: pay attention for peaks/valley after cycles
# Now we will plot the data for strain3 for each experiment with their peaks and valleys.
# In[64]:
# Plot peaks and valleys for Exp 1-5 for strain3
fig1, ax1 = plt.subplots(5, 1, figsize=(15, 8))
fig1.subplots_adjust(top=2)
fig1.suptitle('Experiments 1-5: peaks and valleys ', fontsize=16)
for i in range(5): # i for Experiment number
ax1[i].plot(df[df["exp"] == (i+1)]['time'], df[df["exp"] == (i+1)]['strain3'], 'tab:green')
ax1[i].plot(dt[(i+1)][peaks[(i+1)]],ds[(i+1)][peaks[(i+1)]],"x") #Plot peaks with x
ax1[i].plot(dt[(i+1)][valleys[(i+1)]],ds[(i+1)][valleys[(i+1)]],"o") #Plot valleys with o
ax1[i].set(ylabel='raw signal')
ax1[i].set(xlabel='daytime')
ax1[i].set_title('Experiment'+str(i+1))
plt.tight_layout()
fig1.subplots_adjust(top=0.88) # spacer between title and plot
plt.show()
# Plot last 5 cycles for Exp 1-5 for strain3
fig2, ax2 = plt.subplots(5, 1, figsize=(10, 8))
fig2.suptitle('Experiments 1-5: last 5 cycles ', fontsize=16)
for i in range(5): # i for Experiment number
ax2[i].plot(dt[(i+1)][ds_valleys[5,(i+1)]:ds_valleys_end[1,(i+1)]],ds[(i+1)][ds_valleys[5,(i+1)]:ds_valleys_end[1,(i+1)]]) # select data between 5th last and last valley
#ax2[i].plot(dt[(i+1)][ds_peaks[5,(i+1)]:ds_peaks_end[1,(i+1)]],ds[(i+1)][ds_peaks[5,(i+1)]:ds_peaks_end[1,(i+1)]])# select data between 5th last and last peak
ax2[i].set(ylabel='raw signal')
ax2[i].set(xlabel='daytime')
ax2[i].set_title('Experiment'+str(i+1))
plt.tight_layout()
fig2.subplots_adjust(top=0.88) # spacer between title and plot
plt.show()
#plt.axvline(x=dt[ds_valley_2_index],color="grey") #time borders 3th cycle
#plt.axvline(x=dt[ds_valley_3_index],color="grey")
#plt.axhline(y=ds[ds_valley_3_index],color="red") # h line
# For Experiment 2-5 the last 5 cycles are clear. The signal of experiment 1 is raising again after the cyclic loading as it's not possible to select the last 5 cycles with this "peaks" method, but happily we can extract still the last cycle.
# As we can see in the plot of the last 5 cycles above, the last cycle for Exp1, Exp3 and Exp5 is ending with a peak where Exp2 and Exp4 is ending with a valley. We can say this from the plots as we know from our exported machine data that a cycle ends always with the maximum force of 150N. This means a valley or peak for our bootsensing system.
# ### Match Fischer Data with Bootsensing cycle time
# Now we are going to match the Bootsensing cycle time with the force data of Fischer for each experiment 1-5. As the machine of Fischer applied the load with a frequency of 0.33 Hz, the cycle length of each cycle should be approximately t=3s. We verified this calculating the length between 2 neighbour valley of our bootsensing data (see code above).
# In[65]:
#Identify frequency of Fischer Dataacquisition
f={} # Fischer force matrix
freq={} # matrix with vector lenght to identify frequency
for i in range(5): #
f[i] = dfm[i][:,2*i] # load force data for Exp5, strain3 0,2,4,6,8
freq[i] = len(dfm[i][:,2*i]) # force vector len
#Create time linspace for Fischer data
#Timestamp can not be selected by item, done without manually
time_start1=dt[1][ds_peaks[5,1]] # Exp1: select manually last cycle
time_end1=dt[1][ds_peaks[4,1]]
time_start2=dt[2][ds_valleys[5,2]] # Exp2
time_end2=dt[2][ds_valleys[4,2]]
time_start3=dt[3][ds_peaks[5,3]] # Exp3
time_end3=dt[3][ds_peaks[4,3]]
time_start4=dt[4][ds_valleys[5,4]] # Exp4
time_end4=dt[4][ds_valleys[4,4]]
time_start5=dt[5][ds_peaks[5,5]] # Exp5
time_end5=dt[5][ds_peaks[4,5]]
#print(time_start1,time_end1)
x1=pd.date_range(time_start1, time_end1, periods=freq[0]).to_pydatetime()
x2=pd.date_range(time_start2, time_end2, periods=freq[1]).to_pydatetime()
x3= | pd.date_range(time_start3, time_end3, periods=freq[2]) | pandas.date_range |
import numpy as np
import pandas as pd
import os.path
from glob import glob
import scipy.stats as ss
from sklearn.metrics import r2_score, roc_auc_score, average_precision_score
COLORS = {
'orange': '#f0593e',
'dark_red': '#7c2712',
'red': '#ed1d25',
'yellow': '#ed9f22',
'light_green': '#67bec5',
'dark_green': '#018a84',
'light_blue': '#00abe5',
'dark_blue': '#01526e',
'grey': '#a8a8a8'
}
DINUCLEOTIDES = {
'AA': 'AA/TT', 'AC': 'AC/GT', 'AG': 'AG/CT',
'CA': 'CA/TG', 'CC': 'CC/GG', 'GA': 'GA/TC'
}
FEATURE_NAME_DICT = {
'yeast': {
'tf_binding:TF': 'TF binding',
'histone_modifications:h3k27ac_tp1_0_merged': 'H3K27ac',
'histone_modifications:h3k36me3_tp1_0_merged': 'H3K36me3',
'histone_modifications:h3k4me3_tp1_0_merged': 'H3K4me3',
'histone_modifications:h3k4me_tp1_0_merged': 'H3K4me1',
'histone_modifications:h3k79me_tp1_0_merged': 'H3K79me1',
'histone_modifications:h4k16ac_tp1_0_merged': 'H4K16ac',
'chromatin_accessibility:BY4741_ypd_osm_0min.occ': 'Chrom acc',
'gene_expression:TF': 'GEX level',
'gene_expression:variation': 'GEX var',
'dna_sequence:nt_freq_agg': 'Dinucleotides'
},
'human_k562': {
'tf_binding:TF': 'TF binding',
'histone_modifications:K562_H3K27ac': 'H3K27ac',
'histone_modifications:K562_H3K27me3': 'H3K27me3',
'histone_modifications:K562_H3K36me3': 'H3K36me3',
'histone_modifications:K562_H3K4me1': 'H3K4me1',
'histone_modifications:K562_H3K4me3': 'H3K4me3',
'histone_modifications:K562_H3K9me3': 'H3K9me3',
'chromatin_accessibility:K562_atac': 'Chrom acc',
'gene_expression:median_level': 'GEX level',
'gene_expression:variation': 'GEX var',
'dna_sequence:nt_freq_agg': 'DNA sequence'
},
'human_hek293': {
'tf_binding:TF': 'TF binding',
'histone_modifications:HEK293_H3K27ac': 'H3K27ac',
'histone_modifications:HEK293_H3K27me3': 'H3K27me3',
'histone_modifications:HEK293_H3K36me3': 'H3K36me3',
'histone_modifications:HEK293_H3K4me1': 'H3K4me1',
'histone_modifications:HEK293_H3K4me3': 'H3K4me3',
'histone_modifications:HEK293_H3K9me3': 'H3K9me3',
'chromatin_accessibility:HEK293T_dnase': 'Chrom acc',
'gene_expression:median_level': 'GEX level',
'gene_expression:variation': 'GEX var',
'dna_sequence:nt_freq_agg': 'DNA sequence'
},
'human_h1': {
'tf_binding:TF': 'TF binding',
'histone_modifications:H3K27ac': 'H3K27ac',
'histone_modifications:H3K27me3': 'H3K27me3',
'histone_modifications:H3K36me3': 'H3K36me3',
'histone_modifications:H3K4me1': 'H3K4me1',
'histone_modifications:H3K4me3': 'H3K4me3',
'histone_modifications:H3K9me3': 'H3K9me3',
'chromatin_accessibility:H1_ChromAcc_intersect': 'Chrom acc',
'gene_expression:median_level': 'GEX level',
'gene_expression:variation': 'GEX var',
'dna_sequence:nt_freq_agg': 'DNA sequence'
}
}
def parse_classifier_stats(dirpath, algorithm, feat_types, sys2com_dict=None):
out_df = pd.DataFrame(
columns=['tf', 'chance', 'feat_type', 'cv', 'auroc', 'auprc'])
for feat_type in feat_types:
print('... working on', feat_type)
subdirs = glob('{}/{}/{}/*'.format(dirpath, feat_type, algorithm))
for subdir in subdirs:
tf = os.path.basename(subdir)
filename = glob('{}/stats.csv*'.format(subdir))[0]
stats_df = pd.read_csv(filename)
filename = glob('{}/preds.csv*'.format(subdir))[0]
preds_df = pd.read_csv(filename)
stats_df['feat_type'] = feat_type
if sys2com_dict is not None:
tf_com = sys2com_dict[tf] if tf in sys2com_dict else tf
stats_df['tf'] = '{} ({})'.format(tf, tf_com)
stats_df['tf_com'] = tf_com
else:
stats_df['tf'] = tf
stats_df['chance'] = np.sum(preds_df['label'] == 1) / preds_df.shape[0]
out_df = out_df.append(stats_df, ignore_index=True)
return out_df
def compare_model_stats(df, metric, comp_groups):
stats_df = pd.DataFrame(columns=['tf', 'comp_group', 'p_score'])
for tf, df2 in df.groupby('tf'):
for (f1, f2) in comp_groups:
x1 = df2.loc[df2['feat_type'] == f1, metric]
x2 = df2.loc[df2['feat_type'] == f2, metric]
_, p = ss.ttest_rel(x1, x2)
sign = '+' if np.median(x2) > np.median(x1) else '-'
stats_row = pd.Series({
'tf': tf,
'comp_group': '{} vs {}'.format(f1, f2),
'p_score': -np.log10(p),
'sign': sign})
stats_df = stats_df.append(stats_row, ignore_index=True)
return stats_df
def get_feature_indices(df, organism):
"""Parse feature indices for visualization.
"""
feat_dict = FEATURE_NAME_DICT[organism]
idx_df = pd.DataFrame()
for _, row in df.iterrows():
if row['feat_type'] == 'dna_sequence_nt_freq':
type_name = 'dna_sequence:nt_freq_agg'
else:
type_name = row['feat_type'] + ':' + row['feat_name']
type_name2 = feat_dict[type_name]
for i in range(row['start'], row['end']):
idx_df = idx_df.append(pd.Series({'feat_type_name': type_name2, 'feat_idx': i}), ignore_index=True)
return idx_df
def calculate_resp_and_unresp_signed_shap_sum(data_dir, tfs=None, organism='yeast', sum_over_type='tf'):
"""Calculate the sum of SHAP values within responsive and unresponsive genes respectively.
"""
# TODO: update shap csv header
print('Loading feature data ...', end=' ')
shap_subdf_list = []
for i, shap_subdf in enumerate(pd.read_csv('{}/feat_shap_wbg.csv.gz'.format(data_dir), chunksize=10 ** 7, low_memory=False)):
print(i, end=' ')
shap_subdf = shap_subdf.rename(columns={'gene': 'tf:gene', 'feat': 'shap'})
shap_subdf['tf'] = shap_subdf['tf:gene'].apply(lambda x: x.split(':')[0])
if tfs is not None:
shap_subdf = shap_subdf[shap_subdf['tf'].isin(tfs)]
shap_subdf_list.append(shap_subdf)
print()
shap_df = pd.concat(shap_subdf_list)
del shap_subdf_list
feats_df = pd.read_csv('{}/feats.csv.gz'.format(data_dir), names=['feat_type', 'feat_name', 'start', 'end'])
preds_df = pd.read_csv('{}/preds.csv.gz'.format(data_dir))
if tfs is not None:
preds_df = preds_df[preds_df['tf'].isin(tfs)]
feat_idx_df = get_feature_indices(feats_df, organism)
## Parse out shap+ and shap- values
print('Parsing signed shap values ...')
shap_df = shap_df.merge(preds_df[['tf:gene', 'label', 'gene']], how='left', on='tf:gene')
shap_df['shap+'] = shap_df['shap'].clip(lower=0)
shap_df['shap-'] = shap_df['shap'].clip(upper=0)
## Sum across reg region for each feature and each tf:gene, and then take
## the mean among responsive targets and repeat for non-responsive targets.
print('Summing shap ...')
shap_df = shap_df.merge(feat_idx_df[['feat_type_name', 'feat_idx']], on='feat_idx')
sum_shap = shap_df.groupby(['tf', 'gene', 'label', 'feat_type_name'])[['shap+', 'shap-']].sum().reset_index()
sum_shap = sum_shap.groupby([sum_over_type, 'label', 'feat_type_name'])[['shap+', 'shap-']].mean().reset_index()
sum_shap['label_name'] = sum_shap['label'].apply(lambda x: 'Responsive' if x == 1 else 'Non-responsive')
sum_shap['label_name'] = pd.Categorical(
sum_shap['label_name'], ordered=True, categories=['Responsive', 'Non-responsive'])
sum_shap_pos = sum_shap[[sum_over_type, 'label_name', 'feat_type_name', 'shap+']].copy().rename(columns={'shap+': 'shap'})
sum_shap_pos['shap_dir'] = 'SHAP > 0'
sum_shap_neg = sum_shap[[sum_over_type, 'label_name', 'feat_type_name', 'shap-']].copy().rename(columns={'shap-': 'shap'})
sum_shap_neg['shap_dir'] = 'SHAP < 0'
sum_signed_shap = pd.concat([sum_shap_pos, sum_shap_neg])
sum_signed_shap['shap_dir'] = | pd.Categorical(sum_signed_shap['shap_dir'], categories=['SHAP > 0', 'SHAP < 0']) | pandas.Categorical |
# The MIT License (MIT)
# Copyright (c) 2020 by Brockmann Consult GmbH and contributors
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import re
from datetime import datetime
from typing import List
from typing import Tuple, Optional
import pandas as pd
import xarray as xr
from .error import ConverterError
from .log import LOGGER
class DatasetPreProcessor:
def __init__(self,
input_variables: List[str] = None,
input_concat_dim: str = None,
input_datetime_format: str = None):
self._input_variables = input_variables
self._input_concat_dim = input_concat_dim
self._input_datetime_format = input_datetime_format
self._first_dataset_shown = False
def preprocess_dataset(self, ds: xr.Dataset) -> xr.Dataset:
if self._input_variables:
drop_variables = set(ds.variables).difference(self._input_variables)
ds = ds.drop_vars(drop_variables)
if self._input_concat_dim:
ds = ensure_dataset_has_concat_dim(ds, self._input_concat_dim,
datetime_format=self._input_datetime_format)
if not self._first_dataset_shown:
LOGGER.debug(f'First input dataset:\n{ds}')
self._first_dataset_shown = True
return ds
_RE_TO_DATETIME_FORMATS = [(re.compile(14 * '\\d'), '%Y%m%d%H%M%S'),
(re.compile(12 * '\\d'), '%Y%m%d%H%M'),
(re.compile(8 * '\\d'), '%Y%m%d'),
(re.compile(6 * '\\d'), '%Y%m'),
(re.compile(4 * '\\d'), '%Y')]
def ensure_dataset_has_concat_dim(ds: xr.Dataset,
concat_dim_name: str,
datetime_format: str = None) -> xr.Dataset:
"""
:param ds: Dataset to adjust
:param concat_dim_name: Name of dimension to be appended
:param datetime_format: Name of dimension to be appended
:return: Adjusted dataset
"""
concat_dim_var = None
if concat_dim_name in ds:
concat_dim_var = ds[concat_dim_name]
if concat_dim_var is not None:
if not concat_dim_var.dims:
# if the concat_dim_var does not yet have a dimension, add it
ds = ds.assign_coords({
concat_dim_name: xr.DataArray(concat_dim_var, dims=(concat_dim_name,))
})
elif concat_dim_name == 'time':
time_coverage_start, time_coverage_end = \
get_time_coverage_from_ds(ds, datetime_format=datetime_format)
time_coverage_start = time_coverage_start or time_coverage_end
time_coverage_end = time_coverage_end or time_coverage_start
ds = ds.assign_coords(
time=xr.DataArray([time_coverage_start + 0.5 * (time_coverage_end - time_coverage_start)],
dims=('time',),
attrs=dict(bounds='time_bnds')),
time_bnds=xr.DataArray([[time_coverage_start, time_coverage_end]],
dims=('time', 'bnds'))
)
concat_dim_var = ds.time
else:
# Can't do anything
raise ConverterError(f'Missing (coordinate) variable "{concat_dim_name}" for dimension "{concat_dim_name}".')
is_concat_dim_used = any((concat_dim_name in ds[var_name].dims) for var_name in ds.data_vars)
if not is_concat_dim_used:
concat_dim_bnds_name = concat_dim_var.attrs.get('bounds', f'{concat_dim_name}_bnds')
concat_dim_bnds_var = ds[concat_dim_bnds_name] if concat_dim_bnds_name in ds else None
# ds.expand_dims() will raise if coordinates exist, so remove them temporarily
if concat_dim_bnds_var is not None:
ds = ds.drop_vars([concat_dim_name, concat_dim_bnds_name])
else:
ds = ds.drop_vars(concat_dim_name)
# if concat_dim_name is still a dimension, drop it too
if concat_dim_name in ds.dims:
ds = ds.drop_dims(concat_dim_name)
# expand dataset by concat_dim_name/concat_dim_var, this will add the dimension and the coordinate
ds = ds.expand_dims({concat_dim_name: concat_dim_var})
# also (re)assign bounds coordinates
if concat_dim_bnds_var is not None:
ds = ds.assign_coords(time_bnds=concat_dim_bnds_var)
return ds
def get_time_coverage_from_ds(ds: xr.Dataset,
datetime_format: str = None) -> Tuple[datetime, datetime]:
time_coverage_start = ds.attrs.get('time_coverage_start')
if time_coverage_start is not None:
time_coverage_start = parse_timestamp(time_coverage_start, datetime_format=datetime_format)
time_coverage_end = ds.attrs.get('time_coverage_end')
if time_coverage_end is not None:
time_coverage_end = parse_timestamp(time_coverage_end, datetime_format=datetime_format)
time_coverage_start = time_coverage_start or time_coverage_end
time_coverage_end = time_coverage_end or time_coverage_start
if time_coverage_start and time_coverage_end:
return time_coverage_start, time_coverage_end
# TODO: use special parameters to parse time_coverage_start, time_coverage_end from source_path
# source_path = ds.encoding.get('source', '')
raise ConverterError('Missing time_coverage_start and/or time_coverage_end in dataset attributes.')
def parse_timestamp(string: str, datetime_format: str = None) \
-> Optional[datetime]:
try:
return | pd.to_datetime(string, format=datetime_format) | pandas.to_datetime |
#-*- coding:utf-8 -*-
from __future__ import print_function
import os,sys,sip,time
from datetime import datetime,timedelta
from qtpy.QtWidgets import QTreeWidgetItem,QMenu,QApplication,QAction,QMainWindow
from qtpy import QtGui,QtWidgets
from qtpy.QtCore import Qt,QUrl,QDate
from Graph import graphpage
from layout import Ui_MainWindow
from pandas import DataFrame as df
import pandas as pd
import tushare as ts
import pickle
import numpy as np
list1 = []
class MyUi(QMainWindow):
def __init__(self):
super(MyUi, self).__init__()
self.ui = Ui_MainWindow()
self.ui.setupUi(self)
cwd = os.getcwd()
cwd = str(cwd)
if os.path.isfile(cwd+"/time"):
with open("time","rb") as outfile:#reads current time
history = pickle.load(outfile)
if (datetime.now()-history).total_seconds()<43200: #measures if time elapse>12 hours
print("Less than 12 hours. Loading previously saved Pickle...")
else:
print("More than 12 hours. Updating Pickle...")
data = ts.get_industry_classified()
with open("class","wb+") as outfile:
pickle.dump(data,outfile)
now = datetime.now()
with open("time", "wb+") as outfile: #update time
pickle.dump(now, outfile)
else:
print("No Pickle found!") #If this is first time using tuchart in this directory
data = | df() | pandas.DataFrame |
"""
Unit test suite for OLS and PanelOLS classes
"""
# pylint: disable-msg=W0212
from __future__ import division
from datetime import datetime
import unittest
import nose
import numpy as np
from pandas import date_range, bdate_range
from pandas.core.panel import Panel
from pandas import DataFrame, Index, Series, notnull, datetools
from pandas.stats.api import ols
from pandas.stats.ols import _filter_data
from pandas.stats.plm import NonPooledPanelOLS, PanelOLS
from pandas.util.testing import (assert_almost_equal, assert_series_equal,
assert_frame_equal)
import pandas.util.testing as tm
from common import BaseTest
_have_statsmodels = True
try:
import statsmodels.api as sm
except ImportError:
try:
import scikits.statsmodels.api as sm
except ImportError:
_have_statsmodels = False
def _check_repr(obj):
repr(obj)
str(obj)
def _compare_ols_results(model1, model2):
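    # Moving (rolling/expanding) results expose a _window_type attribute, so
    # dispatch on its presence to the matching comparison helper.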
assert(type(model1) == type(model2))
if hasattr(model1, '_window_type'):
_compare_moving_ols(model1, model2)
else:
_compare_fullsample_ols(model1, model2)
def _compare_fullsample_ols(model1, model2):
assert_series_equal(model1.beta, model2.beta)
def _compare_moving_ols(model1, model2):
assert_frame_equal(model1.beta, model2.beta)
class TestOLS(BaseTest):
# TODO: Add tests for OLS y predict
# TODO: Right now we just check for consistency between full-sample and
# rolling/expanding results of the panel OLS. We should also cross-check
# with trusted implementations of panel OLS (e.g. R).
# TODO: Add tests for non pooled OLS.
@classmethod
def setUpClass(cls):
try:
import matplotlib as mpl
mpl.use('Agg', warn=False)
except ImportError:
pass
if not _have_statsmodels:
raise nose.SkipTest
def testOLSWithDatasets(self):
self.checkDataSet(sm.datasets.ccard.load(), skip_moving=True)
self.checkDataSet(sm.datasets.cpunish.load(), skip_moving=True)
self.checkDataSet(sm.datasets.longley.load(), skip_moving=True)
self.checkDataSet(sm.datasets.stackloss.load(), skip_moving=True)
self.checkDataSet(sm.datasets.copper.load())
self.checkDataSet(sm.datasets.scotland.load())
# degenerate case fails on some platforms
# self.checkDataSet(datasets.ccard.load(), 39, 49) # one col in X all 0s
def testWLS(self):
X = DataFrame(np.random.randn(30, 4), columns=['A', 'B', 'C', 'D'])
Y = Series(np.random.randn(30))
weights = X.std(1)
self._check_wls(X, Y, weights)
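        # Inject missing values into both the weights and y to exercise the
        # NaN-filtering path as well.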
weights.ix[[5, 15]] = np.nan
Y[[2, 21]] = np.nan
self._check_wls(X, Y, weights)
def _check_wls(self, x, y, weights):
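        # Fit with pandas ols, then rebuild the same fit with statsmodels WLS
        # on the rows that survive dropping NaNs in y and in the weights.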
result = ols(y=y, x=x, weights=1/weights)
combined = x.copy()
combined['__y__'] = y
combined['__weights__'] = weights
combined = combined.dropna()
endog = combined.pop('__y__').values
aweights = combined.pop('__weights__').values
exog = sm.add_constant(combined.values, prepend=False)
sm_result = sm.WLS(endog, exog, weights=1/aweights).fit()
assert_almost_equal(sm_result.params, result._beta_raw)
assert_almost_equal(sm_result.resid, result._resid_raw)
self.checkMovingOLS('rolling', x, y, weights=weights)
self.checkMovingOLS('expanding', x, y, weights=weights)
def checkDataSet(self, dataset, start=None, end=None, skip_moving=False):
exog = dataset.exog[start : end]
endog = dataset.endog[start : end]
x = DataFrame(exog, index=np.arange(exog.shape[0]),
columns=np.arange(exog.shape[1]))
y = Series(endog, index=np.arange(len(endog)))
self.checkOLS(exog, endog, x, y)
if not skip_moving:
self.checkMovingOLS('rolling', x, y)
self.checkMovingOLS('rolling', x, y, nw_lags=0)
self.checkMovingOLS('expanding', x, y, nw_lags=0)
self.checkMovingOLS('rolling', x, y, nw_lags=1)
self.checkMovingOLS('expanding', x, y, nw_lags=1)
self.checkMovingOLS('expanding', x, y, nw_lags=1, nw_overlap=True)
def checkOLS(self, exog, endog, x, y):
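        # Reference fit: statsmodels OLS with an explicit constant appended,
        # compared statistic-by-statistic against pandas ols on the same data.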
reference = sm.OLS(endog, sm.add_constant(exog, prepend=False)).fit()
result = ols(y=y, x=x)
# check that sparse version is the same
sparse_result = ols(y=y.to_sparse(), x=x.to_sparse())
_compare_ols_results(result, sparse_result)
assert_almost_equal(reference.params, result._beta_raw)
assert_almost_equal(reference.df_model, result._df_model_raw)
assert_almost_equal(reference.df_resid, result._df_resid_raw)
assert_almost_equal(reference.fvalue, result._f_stat_raw[0])
assert_almost_equal(reference.pvalues, result._p_value_raw)
assert_almost_equal(reference.rsquared, result._r2_raw)
assert_almost_equal(reference.rsquared_adj, result._r2_adj_raw)
assert_almost_equal(reference.resid, result._resid_raw)
assert_almost_equal(reference.bse, result._std_err_raw)
assert_almost_equal(reference.tvalues, result._t_stat_raw)
assert_almost_equal(reference.cov_params(), result._var_beta_raw)
assert_almost_equal(reference.fittedvalues, result._y_fitted_raw)
_check_non_raw_results(result)
def checkMovingOLS(self, window_type, x, y, weights=None, **kwds):
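        # A window of twice the column rank of X keeps every moving fit
        # overdetermined (more observations than estimated coefficients).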
window = sm.tools.tools.rank(x.values) * 2
moving = ols(y=y, x=x, weights=weights, window_type=window_type,
window=window, **kwds)
# check that sparse version is the same
sparse_moving = ols(y=y.to_sparse(), x=x.to_sparse(),
weights=weights,
window_type=window_type,
window=window, **kwds)
_compare_ols_results(moving, sparse_moving)
index = moving._index
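        # For each date the moving model reports, re-fit a static OLS on
        # exactly the slice of data that window covered and check agreement.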
for n, i in enumerate(moving._valid_indices):
if window_type == 'rolling' and i >= window:
prior_date = index[i - window + 1]
else:
prior_date = index[0]
date = index[i]
x_iter = {}
for k, v in x.iteritems():
x_iter[k] = v.truncate(before=prior_date, after=date)
y_iter = y.truncate(before=prior_date, after=date)
static = ols(y=y_iter, x=x_iter, weights=weights, **kwds)
self.compare(static, moving, event_index=i,
result_index=n)
_check_non_raw_results(moving)
FIELDS = ['beta', 'df', 'df_model', 'df_resid', 'f_stat', 'p_value',
'r2', 'r2_adj', 'rmse', 'std_err', 't_stat',
'var_beta']
def compare(self, static, moving, event_index=None,
result_index=None):
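        # The static model was fit on the window ending at event_index, so its
        # last raw residual and fitted value should equal the moving model's
        # values at that date.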
index = moving._index
# Check resid if we have a time index specified
if event_index is not None:
ref = static._resid_raw[-1]
label = index[event_index]
res = moving.resid[label]
assert_almost_equal(ref, res)
ref = static._y_fitted_raw[-1]
res = moving.y_fitted[label]
assert_almost_equal(ref, res)
# Check y_fitted
for field in self.FIELDS:
attr = '_%s_raw' % field
ref = getattr(static, attr)
res = getattr(moving, attr)
if result_index is not None:
res = res[result_index]
assert_almost_equal(ref, res)
def test_ols_object_dtype(self):
df = DataFrame(np.random.randn(20, 2), dtype=object)
model = ols(y=df[0], x=df[1])
summary = repr(model)
class TestOLSMisc(unittest.TestCase):
'''
    Additional test coverage for ols() using small synthetic (faux) data
'''
@classmethod
def setupClass(cls):
if not _have_statsmodels:
raise nose.SkipTest
def test_f_test(self):
x = tm.makeTimeDataFrame()
y = x.pop('A')
model = ols(y=y, x=x)
hyp = '1*B+1*C+1*D=0'
result = model.f_test(hyp)
hyp = ['1*B=0',
'1*C=0',
'1*D=0']
result = model.f_test(hyp)
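        # Jointly restricting every regressor to zero should reproduce the
        # model's overall F statistic.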
assert_almost_equal(result['f-stat'], model.f_stat['f-stat'])
self.assertRaises(Exception, model.f_test, '1*A=0')
def test_r2_no_intercept(self):
y = tm.makeTimeSeries()
x = tm.makeTimeDataFrame()
x_with = x.copy()
x_with['intercept'] = 1.
model1 = ols(y=y, x=x)
model2 = ols(y=y, x=x_with, intercept=False)
assert_series_equal(model1.beta, model2.beta)
# TODO: can we infer whether the intercept is there...
self.assert_(model1.r2 != model2.r2)
# rolling
model1 = ols(y=y, x=x, window=20)
model2 = ols(y=y, x=x_with, window=20, intercept=False)
assert_frame_equal(model1.beta, model2.beta)
self.assert_((model1.r2 != model2.r2).all())
def test_summary_many_terms(self):
x = DataFrame(np.random.randn(100, 20))
y = np.random.randn(100)
model = ols(y=y, x=x)
model.summary
def test_y_predict(self):
y = tm.makeTimeSeries()
x = tm.makeTimeDataFrame()
model1 = ols(y=y, x=x)
assert_series_equal(model1.y_predict, model1.y_fitted)
assert_almost_equal(model1._y_predict_raw, model1._y_fitted_raw)
def test_predict(self):
y = tm.makeTimeSeries()
x = tm.makeTimeDataFrame()
model1 = ols(y=y, x=x)
assert_series_equal(model1.predict(), model1.y_predict)
assert_series_equal(model1.predict(x=x), model1.y_predict)
assert_series_equal(model1.predict(beta=model1.beta), model1.y_predict)
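        # Reproduce y_predict by hand: append the intercept column and take
        # the dot product with the fitted betas.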
exog = x.copy()
exog['intercept'] = 1.
rs = Series(np.dot(exog.values, model1.beta.values), x.index)
assert_series_equal(model1.y_predict, rs)
x2 = x.reindex(columns=x.columns[::-1])
assert_series_equal(model1.predict(x=x2), model1.y_predict)
x3 = x2 + 10
pred3 = model1.predict(x=x3)
x3['intercept'] = 1.
x3 = x3.reindex(columns = model1.beta.index)
expected = Series(np.dot(x3.values, model1.beta.values), x3.index)
assert_series_equal(expected, pred3)
beta = Series(0., model1.beta.index)
pred4 = model1.predict(beta=beta)
assert_series_equal(Series(0., pred4.index), pred4)
def test_predict_longer_exog(self):
exogenous = {"1998": "4760","1999": "5904","2000": "4504",
"2001": "9808","2002": "4241","2003": "4086",
"2004": "4687","2005": "7686","2006": "3740",
"2007": "3075","2008": "3753","2009": "4679",
"2010": "5468","2011": "7154","2012": "4292",
"2013": "4283","2014": "4595","2015": "9194",
"2016": "4221","2017": "4520"}
endogenous = {"1998": "691", "1999": "1580", "2000": "80",
"2001": "1450", "2002": "555", "2003": "956",
"2004": "877", "2005": "614", "2006": "468",
"2007": "191"}
endog = Series(endogenous)
exog = Series(exogenous)
model = ols(y=endog, x=exog)
pred = model.y_predict
self.assert_(pred.index.equals(exog.index))
def test_longpanel_series_combo(self):
wp = tm.makePanel()
lp = wp.to_frame()
y = lp.pop('ItemA')
model = ols(y=y, x=lp, entity_effects=True, window=20)
self.assert_(notnull(model.beta.values).all())
self.assert_(isinstance(model, PanelOLS))
model.summary
def test_series_rhs(self):
y = tm.makeTimeSeries()
x = tm.makeTimeSeries()
model = ols(y=y, x=x)
expected = ols(y=y, x={'x' : x})
assert_series_equal(model.beta, expected.beta)
def test_various_attributes(self):
# just make sure everything "works". test correctness elsewhere
x = DataFrame(np.random.randn(100, 5))
y = np.random.randn(100)
model = ols(y=y, x=x, window=20)
series_attrs = ['rank', 'df', 'forecast_mean', 'forecast_vol']
for attr in series_attrs:
value = getattr(model, attr)
self.assert_(isinstance(value, Series))
# works
model._results
def test_catch_regressor_overlap(self):
df1 = tm.makeTimeDataFrame().ix[:, ['A', 'B']]
df2 = tm.makeTimeDataFrame().ix[:, ['B', 'C', 'D']]
y = tm.makeTimeSeries()
data = {'foo' : df1, 'bar' : df2}
self.assertRaises(Exception, ols, y=y, x=data)
def test_plm_ctor(self):
y = tm.makeTimeDataFrame()
x = {'a' : tm.makeTimeDataFrame(),
'b' : tm.makeTimeDataFrame()}
model = ols(y=y, x=x, intercept=False)
model.summary
model = ols(y=y, x=Panel(x))
model.summary
def test_plm_attrs(self):
y = tm.makeTimeDataFrame()
x = {'a' : tm.makeTimeDataFrame(),
'b' : tm.makeTimeDataFrame()}
rmodel = ols(y=y, x=x, window=10)
model = ols(y=y, x=x)
model.resid
rmodel.resid
def test_plm_lagged_y_predict(self):
y = tm.makeTimeDataFrame()
x = {'a' : tm.makeTimeDataFrame(),
'b' : tm.makeTimeDataFrame()}
model = ols(y=y, x=x, window=10)
result = model.lagged_y_predict(2)
def test_plm_f_test(self):
y = tm.makeTimeDataFrame()
x = {'a' : tm.makeTimeDataFrame(),
'b' : tm.makeTimeDataFrame()}
model = ols(y=y, x=x)
hyp = '1*a+1*b=0'
result = model.f_test(hyp)
hyp = ['1*a=0',
'1*b=0']
result = model.f_test(hyp)
assert_almost_equal(result['f-stat'], model.f_stat['f-stat'])
def test_plm_exclude_dummy_corner(self):
y = tm.makeTimeDataFrame()
x = {'a' : tm.makeTimeDataFrame(),
'b' : tm.makeTimeDataFrame()}
model = ols(y=y, x=x, entity_effects=True, dropped_dummies={'entity' : 'D'})
model.summary
self.assertRaises(Exception, ols, y=y, x=x, entity_effects=True,
dropped_dummies={'entity' : 'E'})
def test_columns_tuples_summary(self):
# #1837
X = DataFrame(np.random.randn(10, 2), columns=[('a', 'b'), ('c', 'd')])
Y = Series(np.random.randn(10))
# it works!
model = ols(y=Y, x=X)
model.summary
class TestPanelOLS(BaseTest):
FIELDS = ['beta', 'df', 'df_model', 'df_resid', 'f_stat',
'p_value', 'r2', 'r2_adj', 'rmse', 'std_err',
't_stat', 'var_beta']
_other_fields = ['resid', 'y_fitted']
def testFiltering(self):
result = ols(y=self.panel_y2, x=self.panel_x2)
x = result._x
index = x.index.get_level_values(0)
index = Index(sorted(set(index)))
exp_index = Index([datetime(2000, 1, 1), datetime(2000, 1, 3)])
        self.assertTrue(exp_index.equals(index))
index = x.index.get_level_values(1)
index = Index(sorted(set(index)))
exp_index = Index(['A', 'B'])
self.assertTrue(exp_index.equals(index))
x = result._x_filtered
index = x.index.get_level_values(0)
index = Index(sorted(set(index)))
exp_index = Index([datetime(2000, 1, 1),
datetime(2000, 1, 3),
datetime(2000, 1, 4)])
self.assertTrue(exp_index.equals(index))
assert_almost_equal(result._y.values.flat, [1, 4, 5])
exp_x = [[6, 14, 1],
[9, 17, 1],
[30, 48, 1]]
assert_almost_equal(exp_x, result._x.values)
exp_x_filtered = [[6, 14, 1],
[9, 17, 1],
[30, 48, 1],
[11, 20, 1],
[12, 21, 1]]
assert_almost_equal(exp_x_filtered, result._x_filtered.values)
self.assertTrue(result._x_filtered.index.levels[0].equals(
result.y_fitted.index))
def test_wls_panel(self):
y = tm.makeTimeDataFrame()
x = Panel({'x1' : tm.makeTimeDataFrame(),
'x2' : tm.makeTimeDataFrame()})
y.ix[[1, 7], 'A'] = np.nan
y.ix[[6, 15], 'B'] = np.nan
y.ix[[3, 20], 'C'] = np.nan
y.ix[[5, 11], 'D'] = np.nan
stack_y = y.stack()
stack_x = DataFrame(dict((k, v.stack())
for k, v in x.iterkv()))
weights = x.std('items')
stack_weights = weights.stack()
stack_y.index = stack_y.index._tuple_index
stack_x.index = stack_x.index._tuple_index
stack_weights.index = stack_weights.index._tuple_index
result = ols(y=y, x=x, weights=1/weights)
expected = ols(y=stack_y, x=stack_x, weights=1/stack_weights)
assert_almost_equal(result.beta, expected.beta)
for attr in ['resid', 'y_fitted']:
rvals = getattr(result, attr).stack().values
evals = getattr(expected, attr).values
assert_almost_equal(rvals, evals)
def testWithTimeEffects(self):
result = ols(y=self.panel_y2, x=self.panel_x2, time_effects=True)
assert_almost_equal(result._y_trans.values.flat, [0, -0.5, 0.5])
exp_x = [[0, 0], [-10.5, -15.5], [10.5, 15.5]]
assert_almost_equal(result._x_trans.values, exp_x)
# _check_non_raw_results(result)
def testWithEntityEffects(self):
result = ols(y=self.panel_y2, x=self.panel_x2, entity_effects=True)
assert_almost_equal(result._y.values.flat, [1, 4, 5])
exp_x = DataFrame([[0, 6, 14, 1], [0, 9, 17, 1], [1, 30, 48, 1]],
index=result._x.index, columns=['FE_B', 'x1', 'x2',
'intercept'],
dtype=float)
tm.assert_frame_equal(result._x, exp_x.ix[:, result._x.columns])
# _check_non_raw_results(result)
def testWithEntityEffectsAndDroppedDummies(self):
result = ols(y=self.panel_y2, x=self.panel_x2, entity_effects=True,
dropped_dummies={'entity' : 'B'})
assert_almost_equal(result._y.values.flat, [1, 4, 5])
exp_x = DataFrame([[1, 6, 14, 1], [1, 9, 17, 1], [0, 30, 48, 1]],
index=result._x.index, columns=['FE_A', 'x1', 'x2',
'intercept'],
dtype=float)
tm.assert_frame_equal(result._x, exp_x.ix[:, result._x.columns])
# _check_non_raw_results(result)
def testWithXEffects(self):
result = ols(y=self.panel_y2, x=self.panel_x2, x_effects=['x1'])
assert_almost_equal(result._y.values.flat, [1, 4, 5])
res = result._x
exp_x = DataFrame([[0, 0, 14, 1], [0, 1, 17, 1], [1, 0, 48, 1]],
columns=['x1_30', 'x1_9', 'x2', 'intercept'],
index=res.index, dtype=float)
assert_frame_equal(res, exp_x.reindex(columns=res.columns))
def testWithXEffectsAndDroppedDummies(self):
result = ols(y=self.panel_y2, x=self.panel_x2, x_effects=['x1'],
dropped_dummies={'x1' : 30})
res = result._x
assert_almost_equal(result._y.values.flat, [1, 4, 5])
exp_x = DataFrame([[1, 0, 14, 1], [0, 1, 17, 1], [0, 0, 48, 1]],
columns=['x1_6', 'x1_9', 'x2', 'intercept'],
index=res.index, dtype=float)
assert_frame_equal(res, exp_x.reindex(columns=res.columns))
def testWithXEffectsAndConversion(self):
result = ols(y=self.panel_y3, x=self.panel_x3, x_effects=['x1', 'x2'])
assert_almost_equal(result._y.values.flat, [1, 2, 3, 4])
exp_x = [[0, 0, 0, 1, 1], [1, 0, 0, 0, 1], [0, 1, 1, 0, 1],
[0, 0, 0, 1, 1]]
assert_almost_equal(result._x.values, exp_x)
exp_index = Index(['x1_B', 'x1_C', 'x2_baz', 'x2_foo', 'intercept'])
self.assertTrue(exp_index.equals(result._x.columns))
# _check_non_raw_results(result)
def testWithXEffectsAndConversionAndDroppedDummies(self):
result = ols(y=self.panel_y3, x=self.panel_x3, x_effects=['x1', 'x2'],
dropped_dummies={'x2' : 'foo'})
assert_almost_equal(result._y.values.flat, [1, 2, 3, 4])
exp_x = [[0, 0, 0, 0, 1], [1, 0, 1, 0, 1], [0, 1, 0, 1, 1],
[0, 0, 0, 0, 1]]
assert_almost_equal(result._x.values, exp_x)
exp_index = Index(['x1_B', 'x1_C', 'x2_bar', 'x2_baz', 'intercept'])
self.assertTrue(exp_index.equals(result._x.columns))
# _check_non_raw_results(result)
def testForSeries(self):
self.checkForSeries(self.series_panel_x, self.series_panel_y,
self.series_x, self.series_y)
self.checkForSeries(self.series_panel_x, self.series_panel_y,
self.series_x, self.series_y, nw_lags=0)
self.checkForSeries(self.series_panel_x, self.series_panel_y,
self.series_x, self.series_y, nw_lags=1,
nw_overlap=True)
def testRolling(self):
self.checkMovingOLS(self.panel_x, self.panel_y)
def testRollingWithFixedEffects(self):
self.checkMovingOLS(self.panel_x, self.panel_y,
entity_effects=True)
self.checkMovingOLS(self.panel_x, self.panel_y, intercept=False,
entity_effects=True)
def testRollingWithTimeEffects(self):
self.checkMovingOLS(self.panel_x, self.panel_y,
time_effects=True)
def testRollingWithNeweyWest(self):
self.checkMovingOLS(self.panel_x, self.panel_y,
nw_lags=1)
def testRollingWithEntityCluster(self):
self.checkMovingOLS(self.panel_x, self.panel_y,
cluster='entity')
def testRollingWithTimeEffectsAndEntityCluster(self):
self.checkMovingOLS(self.panel_x, self.panel_y,
time_effects=True, cluster='entity')
def testRollingWithTimeCluster(self):
self.checkMovingOLS(self.panel_x, self.panel_y,
cluster='time')
def testRollingWithNeweyWestAndEntityCluster(self):
self.checkMovingOLS(self.panel_x, self.panel_y,
nw_lags=1, cluster='entity')
def testRollingWithNeweyWestAndTimeEffectsAndEntityCluster(self):
self.checkMovingOLS(self.panel_x, self.panel_y,
nw_lags=1, cluster='entity',
time_effects=True)
def testExpanding(self):
self.checkMovingOLS(self.panel_x, self.panel_y, window_type='expanding')
def testNonPooled(self):
self.checkNonPooled(y=self.panel_y, x=self.panel_x)
self.checkNonPooled(y=self.panel_y, x=self.panel_x,
window_type='rolling', window=25, min_periods=10)
def checkNonPooled(self, x, y, **kwds):
# For now, just check that it doesn't crash
result = ols(y=y, x=x, pool=False, **kwds)
_check_repr(result)
for attr in NonPooledPanelOLS.ATTRIBUTES:
_check_repr(getattr(result, attr))
def checkMovingOLS(self, x, y, window_type='rolling', **kwds):
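        # Strategy: for each valid index of the moving (rolling/expanding) model,
        # re-fit a plain static OLS on exactly that window of data and check that
        # every reported statistic matches the corresponding slice of the moving
        # results.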
window = 25 # must be larger than rank of x
moving = ols(y=y, x=x, window_type=window_type,
window=window, **kwds)
index = moving._index
for n, i in enumerate(moving._valid_indices):
if window_type == 'rolling' and i >= window:
prior_date = index[i - window + 1]
else:
prior_date = index[0]
date = index[i]
x_iter = {}
for k, v in x.iteritems():
x_iter[k] = v.truncate(before=prior_date, after=date)
y_iter = y.truncate(before=prior_date, after=date)
static = ols(y=y_iter, x=x_iter, **kwds)
self.compare(static, moving, event_index=i,
result_index=n)
_check_non_raw_results(moving)
def checkForSeries(self, x, y, series_x, series_y, **kwds):
# Consistency check with simple OLS.
result = ols(y=y, x=x, **kwds)
reference = ols(y=series_y, x=series_x, **kwds)
self.compare(reference, result)
def compare(self, static, moving, event_index=None,
result_index=None):
# Check resid if we have a time index specified
if event_index is not None:
staticSlice = _period_slice(static, -1)
movingSlice = _period_slice(moving, event_index)
ref = static._resid_raw[staticSlice]
res = moving._resid_raw[movingSlice]
assert_almost_equal(ref, res)
ref = static._y_fitted_raw[staticSlice]
res = moving._y_fitted_raw[movingSlice]
assert_almost_equal(ref, res)
# Check y_fitted
for field in self.FIELDS:
attr = '_%s_raw' % field
ref = getattr(static, attr)
res = getattr(moving, attr)
if result_index is not None:
res = res[result_index]
assert_almost_equal(ref, res)
def test_auto_rolling_window_type(self):
data = tm.makeTimeDataFrame()
y = data.pop('A')
window_model = ols(y=y, x=data, window=20, min_periods=10)
rolling_model = ols(y=y, x=data, window=20, min_periods=10,
window_type='rolling')
assert_frame_equal(window_model.beta, rolling_model.beta)
def _check_non_raw_results(model):
_check_repr(model)
_check_repr(model.resid)
_check_repr(model.summary_as_matrix)
_check_repr(model.y_fitted)
_check_repr(model.y_predict)
def _period_slice(panelModel, i):
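    # Map the i-th period on the panel model's major axis to the half-open row
    # slice of the transformed (stacked, long-format) design matrix that belongs
    # to that period.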
index = panelModel._x_trans.index
period = index.levels[0][i]
L, R = index.get_major_bounds(period, period)
return slice(L, R)
class TestOLSFilter(unittest.TestCase):
def setUp(self):
date_index = date_range(datetime(2009, 12, 11), periods=3,
freq=datetools.bday)
ts = Series([3, 1, 4], index=date_index)
self.TS1 = ts
date_index = date_range(datetime(2009, 12, 11), periods=5,
freq=datetools.bday)
ts = Series([1, 5, 9, 2, 6], index=date_index)
self.TS2 = ts
date_index = date_range(datetime(2009, 12, 11), periods=3,
freq=datetools.bday)
ts = Series([5, np.nan, 3], index=date_index)
self.TS3 = ts
date_index = date_range(datetime(2009, 12, 11), periods=5,
freq=datetools.bday)
ts = | Series([np.nan, 5, 8, 9, 7], index=date_index) | pandas.Series |
from sox import Transformer
import sox
from glob import glob
import tensorflow as tf
import pandas as pd
import os
import unicodedata
import tqdm
import logging
import librosa
import numpy as np
import soundfile
from sklearn.model_selection import train_test_split
logging.basicConfig(level=logging.NOTSET)
logging.getLogger('sox').setLevel(logging.ERROR)
FLAGS = tf.compat.v1.app.flags.FLAGS
tfm = Transformer()
tfm.set_output_format(rate=16000)
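# The single sox Transformer above is reused for every clip: tfm.build(src, dst)
# below converts each Common Voice mp3 into a 16 kHz wav, which is then trimmed
# with librosa and re-written with soundfile.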
def main(_):
source_dir = FLAGS.source_dir
data = []
df_details = pd.read_csv(os.path.join(source_dir, "validated.tsv"), sep="\t", header=0)
with tqdm.tqdm(total=len(df_details.index)) as bar:
for i in df_details.index:
file_name = df_details["path"][i]
source_file = os.path.join(source_dir, "clips/" + file_name)
wav_file = os.path.join(os.path.dirname(__file__), "../data/common-voice-mozilla/Common-Voice-Mozilla/wav-files/" +
file_name.split(".mp3")[0] + ".wav")
transcript = df_details["sentence"][i]
if pd.isnull(transcript):
continue
transcript = unicodedata.normalize("NFKD", transcript) \
.encode("ascii", "ignore") \
.decode("ascii", "ignore")
transcript = transcript.lower().strip()
try:
if not os.path.exists(wav_file):
tfm.build(source_file, wav_file)
y, sr = librosa.load(wav_file, sr=None)
yt, index = librosa.effects.trim(y, top_db=10)
yt = y[max(index[0] - 40000, 0): min(index[1] + 40000, len(y))]
soundfile.write(wav_file, yt, sr)
wav_filesize = os.path.getsize(wav_file)
data.append((os.path.abspath(wav_file), wav_filesize, transcript))
except sox.core.SoxError:
logging.info(f"Error in file: {source_file}")
bar.update(1)
train_data, test_data = train_test_split(data, test_size=FLAGS.test_size, random_state=FLAGS.random_state)
df_train = | pd.DataFrame(data=train_data, columns=["wav_filename", "wav_filesize", "transcript"]) | pandas.DataFrame |
# Import libraries
import pandas  # import the pandas library
import time  # import the time library
import datetime  # import the date and time library
from datetime import datetime  # import the datetime class
import os  # import the os library
from termcolor import colored  # import the termcolor library
import sqlite3  # import the sqlite3 library
os.system('CLS')  # clear the terminal
# Section: data loading from CSV (database)
"""-----------------------------------------------------------------------------------------------------------------------"""
conn = sqlite3.connect('./database.db')  # connect to the database
matrixpandas = pandas.read_sql_query("SELECT * FROM productos", conn)  # load data from the stock database
matriz = matrixpandas.values.tolist()  # convert the matrix to a list
registros = pandas.read_sql_query("SELECT * FROM registros", conn)  # load data from the records database
registros = registros.values.tolist()  # convert the matrix to a list
"""-----------------------------------------------------------------------------------------------------------------------"""
# Section: functions
"""-----------------------------------------------------------------------------------------------------------------------"""
# function to print the product matrix
def print_data(matriz):
    os.system('CLS')
    print_matriz = pandas.DataFrame(
        matriz, columns=["code", "name", "type", "stock", "repos", "price", "last_update"])  # build the matrix as a pandas DataFrame
    print("Imprimiendo matriz de datos...")  # progress message
    time.sleep(1)  # wait 1 second
    print(print_matriz)  # print the stock matrix
    print(" ")
    decition = input(
        "Cuando desee regresar al menú principal ingrese cualquier tecla: ")  # return to the main menu
    os.system('CLS')  # clear the terminal
    time.sleep(1)  # wait 1 second
# function to print the records matrix
def print_registros(registros):
    print_registros = pandas.DataFrame(
        registros, columns=["code", "variacion", "motivo", "timestamp"])  # build the matrix as a pandas DataFrame
    print("Imprimiendo matriz de datos...")  # progress message
    time.sleep(1)  # wait 1 second
    print(print_registros)  # print the records matrix
    print(" ")
    decition = input(
        "Cuando desee regresar al menú principal ingrese cualquier tecla: ")  # return to the main menu
    os.system('CLS')  # clear the terminal
    time.sleep(1)  # wait 1 second
# function to look up the stock of a product
def product_stock(matriz):
    os.system("CLS")  # clear the terminal
    founded = False  # flag to record whether the product was found
    stock = (input("Ingrese el código del producto a consultar stock: ")).upper()  # capture the product code to look up
    os.system('CLS')  # clear the terminal
    for i in range(len(matriz)):  # iterate over the matrix
        if stock == matriz[i][0]:  # if the product code was found in the matrix
            print("El stock actual del producto ", stock, "es: ", matriz[i][3])  # print the product stock
            founded = True  # set the flag to True
            input("Ingrese cualquier tecla cuando desee volver al menu principal: ")  # return to the main menu
            time.sleep(1)  # wait 1 second
            os.system("CLS")  # clear the terminal
    if founded == False:  # if the product code was not found in the matrix
        print("No se encontro el codigo")  # error message
        time.sleep(1)  # wait 1 second
        os.system("CLS")  # clear the terminal
        print(colored("- 1.", "blue", attrs=["bold"]), "Volver a intentar ")  # retry option
        print(colored("- 2.", "blue",
                      attrs=["bold"]), "Volver al menú principal")  # back-to-main-menu option
        choose = (input("Ingrese una opción: ")).upper()  # capture the option
        if choose == "1":  # if the option is 1
            product_stock(matriz)  # try again
        elif choose == "2":  # if the option is 2
            time.sleep(1)  # wait 1 second
            os.system("CLS")  # clear the terminal
# function to filter products by type
def product_type(matriz):
    type_product = input(
        "Ingrese la categoria de producto por el que desea filtrar: ")  # capture the product type to filter by
    a = len(matriz)  # get the length of the matrix
    lista = list()  # create a list
    for i in range(a):  # iterate over the matrix
        if (matriz[i][2]).upper() == (type_product).upper():  # if the product type equals the captured type
            lista.append(matriz[i])  # add the product to the list
    if len(lista) != 0:
        c = pandas.DataFrame(
            lista, columns=["code", "name", "type", "stock", "repos", "price", "last_update"])  # build the matrix as a pandas DataFrame
        os.system('CLS')  # clear the terminal
        print(c)  # print the product matrix
        print(" ")
        decition = input(
            "Cuando desee regresar al menú principal ingrese cualquier tecla: ")  # return to the main menu
        os.system('CLS')  # clear the terminal
        time.sleep(1)  # wait 1 second
    else:
        print("No se encontraron productos con ese tipo")  # error message
        time.sleep(1)  # wait 1 second
        os.system("CLS")  # clear the terminal
        print(colored("- 1.", "blue", attrs=["bold"]), "Volver a intentar ")  # retry option
        print(colored("- 2.", "blue",
                      attrs=["bold"]), "Volver al menú principal")  # back-to-main-menu option
        choose = (input("Ingrese una opción: ")).upper()  # capture the option
        if choose == "1":  # if the option is 1
            product_type(matriz)  # try again
        elif choose == "2":  # if the option is 2
            time.sleep(1)  # wait 1 second
            os.system("CLS")  # clear the terminal
# function to get the current time
def get_current_time():
    time_update = datetime.now()  # get the current date and time
    now = time_update.strftime("%d/%m/%Y %H:%M:%S")  # format the current date and time
    return now  # return the date
# function to warn when a product needs restocking
def alert(matriz):
    time.sleep(0.2)  # wait 0.2 seconds
    os.system("CLS")  # clear the terminal
    to_repos = list()  # create a list for the products to restock
    codes_to_repos = list()  # create a list for the codes of the products to restock
    for i in range(len(matriz)):  # iterate over the matrix
        if int(matriz[i][3]) <= int(matriz[i][4]):  # if the stock is less than or equal to the restock point
            to_repos.append(matriz[i])  # add the product to the list
            codes_to_repos.append(matriz[i][0])  # add the product code to the list
    to_repos = pandas.DataFrame(to_repos, columns=["code", "name", "type", "stock", "repos", "price", "last_update"])  # build the matrix as a pandas DataFrame
    if len(codes_to_repos) > 0:  # if there are products to restock
        print("Los codigos a reponer son: ")  # header for the codes to restock
        for i in codes_to_repos:  # iterate over the list of codes to restock
            print(i, end=" ")  # print the codes to restock
        print("")
        print("-----------------------------")
        print(" ")
        print(to_repos)  # print the matrix of products to restock
        print(" ")
        a = input("Ingrese una tecla cuando desee volver al menu principal: ")  # return to the main menu
        os.system('CLS')  # clear the terminal
    else:
        print("No hay ningun codigo a reponer por el momento.")  # message when there is nothing to restock
        os.system('CLS')  # clear the terminal
# function to add a new product
def add_new_product(matriz):
    new_product = list()  # create a list to store the new product's data
    code = input("Ingresa el codigo del producto que desea agregar: ")  # capture the product code
    name = input("Ingresa el nombre del producto que va a agregar: ")  # capture the product name
    type_product = input("Ingresa la categoria del producto: ")  # capture the product category
    stock = int(input("Ingresa el stock inicial del producto, puede ser 0: "))  # capture the initial stock
    reposition = int(input("Punto de reposicion del producto: "))  # capture the restock point
    price = input("Ingresa el precio del producto: ")  # capture the product price
    new_product.append(code.upper())  # add the code to the new product
    new_product.append(name)  # add the name to the new product
    new_product.append(type_product)  # add the category to the new product
    new_product.append(stock)  # add the stock to the new product
    new_product.append(reposition)  # add the restock point to the new product
    new_product.append(price)  # add the price to the new product
    new_product.append(get_current_time())  # add the current date and time to the new product
    matriz.append(new_product)  # add the new product to the matrix
    print("El producto " + code.upper() + " fue agregado")  # confirmation message
    time.sleep(2)  # wait 2 seconds
    os.system('CLS')  # clear the terminal
df = | pandas.DataFrame(matriz) | pandas.DataFrame |
#Author: <NAME> 2019
#merge tables
import pandas as pd
import click
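# Typical invocation of this CLI (the script and file names below are
# illustrative assumptions, not taken from the original source):
#   python merge_counts.py --counts counts.tsv --assignment assignment.tsv \
#       --name DNA --output merged.tsv --statistic stats.tsv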
# options
@click.command()
@click.option('--counts',
'counts_file',
required=True,
type=click.Path(exists=True, readable=True),
help='Barcodes (DNA or RNA).')
@click.option('--assignment',
'assignment_file',
required=True,
type=click.Path(exists=True, readable=True),
help='Assignment tsv file.')
@click.option('--name',
'name',
required=True,
type=str,
help='Name the statistic should be written with.')
@click.option('--output',
'output_file',
required=True,
type=click.Path(writable=True),
help='Output file.')
@click.option('--statistic',
'statistic_file',
required=True,
type=click.Path(writable=True),
help='Statistic output file.')
def cli(counts_file, assignment_file, output_file, statistic_file, name):
# statistic
statistic = pd.DataFrame(data={'Experiment' : [name], 'Barcodes': [0], 'Counts' : [0], 'Average counts' : [0],
'Assigned barcodes' : [0], 'Assigned counts' : [0], 'Average assigned counts' : [0],
'Fraction assigned barcodes' : [0], 'Fraction assigned counts' : [0]})
# Association file
click.echo("Read assignment file...")
assoc_barcodes_oligo=pd.read_csv(assignment_file, header=None, usecols=[0,1], sep="\t", names=['Barcode','Oligo'])
assoc_barcodes = set(assoc_barcodes_oligo.Barcode)
#get count df
click.echo("Read count file...")
counts= | pd.read_csv(counts_file, header=None, sep="\t", names=['Barcode','Counts']) | pandas.read_csv |
"""Tests for dynamic validator."""
from datetime import date, datetime
import numpy as np
import pandas as pd
from delphi_validator.report import ValidationReport
from delphi_validator.dynamic import DynamicValidator
class TestCheckRapidChange:
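    # Minimal validator configuration shared by the tests in this class: an
    # empty data_source, a one-day span ending on 2020-09-02, and no per-signal
    # lag expectations.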
params = {"data_source": "", "span_length": 1,
"end_date": "2020-09-02", "expected_lag": {}}
def test_same_df(self):
validator = DynamicValidator(self.params)
report = ValidationReport([])
test_df = pd.DataFrame([date.today()] * 5, columns=["time_value"])
ref_df = pd.DataFrame([date.today()] * 5, columns=["time_value"])
validator.check_rapid_change_num_rows(
test_df, ref_df, date.today(), "geo", "signal", report)
assert len(report.raised_errors) == 0
def test_0_vs_many(self):
validator = DynamicValidator(self.params)
report = ValidationReport([])
time_value = datetime.combine(date.today(), datetime.min.time())
test_df = pd.DataFrame([time_value] * 5, columns=["time_value"])
ref_df = pd.DataFrame([time_value] * 1, columns=["time_value"])
validator.check_rapid_change_num_rows(
test_df, ref_df, time_value, "geo", "signal", report)
assert len(report.raised_errors) == 1
assert "check_rapid_change_num_rows" in [
err.check_data_id[0] for err in report.raised_errors]
class TestCheckAvgValDiffs:
params = {"data_source": "", "span_length": 1,
"end_date": "2020-09-02", "expected_lag": {}}
def test_same_val(self):
validator = DynamicValidator(self.params)
report = ValidationReport([])
data = {"val": [1, 1, 1, 2, 0, 1], "se": [np.nan] * 6,
"sample_size": [np.nan] * 6, "geo_id": ["1"] * 6}
test_df = pd.DataFrame(data)
ref_df = pd.DataFrame(data)
validator.check_avg_val_vs_reference(
test_df, ref_df, date.today(), "geo", "signal", report)
assert len(report.raised_errors) == 0
def test_same_se(self):
validator = DynamicValidator(self.params)
report = ValidationReport([])
data = {"val": [np.nan] * 6, "se": [1, 1, 1, 2, 0, 1],
"sample_size": [np.nan] * 6, "geo_id": ["1"] * 6}
test_df = pd.DataFrame(data)
ref_df = | pd.DataFrame(data) | pandas.DataFrame |
"""SQL io tests
The SQL tests are broken down in different classes:
- `PandasSQLTest`: base class with common methods for all test classes
- Tests for the public API (only tests with sqlite3)
- `_TestSQLApi` base class
- `TestSQLApi`: test the public API with sqlalchemy engine
- `TestSQLiteFallbackApi`: test the public API with a sqlite DBAPI
connection
- Tests for the different SQL flavors (flavor specific type conversions)
- Tests for the sqlalchemy mode: `_TestSQLAlchemy` is the base class with
common methods, `_TestSQLAlchemyConn` tests the API with a SQLAlchemy
Connection object. The different tested flavors (sqlite3, MySQL,
PostgreSQL) derive from the base class
- Tests for the fallback mode (`TestSQLiteFallback`)
"""
import csv
from datetime import date, datetime, time
from io import StringIO
import sqlite3
import warnings
import numpy as np
import pytest
from pandas.core.dtypes.common import is_datetime64_dtype, is_datetime64tz_dtype
import pandas as pd
from pandas import (
DataFrame,
Index,
MultiIndex,
Series,
Timestamp,
concat,
date_range,
isna,
to_datetime,
to_timedelta,
)
import pandas._testing as tm
import pandas.io.sql as sql
from pandas.io.sql import read_sql_query, read_sql_table
try:
import sqlalchemy
import sqlalchemy.schema
import sqlalchemy.sql.sqltypes as sqltypes
from sqlalchemy.ext import declarative
from sqlalchemy.orm import session as sa_session
SQLALCHEMY_INSTALLED = True
except ImportError:
SQLALCHEMY_INSTALLED = False
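# A minimal sketch of the public round-trip exercised throughout this module,
# assuming an in-memory sqlite3 connection (the table and column names are
# illustrative only, not fixtures used by the tests):
#
#   conn = sqlite3.connect(":memory:")
#   sql.to_sql(DataFrame({"a": [1, 2]}), "example", conn, index=False)
#   sql.read_sql_query("SELECT * FROM example", conn)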
SQL_STRINGS = {
"create_iris": {
"sqlite": """CREATE TABLE iris (
"SepalLength" REAL,
"SepalWidth" REAL,
"PetalLength" REAL,
"PetalWidth" REAL,
"Name" TEXT
)""",
"mysql": """CREATE TABLE iris (
`SepalLength` DOUBLE,
`SepalWidth` DOUBLE,
`PetalLength` DOUBLE,
`PetalWidth` DOUBLE,
`Name` VARCHAR(200)
)""",
"postgresql": """CREATE TABLE iris (
"SepalLength" DOUBLE PRECISION,
"SepalWidth" DOUBLE PRECISION,
"PetalLength" DOUBLE PRECISION,
"PetalWidth" DOUBLE PRECISION,
"Name" VARCHAR(200)
)""",
},
"insert_iris": {
"sqlite": """INSERT INTO iris VALUES(?, ?, ?, ?, ?)""",
"mysql": """INSERT INTO iris VALUES(%s, %s, %s, %s, "%s");""",
"postgresql": """INSERT INTO iris VALUES(%s, %s, %s, %s, %s);""",
},
"create_test_types": {
"sqlite": """CREATE TABLE types_test_data (
"TextCol" TEXT,
"DateCol" TEXT,
"IntDateCol" INTEGER,
"IntDateOnlyCol" INTEGER,
"FloatCol" REAL,
"IntCol" INTEGER,
"BoolCol" INTEGER,
"IntColWithNull" INTEGER,
"BoolColWithNull" INTEGER
)""",
"mysql": """CREATE TABLE types_test_data (
`TextCol` TEXT,
`DateCol` DATETIME,
`IntDateCol` INTEGER,
`IntDateOnlyCol` INTEGER,
`FloatCol` DOUBLE,
`IntCol` INTEGER,
`BoolCol` BOOLEAN,
`IntColWithNull` INTEGER,
`BoolColWithNull` BOOLEAN
)""",
"postgresql": """CREATE TABLE types_test_data (
"TextCol" TEXT,
"DateCol" TIMESTAMP,
"DateColWithTz" TIMESTAMP WITH TIME ZONE,
"IntDateCol" INTEGER,
"IntDateOnlyCol" INTEGER,
"FloatCol" DOUBLE PRECISION,
"IntCol" INTEGER,
"BoolCol" BOOLEAN,
"IntColWithNull" INTEGER,
"BoolColWithNull" BOOLEAN
)""",
},
"insert_test_types": {
"sqlite": {
"query": """
INSERT INTO types_test_data
VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?)
""",
"fields": (
"TextCol",
"DateCol",
"IntDateCol",
"IntDateOnlyCol",
"FloatCol",
"IntCol",
"BoolCol",
"IntColWithNull",
"BoolColWithNull",
),
},
"mysql": {
"query": """
INSERT INTO types_test_data
VALUES("%s", %s, %s, %s, %s, %s, %s, %s, %s)
""",
"fields": (
"TextCol",
"DateCol",
"IntDateCol",
"IntDateOnlyCol",
"FloatCol",
"IntCol",
"BoolCol",
"IntColWithNull",
"BoolColWithNull",
),
},
"postgresql": {
"query": """
INSERT INTO types_test_data
VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s)
""",
"fields": (
"TextCol",
"DateCol",
"DateColWithTz",
"IntDateCol",
"IntDateOnlyCol",
"FloatCol",
"IntCol",
"BoolCol",
"IntColWithNull",
"BoolColWithNull",
),
},
},
"read_parameters": {
"sqlite": "SELECT * FROM iris WHERE Name=? AND SepalLength=?",
"mysql": 'SELECT * FROM iris WHERE `Name`="%s" AND `SepalLength`=%s',
"postgresql": 'SELECT * FROM iris WHERE "Name"=%s AND "SepalLength"=%s',
},
"read_named_parameters": {
"sqlite": """
SELECT * FROM iris WHERE Name=:name AND SepalLength=:length
""",
"mysql": """
SELECT * FROM iris WHERE
`Name`="%(name)s" AND `SepalLength`=%(length)s
""",
"postgresql": """
SELECT * FROM iris WHERE
"Name"=%(name)s AND "SepalLength"=%(length)s
""",
},
"create_view": {
"sqlite": """
CREATE VIEW iris_view AS
SELECT * FROM iris
"""
},
}
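# Statements above are looked up first by name and then by flavor, e.g.
# SQL_STRINGS["create_iris"][self.flavor] in the loading helpers below.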
class MixInBase:
def teardown_method(self, method):
# if setup fails, there may not be a connection to close.
if hasattr(self, "conn"):
for tbl in self._get_all_tables():
self.drop_table(tbl)
self._close_conn()
class MySQLMixIn(MixInBase):
def drop_table(self, table_name):
cur = self.conn.cursor()
cur.execute(f"DROP TABLE IF EXISTS {sql._get_valid_mysql_name(table_name)}")
self.conn.commit()
def _get_all_tables(self):
cur = self.conn.cursor()
cur.execute("SHOW TABLES")
return [table[0] for table in cur.fetchall()]
def _close_conn(self):
from pymysql.err import Error
try:
self.conn.close()
except Error:
pass
class SQLiteMixIn(MixInBase):
def drop_table(self, table_name):
self.conn.execute(
f"DROP TABLE IF EXISTS {sql._get_valid_sqlite_name(table_name)}"
)
self.conn.commit()
def _get_all_tables(self):
c = self.conn.execute("SELECT name FROM sqlite_master WHERE type='table'")
return [table[0] for table in c.fetchall()]
def _close_conn(self):
self.conn.close()
class SQLAlchemyMixIn(MixInBase):
def drop_table(self, table_name):
sql.SQLDatabase(self.conn).drop_table(table_name)
def _get_all_tables(self):
meta = sqlalchemy.schema.MetaData(bind=self.conn)
meta.reflect()
table_list = meta.tables.keys()
return table_list
def _close_conn(self):
pass
class PandasSQLTest:
"""
Base class with common private methods for SQLAlchemy and fallback cases.
"""
def _get_exec(self):
if hasattr(self.conn, "execute"):
return self.conn
else:
return self.conn.cursor()
@pytest.fixture(params=[("data", "iris.csv")])
def load_iris_data(self, datapath, request):
import io
iris_csv_file = datapath(*request.param)
if not hasattr(self, "conn"):
self.setup_connect()
self.drop_table("iris")
self._get_exec().execute(SQL_STRINGS["create_iris"][self.flavor])
with io.open(iris_csv_file, mode="r", newline=None) as iris_csv:
r = csv.reader(iris_csv)
next(r) # skip header row
ins = SQL_STRINGS["insert_iris"][self.flavor]
for row in r:
self._get_exec().execute(ins, row)
def _load_iris_view(self):
self.drop_table("iris_view")
self._get_exec().execute(SQL_STRINGS["create_view"][self.flavor])
def _check_iris_loaded_frame(self, iris_frame):
pytype = iris_frame.dtypes[0].type
row = iris_frame.iloc[0]
assert issubclass(pytype, np.floating)
tm.equalContents(row.values, [5.1, 3.5, 1.4, 0.2, "Iris-setosa"])
def _load_test1_data(self):
columns = ["index", "A", "B", "C", "D"]
data = [
(
"2000-01-03 00:00:00",
0.980268513777,
3.68573087906,
-0.364216805298,
-1.15973806169,
),
(
"2000-01-04 00:00:00",
1.04791624281,
-0.0412318367011,
-0.16181208307,
0.212549316967,
),
(
"2000-01-05 00:00:00",
0.498580885705,
0.731167677815,
-0.537677223318,
1.34627041952,
),
(
"2000-01-06 00:00:00",
1.12020151869,
1.56762092543,
0.00364077397681,
0.67525259227,
),
]
self.test_frame1 = DataFrame(data, columns=columns)
def _load_test2_data(self):
df = DataFrame(
dict(
A=[4, 1, 3, 6],
B=["asd", "gsq", "ylt", "jkl"],
C=[1.1, 3.1, 6.9, 5.3],
D=[False, True, True, False],
E=["1990-11-22", "1991-10-26", "1993-11-26", "1995-12-12"],
)
)
df["E"] = to_datetime(df["E"])
self.test_frame2 = df
def _load_test3_data(self):
columns = ["index", "A", "B"]
data = [
("2000-01-03 00:00:00", 2 ** 31 - 1, -1.987670),
("2000-01-04 00:00:00", -29, -0.0412318367011),
("2000-01-05 00:00:00", 20000, 0.731167677815),
("2000-01-06 00:00:00", -290867, 1.56762092543),
]
self.test_frame3 = DataFrame(data, columns=columns)
def _load_raw_sql(self):
self.drop_table("types_test_data")
self._get_exec().execute(SQL_STRINGS["create_test_types"][self.flavor])
ins = SQL_STRINGS["insert_test_types"][self.flavor]
data = [
{
"TextCol": "first",
"DateCol": "2000-01-03 00:00:00",
"DateColWithTz": "2000-01-01 00:00:00-08:00",
"IntDateCol": 535852800,
"IntDateOnlyCol": 20101010,
"FloatCol": 10.10,
"IntCol": 1,
"BoolCol": False,
"IntColWithNull": 1,
"BoolColWithNull": False,
},
{
"TextCol": "first",
"DateCol": "2000-01-04 00:00:00",
"DateColWithTz": "2000-06-01 00:00:00-07:00",
"IntDateCol": 1356998400,
"IntDateOnlyCol": 20101212,
"FloatCol": 10.10,
"IntCol": 1,
"BoolCol": False,
"IntColWithNull": None,
"BoolColWithNull": None,
},
]
for d in data:
self._get_exec().execute(
ins["query"], [d[field] for field in ins["fields"]]
)
def _count_rows(self, table_name):
result = (
self._get_exec()
.execute(f"SELECT count(*) AS count_1 FROM {table_name}")
.fetchone()
)
return result[0]
def _read_sql_iris(self):
iris_frame = self.pandasSQL.read_query("SELECT * FROM iris")
self._check_iris_loaded_frame(iris_frame)
def _read_sql_iris_parameter(self):
query = SQL_STRINGS["read_parameters"][self.flavor]
params = ["Iris-setosa", 5.1]
iris_frame = self.pandasSQL.read_query(query, params=params)
self._check_iris_loaded_frame(iris_frame)
def _read_sql_iris_named_parameter(self):
query = SQL_STRINGS["read_named_parameters"][self.flavor]
params = {"name": "Iris-setosa", "length": 5.1}
iris_frame = self.pandasSQL.read_query(query, params=params)
self._check_iris_loaded_frame(iris_frame)
def _to_sql(self, method=None):
self.drop_table("test_frame1")
self.pandasSQL.to_sql(self.test_frame1, "test_frame1", method=method)
assert self.pandasSQL.has_table("test_frame1")
num_entries = len(self.test_frame1)
num_rows = self._count_rows("test_frame1")
assert num_rows == num_entries
# Nuke table
self.drop_table("test_frame1")
def _to_sql_empty(self):
self.drop_table("test_frame1")
self.pandasSQL.to_sql(self.test_frame1.iloc[:0], "test_frame1")
def _to_sql_fail(self):
self.drop_table("test_frame1")
self.pandasSQL.to_sql(self.test_frame1, "test_frame1", if_exists="fail")
assert self.pandasSQL.has_table("test_frame1")
msg = "Table 'test_frame1' already exists"
with pytest.raises(ValueError, match=msg):
self.pandasSQL.to_sql(self.test_frame1, "test_frame1", if_exists="fail")
self.drop_table("test_frame1")
def _to_sql_replace(self):
self.drop_table("test_frame1")
self.pandasSQL.to_sql(self.test_frame1, "test_frame1", if_exists="fail")
# Add to table again
self.pandasSQL.to_sql(self.test_frame1, "test_frame1", if_exists="replace")
assert self.pandasSQL.has_table("test_frame1")
num_entries = len(self.test_frame1)
num_rows = self._count_rows("test_frame1")
assert num_rows == num_entries
self.drop_table("test_frame1")
def _to_sql_append(self):
# Nuke table just in case
self.drop_table("test_frame1")
self.pandasSQL.to_sql(self.test_frame1, "test_frame1", if_exists="fail")
# Add to table again
self.pandasSQL.to_sql(self.test_frame1, "test_frame1", if_exists="append")
assert self.pandasSQL.has_table("test_frame1")
num_entries = 2 * len(self.test_frame1)
num_rows = self._count_rows("test_frame1")
assert num_rows == num_entries
self.drop_table("test_frame1")
def _to_sql_method_callable(self):
check = [] # used to double check function below is really being used
def sample(pd_table, conn, keys, data_iter):
check.append(1)
data = [dict(zip(keys, row)) for row in data_iter]
conn.execute(pd_table.table.insert(), data)
self.drop_table("test_frame1")
self.pandasSQL.to_sql(self.test_frame1, "test_frame1", method=sample)
assert self.pandasSQL.has_table("test_frame1")
assert check == [1]
num_entries = len(self.test_frame1)
num_rows = self._count_rows("test_frame1")
assert num_rows == num_entries
# Nuke table
self.drop_table("test_frame1")
def _roundtrip(self):
self.drop_table("test_frame_roundtrip")
self.pandasSQL.to_sql(self.test_frame1, "test_frame_roundtrip")
result = self.pandasSQL.read_query("SELECT * FROM test_frame_roundtrip")
result.set_index("level_0", inplace=True)
# result.index.astype(int)
result.index.name = None
tm.assert_frame_equal(result, self.test_frame1)
def _execute_sql(self):
# drop_sql = "DROP TABLE IF EXISTS test" # should already be done
iris_results = self.pandasSQL.execute("SELECT * FROM iris")
row = iris_results.fetchone()
tm.equalContents(row, [5.1, 3.5, 1.4, 0.2, "Iris-setosa"])
def _to_sql_save_index(self):
df = DataFrame.from_records(
[(1, 2.1, "line1"), (2, 1.5, "line2")], columns=["A", "B", "C"], index=["A"]
)
self.pandasSQL.to_sql(df, "test_to_sql_saves_index")
ix_cols = self._get_index_columns("test_to_sql_saves_index")
assert ix_cols == [["A"]]
def _transaction_test(self):
with self.pandasSQL.run_transaction() as trans:
trans.execute("CREATE TABLE test_trans (A INT, B TEXT)")
class DummyException(Exception):
pass
# Make sure when transaction is rolled back, no rows get inserted
ins_sql = "INSERT INTO test_trans (A,B) VALUES (1, 'blah')"
try:
with self.pandasSQL.run_transaction() as trans:
trans.execute(ins_sql)
raise DummyException("error")
except DummyException:
# ignore raised exception
pass
res = self.pandasSQL.read_query("SELECT * FROM test_trans")
assert len(res) == 0
# Make sure when transaction is committed, rows do get inserted
with self.pandasSQL.run_transaction() as trans:
trans.execute(ins_sql)
res2 = self.pandasSQL.read_query("SELECT * FROM test_trans")
assert len(res2) == 1
# -----------------------------------------------------------------------------
# -- Testing the public API
class _TestSQLApi(PandasSQLTest):
"""
Base class to test the public API.
From this two classes are derived to run these tests for both the
sqlalchemy mode (`TestSQLApi`) and the fallback mode
(`TestSQLiteFallbackApi`). These tests are run with sqlite3. Specific
tests for the different sql flavours are included in `_TestSQLAlchemy`.
Notes:
flavor can always be passed even in SQLAlchemy mode,
should be correctly ignored.
we don't use drop_table because that isn't part of the public api
"""
flavor = "sqlite"
mode: str
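    # Both derived classes run the same test bodies; only connect() differs:
    # a SQLAlchemy engine in TestSQLApi vs. a raw sqlite3 DBAPI connection in
    # TestSQLiteFallbackApi.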
def setup_connect(self):
self.conn = self.connect()
@pytest.fixture(autouse=True)
def setup_method(self, load_iris_data):
self.load_test_data_and_sql()
def load_test_data_and_sql(self):
self._load_iris_view()
self._load_test1_data()
self._load_test2_data()
self._load_test3_data()
self._load_raw_sql()
def test_read_sql_iris(self):
iris_frame = sql.read_sql_query("SELECT * FROM iris", self.conn)
self._check_iris_loaded_frame(iris_frame)
def test_read_sql_view(self):
iris_frame = sql.read_sql_query("SELECT * FROM iris_view", self.conn)
self._check_iris_loaded_frame(iris_frame)
def test_to_sql(self):
sql.to_sql(self.test_frame1, "test_frame1", self.conn)
assert sql.has_table("test_frame1", self.conn)
def test_to_sql_fail(self):
sql.to_sql(self.test_frame1, "test_frame2", self.conn, if_exists="fail")
assert sql.has_table("test_frame2", self.conn)
msg = "Table 'test_frame2' already exists"
with pytest.raises(ValueError, match=msg):
sql.to_sql(self.test_frame1, "test_frame2", self.conn, if_exists="fail")
def test_to_sql_replace(self):
sql.to_sql(self.test_frame1, "test_frame3", self.conn, if_exists="fail")
# Add to table again
sql.to_sql(self.test_frame1, "test_frame3", self.conn, if_exists="replace")
assert sql.has_table("test_frame3", self.conn)
num_entries = len(self.test_frame1)
num_rows = self._count_rows("test_frame3")
assert num_rows == num_entries
def test_to_sql_append(self):
sql.to_sql(self.test_frame1, "test_frame4", self.conn, if_exists="fail")
# Add to table again
sql.to_sql(self.test_frame1, "test_frame4", self.conn, if_exists="append")
assert sql.has_table("test_frame4", self.conn)
num_entries = 2 * len(self.test_frame1)
num_rows = self._count_rows("test_frame4")
assert num_rows == num_entries
def test_to_sql_type_mapping(self):
sql.to_sql(self.test_frame3, "test_frame5", self.conn, index=False)
result = sql.read_sql("SELECT * FROM test_frame5", self.conn)
tm.assert_frame_equal(self.test_frame3, result)
def test_to_sql_series(self):
s = Series(np.arange(5, dtype="int64"), name="series")
sql.to_sql(s, "test_series", self.conn, index=False)
s2 = sql.read_sql_query("SELECT * FROM test_series", self.conn)
tm.assert_frame_equal(s.to_frame(), s2)
def test_roundtrip(self):
sql.to_sql(self.test_frame1, "test_frame_roundtrip", con=self.conn)
result = sql.read_sql_query("SELECT * FROM test_frame_roundtrip", con=self.conn)
# HACK!
result.index = self.test_frame1.index
result.set_index("level_0", inplace=True)
result.index.astype(int)
result.index.name = None
tm.assert_frame_equal(result, self.test_frame1)
def test_roundtrip_chunksize(self):
sql.to_sql(
self.test_frame1,
"test_frame_roundtrip",
con=self.conn,
index=False,
chunksize=2,
)
result = sql.read_sql_query("SELECT * FROM test_frame_roundtrip", con=self.conn)
tm.assert_frame_equal(result, self.test_frame1)
def test_execute_sql(self):
# drop_sql = "DROP TABLE IF EXISTS test" # should already be done
iris_results = sql.execute("SELECT * FROM iris", con=self.conn)
row = iris_results.fetchone()
tm.equalContents(row, [5.1, 3.5, 1.4, 0.2, "Iris-setosa"])
def test_date_parsing(self):
# Test date parsing in read_sql
# No Parsing
df = sql.read_sql_query("SELECT * FROM types_test_data", self.conn)
assert not issubclass(df.DateCol.dtype.type, np.datetime64)
df = sql.read_sql_query(
"SELECT * FROM types_test_data", self.conn, parse_dates=["DateCol"]
)
assert issubclass(df.DateCol.dtype.type, np.datetime64)
assert df.DateCol.tolist() == [
pd.Timestamp(2000, 1, 3, 0, 0, 0),
pd.Timestamp(2000, 1, 4, 0, 0, 0),
]
df = sql.read_sql_query(
"SELECT * FROM types_test_data",
self.conn,
parse_dates={"DateCol": "%Y-%m-%d %H:%M:%S"},
)
assert issubclass(df.DateCol.dtype.type, np.datetime64)
assert df.DateCol.tolist() == [
pd.Timestamp(2000, 1, 3, 0, 0, 0),
pd.Timestamp(2000, 1, 4, 0, 0, 0),
]
df = sql.read_sql_query(
"SELECT * FROM types_test_data", self.conn, parse_dates=["IntDateCol"]
)
assert issubclass(df.IntDateCol.dtype.type, np.datetime64)
assert df.IntDateCol.tolist() == [
pd.Timestamp(1986, 12, 25, 0, 0, 0),
pd.Timestamp(2013, 1, 1, 0, 0, 0),
]
df = sql.read_sql_query(
"SELECT * FROM types_test_data", self.conn, parse_dates={"IntDateCol": "s"}
)
assert issubclass(df.IntDateCol.dtype.type, np.datetime64)
assert df.IntDateCol.tolist() == [
pd.Timestamp(1986, 12, 25, 0, 0, 0),
pd.Timestamp(2013, 1, 1, 0, 0, 0),
]
df = sql.read_sql_query(
"SELECT * FROM types_test_data",
self.conn,
parse_dates={"IntDateOnlyCol": "%Y%m%d"},
)
assert issubclass(df.IntDateOnlyCol.dtype.type, np.datetime64)
assert df.IntDateOnlyCol.tolist() == [
pd.Timestamp("2010-10-10"),
pd.Timestamp("2010-12-12"),
]
def test_date_and_index(self):
# Test case where same column appears in parse_date and index_col
df = sql.read_sql_query(
"SELECT * FROM types_test_data",
self.conn,
index_col="DateCol",
parse_dates=["DateCol", "IntDateCol"],
)
assert issubclass(df.index.dtype.type, np.datetime64)
assert issubclass(df.IntDateCol.dtype.type, np.datetime64)
def test_timedelta(self):
# see #6921
df = to_timedelta(Series(["00:00:01", "00:00:03"], name="foo")).to_frame()
with tm.assert_produces_warning(UserWarning):
df.to_sql("test_timedelta", self.conn)
result = sql.read_sql_query("SELECT * FROM test_timedelta", self.conn)
tm.assert_series_equal(result["foo"], df["foo"].astype("int64"))
def test_complex_raises(self):
df = DataFrame({"a": [1 + 1j, 2j]})
msg = "Complex datatypes not supported"
with pytest.raises(ValueError, match=msg):
df.to_sql("test_complex", self.conn)
@pytest.mark.parametrize(
"index_name,index_label,expected",
[
# no index name, defaults to 'index'
(None, None, "index"),
# specifying index_label
(None, "other_label", "other_label"),
# using the index name
("index_name", None, "index_name"),
# has index name, but specifying index_label
("index_name", "other_label", "other_label"),
# index name is integer
(0, None, "0"),
# index name is None but index label is integer
(None, 0, "0"),
],
)
def test_to_sql_index_label(self, index_name, index_label, expected):
temp_frame = DataFrame({"col1": range(4)})
temp_frame.index.name = index_name
query = "SELECT * FROM test_index_label"
sql.to_sql(temp_frame, "test_index_label", self.conn, index_label=index_label)
frame = sql.read_sql_query(query, self.conn)
assert frame.columns[0] == expected
def test_to_sql_index_label_multiindex(self):
temp_frame = DataFrame(
{"col1": range(4)},
index=MultiIndex.from_product([("A0", "A1"), ("B0", "B1")]),
)
# no index name, defaults to 'level_0' and 'level_1'
sql.to_sql(temp_frame, "test_index_label", self.conn)
frame = sql.read_sql_query("SELECT * FROM test_index_label", self.conn)
assert frame.columns[0] == "level_0"
assert frame.columns[1] == "level_1"
# specifying index_label
sql.to_sql(
temp_frame,
"test_index_label",
self.conn,
if_exists="replace",
index_label=["A", "B"],
)
frame = sql.read_sql_query("SELECT * FROM test_index_label", self.conn)
assert frame.columns[:2].tolist() == ["A", "B"]
# using the index name
temp_frame.index.names = ["A", "B"]
sql.to_sql(temp_frame, "test_index_label", self.conn, if_exists="replace")
frame = sql.read_sql_query("SELECT * FROM test_index_label", self.conn)
assert frame.columns[:2].tolist() == ["A", "B"]
# has index name, but specifying index_label
sql.to_sql(
temp_frame,
"test_index_label",
self.conn,
if_exists="replace",
index_label=["C", "D"],
)
frame = sql.read_sql_query("SELECT * FROM test_index_label", self.conn)
assert frame.columns[:2].tolist() == ["C", "D"]
msg = "Length of 'index_label' should match number of levels, which is 2"
with pytest.raises(ValueError, match=msg):
sql.to_sql(
temp_frame,
"test_index_label",
self.conn,
if_exists="replace",
index_label="C",
)
def test_multiindex_roundtrip(self):
df = DataFrame.from_records(
[(1, 2.1, "line1"), (2, 1.5, "line2")],
columns=["A", "B", "C"],
index=["A", "B"],
)
df.to_sql("test_multiindex_roundtrip", self.conn)
result = sql.read_sql_query(
"SELECT * FROM test_multiindex_roundtrip", self.conn, index_col=["A", "B"]
)
tm.assert_frame_equal(df, result, check_index_type=True)
def test_integer_col_names(self):
df = DataFrame([[1, 2], [3, 4]], columns=[0, 1])
sql.to_sql(df, "test_frame_integer_col_names", self.conn, if_exists="replace")
def test_get_schema(self):
create_sql = sql.get_schema(self.test_frame1, "test", con=self.conn)
assert "CREATE" in create_sql
def test_get_schema_dtypes(self):
float_frame = DataFrame({"a": [1.1, 1.2], "b": [2.1, 2.2]})
dtype = sqlalchemy.Integer if self.mode == "sqlalchemy" else "INTEGER"
create_sql = sql.get_schema(
float_frame, "test", con=self.conn, dtype={"b": dtype}
)
assert "CREATE" in create_sql
assert "INTEGER" in create_sql
def test_get_schema_keys(self):
frame = DataFrame({"Col1": [1.1, 1.2], "Col2": [2.1, 2.2]})
create_sql = sql.get_schema(frame, "test", con=self.conn, keys="Col1")
constraint_sentence = 'CONSTRAINT test_pk PRIMARY KEY ("Col1")'
assert constraint_sentence in create_sql
# multiple columns as key (GH10385)
create_sql = sql.get_schema(
self.test_frame1, "test", con=self.conn, keys=["A", "B"]
)
constraint_sentence = 'CONSTRAINT test_pk PRIMARY KEY ("A", "B")'
assert constraint_sentence in create_sql
def test_chunksize_read(self):
df = DataFrame(np.random.randn(22, 5), columns=list("abcde"))
df.to_sql("test_chunksize", self.conn, index=False)
# reading the query in one time
res1 = sql.read_sql_query("select * from test_chunksize", self.conn)
# reading the query in chunks with read_sql_query
res2 = DataFrame()
i = 0
sizes = [5, 5, 5, 5, 2]
for chunk in sql.read_sql_query(
"select * from test_chunksize", self.conn, chunksize=5
):
res2 = concat([res2, chunk], ignore_index=True)
assert len(chunk) == sizes[i]
i += 1
tm.assert_frame_equal(res1, res2)
        # reading the same table in chunks with read_sql_table
if self.mode == "sqlalchemy":
res3 = DataFrame()
i = 0
sizes = [5, 5, 5, 5, 2]
for chunk in sql.read_sql_table("test_chunksize", self.conn, chunksize=5):
res3 = concat([res3, chunk], ignore_index=True)
assert len(chunk) == sizes[i]
i += 1
tm.assert_frame_equal(res1, res3)
def test_categorical(self):
# GH8624
# test that categorical gets written correctly as dense column
df = DataFrame(
{
"person_id": [1, 2, 3],
"person_name": ["<NAME>", "<NAME>", "<NAME>"],
}
)
df2 = df.copy()
df2["person_name"] = df2["person_name"].astype("category")
df2.to_sql("test_categorical", self.conn, index=False)
res = sql.read_sql_query("SELECT * FROM test_categorical", self.conn)
tm.assert_frame_equal(res, df)
def test_unicode_column_name(self):
# GH 11431
df = DataFrame([[1, 2], [3, 4]], columns=["\xe9", "b"])
df.to_sql("test_unicode", self.conn, index=False)
def test_escaped_table_name(self):
# GH 13206
df = DataFrame({"A": [0, 1, 2], "B": [0.2, np.nan, 5.6]})
df.to_sql("d1187b08-4943-4c8d-a7f6", self.conn, index=False)
res = sql.read_sql_query("SELECT * FROM `d1187b08-4943-4c8d-a7f6`", self.conn)
tm.assert_frame_equal(res, df)
@pytest.mark.single
@pytest.mark.skipif(not SQLALCHEMY_INSTALLED, reason="SQLAlchemy not installed")
class TestSQLApi(SQLAlchemyMixIn, _TestSQLApi):
"""
Test the public API as it would be used directly
Tests for `read_sql_table` are included here, as this is specific for the
sqlalchemy mode.
"""
flavor = "sqlite"
mode = "sqlalchemy"
def connect(self):
return sqlalchemy.create_engine("sqlite:///:memory:")
def test_read_table_columns(self):
# test columns argument in read_table
sql.to_sql(self.test_frame1, "test_frame", self.conn)
cols = ["A", "B"]
result = sql.read_sql_table("test_frame", self.conn, columns=cols)
assert result.columns.tolist() == cols
def test_read_table_index_col(self):
# test columns argument in read_table
sql.to_sql(self.test_frame1, "test_frame", self.conn)
result = sql.read_sql_table("test_frame", self.conn, index_col="index")
assert result.index.names == ["index"]
result = sql.read_sql_table("test_frame", self.conn, index_col=["A", "B"])
assert result.index.names == ["A", "B"]
result = sql.read_sql_table(
"test_frame", self.conn, index_col=["A", "B"], columns=["C", "D"]
)
assert result.index.names == ["A", "B"]
assert result.columns.tolist() == ["C", "D"]
def test_read_sql_delegate(self):
iris_frame1 = sql.read_sql_query("SELECT * FROM iris", self.conn)
iris_frame2 = sql.read_sql("SELECT * FROM iris", self.conn)
tm.assert_frame_equal(iris_frame1, iris_frame2)
iris_frame1 = sql.read_sql_table("iris", self.conn)
iris_frame2 = sql.read_sql("iris", self.conn)
tm.assert_frame_equal(iris_frame1, iris_frame2)
def test_not_reflect_all_tables(self):
# create invalid table
qry = """CREATE TABLE invalid (x INTEGER, y UNKNOWN);"""
self.conn.execute(qry)
qry = """CREATE TABLE other_table (x INTEGER, y INTEGER);"""
self.conn.execute(qry)
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
# Trigger a warning.
sql.read_sql_table("other_table", self.conn)
sql.read_sql_query("SELECT * FROM other_table", self.conn)
# Verify some things
assert len(w) == 0
def test_warning_case_insensitive_table_name(self):
# see gh-7815
#
        # We can't test that this warning is triggered, as the database
# configuration would have to be altered. But here we test that
# the warning is certainly NOT triggered in a normal case.
with warnings.catch_warnings(record=True) as w:
# Cause all warnings to always be triggered.
warnings.simplefilter("always")
# This should not trigger a Warning
self.test_frame1.to_sql("CaseSensitive", self.conn)
# Verify some things
assert len(w) == 0
def _get_index_columns(self, tbl_name):
from sqlalchemy.engine import reflection
insp = reflection.Inspector.from_engine(self.conn)
        ixs = insp.get_indexes(tbl_name)
ixs = [i["column_names"] for i in ixs]
return ixs
def test_sqlalchemy_type_mapping(self):
# Test Timestamp objects (no datetime64 because of timezone) (GH9085)
df = DataFrame(
{"time": to_datetime(["201412120154", "201412110254"], utc=True)}
)
db = sql.SQLDatabase(self.conn)
table = sql.SQLTable("test_type", db, frame=df)
# GH 9086: TIMESTAMP is the suggested type for datetimes with timezones
assert isinstance(table.table.c["time"].type, sqltypes.TIMESTAMP)
def test_database_uri_string(self):
# Test read_sql and .to_sql method with a database URI (GH10654)
test_frame1 = self.test_frame1
# db_uri = 'sqlite:///:memory:' # raises
# sqlalchemy.exc.OperationalError: (sqlite3.OperationalError) near
# "iris": syntax error [SQL: 'iris']
with tm.ensure_clean() as name:
db_uri = "sqlite:///" + name
table = "iris"
test_frame1.to_sql(table, db_uri, if_exists="replace", index=False)
test_frame2 = sql.read_sql(table, db_uri)
test_frame3 = sql.read_sql_table(table, db_uri)
query = "SELECT * FROM iris"
test_frame4 = sql.read_sql_query(query, db_uri)
tm.assert_frame_equal(test_frame1, test_frame2)
tm.assert_frame_equal(test_frame1, test_frame3)
tm.assert_frame_equal(test_frame1, test_frame4)
# using driver that will not be installed on Travis to trigger error
# in sqlalchemy.create_engine -> test passing of this error to user
try:
# the rest of this test depends on pg8000's being absent
import pg8000 # noqa
pytest.skip("pg8000 is installed")
except ImportError:
pass
db_uri = "postgresql+pg8000://user:pass@host/dbname"
with pytest.raises(ImportError, match="pg8000"):
sql.read_sql("select * from table", db_uri)
def _make_iris_table_metadata(self):
sa = sqlalchemy
metadata = sa.MetaData()
iris = sa.Table(
"iris",
metadata,
sa.Column("SepalLength", sa.REAL),
sa.Column("SepalWidth", sa.REAL),
sa.Column("PetalLength", sa.REAL),
sa.Column("PetalWidth", sa.REAL),
sa.Column("Name", sa.TEXT),
)
return iris
def test_query_by_text_obj(self):
# WIP : GH10846
name_text = sqlalchemy.text("select * from iris where name=:name")
iris_df = sql.read_sql(name_text, self.conn, params={"name": "Iris-versicolor"})
all_names = set(iris_df["Name"])
assert all_names == {"Iris-versicolor"}
def test_query_by_select_obj(self):
# WIP : GH10846
iris = self._make_iris_table_metadata()
name_select = sqlalchemy.select([iris]).where(
iris.c.Name == sqlalchemy.bindparam("name")
)
iris_df = sql.read_sql(name_select, self.conn, params={"name": "Iris-setosa"})
all_names = set(iris_df["Name"])
assert all_names == {"Iris-setosa"}
class _EngineToConnMixin:
"""
A mixin that causes setup_connect to create a conn rather than an engine.
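    The engine built by the parent class is replaced with ``engine.connect()``
    inside a transaction that is rolled back after each test, so the same test
    bodies also cover passing a Connection instead of an Engine.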
"""
@pytest.fixture(autouse=True)
def setup_method(self, load_iris_data):
super().load_test_data_and_sql()
engine = self.conn
conn = engine.connect()
self.__tx = conn.begin()
self.pandasSQL = sql.SQLDatabase(conn)
self.__engine = engine
self.conn = conn
yield
self.__tx.rollback()
self.conn.close()
self.conn = self.__engine
self.pandasSQL = sql.SQLDatabase(self.__engine)
# XXX:
# super().teardown_method(method)
@pytest.mark.single
class TestSQLApiConn(_EngineToConnMixin, TestSQLApi):
pass
@pytest.mark.single
class TestSQLiteFallbackApi(SQLiteMixIn, _TestSQLApi):
"""
Test the public sqlite connection fallback API
"""
flavor = "sqlite"
mode = "fallback"
def connect(self, database=":memory:"):
return sqlite3.connect(database)
def test_sql_open_close(self):
# Test if the IO in the database still work if the connection closed
# between the writing and reading (as in many real situations).
with tm.ensure_clean() as name:
conn = self.connect(name)
sql.to_sql(self.test_frame3, "test_frame3_legacy", conn, index=False)
conn.close()
conn = self.connect(name)
result = sql.read_sql_query("SELECT * FROM test_frame3_legacy;", conn)
conn.close()
tm.assert_frame_equal(self.test_frame3, result)
@pytest.mark.skipif(SQLALCHEMY_INSTALLED, reason="SQLAlchemy is installed")
def test_con_string_import_error(self):
conn = "mysql://root@localhost/pandas_nosetest"
msg = "Using URI string without sqlalchemy installed"
with pytest.raises(ImportError, match=msg):
sql.read_sql("SELECT * FROM iris", conn)
def test_read_sql_delegate(self):
iris_frame1 = sql.read_sql_query("SELECT * FROM iris", self.conn)
iris_frame2 = sql.read_sql("SELECT * FROM iris", self.conn)
tm.assert_frame_equal(iris_frame1, iris_frame2)
msg = "Execution failed on sql 'iris': near \"iris\": syntax error"
with pytest.raises(sql.DatabaseError, match=msg):
sql.read_sql("iris", self.conn)
def test_safe_names_warning(self):
# GH 6798
df = DataFrame([[1, 2], [3, 4]], columns=["a", "b "]) # has a space
# warns on create table with spaces in names
with tm.assert_produces_warning():
sql.to_sql(df, "test_frame3_legacy", self.conn, index=False)
def test_get_schema2(self):
# without providing a connection object (available for backwards comp)
create_sql = sql.get_schema(self.test_frame1, "test")
assert "CREATE" in create_sql
def _get_sqlite_column_type(self, schema, column):
for col in schema.split("\n"):
if col.split()[0].strip('""') == column:
return col.split()[1]
raise ValueError(f"Column {column} not found")
def test_sqlite_type_mapping(self):
# Test Timestamp objects (no datetime64 because of timezone) (GH9085)
df = DataFrame(
{"time": to_datetime(["201412120154", "201412110254"], utc=True)}
)
db = sql.SQLiteDatabase(self.conn)
table = sql.SQLiteTable("test_type", db, frame=df)
schema = table.sql_schema()
assert self._get_sqlite_column_type(schema, "time") == "TIMESTAMP"
# -----------------------------------------------------------------------------
# -- Database flavor specific tests
class _TestSQLAlchemy(SQLAlchemyMixIn, PandasSQLTest):
"""
Base class for testing the sqlalchemy backend.
Subclasses for specific database types are created below. Tests that
deviate for each flavor are overwritten there.
"""
flavor: str
@pytest.fixture(autouse=True, scope="class")
def setup_class(cls):
cls.setup_import()
cls.setup_driver()
conn = cls.connect()
conn.connect()
def load_test_data_and_sql(self):
self._load_raw_sql()
self._load_test1_data()
@pytest.fixture(autouse=True)
def setup_method(self, load_iris_data):
self.load_test_data_and_sql()
@classmethod
def setup_import(cls):
# Skip this test if SQLAlchemy not available
if not SQLALCHEMY_INSTALLED:
pytest.skip("SQLAlchemy not installed")
@classmethod
def setup_driver(cls):
raise NotImplementedError()
@classmethod
def connect(cls):
raise NotImplementedError()
def setup_connect(self):
try:
self.conn = self.connect()
self.pandasSQL = sql.SQLDatabase(self.conn)
# to test if connection can be made:
self.conn.connect()
except sqlalchemy.exc.OperationalError:
pytest.skip(f"Can't connect to {self.flavor} server")
def test_read_sql(self):
self._read_sql_iris()
def test_read_sql_parameter(self):
self._read_sql_iris_parameter()
def test_read_sql_named_parameter(self):
self._read_sql_iris_named_parameter()
def test_to_sql(self):
self._to_sql()
def test_to_sql_empty(self):
self._to_sql_empty()
def test_to_sql_fail(self):
self._to_sql_fail()
def test_to_sql_replace(self):
self._to_sql_replace()
def test_to_sql_append(self):
self._to_sql_append()
def test_to_sql_method_multi(self):
self._to_sql(method="multi")
def test_to_sql_method_callable(self):
self._to_sql_method_callable()
def test_create_table(self):
temp_conn = self.connect()
temp_frame = DataFrame(
{"one": [1.0, 2.0, 3.0, 4.0], "two": [4.0, 3.0, 2.0, 1.0]}
)
pandasSQL = sql.SQLDatabase(temp_conn)
pandasSQL.to_sql(temp_frame, "temp_frame")
assert temp_conn.has_table("temp_frame")
def test_drop_table(self):
temp_conn = self.connect()
temp_frame = DataFrame(
{"one": [1.0, 2.0, 3.0, 4.0], "two": [4.0, 3.0, 2.0, 1.0]}
)
pandasSQL = sql.SQLDatabase(temp_conn)
pandasSQL.to_sql(temp_frame, "temp_frame")
assert temp_conn.has_table("temp_frame")
pandasSQL.drop_table("temp_frame")
assert not temp_conn.has_table("temp_frame")
def test_roundtrip(self):
self._roundtrip()
def test_execute_sql(self):
self._execute_sql()
def test_read_table(self):
iris_frame = sql.read_sql_table("iris", con=self.conn)
self._check_iris_loaded_frame(iris_frame)
def test_read_table_columns(self):
iris_frame = sql.read_sql_table(
"iris", con=self.conn, columns=["SepalLength", "SepalLength"]
)
tm.equalContents(iris_frame.columns.values, ["SepalLength", "SepalLength"])
def test_read_table_absent_raises(self):
msg = "Table this_doesnt_exist not found"
with pytest.raises(ValueError, match=msg):
sql.read_sql_table("this_doesnt_exist", con=self.conn)
def test_default_type_conversion(self):
df = sql.read_sql_table("types_test_data", self.conn)
assert issubclass(df.FloatCol.dtype.type, np.floating)
assert issubclass(df.IntCol.dtype.type, np.integer)
assert issubclass(df.BoolCol.dtype.type, np.bool_)
# Int column with NA values stays as float
assert issubclass(df.IntColWithNull.dtype.type, np.floating)
# Bool column with NA values becomes object
assert issubclass(df.BoolColWithNull.dtype.type, np.object)
def test_bigint(self):
# int64 should be converted to BigInteger, GH7433
df = DataFrame(data={"i64": [2 ** 62]})
df.to_sql("test_bigint", self.conn, index=False)
result = sql.read_sql_table("test_bigint", self.conn)
tm.assert_frame_equal(df, result)
def test_default_date_load(self):
df = sql.read_sql_table("types_test_data", self.conn)
# IMPORTANT - sqlite has no native date type, so shouldn't parse, but
# MySQL SHOULD be converted.
assert issubclass(df.DateCol.dtype.type, np.datetime64)
def test_datetime_with_timezone(self):
# edge case that converts postgresql datetime with time zone types
# to datetime64[ns,psycopg2.tz.FixedOffsetTimezone..], which is ok
# but should be more natural, so coerce to datetime64[ns] for now
def check(col):
# check that a column is either datetime64[ns]
# or datetime64[ns, UTC]
if is_datetime64_dtype(col.dtype):
# "2000-01-01 00:00:00-08:00" should convert to
# "2000-01-01 08:00:00"
assert col[0] == Timestamp("2000-01-01 08:00:00")
# "2000-06-01 00:00:00-07:00" should convert to
# "2000-06-01 07:00:00"
assert col[1] == Timestamp("2000-06-01 07:00:00")
elif is_datetime64tz_dtype(col.dtype):
assert str(col.dt.tz) == "UTC"
# "2000-01-01 00:00:00-08:00" should convert to
# "2000-01-01 08:00:00"
# "2000-06-01 00:00:00-07:00" should convert to
# "2000-06-01 07:00:00"
# GH 6415
expected_data = [
Timestamp("2000-01-01 08:00:00", tz="UTC"),
Timestamp("2000-06-01 07:00:00", tz="UTC"),
]
expected = Series(expected_data, name=col.name)
tm.assert_series_equal(col, expected)
else:
raise AssertionError(
f"DateCol loaded with incorrect type -> {col.dtype}"
)
# GH11216
df = pd.read_sql_query("select * from types_test_data", self.conn)
if not hasattr(df, "DateColWithTz"):
pytest.skip("no column with datetime with time zone")
# this is parsed on Travis (linux), but not on macosx for some reason
# even with the same versions of psycopg2 & sqlalchemy, possibly a
# Postgresql server version difference
col = df.DateColWithTz
assert is_datetime64tz_dtype(col.dtype)
df = pd.read_sql_query(
"select * from types_test_data", self.conn, parse_dates=["DateColWithTz"]
)
if not hasattr(df, "DateColWithTz"):
pytest.skip("no column with datetime with time zone")
col = df.DateColWithTz
assert is_datetime64tz_dtype(col.dtype)
assert str(col.dt.tz) == "UTC"
check(df.DateColWithTz)
df = pd.concat(
list(
pd.read_sql_query(
"select * from types_test_data", self.conn, chunksize=1
)
),
ignore_index=True,
)
col = df.DateColWithTz
assert is_datetime64tz_dtype(col.dtype)
assert str(col.dt.tz) == "UTC"
expected = sql.read_sql_table("types_test_data", self.conn)
col = expected.DateColWithTz
assert is_datetime64tz_dtype(col.dtype)
tm.assert_series_equal(df.DateColWithTz, expected.DateColWithTz)
# xref #7139
# this might or might not be converted depending on the postgres driver
df = sql.read_sql_table("types_test_data", self.conn)
check(df.DateColWithTz)
def test_datetime_with_timezone_roundtrip(self):
# GH 9086
# Write datetimetz data to a db and read it back
# For dbs that support timestamps with timezones, should get back UTC
# otherwise naive data should be returned
expected = DataFrame(
{"A": date_range("2013-01-01 09:00:00", periods=3, tz="US/Pacific")}
)
expected.to_sql("test_datetime_tz", self.conn, index=False)
if self.flavor == "postgresql":
# SQLAlchemy "timezones" (i.e. offsets) are coerced to UTC
expected["A"] = expected["A"].dt.tz_convert("UTC")
else:
# Otherwise, timestamps are returned as local, naive
expected["A"] = expected["A"].dt.tz_localize(None)
result = sql.read_sql_table("test_datetime_tz", self.conn)
tm.assert_frame_equal(result, expected)
result = sql.read_sql_query("SELECT * FROM test_datetime_tz", self.conn)
if self.flavor == "sqlite":
# read_sql_query does not return datetime type like read_sql_table
assert isinstance(result.loc[0, "A"], str)
result["A"] = to_datetime(result["A"])
tm.assert_frame_equal(result, expected)
def test_naive_datetimeindex_roundtrip(self):
# GH 23510
# Ensure that a naive DatetimeIndex isn't converted to UTC
dates = date_range("2018-01-01", periods=5, freq="6H")
expected = DataFrame({"nums": range(5)}, index=dates)
expected.to_sql("foo_table", self.conn, index_label="info_date")
result = sql.read_sql_table("foo_table", self.conn, index_col="info_date")
# result index will gain a name from the set_index operation; expected won't
tm.assert_frame_equal(result, expected, check_names=False)
def test_date_parsing(self):
# No Parsing
df = sql.read_sql_table("types_test_data", self.conn)
expected_type = object if self.flavor == "sqlite" else np.datetime64
assert issubclass(df.DateCol.dtype.type, expected_type)
df = sql.read_sql_table("types_test_data", self.conn, parse_dates=["DateCol"])
assert issubclass(df.DateCol.dtype.type, np.datetime64)
df = sql.read_sql_table(
"types_test_data", self.conn, parse_dates={"DateCol": "%Y-%m-%d %H:%M:%S"}
)
assert issubclass(df.DateCol.dtype.type, np.datetime64)
df = sql.read_sql_table(
"types_test_data",
self.conn,
parse_dates={"DateCol": {"format": "%Y-%m-%d %H:%M:%S"}},
)
assert issubclass(df.DateCol.dtype.type, np.datetime64)
df = sql.read_sql_table(
"types_test_data", self.conn, parse_dates=["IntDateCol"]
)
assert issubclass(df.IntDateCol.dtype.type, np.datetime64)
df = sql.read_sql_table(
"types_test_data", self.conn, parse_dates={"IntDateCol": "s"}
)
assert issubclass(df.IntDateCol.dtype.type, np.datetime64)
df = sql.read_sql_table(
"types_test_data", self.conn, parse_dates={"IntDateCol": {"unit": "s"}}
)
assert issubclass(df.IntDateCol.dtype.type, np.datetime64)
def test_datetime(self):
df = DataFrame(
{"A": date_range("2013-01-01 09:00:00", periods=3), "B": np.arange(3.0)}
)
df.to_sql("test_datetime", self.conn)
# with read_table -> type information from schema used
result = sql.read_sql_table("test_datetime", self.conn)
result = result.drop("index", axis=1)
tm.assert_frame_equal(result, df)
# with read_sql -> no type information -> sqlite has no native datetime type
result = sql.read_sql_query("SELECT * FROM test_datetime", self.conn)
result = result.drop("index", axis=1)
if self.flavor == "sqlite":
assert isinstance(result.loc[0, "A"], str)
result["A"] = to_datetime(result["A"])
tm.assert_frame_equal(result, df)
else:
tm.assert_frame_equal(result, df)
def test_datetime_NaT(self):
df = DataFrame(
{"A": date_range("2013-01-01 09:00:00", periods=3), "B": np.arange(3.0)}
)
df.loc[1, "A"] = np.nan
df.to_sql("test_datetime", self.conn, index=False)
# with read_table -> type information from schema used
result = sql.read_sql_table("test_datetime", self.conn)
tm.assert_frame_equal(result, df)
# with read_sql -> no type information -> sqlite has no native datetime type
result = sql.read_sql_query("SELECT * FROM test_datetime", self.conn)
if self.flavor == "sqlite":
assert isinstance(result.loc[0, "A"], str)
result["A"] = to_datetime(result["A"], errors="coerce")
tm.assert_frame_equal(result, df)
else:
tm.assert_frame_equal(result, df)
def test_datetime_date(self):
# test support for datetime.date
df = DataFrame([date(2014, 1, 1), date(2014, 1, 2)], columns=["a"])
df.to_sql("test_date", self.conn, index=False)
res = read_sql_table("test_date", self.conn)
result = res["a"]
expected = to_datetime(df["a"])
# comes back as datetime64
tm.assert_series_equal(result, expected)
def test_datetime_time(self):
# test support for datetime.time
df = DataFrame([time(9, 0, 0), time(9, 1, 30)], columns=["a"])
df.to_sql("test_time", self.conn, index=False)
res = read_sql_table("test_time", self.conn)
tm.assert_frame_equal(res, df)
# GH8341
# first, use the fallback to have the sqlite adapter put in place
sqlite_conn = TestSQLiteFallback.connect()
sql.to_sql(df, "test_time2", sqlite_conn, index=False)
res = sql.read_sql_query("SELECT * FROM test_time2", sqlite_conn)
ref = df.applymap(lambda _: _.strftime("%H:%M:%S.%f"))
tm.assert_frame_equal(ref, res) # check if adapter is in place
# then test if sqlalchemy is unaffected by the sqlite adapter
sql.to_sql(df, "test_time3", self.conn, index=False)
if self.flavor == "sqlite":
res = sql.read_sql_query("SELECT * FROM test_time3", self.conn)
ref = df.applymap(lambda _: _.strftime("%H:%M:%S.%f"))
tm.assert_frame_equal(ref, res)
res = sql.read_sql_table("test_time3", self.conn)
tm.assert_frame_equal(df, res)
def test_mixed_dtype_insert(self):
# see GH6509
s1 = Series(2 ** 25 + 1, dtype=np.int32)
s2 = Series(0.0, dtype=np.float32)
df = DataFrame({"s1": s1, "s2": s2})
# write and read again
df.to_sql("test_read_write", self.conn, index=False)
df2 = sql.read_sql_table("test_read_write", self.conn)
tm.assert_frame_equal(df, df2, check_dtype=False, check_exact=True)
def test_nan_numeric(self):
# NaNs in numeric float column
df = DataFrame({"A": [0, 1, 2], "B": [0.2, np.nan, 5.6]})
df.to_sql("test_nan", self.conn, index=False)
# with read_table
result = sql.read_sql_table("test_nan", self.conn)
tm.assert_frame_equal(result, df)
# with read_sql
result = sql.read_sql_query("SELECT * FROM test_nan", self.conn)
tm.assert_frame_equal(result, df)
def test_nan_fullcolumn(self):
# full NaN column (numeric float column)
df = DataFrame({"A": [0, 1, 2], "B": [np.nan, np.nan, np.nan]})
df.to_sql("test_nan", self.conn, index=False)
# with read_table
result = sql.read_sql_table("test_nan", self.conn)
tm.assert_frame_equal(result, df)
# with read_sql -> no type info from table -> stays None
df["B"] = df["B"].astype("object")
df["B"] = None
result = sql.read_sql_query("SELECT * FROM test_nan", self.conn)
tm.assert_frame_equal(result, df)
def test_nan_string(self):
# NaNs in string column
df = DataFrame({"A": [0, 1, 2], "B": ["a", "b", np.nan]})
df.to_sql("test_nan", self.conn, index=False)
# NaNs are coming back as None
df.loc[2, "B"] = None
# with read_table
result = sql.read_sql_table("test_nan", self.conn)
tm.assert_frame_equal(result, df)
# with read_sql
result = sql.read_sql_query("SELECT * FROM test_nan", self.conn)
tm.assert_frame_equal(result, df)
def _get_index_columns(self, tbl_name):
from sqlalchemy.engine import reflection
insp = reflection.Inspector.from_engine(self.conn)
ixs = insp.get_indexes(tbl_name)
ixs = [i["column_names"] for i in ixs]
return ixs
def test_to_sql_save_index(self):
self._to_sql_save_index()
def test_transactions(self):
self._transaction_test()
def test_get_schema_create_table(self):
# Use a dataframe without a bool column, since MySQL converts bool to
# TINYINT (which read_sql_table returns as an int and causes a dtype
# mismatch)
self._load_test3_data()
tbl = "test_get_schema_create_table"
create_sql = sql.get_schema(self.test_frame3, tbl, con=self.conn)
blank_test_df = self.test_frame3.iloc[:0]
self.drop_table(tbl)
self.conn.execute(create_sql)
returned_df = sql.read_sql_table(tbl, self.conn)
tm.assert_frame_equal(returned_df, blank_test_df, check_index_type=False)
self.drop_table(tbl)
def test_dtype(self):
cols = ["A", "B"]
data = [(0.8, True), (0.9, None)]
df = DataFrame(data, columns=cols)
df.to_sql("dtype_test", self.conn)
df.to_sql("dtype_test2", self.conn, dtype={"B": sqlalchemy.TEXT})
meta = sqlalchemy.schema.MetaData(bind=self.conn)
meta.reflect()
sqltype = meta.tables["dtype_test2"].columns["B"].type
assert isinstance(sqltype, sqlalchemy.TEXT)
msg = "The type of B is not a SQLAlchemy type"
with pytest.raises(ValueError, match=msg):
df.to_sql("error", self.conn, dtype={"B": str})
# GH9083
df.to_sql("dtype_test3", self.conn, dtype={"B": sqlalchemy.String(10)})
meta.reflect()
sqltype = meta.tables["dtype_test3"].columns["B"].type
assert isinstance(sqltype, sqlalchemy.String)
assert sqltype.length == 10
# single dtype
df.to_sql("single_dtype_test", self.conn, dtype=sqlalchemy.TEXT)
meta = sqlalchemy.schema.MetaData(bind=self.conn)
meta.reflect()
sqltypea = meta.tables["single_dtype_test"].columns["A"].type
sqltypeb = meta.tables["single_dtype_test"].columns["B"].type
assert isinstance(sqltypea, sqlalchemy.TEXT)
assert isinstance(sqltypeb, sqlalchemy.TEXT)
def test_notna_dtype(self):
cols = {
"Bool": Series([True, None]),
"Date": Series([datetime(2012, 5, 1), None]),
"Int": Series([1, None], dtype="object"),
"Float": Series([1.1, None]),
}
df = DataFrame(cols)
tbl = "notna_dtype_test"
df.to_sql(tbl, self.conn)
returned_df = sql.read_sql_table(tbl, self.conn) # noqa
meta = sqlalchemy.schema.MetaData(bind=self.conn)
meta.reflect()
if self.flavor == "mysql":
my_type = sqltypes.Integer
else:
my_type = sqltypes.Boolean
col_dict = meta.tables[tbl].columns
assert isinstance(col_dict["Bool"].type, my_type)
assert isinstance(col_dict["Date"].type, sqltypes.DateTime)
assert isinstance(col_dict["Int"].type, sqltypes.Integer)
assert isinstance(col_dict["Float"].type, sqltypes.Float)
def test_double_precision(self):
V = 1.23456789101112131415
df = DataFrame(
{
"f32": Series([V], dtype="float32"),
"f64": Series([V], dtype="float64"),
"f64_as_f32": Series([V], dtype="float64"),
"i32": Series([5], dtype="int32"),
"i64": Series([5], dtype="int64"),
}
)
df.to_sql(
"test_dtypes",
self.conn,
index=False,
if_exists="replace",
dtype={"f64_as_f32": sqlalchemy.Float(precision=23)},
)
res = sql.read_sql_table("test_dtypes", self.conn)
# check precision of float64
assert np.round(df["f64"].iloc[0], 14) == np.round(res["f64"].iloc[0], 14)
# check sql types
meta = sqlalchemy.schema.MetaData(bind=self.conn)
meta.reflect()
col_dict = meta.tables["test_dtypes"].columns
assert str(col_dict["f32"].type) == str(col_dict["f64_as_f32"].type)
assert isinstance(col_dict["f32"].type, sqltypes.Float)
assert isinstance(col_dict["f64"].type, sqltypes.Float)
assert isinstance(col_dict["i32"].type, sqltypes.Integer)
assert isinstance(col_dict["i64"].type, sqltypes.BigInteger)
def test_connectable_issue_example(self):
# This tests the example raised in issue
# https://github.com/pandas-dev/pandas/issues/10104
def foo(connection):
query = "SELECT test_foo_data FROM test_foo_data"
return sql.read_sql_query(query, con=connection)
def bar(connection, data):
data.to_sql(name="test_foo_data", con=connection, if_exists="append")
def main(connectable):
with connectable.connect() as conn:
with conn.begin():
foo_data = conn.run_callable(foo)
conn.run_callable(bar, foo_data)
DataFrame({"test_foo_data": [0, 1, 2]}).to_sql("test_foo_data", self.conn)
main(self.conn)
def test_temporary_table(self):
test_data = "Hello, World!"
expected = DataFrame({"spam": [test_data]})
Base = declarative.declarative_base()
class Temporary(Base):
__tablename__ = "temp_test"
__table_args__ = {"prefixes": ["TEMPORARY"]}
id = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True)
spam = sqlalchemy.Column(sqlalchemy.Unicode(30), nullable=False)
Session = sa_session.sessionmaker(bind=self.conn)
session = Session()
with session.transaction:
conn = session.connection()
Temporary.__table__.create(conn)
session.add(Temporary(spam=test_data))
session.flush()
df = sql.read_sql_query(sql=sqlalchemy.select([Temporary.spam]), con=conn)
tm.assert_frame_equal(df, expected)
class _TestSQLAlchemyConn(_EngineToConnMixin, _TestSQLAlchemy):
def test_transactions(self):
pytest.skip("Nested transactions rollbacks don't work with Pandas")
class _TestSQLiteAlchemy:
"""
Test the sqlalchemy backend against an in-memory sqlite database.
"""
flavor = "sqlite"
@classmethod
def connect(cls):
return sqlalchemy.create_engine("sqlite:///:memory:")
@classmethod
def setup_driver(cls):
# sqlite3 is built-in
cls.driver = None
def test_default_type_conversion(self):
df = sql.read_sql_table("types_test_data", self.conn)
assert issubclass(df.FloatCol.dtype.type, np.floating)
assert issubclass(df.IntCol.dtype.type, np.integer)
# sqlite has no boolean type, so integer type is returned
assert issubclass(df.BoolCol.dtype.type, np.integer)
# Int column with NA values stays as float
assert issubclass(df.IntColWithNull.dtype.type, np.floating)
# Non-native Bool column with NA values stays as float
assert issubclass(df.BoolColWithNull.dtype.type, np.floating)
def test_default_date_load(self):
df = sql.read_sql_table("types_test_data", self.conn)
# IMPORTANT - sqlite has no native date type, so shouldn't parse, but
assert not issubclass(df.DateCol.dtype.type, np.datetime64)
def test_bigint_warning(self):
# test no warning for BIGINT (to support int64) is raised (GH7433)
df = DataFrame({"a": [1, 2]}, dtype="int64")
df.to_sql("test_bigintwarning", self.conn, index=False)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
sql.read_sql_table("test_bigintwarning", self.conn)
assert len(w) == 0
class _TestMySQLAlchemy:
"""
Test the sqlalchemy backend against a MySQL database.
"""
flavor = "mysql"
@classmethod
def connect(cls):
url = "mysql+{driver}://root@localhost/pandas_nosetest"
return sqlalchemy.create_engine(
url.format(driver=cls.driver), connect_args=cls.connect_args
)
@classmethod
def setup_driver(cls):
pymysql = pytest.importorskip("pymysql")
cls.driver = "pymysql"
cls.connect_args = {"client_flag": pymysql.constants.CLIENT.MULTI_STATEMENTS}
def test_default_type_conversion(self):
df = sql.read_sql_table("types_test_data", self.conn)
assert issubclass(df.FloatCol.dtype.type, np.floating)
assert issubclass(df.IntCol.dtype.type, np.integer)
# MySQL has no real BOOL type (it's an alias for TINYINT)
assert issubclass(df.BoolCol.dtype.type, np.integer)
# Int column with NA values stays as float
assert issubclass(df.IntColWithNull.dtype.type, np.floating)
# Bool column with NA = int column with NA values => becomes float
assert issubclass(df.BoolColWithNull.dtype.type, np.floating)
def test_read_procedure(self):
import pymysql
# see GH7324. Although it is more an api test, it is added to the
# mysql tests as sqlite does not have stored procedures
df = DataFrame({"a": [1, 2, 3], "b": [0.1, 0.2, 0.3]})
df.to_sql("test_procedure", self.conn, index=False)
proc = """DROP PROCEDURE IF EXISTS get_testdb;
CREATE PROCEDURE get_testdb ()
BEGIN
SELECT * FROM test_procedure;
END"""
connection = self.conn.connect()
trans = connection.begin()
try:
r1 = connection.execute(proc) # noqa
trans.commit()
except pymysql.Error:
trans.rollback()
raise
res1 = sql.read_sql_query("CALL get_testdb();", self.conn)
tm.assert_frame_equal(df, res1)
# test delegation to read_sql_query
res2 = sql.read_sql("CALL get_testdb();", self.conn)
tm.assert_frame_equal(df, res2)
class _TestPostgreSQLAlchemy:
"""
Test the sqlalchemy backend against a PostgreSQL database.
"""
flavor = "postgresql"
@classmethod
def connect(cls):
url = "postgresql+{driver}://postgres@localhost/pandas_nosetest"
return sqlalchemy.create_engine(url.format(driver=cls.driver))
@classmethod
def setup_driver(cls):
pytest.importorskip("psycopg2")
cls.driver = "psycopg2"
def test_schema_support(self):
# only test this for postgresql (schemas are not supported in
# mysql/sqlite)
df = DataFrame({"col1": [1, 2], "col2": [0.1, 0.2], "col3": ["a", "n"]})
# create a schema
self.conn.execute("DROP SCHEMA IF EXISTS other CASCADE;")
self.conn.execute("CREATE SCHEMA other;")
# write dataframe to different schemas
df.to_sql("test_schema_public", self.conn, index=False)
df.to_sql(
"test_schema_public_explicit", self.conn, index=False, schema="public"
)
df.to_sql("test_schema_other", self.conn, index=False, schema="other")
# read dataframes back in
res1 = sql.read_sql_table("test_schema_public", self.conn)
tm.assert_frame_equal(df, res1)
res2 = sql.read_sql_table("test_schema_public_explicit", self.conn)
tm.assert_frame_equal(df, res2)
res3 = sql.read_sql_table(
"test_schema_public_explicit", self.conn, schema="public"
)
tm.assert_frame_equal(df, res3)
res4 = sql.read_sql_table("test_schema_other", self.conn, schema="other")
tm.assert_frame_equal(df, res4)
msg = "Table test_schema_other not found"
with pytest.raises(ValueError, match=msg):
sql.read_sql_table("test_schema_other", self.conn, schema="public")
# different if_exists options
# create a schema
self.conn.execute("DROP SCHEMA IF EXISTS other CASCADE;")
self.conn.execute("CREATE SCHEMA other;")
# write dataframe with different if_exists options
df.to_sql("test_schema_other", self.conn, schema="other", index=False)
df.to_sql(
"test_schema_other",
self.conn,
schema="other",
index=False,
if_exists="replace",
)
df.to_sql(
"test_schema_other",
self.conn,
schema="other",
index=False,
if_exists="append",
)
res = sql.read_sql_table("test_schema_other", self.conn, schema="other")
tm.assert_frame_equal(concat([df, df], ignore_index=True), res)
# specifying schema in user-provided meta
# The schema won't be applied on another Connection
# because of transactional schemas
if isinstance(self.conn, sqlalchemy.engine.Engine):
engine2 = self.connect()
meta = sqlalchemy.MetaData(engine2, schema="other")
pdsql = sql.SQLDatabase(engine2, meta=meta)
pdsql.to_sql(df, "test_schema_other2", index=False)
pdsql.to_sql(df, "test_schema_other2", index=False, if_exists="replace")
pdsql.to_sql(df, "test_schema_other2", index=False, if_exists="append")
res1 = sql.read_sql_table("test_schema_other2", self.conn, schema="other")
res2 = pdsql.read_table("test_schema_other2")
tm.assert_frame_equal(res1, res2)
def test_copy_from_callable_insertion_method(self):
# GH 8953
# Example in io.rst found under _io.sql.method
# not available in sqlite, mysql
def psql_insert_copy(table, conn, keys, data_iter):
# gets a DBAPI connection that can provide a cursor
dbapi_conn = conn.connection
with dbapi_conn.cursor() as cur:
s_buf = StringIO()
writer = csv.writer(s_buf)
writer.writerows(data_iter)
s_buf.seek(0)
columns = ", ".join(f'"{k}"' for k in keys)
if table.schema:
table_name = f"{table.schema}.{table.name}"
else:
table_name = table.name
sql_query = f"COPY {table_name} ({columns}) FROM STDIN WITH CSV"
cur.copy_expert(sql=sql_query, file=s_buf)
expected = DataFrame({"col1": [1, 2], "col2": [0.1, 0.2], "col3": ["a", "n"]})
expected.to_sql(
"test_copy_insert", self.conn, index=False, method=psql_insert_copy
)
result = sql.read_sql_table("test_copy_insert", self.conn)
tm.assert_frame_equal(result, expected)
@pytest.mark.single
@pytest.mark.db
class TestMySQLAlchemy(_TestMySQLAlchemy, _TestSQLAlchemy):
pass
@pytest.mark.single
@pytest.mark.db
class TestMySQLAlchemyConn(_TestMySQLAlchemy, _TestSQLAlchemyConn):
pass
@pytest.mark.single
@pytest.mark.db
class TestPostgreSQLAlchemy(_TestPostgreSQLAlchemy, _TestSQLAlchemy):
pass
@pytest.mark.single
@pytest.mark.db
class TestPostgreSQLAlchemyConn(_TestPostgreSQLAlchemy, _TestSQLAlchemyConn):
pass
@pytest.mark.single
class TestSQLiteAlchemy(_TestSQLiteAlchemy, _TestSQLAlchemy):
pass
@pytest.mark.single
class TestSQLiteAlchemyConn(_TestSQLiteAlchemy, _TestSQLAlchemyConn):
pass
# -----------------------------------------------------------------------------
# -- Test Sqlite / MySQL fallback
@pytest.mark.single
class TestSQLiteFallback(SQLiteMixIn, PandasSQLTest):
"""
Test the fallback mode against an in-memory sqlite database.
"""
flavor = "sqlite"
@classmethod
def connect(cls):
return sqlite3.connect(":memory:")
def setup_connect(self):
self.conn = self.connect()
def load_test_data_and_sql(self):
self.pandasSQL = sql.SQLiteDatabase(self.conn)
self._load_test1_data()
@pytest.fixture(autouse=True)
def setup_method(self, load_iris_data):
self.load_test_data_and_sql()
def test_read_sql(self):
self._read_sql_iris()
def test_read_sql_parameter(self):
self._read_sql_iris_parameter()
def test_read_sql_named_parameter(self):
self._read_sql_iris_named_parameter()
def test_to_sql(self):
self._to_sql()
def test_to_sql_empty(self):
self._to_sql_empty()
def test_to_sql_fail(self):
self._to_sql_fail()
def test_to_sql_replace(self):
self._to_sql_replace()
def test_to_sql_append(self):
self._to_sql_append()
def test_to_sql_method_multi(self):
# GH 29921
self._to_sql(method="multi")
def test_create_and_drop_table(self):
temp_frame = DataFrame(
{"one": [1.0, 2.0, 3.0, 4.0], "two": [4.0, 3.0, 2.0, 1.0]}
)
self.pandasSQL.to_sql(temp_frame, "drop_test_frame")
assert self.pandasSQL.has_table("drop_test_frame")
self.pandasSQL.drop_table("drop_test_frame")
assert not self.pandasSQL.has_table("drop_test_frame")
def test_roundtrip(self):
self._roundtrip()
def test_execute_sql(self):
self._execute_sql()
def test_datetime_date(self):
# test support for datetime.date
df = DataFrame([date(2014, 1, 1), date(2014, 1, 2)], columns=["a"])
df.to_sql("test_date", self.conn, index=False)
res = read_sql_query("SELECT * FROM test_date", self.conn)
if self.flavor == "sqlite":
# comes back as strings
tm.assert_frame_equal(res, df.astype(str))
elif self.flavor == "mysql":
tm.assert_frame_equal(res, df)
def test_datetime_time(self):
# test support for datetime.time, GH #8341
df = DataFrame([time(9, 0, 0), time(9, 1, 30)], columns=["a"])
df.to_sql("test_time", self.conn, index=False)
res = read_sql_query("SELECT * FROM test_time", self.conn)
if self.flavor == "sqlite":
# comes back as strings
expected = df.applymap(lambda _: _.strftime("%H:%M:%S.%f"))
tm.assert_frame_equal(res, expected)
def _get_index_columns(self, tbl_name):
ixs = sql.read_sql_query(
"SELECT * FROM sqlite_master WHERE type = 'index' "
+ f"AND tbl_name = '{tbl_name}'",
self.conn,
)
ix_cols = []
for ix_name in ixs.name:
ix_info = sql.read_sql_query(f"PRAGMA index_info({ix_name})", self.conn)
ix_cols.append(ix_info.name.tolist())
return ix_cols
def test_to_sql_save_index(self):
self._to_sql_save_index()
def test_transactions(self):
self._transaction_test()
def _get_sqlite_column_type(self, table, column):
recs = self.conn.execute(f"PRAGMA table_info({table})")
for cid, name, ctype, not_null, default, pk in recs:
if name == column:
return ctype
raise ValueError(f"Table {table}, column {column} not found")
def test_dtype(self):
if self.flavor == "mysql":
pytest.skip("Not applicable to MySQL legacy")
cols = ["A", "B"]
data = [(0.8, True), (0.9, None)]
df = DataFrame(data, columns=cols)
df.to_sql("dtype_test", self.conn)
df.to_sql("dtype_test2", self.conn, dtype={"B": "STRING"})
# sqlite stores Boolean values as INTEGER
assert self._get_sqlite_column_type("dtype_test", "B") == "INTEGER"
assert self._get_sqlite_column_type("dtype_test2", "B") == "STRING"
msg = r"B \(<class 'bool'>\) not a string"
with pytest.raises(ValueError, match=msg):
df.to_sql("error", self.conn, dtype={"B": bool})
# single dtype
df.to_sql("single_dtype_test", self.conn, dtype="STRING")
assert self._get_sqlite_column_type("single_dtype_test", "A") == "STRING"
assert self._get_sqlite_column_type("single_dtype_test", "B") == "STRING"
def test_notna_dtype(self):
if self.flavor == "mysql":
pytest.skip("Not applicable to MySQL legacy")
cols = {
"Bool": Series([True, None]),
"Date": Series([datetime(2012, 5, 1), None]),
"Int": Series([1, None], dtype="object"),
"Float": Series([1.1, None]),
}
df = DataFrame(cols)
tbl = "notna_dtype_test"
df.to_sql(tbl, self.conn)
assert self._get_sqlite_column_type(tbl, "Bool") == "INTEGER"
assert self._get_sqlite_column_type(tbl, "Date") == "TIMESTAMP"
assert self._get_sqlite_column_type(tbl, "Int") == "INTEGER"
assert self._get_sqlite_column_type(tbl, "Float") == "REAL"
def test_illegal_names(self):
# For sqlite, these should work fine
df = DataFrame([[1, 2], [3, 4]], columns=["a", "b"])
msg = "Empty table or column name specified"
with pytest.raises(ValueError, match=msg):
df.to_sql("", self.conn)
for ndx, weird_name in enumerate(
[
"test_weird_name]",
"test_weird_name[",
"test_weird_name`",
'test_weird_name"',
"test_weird_name'",
"_b.test_weird_name_01-30",
'"_b.test_weird_name_01-30"',
"99beginswithnumber",
"12345",
"\xe9",
]
):
df.to_sql(weird_name, self.conn)
sql.table_exists(weird_name, self.conn)
df2 = DataFrame([[1, 2], [3, 4]], columns=["a", weird_name])
c_tbl = f"test_weird_col_name{ndx:d}"
df2.to_sql(c_tbl, self.conn)
sql.table_exists(c_tbl, self.conn)
# -----------------------------------------------------------------------------
# -- Old tests from 0.13.1 (before refactor using sqlalchemy)
def date_format(dt):
"""Returns date in YYYYMMDD format."""
return dt.strftime("%Y%m%d")
_formatters = {
datetime: "'{}'".format,
str: "'{}'".format,
np.str_: "'{}'".format,
bytes: "'{}'".format,
float: "{:.8f}".format,
int: "{:d}".format,
type(None): lambda x: "NULL",
np.float64: "{:.10f}".format,
bool: "'{!s}'".format,
}
def format_query(sql, *args):
"""
"""
processed_args = []
for arg in args:
if isinstance(arg, float) and isna(arg):
arg = None
formatter = _formatters[type(arg)]
processed_args.append(formatter(arg))
return sql % tuple(processed_args)
def tquery(query, con=None, cur=None):
"""Replace removed sql.tquery function"""
res = sql.execute(query, con=con, cur=cur).fetchall()
if res is None:
return None
else:
return list(res)
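# A minimal, hedged usage sketch (not part of the original test module) showing what the
# two legacy helpers above do: format_query() substitutes Python literals into a %s-style
# statement via the _formatters mapping, and tquery() executes a statement and returns the
# rows as a plain list. Table and column names below are illustrative only.
def _format_query_usage_sketch():
    demo_conn = sqlite3.connect(":memory:")
    demo_conn.execute("CREATE TABLE demo (a TEXT, b REAL)")
    ins = "INSERT INTO demo VALUES (%s, %s)"
    # str args are quoted, floats are rendered with 8 decimals, None becomes NULL
    demo_conn.execute(format_query(ins, "foo", 1.25))
    demo_conn.commit()
    # wraps sql.execute(...).fetchall(); returns [('foo', 1.25)]
    return tquery("select * from demo", con=demo_conn)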
@pytest.mark.single
class TestXSQLite(SQLiteMixIn):
@pytest.fixture(autouse=True)
def setup_method(self, request, datapath):
self.method = request.function
self.conn = sqlite3.connect(":memory:")
# In some test cases we may close db connection
# Re-open conn here so we can perform cleanup in teardown
yield
self.method = request.function
self.conn = sqlite3.connect(":memory:")
def test_basic(self):
frame = tm.makeTimeDataFrame()
self._check_roundtrip(frame)
def test_write_row_by_row(self):
frame = tm.makeTimeDataFrame()
frame.iloc[0, 0] = np.nan
create_sql = sql.get_schema(frame, "test")
cur = self.conn.cursor()
cur.execute(create_sql)
cur = self.conn.cursor()
ins = "INSERT INTO test VALUES (%s, %s, %s, %s)"
for idx, row in frame.iterrows():
fmt_sql = format_query(ins, *row)
tquery(fmt_sql, cur=cur)
self.conn.commit()
result = sql.read_sql("select * from test", con=self.conn)
result.index = frame.index
tm.assert_frame_equal(result, frame, check_less_precise=True)
def test_execute(self):
frame = tm.makeTimeDataFrame()
create_sql = sql.get_schema(frame, "test")
cur = self.conn.cursor()
cur.execute(create_sql)
ins = "INSERT INTO test VALUES (?, ?, ?, ?)"
row = frame.iloc[0]
sql.execute(ins, self.conn, params=tuple(row))
self.conn.commit()
result = sql.read_sql("select * from test", self.conn)
result.index = frame.index[:1]
tm.assert_frame_equal(result, frame[:1])
def test_schema(self):
frame = tm.makeTimeDataFrame()
create_sql = sql.get_schema(frame, "test")
lines = create_sql.splitlines()
for l in lines:
tokens = l.split(" ")
if len(tokens) == 2 and tokens[0] == "A":
assert tokens[1] == "DATETIME"
frame = tm.makeTimeDataFrame()
create_sql = sql.get_schema(frame, "test", keys=["A", "B"])
lines = create_sql.splitlines()
assert 'PRIMARY KEY ("A", "B")' in create_sql
cur = self.conn.cursor()
cur.execute(create_sql)
def test_execute_fail(self):
create_sql = """
CREATE TABLE test
(
a TEXT,
b TEXT,
c REAL,
PRIMARY KEY (a, b)
);
"""
cur = self.conn.cursor()
cur.execute(create_sql)
sql.execute('INSERT INTO test VALUES("foo", "bar", 1.234)', self.conn)
sql.execute('INSERT INTO test VALUES("foo", "baz", 2.567)', self.conn)
with pytest.raises(Exception):
sql.execute('INSERT INTO test VALUES("foo", "bar", 7)', self.conn)
def test_execute_closed_connection(self):
create_sql = """
CREATE TABLE test
(
a TEXT,
b TEXT,
c REAL,
PRIMARY KEY (a, b)
);
"""
cur = self.conn.cursor()
cur.execute(create_sql)
sql.execute('INSERT INTO test VALUES("foo", "bar", 1.234)', self.conn)
self.conn.close()
with pytest.raises(Exception):
tquery("select * from test", con=self.conn)
def test_na_roundtrip(self):
pass
def _check_roundtrip(self, frame):
sql.to_sql(frame, name="test_table", con=self.conn, index=False)
result = sql.read_sql("select * from test_table", self.conn)
# HACK! Change this once indexes are handled properly.
result.index = frame.index
expected = frame
tm.assert_frame_equal(result, expected)
frame["txt"] = ["a"] * len(frame)
frame2 = frame.copy()
new_idx = Index(np.arange(len(frame2))) + 10
frame2["Idx"] = new_idx.copy()
sql.to_sql(frame2, name="test_table2", con=self.conn, index=False)
result = sql.read_sql("select * from test_table2", self.conn, index_col="Idx")
expected = frame.copy()
expected.index = new_idx
expected.index.name = "Idx"
tm.assert_frame_equal(expected, result)
def test_keyword_as_column_names(self):
df = DataFrame({"From": np.ones(5)})
sql.to_sql(df, con=self.conn, name="testkeywords", index=False)
def test_onecolumn_of_integer(self):
# GH 3628
# a column_of_integers dataframe should transfer well to sql
mono_df = DataFrame([1, 2], columns=["c0"])
sql.to_sql(mono_df, con=self.conn, name="mono_df", index=False)
# computing the sum via sql
con_x = self.conn
the_sum = sum(my_c0[0] for my_c0 in con_x.execute("select * from mono_df"))
# it should not fail, and gives 3 ( Issue #3628 )
assert the_sum == 3
result = sql.read_sql("select * from mono_df", con_x)
tm.assert_frame_equal(result, mono_df)
def test_if_exists(self):
df_if_exists_1 = DataFrame({"col1": [1, 2], "col2": ["A", "B"]})
df_if_exists_2 = DataFrame({"col1": [3, 4, 5], "col2": ["C", "D", "E"]})
table_name = "table_if_exists"
sql_select = f"SELECT * FROM {table_name}"
def clean_up(test_table_to_drop):
"""
Drops tables created from individual tests
so no dependencies arise from sequential tests
"""
self.drop_table(test_table_to_drop)
msg = "'notvalidvalue' is not valid for if_exists"
with pytest.raises(ValueError, match=msg):
sql.to_sql(
frame=df_if_exists_1,
con=self.conn,
name=table_name,
if_exists="notvalidvalue",
)
clean_up(table_name)
# test if_exists='fail'
sql.to_sql(
frame=df_if_exists_1, con=self.conn, name=table_name, if_exists="fail"
)
msg = "Table 'table_if_exists' already exists"
with pytest.raises(ValueError, match=msg):
sql.to_sql(
frame=df_if_exists_1, con=self.conn, name=table_name, if_exists="fail"
)
# test if_exists='replace'
sql.to_sql(
frame=df_if_exists_1,
con=self.conn,
name=table_name,
if_exists="replace",
index=False,
)
assert tquery(sql_select, con=self.conn) == [(1, "A"), (2, "B")]
sql.to_sql(
frame=df_if_exists_2,
con=self.conn,
name=table_name,
if_exists="replace",
index=False,
)
assert tquery(sql_select, con=self.conn) == [(3, "C"), (4, "D"), (5, "E")]
clean_up(table_name)
# test if_exists='append'
sql.to_sql(
frame=df_if_exists_1,
con=self.conn,
name=table_name,
if_exists="fail",
index=False,
)
assert tquery(sql_select, con=self.conn) == [(1, "A"), (2, "B")]
sql.to_sql(
frame=df_if_exists_2,
con=self.conn,
name=table_name,
if_exists="append",
index=False,
)
assert tquery(sql_select, con=self.conn) == [
(1, "A"),
(2, "B"),
(3, "C"),
(4, "D"),
(5, "E"),
]
clean_up(table_name)
@pytest.mark.single
@pytest.mark.db
@pytest.mark.skip(
reason="gh-13611: there is no support for MySQL if SQLAlchemy is not installed"
)
class TestXMySQL(MySQLMixIn):
@pytest.fixture(autouse=True, scope="class")
def setup_class(cls):
pymysql = pytest.importorskip("pymysql")
pymysql.connect(host="localhost", user="root", passwd="", db="pandas_nosetest")
try:
pymysql.connect(read_default_group="pandas")
except pymysql.ProgrammingError:
raise RuntimeError(
"Create a group of connection parameters under the heading "
"[pandas] in your system's mysql default file, "
"typically located at ~/.my.cnf or /etc/.my.cnf."
)
except pymysql.Error:
raise RuntimeError(
"Cannot connect to database. "
"Create a group of connection parameters under the heading "
"[pandas] in your system's mysql default file, "
"typically located at ~/.my.cnf or /etc/.my.cnf."
)
@pytest.fixture(autouse=True)
def setup_method(self, request, datapath):
pymysql = pytest.importorskip("pymysql")
pymysql.connect(host="localhost", user="root", passwd="", db="pandas_nosetest")
try:
pymysql.connect(read_default_group="pandas")
except pymysql.ProgrammingError:
raise RuntimeError(
"Create a group of connection parameters under the heading "
"[pandas] in your system's mysql default file, "
"typically located at ~/.my.cnf or /etc/.my.cnf."
)
except pymysql.Error:
raise RuntimeError(
"Cannot connect to database. "
"Create a group of connection parameters under the heading "
"[pandas] in your system's mysql default file, "
"typically located at ~/.my.cnf or /etc/.my.cnf."
)
self.method = request.function
def test_basic(self):
frame = tm.makeTimeDataFrame()
self._check_roundtrip(frame)
def test_write_row_by_row(self):
frame = tm.makeTimeDataFrame()
frame.iloc[0, 0] = np.nan
drop_sql = "DROP TABLE IF EXISTS test"
create_sql = sql.get_schema(frame, "test")
cur = self.conn.cursor()
cur.execute(drop_sql)
cur.execute(create_sql)
ins = "INSERT INTO test VALUES (%s, %s, %s, %s)"
for idx, row in frame.iterrows():
fmt_sql = format_query(ins, *row)
tquery(fmt_sql, cur=cur)
self.conn.commit()
result = sql.read_sql("select * from test", con=self.conn)
result.index = frame.index
| tm.assert_frame_equal(result, frame, check_less_precise=True) | pandas._testing.assert_frame_equal |
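# Hedged illustrative sketch (not taken from the test suite above): the delegation
# behaviour exercised by test_read_sql_delegate -- pandas.read_sql() forwards a SQL
# string to read_sql_query() and, given a SQLAlchemy connectable, a bare table name to
# read_sql_table(). The table name "demo" is an assumption for illustration.
import pandas as pd
import sqlalchemy
engine = sqlalchemy.create_engine("sqlite:///:memory:")
pd.DataFrame({"a": [1, 2]}).to_sql("demo", engine, index=False)
by_query = pd.read_sql("SELECT * FROM demo", engine)  # delegates to read_sql_query
by_table = pd.read_sql("demo", engine)                # delegates to read_sql_table
assert by_query.equals(by_table)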
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, inspect
import pymysql
pymysql.install_as_MySQLdb()
import pandas as pd
from flask import Flask, jsonify
import datetime as dt
from splinter import Browser
from bs4 import BeautifulSoup
import time
Base = automap_base()
engine = create_engine('sqlite:///./data/FPA_FOD_20170508.sqlite')
Base.metadata.create_all(engine)
session = Session(engine)
# Store data in dataframe
df = pd.read_sql('SELECT fire_year,fire_name, fips_name, fire_size, stat_cause_descr, latitude, longitude, fips_code, DISCOVERY_DATE, CONT_DATE FROM Fires WHERE state == "CA" AND fire_year >= 2010 and fire_year <= 2014 and fire_size > 1000 and county <> "none"', engine)
merge_df = df.rename(index=str,columns={"FIRE_YEAR":"Fire Year","FIRE_NAME":"Fire Name","FIRE_SIZE":"Acres Burned",
"STAT_CAUSE_DESCR":"Fire Cause","LATITUDE":"Latitude","LONGITUDE":"Longitude",
"FIPS_CODE":"FIPS Code","FIPS_NAME":"County","DISCOVERY_DATE":"Start Date",
"CONT_DATE":"Containment Date"})
merge_df = merge_df[["Fire Year","Fire Name","Acres Burned","Fire Cause","Latitude","Longitude","FIPS Code","County","Start Date","Containment Date"]]
merge_df["Number of Days"] = ""
# Web scraping
browser = Browser("chrome", executable_path='chromedriver.exe', headless=True)
# 2015 data
url = "https://en.wikipedia.org/wiki/2015_California_wildfires"
url = "https://en.wikipedia.org/wiki/2015_California_wildfires"
df_2015_list = | pd.read_html(url) | pandas.read_html |
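# Hedged sketch (illustrative, not from the row above): pd.read_html() returns a list of
# DataFrames, one per <table> element found, so the scraped result normally needs to be
# indexed or filtered afterwards. The inline HTML and the index 0 below are assumptions.
from io import StringIO
import pandas as pd
html = "<table><tr><th>Fire</th><th>Acres</th></tr><tr><td>Demo</td><td>1000</td></tr></table>"
tables = pd.read_html(StringIO(html))  # list of DataFrames, one per <table>
demo_df = tables[0]                    # pick the table of interest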
# Arithmetic tests for DataFrame/Series/Index/Array classes that should
# behave identically.
# Specifically for Period dtype
import operator
import numpy as np
import pytest
from pandas._libs.tslibs.period import IncompatibleFrequency
from pandas.errors import PerformanceWarning
import pandas as pd
from pandas import Period, PeriodIndex, Series, period_range
from pandas.core import ops
from pandas.core.arrays import TimedeltaArray
import pandas.util.testing as tm
from pandas.tseries.frequencies import to_offset
# ------------------------------------------------------------------
# Comparisons
class TestPeriodArrayLikeComparisons:
# Comparison tests for PeriodDtype vectors fully parametrized over
# DataFrame/Series/PeriodIndex/PeriodArray. Ideally all comparison
# tests will eventually end up here.
def test_compare_zerodim(self, box_with_array):
# GH#26689 make sure we unbox zero-dimensional arrays
xbox = box_with_array if box_with_array is not pd.Index else np.ndarray
pi = pd.period_range("2000", periods=4)
other = np.array(pi.to_numpy()[0])
pi = tm.box_expected(pi, box_with_array)
result = pi <= other
expected = np.array([True, False, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
class TestPeriodIndexComparisons:
# TODO: parameterize over boxes
@pytest.mark.parametrize("other", ["2017", 2017])
def test_eq(self, other):
idx = PeriodIndex(["2017", "2017", "2018"], freq="D")
expected = np.array([True, True, False])
result = idx == other
tm.assert_numpy_array_equal(result, expected)
def test_pi_cmp_period(self):
idx = period_range("2007-01", periods=20, freq="M")
result = idx < idx[10]
exp = idx.values < idx.values[10]
tm.assert_numpy_array_equal(result, exp)
# TODO: moved from test_datetime64; de-duplicate with version below
def test_parr_cmp_period_scalar2(self, box_with_array):
xbox = box_with_array if box_with_array is not pd.Index else np.ndarray
pi = pd.period_range("2000-01-01", periods=10, freq="D")
val = Period("2000-01-04", freq="D")
expected = [x > val for x in pi]
ser = tm.box_expected(pi, box_with_array)
expected = tm.box_expected(expected, xbox)
result = ser > val
tm.assert_equal(result, expected)
val = pi[5]
result = ser > val
expected = [x > val for x in pi]
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
@pytest.mark.parametrize("freq", ["M", "2M", "3M"])
def test_parr_cmp_period_scalar(self, freq, box_with_array):
# GH#13200
xbox = np.ndarray if box_with_array is pd.Index else box_with_array
base = PeriodIndex(["2011-01", "2011-02", "2011-03", "2011-04"], freq=freq)
base = tm.box_expected(base, box_with_array)
per = Period("2011-02", freq=freq)
exp = np.array([False, True, False, False])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base == per, exp)
tm.assert_equal(per == base, exp)
exp = np.array([True, False, True, True])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base != per, exp)
tm.assert_equal(per != base, exp)
exp = np.array([False, False, True, True])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base > per, exp)
tm.assert_equal(per < base, exp)
exp = np.array([True, False, False, False])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base < per, exp)
tm.assert_equal(per > base, exp)
exp = np.array([False, True, True, True])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base >= per, exp)
tm.assert_equal(per <= base, exp)
exp = np.array([True, True, False, False])
exp = tm.box_expected(exp, xbox)
tm.assert_equal(base <= per, exp)
tm.assert_equal(per >= base, exp)
@pytest.mark.parametrize("freq", ["M", "2M", "3M"])
def test_parr_cmp_pi(self, freq, box_with_array):
# GH#13200
xbox = np.ndarray if box_with_array is pd.Index else box_with_array
base = PeriodIndex(["2011-01", "2011-02", "2011-03", "2011-04"], freq=freq)
base = | tm.box_expected(base, box_with_array) | pandas.util.testing.box_expected |
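# Hedged sketch (not part of the test row above): the element-wise semantics that
# test_parr_cmp_period_scalar checks -- comparing a PeriodIndex against a Period scalar
# of the same frequency broadcasts to a boolean numpy array.
import numpy as np
import pandas as pd
base = pd.PeriodIndex(["2011-01", "2011-02", "2011-03", "2011-04"], freq="M")
per = pd.Period("2011-02", freq="M")
np.testing.assert_array_equal(base == per, np.array([False, True, False, False]))
np.testing.assert_array_equal(base > per, np.array([False, False, True, True]))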
import pandas as pd
from functools import reduce
# Answer to the question:
#
# If the whole world had, relatively speaking, as many (registered) corona deaths as NL,
# how many fewer/more would that have been worldwide than the current count of 4.6 million?
#
# https://twitter.com/rcsmit/status/1434497100411293700
def save_df(df, name):
""" Saves the dataframe """
name_ = name + ".csv"
compression_opts = dict(method=None, archive_name=name_)
df.to_csv(name_, index=False, compression=compression_opts)
print("--- Saving " + name_ + " ---")
def prepare_cases_landelijk():
"""Berekent aantal overleden per leeftijdsgroep van casus_landelijk.csv (data.rivm.nl)
Returns:
df: df met aantal overleden per leeftijdsgroep
"""
url1 = "C:\\Users\\rcxsm\\Documents\\phyton_scripts\\covid19_seir_models\\input\\COVID-19_casus_landelijk.csv"
df = pd.read_csv(url1, delimiter=";", low_memory=False)
df["Date_statistics"] = | pd.to_datetime(df["Date_statistics"], format="%Y-%m-%d") | pandas.to_datetime |
"""High-level functions to help perform complex tasks
"""
from __future__ import print_function, division
import os
import multiprocessing as mp
import warnings
from datetime import datetime
import platform
import struct
import shutil
import copy
import numpy as np
import pandas as pd
import time
pd.options.display.max_colwidth = 100
from ..pyemu_warnings import PyemuWarning
try:
import flopy
except:
pass
import pyemu
from pyemu.utils.os_utils import run, start_workers
def geostatistical_draws(pst, struct_dict,num_reals=100,sigma_range=4,verbose=True):
"""construct a parameter ensemble from a prior covariance matrix
implied by geostatistical structure(s) and parameter bounds.
Args:
pst (`pyemu.Pst`): a control file (or the name of control file). The
parameter bounds in `pst` are used to define the variance of each
parameter group.
struct_dict (`dict`): a dict of GeoStruct (or structure file), and list of
pilot point template files pairs. If the values in the dict are
`pd.DataFrames`, then they must have an 'x','y', and 'parnme' column.
If the filename ends in '.csv', then a pd.DataFrame is loaded,
otherwise a pilot points file is loaded.
num_reals (`int`, optional): number of realizations to draw. Default is 100
sigma_range (`float`): a float representing the number of standard deviations
implied by parameter bounds. Default is 4.0, which implies 95% confidence parameter bounds.
verbose (`bool`, optional): flag to control output to stdout. Default is True.
Returns:
`pyemu.ParameterEnsemble`: the realized parameter ensemble.
Note:
parameters are realized by parameter group. The variance of each
parameter group is used to scale the resulting geostatistical
covariance matrix Therefore, the sill of the geostatistical structures
in `struct_dict` should be 1.0
Example::
pst = pyemu.Pst("my.pst")
sd = {"struct.dat":["hkpp.dat.tpl","vka.dat.tpl"]}
pe = pyemu.helpers.geostatistical_draws(pst,struct_dict=sd}
pe.to_csv("my_pe.csv")
"""
if isinstance(pst,str):
pst = pyemu.Pst(pst)
assert isinstance(pst,pyemu.Pst),"pst arg must be a Pst instance, not {0}".\
format(type(pst))
if verbose: print("building diagonal cov")
full_cov = pyemu.Cov.from_parameter_data(pst, sigma_range=sigma_range)
full_cov_dict = {n: float(v) for n, v in zip(full_cov.col_names, full_cov.x)}
# par_org = pst.parameter_data.copy # not sure about the need or function of this line? (BH)
par = pst.parameter_data
par_ens = []
pars_in_cov = set()
keys = list(struct_dict.keys())
keys.sort()
for gs in keys:
items = struct_dict[gs]
if verbose: print("processing ",gs)
if isinstance(gs,str):
gss = pyemu.geostats.read_struct_file(gs)
if isinstance(gss,list):
warnings.warn("using first geostat structure in file {0}".\
format(gs),PyemuWarning)
gs = gss[0]
else:
gs = gss
if gs.sill != 1.0:
warnings.warn("GeoStruct {0} sill != 1.0 - this is bad!".format(gs.name))
if not isinstance(items,list):
items = [items]
#items.sort()
for item in items:
if isinstance(item,str):
assert os.path.exists(item),"file {0} not found".\
format(item)
if item.lower().endswith(".tpl"):
df = pyemu.pp_utils.pp_tpl_to_dataframe(item)
elif item.lower().endswith(".csv"):
df = pd.read_csv(item)
else:
df = item
if "pargp" in df.columns:
if verbose: print("working on pargroups {0}".format(df.pargp.unique().tolist()))
for req in ['x','y','parnme']:
if req not in df.columns:
raise Exception("{0} is not in the columns".format(req))
missing = df.loc[df.parnme.apply(
lambda x : x not in par.parnme),"parnme"]
if len(missing) > 0:
warnings.warn("the following parameters are not " + \
"in the control file: {0}".\
format(','.join(missing)),PyemuWarning)
df = df.loc[df.parnme.apply(lambda x: x not in missing)]
if "zone" not in df.columns:
df.loc[:,"zone"] = 1
zones = df.zone.unique()
aset = set(pst.adj_par_names)
for zone in zones:
df_zone = df.loc[df.zone==zone,:].copy()
df_zone = df_zone.loc[df_zone.parnme.apply(lambda x: x in aset),:]
if df_zone.shape[0] == 0:
warnings.warn("all parameters in zone {0} tied and/or fixed, skipping...".format(zone),PyemuWarning)
continue
#df_zone.sort_values(by="parnme",inplace=True)
df_zone.sort_index(inplace=True)
if verbose: print("build cov matrix")
cov = gs.covariance_matrix(df_zone.x,df_zone.y,df_zone.parnme)
if verbose: print("done")
if verbose: print("getting diag var cov",df_zone.shape[0])
#tpl_var = np.diag(full_cov.get(list(df_zone.parnme)).x).max()
tpl_var = max([full_cov_dict[pn] for pn in df_zone.parnme])
if verbose: print("scaling full cov by diag var cov")
#cov.x *= tpl_var
for i in range(cov.shape[0]):
cov.x[i,:] *= tpl_var
# no fixed values here
pe = pyemu.ParameterEnsemble.from_gaussian_draw(pst=pst,cov=cov,num_reals=num_reals,
by_groups=False,fill=False)
#df = pe.iloc[:,:]
par_ens.append(pe._df)
pars_in_cov.update(set(pe.columns))
if verbose: print("adding remaining parameters to diagonal")
fset = set(full_cov.row_names)
diff = list(fset.difference(pars_in_cov))
if (len(diff) > 0):
name_dict = {name:i for i,name in enumerate(full_cov.row_names)}
vec = np.atleast_2d(np.array([full_cov.x[name_dict[d]] for d in diff]))
cov = pyemu.Cov(x=vec,names=diff,isdiagonal=True)
#cov = full_cov.get(diff,diff)
# here we fill in the fixed values
pe = pyemu.ParameterEnsemble.from_gaussian_draw(pst,cov,num_reals=num_reals,
fill=False)
par_ens.append(pe._df)
par_ens = pd.concat(par_ens,axis=1)
par_ens = pyemu.ParameterEnsemble(pst=pst,df=par_ens)
return par_ens
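# Hedged pre-flight sketch (not part of pyemu): enforce the sill == 1.0 expectation from
# the Note above before drawing. All file names are caller-supplied; nothing here is
# taken from a real project.
def _sill_checked_draws_sketch(pst_file, struct_file, tpl_files, num_reals=100):
    gs = pyemu.geostats.read_struct_file(struct_file)
    if isinstance(gs, list):
        gs = gs[0]
    assert abs(gs.sill - 1.0) < 1.0e-10, "rescale the structure so its sill is 1.0"
    return geostatistical_draws(pyemu.Pst(pst_file), {struct_file: tpl_files},
                                num_reals=num_reals)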
def geostatistical_prior_builder(pst, struct_dict,sigma_range=4,
verbose=False):
"""construct a full prior covariance matrix using geostastical structures
and parameter bounds information.
Args:
pst (`pyemu.Pst`): a control file instance (or the name of control file)
struct_dict (`dict`): a dict of GeoStruct (or structure file), and list of
pilot point template files pairs. If the values in the dict are
`pd.DataFrames`, then they must have an 'x','y', and 'parnme' column.
If the filename ends in '.csv', then a pd.DataFrame is loaded,
otherwise a pilot points file is loaded.
sigma_range (`float`): a float representing the number of standard deviations
implied by parameter bounds. Default is 4.0, which implies 95% confidence parameter bounds.
verbose (`bool`, optional): flag to control output to stdout. Default is False.
Returns:
`pyemu.Cov`: a covariance matrix that includes all adjustable parameters in the control
file.
Note:
The covariance of parameters associated with geostatistical structures is defined
as a mixture of GeoStruct and bounds. That is, the GeoStruct is used to construct a
pyemu.Cov, then the entire pyemu.Cov is scaled by the uncertainty implied by the bounds and
sigma_range. Sounds complicated...
Example::
pst = pyemu.Pst("my.pst")
sd = {"struct.dat":["hkpp.dat.tpl","vka.dat.tpl"]}
cov = pyemu.helpers.geostatistical_draws(pst,struct_dict=sd}
cov.to_binary("prior.jcb")
"""
if isinstance(pst,str):
pst = pyemu.Pst(pst)
assert isinstance(pst,pyemu.Pst),"pst arg must be a Pst instance, not {0}".\
format(type(pst))
if verbose: print("building diagonal cov")
full_cov = pyemu.Cov.from_parameter_data(pst,sigma_range=sigma_range)
full_cov_dict = {n:float(v) for n,v in zip(full_cov.col_names,full_cov.x)}
#full_cov = None
par = pst.parameter_data
for gs,items in struct_dict.items():
if verbose: print("processing ",gs)
if isinstance(gs,str):
gss = pyemu.geostats.read_struct_file(gs)
if isinstance(gss,list):
warnings.warn("using first geostat structure in file {0}".\
format(gs),PyemuWarning)
gs = gss[0]
else:
gs = gss
if not isinstance(items,list):
items = [items]
for item in items:
if isinstance(item,str):
assert os.path.exists(item),"file {0} not found".\
format(item)
if item.lower().endswith(".tpl"):
df = pyemu.pp_utils.pp_tpl_to_dataframe(item)
elif item.lower().endswith(".csv"):
df = pd.read_csv(item)
else:
df = item
for req in ['x','y','parnme']:
if req not in df.columns:
raise Exception("{0} is not in the columns".format(req))
missing = df.loc[df.parnme.apply(
lambda x : x not in par.parnme),"parnme"]
if len(missing) > 0:
warnings.warn("the following parameters are not " + \
"in the control file: {0}".\
format(','.join(missing)),PyemuWarning)
df = df.loc[df.parnme.apply(lambda x: x not in missing)]
if "zone" not in df.columns:
df.loc[:,"zone"] = 1
zones = df.zone.unique()
aset = set(pst.adj_par_names)
for zone in zones:
df_zone = df.loc[df.zone==zone,:].copy()
df_zone = df_zone.loc[df_zone.parnme.apply(lambda x: x in aset), :]
if df_zone.shape[0] == 0:
warnings.warn("all parameters in zone {0} tied and/or fixed, skipping...".format(zone),
PyemuWarning)
continue
#df_zone.sort_values(by="parnme",inplace=True)
df_zone.sort_index(inplace=True)
if verbose: print("build cov matrix")
cov = gs.covariance_matrix(df_zone.x,df_zone.y,df_zone.parnme)
if verbose: print("done")
# find the variance in the diagonal cov
if verbose: print("getting diag var cov",df_zone.shape[0])
#tpl_var = np.diag(full_cov.get(list(df_zone.parnme)).x).max()
tpl_var = max([full_cov_dict[pn] for pn in df_zone.parnme])
#if np.std(tpl_var) > 1.0e-6:
# warnings.warn("pars have different ranges" +\
# " , using max range as variance for all pars")
#tpl_var = tpl_var.max()
if verbose: print("scaling full cov by diag var cov")
cov *= tpl_var
if verbose: print("test for inversion")
try:
ci = cov.inv
except:
df_zone.to_csv("prior_builder_crash.csv")
raise Exception("error inverting cov {0}".
format(cov.row_names[:3]))
if verbose: print('replace in full cov')
full_cov.replace(cov)
# d = np.diag(full_cov.x)
# idx = np.argwhere(d==0.0)
# for i in idx:
# print(full_cov.names[i])
return full_cov
def _condition_on_par_knowledge(cov,par_knowledge_dict):
""" experimental function to include conditional prior information
for one or more parameters in a full covariance matrix
"""
missing = []
for parnme in par_knowledge_dict.keys():
if parnme not in cov.row_names:
missing.append(parnme)
if len(missing):
raise Exception("par knowledge dict parameters not found: {0}".\
format(','.join(missing)))
# build the selection matrix and sigma epsilon
#sel = pyemu.Cov(x=np.identity(cov.shape[0]),names=cov.row_names)
sel = cov.zero2d
sel = cov.to_pearson()
new_cov_diag = pyemu.Cov(x=np.diag(cov.as_2d.diagonal()),names=cov.row_names)
#new_cov_diag = cov.zero2d
for parnme,var in par_knowledge_dict.items():
idx = cov.row_names.index(parnme)
#sel.x[idx,:] = 1.0
#sel.x[idx,idx] = var
new_cov_diag.x[idx,idx] = var #cov.x[idx,idx]
new_cov_diag = sel * new_cov_diag * sel.T
for _ in range(2):
for parnme, var in par_knowledge_dict.items():
idx = cov.row_names.index(parnme)
# sel.x[idx,:] = 1.0
# sel.x[idx,idx] = var
new_cov_diag.x[idx, idx] = var # cov.x[idx,idx]
new_cov_diag = sel * new_cov_diag * sel.T
print(new_cov_diag)
return new_cov_diag
def kl_setup(num_eig,sr,struct,prefixes,
factors_file="kl_factors.dat",
islog=True, basis_file=None,
tpl_dir="."):
"""setup a karhuenen-Loeve based parameterization for a given
geostatistical structure.
Args:
num_eig (`int`): the number of basis vectors to retain in the
reduced basis
sr (`flopy.reference.SpatialReference`): a spatial reference instance
struct (`str`): a PEST-style structure file. Can also be a
`pyemu.geostats.Geostruct` instance.
prefixes ([`str`]): a list of parameter prefixes to generate KL
parameterization for.
factors_file (`str`, optional): name of the PEST-style interpolation
factors file to write (can be processed with FAC2REAL).
Default is "kl_factors.dat".
islog (`bool`, optional): flag to indicate if the parameters are log transformed.
Default is True
basis_file (`str`, optional): the name of the PEST-style binary (e.g. jco)
file to write the reduced basis vectors to. Default is None (not saved).
tpl_dir (`str`, optional): the directory to write the resulting
template files to. Default is "." (current directory).
Returns:
`pandas.DataFrame`: a dataframe of parameter information.
Note:
This is the companion function to `helpers.apply_kl()`
Example::
m = flopy.modflow.Modflow.load("mymodel.nam")
prefixes = ["hk","vka","ss"]
df = pyemu.helpers.kl_setup(10,m.sr,"struct.dat",prefixes)
"""
try:
import flopy
except Exception as e:
        raise Exception("error importing flopy: {0}".format(str(e)))
assert isinstance(sr,flopy.utils.SpatialReference)
# for name,array in array_dict.items():
# assert isinstance(array,np.ndarray)
# assert array.shape[0] == sr.nrow
# assert array.shape[1] == sr.ncol
# assert len(name) + len(str(num_eig)) <= 12,"name too long:{0}".\
# format(name)
if isinstance(struct,str):
assert os.path.exists(struct)
gs = pyemu.utils.read_struct_file(struct)
else:
gs = struct
names = []
for i in range(sr.nrow):
names.extend(["i{0:04d}j{1:04d}".format(i,j) for j in range(sr.ncol)])
cov = gs.covariance_matrix(sr.xcentergrid.flatten(),
sr.ycentergrid.flatten(),
names=names)
eig_names = ["eig_{0:04d}".format(i) for i in range(cov.shape[0])]
trunc_basis = cov.u
trunc_basis.col_names = eig_names
#trunc_basis.col_names = [""]
if basis_file is not None:
trunc_basis.to_binary(basis_file)
trunc_basis = trunc_basis[:,:num_eig]
eig_names = eig_names[:num_eig]
pp_df = pd.DataFrame({"name":eig_names},index=eig_names)
pp_df.loc[:,"x"] = -1.0 * sr.ncol
pp_df.loc[:,"y"] = -1.0 * sr.nrow
pp_df.loc[:,"zone"] = -999
pp_df.loc[:,"parval1"] = 1.0
pyemu.pp_utils.write_pp_file(os.path.join("temp.dat"),pp_df)
_eigen_basis_to_factor_file(sr.nrow, sr.ncol, trunc_basis,
factors_file=factors_file, islog=islog)
dfs = []
for prefix in prefixes:
tpl_file = os.path.join(tpl_dir,"{0}.dat_kl.tpl".format(prefix))
df = pyemu.pp_utils.pilot_points_to_tpl("temp.dat",tpl_file,prefix)
shutil.copy2("temp.dat",tpl_file.replace(".tpl",""))
df.loc[:,"tpl_file"] = tpl_file
df.loc[:,"in_file"] = tpl_file.replace(".tpl","")
df.loc[:,"prefix"] = prefix
df.loc[:,"pargp"] = "kl_{0}".format(prefix)
dfs.append(df)
#arr = pyemu.geostats.fac2real(df,factors_file=factors_file,out_file=None)
df = pd.concat(dfs)
df.loc[:,"parubnd"] = 10.0
df.loc[:,"parlbnd"] = 0.1
    return df  # return the concatenated frame with the generic bounds applied
# back_array_dict = {}
# f = open(tpl_file,'w')
# f.write("ptf ~\n")
# f.write("name,org_val,new_val\n")
# for name,array in array_dict.items():
# mname = name+"mean"
# f.write("{0},{1:20.8E},~ {2} ~\n".format(mname,0.0,mname))
# #array -= array.mean()
# array_flat = pyemu.Matrix(x=np.atleast_2d(array.flatten()).transpose()
# ,col_names=["flat"],row_names=names,
# isdiagonal=False)
# factors = trunc_basis * array_flat
# enames = ["{0}{1:04d}".format(name,i) for i in range(num_eig)]
# for n,val in zip(enames,factors.x):
# f.write("{0},{1:20.8E},~ {0} ~\n".format(n,val[0]))
# back_array_dict[name] = (factors.T * trunc_basis).x.reshape(array.shape)
# print(array_back)
# print(factors.shape)
#
# return back_array_dict
def _eigen_basis_to_factor_file(nrow, ncol, basis, factors_file, islog=True):
assert nrow * ncol == basis.shape[0]
with open(factors_file,'w') as f:
f.write("junk.dat\n")
f.write("junk.zone.dat\n")
f.write("{0} {1}\n".format(ncol,nrow))
f.write("{0}\n".format(basis.shape[1]))
[f.write(name+"\n") for name in basis.col_names]
t = 0
if islog:
t = 1
for i in range(nrow * ncol):
f.write("{0} {1} {2} {3:8.5e}".format(i+1,t,basis.shape[1],0.0))
            [f.write(" {0} {1:12.8g} ".format(j + 1, w)) for j, w in enumerate(basis.x[i,:])]
f.write("\n")
def kl_apply(par_file, basis_file,par_to_file_dict,arr_shape):
""" Apply a KL parameterization transform from basis factors to model
input arrays.
Args:
par_file (`str`): the csv file to get factor values from. Must contain
the following columns: "name", "new_val", "org_val"
basis_file (`str`): the PEST-style binary file that contains the reduced
basis
par_to_file_dict (`dict`): a mapping from KL parameter prefixes to array
file names.
arr_shape (tuple): a length 2 tuple of number of rows and columns
the resulting arrays should have.
Note:
This is the companion function to kl_setup.
This function should be called during the forward run
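    Example (an illustrative sketch of a typical forward-run call; the file
    names and the "hk" prefix are placeholders, not values required by pyemu)::

        par_to_file_dict = {"hk":"hk_layer_1.ref"}
        pyemu.helpers.kl_apply("kl_pars.csv","basis.jco",
                               par_to_file_dict,arr_shape=(100,100))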
"""
df = pd.read_csv(par_file)
assert "name" in df.columns
assert "org_val" in df.columns
assert "new_val" in df.columns
df.loc[:,"prefix"] = df.name.apply(lambda x: x[:-4])
for prefix in df.prefix.unique():
assert prefix in par_to_file_dict.keys(),"missing prefix:{0}".\
format(prefix)
basis = pyemu.Matrix.from_binary(basis_file)
assert basis.shape[1] == arr_shape[0] * arr_shape[1]
arr_min = 1.0e-10 # a temp hack
#means = df.loc[df.name.apply(lambda x: x.endswith("mean")),:]
#print(means)
df = df.loc[df.name.apply(lambda x: not x.endswith("mean")),:]
for prefix,filename in par_to_file_dict.items():
factors = pyemu.Matrix.from_dataframe(df.loc[df.prefix==prefix,["new_val"]])
factors.autoalign = False
basis_prefix = basis[:factors.shape[0],:]
arr = (factors.T * basis_prefix).x.reshape(arr_shape)
#arr += means.loc[means.prefix==prefix,"new_val"].values
arr[arr<arr_min] = arr_min
np.savetxt(filename,arr,fmt="%20.8E")
def zero_order_tikhonov(pst, parbounds=True,par_groups=None,
reset=True):
"""setup preferred-value regularization in a pest control file.
Args:
pst (`pyemu.Pst`): the control file instance
parbounds (`bool`, optional): flag to weight the new prior information
equations according to parameter bound width - approx the KL
transform. Default is True
par_groups (`list`): a list of parameter groups to build PI equations for.
If None, all adjustable parameters are used. Default is None
reset (`bool`): a flag to remove any existing prior information equations
in the control file. Default is True
Example::
pst = pyemu.Pst("my.pst")
pyemu.helpers.zero_order_tikhonov(pst)
pst.write("my_reg.pst")
"""
if par_groups is None:
par_groups = pst.par_groups
pilbl, obgnme, weight, equation = [], [], [], []
for idx, row in pst.parameter_data.iterrows():
pt = row["partrans"].lower()
try:
pt = pt.decode()
except:
pass
if pt not in ["tied", "fixed"] and\
row["pargp"] in par_groups:
pilbl.append(row["parnme"])
weight.append(1.0)
ogp_name = "regul"+row["pargp"]
obgnme.append(ogp_name[:12])
parnme = row["parnme"]
parval1 = row["parval1"]
if pt == "log":
parnme = "log(" + parnme + ")"
parval1 = np.log10(parval1)
eq = "1.0 * " + parnme + " ={0:15.6E}".format(parval1)
equation.append(eq)
if reset:
pst.prior_information = pd.DataFrame({"pilbl": pilbl,
"equation": equation,
"obgnme": obgnme,
"weight": weight})
else:
pi = pd.DataFrame({"pilbl": pilbl,
"equation": equation,
"obgnme": obgnme,
"weight": weight})
pst.prior_information = pst.prior_information.append(pi)
if parbounds:
_regweight_from_parbound(pst)
if pst.control_data.pestmode == "estimation":
pst.control_data.pestmode = "regularization"
def _regweight_from_parbound(pst):
"""sets regularization weights from parameter bounds
which approximates the KL expansion. Called by
zero_order_tikhonov().
"""
pst.parameter_data.index = pst.parameter_data.parnme
pst.prior_information.index = pst.prior_information.pilbl
for idx, parnme in enumerate(pst.prior_information.pilbl):
if parnme in pst.parameter_data.index:
row = pst.parameter_data.loc[parnme, :]
lbnd,ubnd = row["parlbnd"], row["parubnd"]
if row["partrans"].lower() == "log":
weight = 1.0 / (np.log10(ubnd) - np.log10(lbnd))
else:
weight = 1.0 / (ubnd - lbnd)
pst.prior_information.loc[parnme, "weight"] = weight
else:
print("prior information name does not correspond" +\
" to a parameter: " + str(parnme))
def first_order_pearson_tikhonov(pst,cov,reset=True,abs_drop_tol=1.0e-3):
"""setup preferred-difference regularization from a covariance matrix.
Args:
pst (`pyemu.Pst`): the PEST control file
cov (`pyemu.Cov`): a covariance matrix instance with
some or all of the parameters listed in `pst`.
reset (`bool`): a flag to remove any existing prior information equations
in the control file. Default is True
abs_drop_tol (`float`, optional): tolerance to control how many pi equations
are written. If the absolute value of the Pearson CC is less than
abs_drop_tol, the prior information equation will not be included in
the control file.
Note:
The weights on the prior information equations are the Pearson
correlation coefficients implied by covariance matrix.
Example::
pst = pyemu.Pst("my.pst")
cov = pyemu.Cov.from_ascii("my.cov")
pyemu.helpers.first_order_pearson_tikhonov(pst,cov)
pst.write("my_reg.pst")
"""
assert isinstance(cov,pyemu.Cov)
print("getting CC matrix")
cc_mat = cov.to_pearson()
#print(pst.parameter_data.dtypes)
try:
ptrans = pst.parameter_data.partrans.apply(lambda x:x.decode()).to_dict()
except:
ptrans = pst.parameter_data.partrans.to_dict()
pi_num = pst.prior_information.shape[0] + 1
pilbl, obgnme, weight, equation = [], [], [], []
sadj_names = set(pst.adj_par_names)
print("processing")
for i,iname in enumerate(cc_mat.row_names):
if iname not in sadj_names:
continue
for j,jname in enumerate(cc_mat.row_names[i+1:]):
if jname not in sadj_names:
continue
#print(i,iname,i+j+1,jname)
cc = cc_mat.x[i,j+i+1]
if cc < abs_drop_tol:
continue
pilbl.append("pcc_{0}".format(pi_num))
iiname = str(iname)
if str(ptrans[iname]) == "log":
iiname = "log("+iname+")"
jjname = str(jname)
if str(ptrans[jname]) == "log":
jjname = "log("+jname+")"
equation.append("1.0 * {0} - 1.0 * {1} = 0.0".\
format(iiname,jjname))
weight.append(cc)
obgnme.append("regul_cc")
pi_num += 1
df = pd.DataFrame({"pilbl": pilbl,"equation": equation,
"obgnme": obgnme,"weight": weight})
df.index = df.pilbl
if reset:
pst.prior_information = df
else:
pst.prior_information = pst.prior_information.append(df)
if pst.control_data.pestmode == "estimation":
pst.control_data.pestmode = "regularization"
def simple_tpl_from_pars(parnames, tplfilename='model.input.tpl'):
"""Make a simple template file from a list of parameter names.
Args:
parnames ([`str`]): list of parameter names to put in the
new template file
tplfilename (`str`): Name of the template file to create. Default
is "model.input.tpl"
Note:
writes a file `tplfilename` with each parameter name in `parnames` on a line
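    Example (an illustrative sketch; the parameter names are placeholders)::

        pyemu.helpers.simple_tpl_from_pars(["par1","par2","par3"],
                                           tplfilename="model.input.tpl")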
"""
with open(tplfilename, 'w') as ofp:
ofp.write('ptf ~\n')
[ofp.write('~{0:^12}~\n'.format(cname)) for cname in parnames]
def simple_ins_from_obs(obsnames, insfilename='model.output.ins'):
"""write a simple instruction file that reads the values named
in obsnames in order, one per line from a model output file
Args:
obsnames (`str`): list of observation names to put in the
new instruction file
insfilename (`str`): the name of the instruction file to
create. Default is "model.output.ins"
Note:
writes a file `insfilename` with each observation read off
of a single line
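    Example (an illustrative sketch; the observation names are placeholders)::

        pyemu.helpers.simple_ins_from_obs(["obs1","obs2"],
                                          insfilename="model.output.ins")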
"""
with open(insfilename, 'w') as ofp:
ofp.write('pif ~\n')
[ofp.write('!{0}!\n'.format(cob)) for cob in obsnames]
def pst_from_parnames_obsnames(parnames, obsnames,
tplfilename='model.input.tpl', insfilename='model.output.ins'):
"""Creates a Pst object from a list of parameter names and a list of observation names.
Args:
parnames (`str`): list of parameter names
obsnames (`str`): list of observation names
tplfilename (`str`): template filename. Default is "model.input.tpl"
insfilename (`str`): instruction filename. Default is "model.output.ins"
Returns:
`pyemu.Pst`: the generic control file
"""
simple_tpl_from_pars(parnames, tplfilename)
simple_ins_from_obs(obsnames, insfilename)
modelinputfilename = tplfilename.replace('.tpl','')
modeloutputfilename = insfilename.replace('.ins','')
return pyemu.Pst.from_io_files(tplfilename, modelinputfilename, insfilename, modeloutputfilename)
def read_pestpp_runstorage(filename,irun=0,with_metadata=False):
"""read pars and obs from a specific run in a pest++ serialized
run storage file into dataframes.
Args:
filename (`str`): the name of the run storage file
irun (`int`): the run id to process. If 'all', then all runs are
read. Default is 0
with_metadata (`bool`): flag to return run stats and info txt as well
Returns:
tuple containing
- **pandas.DataFrame**: parameter information
- **pandas.DataFrame**: observation information
- **pandas.DataFrame**: optionally run status and info txt.
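    Example (an illustrative sketch; the run storage file name is a placeholder)::

        par_df,obs_df = pyemu.helpers.read_pestpp_runstorage("my.rnj",irun=0)
        par_df,obs_df,meta = pyemu.helpers.read_pestpp_runstorage("my.rnj",irun="all",
                                                                  with_metadata=True)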
"""
header_dtype = np.dtype([("n_runs",np.int64),("run_size",np.int64),("p_name_size",np.int64),
("o_name_size",np.int64)])
try:
irun = int(irun)
except:
if irun.lower() == "all":
irun = irun.lower()
else:
raise Exception("unrecognized 'irun': should be int or 'all', not '{0}'".
format(irun))
def status_str(r_status):
if r_status == 0:
return "not completed"
if r_status == 1:
return "completed"
if r_status == -100:
return "canceled"
else:
return "failed"
assert os.path.exists(filename)
f = open(filename,"rb")
header = np.fromfile(f,dtype=header_dtype,count=1)
p_name_size,o_name_size = header["p_name_size"][0],header["o_name_size"][0]
par_names = struct.unpack('{0}s'.format(p_name_size),
f.read(p_name_size))[0].strip().lower().decode().split('\0')[:-1]
obs_names = struct.unpack('{0}s'.format(o_name_size),
f.read(o_name_size))[0].strip().lower().decode().split('\0')[:-1]
n_runs,run_size = header["n_runs"][0],header["run_size"][0]
run_start = f.tell()
def _read_run(irun):
f.seek(run_start + (irun * run_size))
r_status = np.fromfile(f, dtype=np.int8, count=1)
info_txt = struct.unpack("41s", f.read(41))[0].strip().lower().decode()
par_vals = np.fromfile(f, dtype=np.float64, count=len(par_names) + 1)[1:]
obs_vals = np.fromfile(f, dtype=np.float64, count=len(obs_names) + 1)[:-1]
par_df = pd.DataFrame({"parnme": par_names, "parval1": par_vals})
par_df.index = par_df.pop("parnme")
obs_df = pd.DataFrame({"obsnme": obs_names, "obsval": obs_vals})
obs_df.index = obs_df.pop("obsnme")
return r_status,info_txt,par_df,obs_df
if irun == "all":
par_dfs,obs_dfs = [],[]
r_stats, txts = [],[]
for irun in range(n_runs):
#print(irun)
r_status, info_txt, par_df, obs_df = _read_run(irun)
par_dfs.append(par_df)
obs_dfs.append(obs_df)
r_stats.append(r_status)
txts.append(info_txt)
par_df = pd.concat(par_dfs,axis=1).T
par_df.index = np.arange(n_runs)
obs_df = pd.concat(obs_dfs, axis=1).T
obs_df.index = np.arange(n_runs)
meta_data = pd.DataFrame({"r_status":r_stats,"info_txt":txts})
meta_data.loc[:,"status"] = meta_data.r_status.apply(status_str)
else:
assert irun <= n_runs
r_status,info_txt,par_df,obs_df = _read_run(irun)
meta_data = pd.DataFrame({"r_status": [r_status], "info_txt": [info_txt]})
meta_data.loc[:, "status"] = meta_data.r_status.apply(status_str)
f.close()
if with_metadata:
return par_df,obs_df,meta_data
else:
return par_df,obs_df
def jco_from_pestpp_runstorage(rnj_filename,pst_filename):
""" read pars and obs from a pest++ serialized run storage
file (e.g., .rnj) and return jacobian matrix instance
Args:
rnj_filename (`str`): the name of the run storage file
pst_filename (`str`): the name of the pst file
Note:
This can then be passed to Jco.to_binary or Jco.to_coo, etc., to write jco
file in a subsequent step to avoid memory resource issues associated
with very large problems.
Returns:
`pyemu.Jco`: a jacobian matrix constructed from the run results and
pest control file information.
TODO:
Check rnj file contains transformed par vals (i.e., in model input space)
Currently only returns pyemu.Jco; doesn't write jco file due to memory
issues associated with very large problems
Compare rnj and jco from Freyberg problem in autotests
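    Example (an illustrative sketch; file names are placeholders)::

        jco = pyemu.helpers.jco_from_pestpp_runstorage("my.rnj","my.pst")
        jco.to_binary("my.jco")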
"""
header_dtype = np.dtype([("n_runs",np.int64),("run_size",np.int64),("p_name_size",np.int64),
("o_name_size",np.int64)])
pst = pyemu.Pst(pst_filename)
par = pst.parameter_data
log_pars = set(par.loc[par.partrans=="log","parnme"].values)
with open(rnj_filename,'rb') as f:
header = np.fromfile(f,dtype=header_dtype,count=1)
try:
base_par,base_obs = read_pestpp_runstorage(rnj_filename,irun=0)
except:
raise Exception("couldn't get base run...")
par = par.loc[base_par.index,:]
li = base_par.index.map(lambda x: par.loc[x,"partrans"]=="log")
base_par.loc[li] = base_par.loc[li].apply(np.log10)
jco_cols = {}
for irun in range(1,int(header["n_runs"])):
par_df,obs_df = read_pestpp_runstorage(rnj_filename,irun=irun)
par_df.loc[li] = par_df.loc[li].apply(np.log10)
obs_diff = base_obs - obs_df
par_diff = base_par - par_df
# check only one non-zero element per col(par)
if len(par_diff[par_diff.parval1 != 0]) > 1:
raise Exception("more than one par diff - looks like the file wasn't created during jco filling...")
parnme = par_diff[par_diff.parval1 != 0].index[0]
parval = par_diff.parval1.loc[parnme]
# derivatives
jco_col = obs_diff / parval
# some tracking, checks
print("processing par {0}: {1}...".format(irun, parnme))
print("%nzsens: {0}%...".format((jco_col[abs(jco_col.obsval)>1e-8].shape[0] / jco_col.shape[0])*100.))
jco_cols[parnme] = jco_col.obsval
jco_cols = pd.DataFrame.from_records(data=jco_cols, index=list(obs_diff.index.values))
jco_cols = pyemu.Jco.from_dataframe(jco_cols)
# write # memory considerations important here for very large matrices - break into chunks...
#jco_fnam = "{0}".format(filename[:-4]+".jco")
#jco_cols.to_binary(filename=jco_fnam, droptol=None, chunk=None)
return jco_cols
def parse_dir_for_io_files(d):
""" find template/input file pairs and instruction file/output file
pairs by extension.
Args:
d (`str`): directory to search for interface files
Note:
the return values from this function can be passed straight to
`pyemu.Pst.from_io_files()` classmethod constructor. Assumes the
template file names are <input_file>.tpl and instruction file names
are <output_file>.ins.
Returns:
tuple containing
- **[`str`]**: list of template files in d
- **[`str`]**: list of input files in d
- **[`str`]**: list of instruction files in d
- **[`str`]**: list of output files in d
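    Example (an illustrative sketch that passes the results straight to the
    `Pst.from_io_files()` constructor)::

        tpl_files,in_files,ins_files,out_files = pyemu.helpers.parse_dir_for_io_files(".")
        pst = pyemu.Pst.from_io_files(tpl_files,in_files,ins_files,out_files)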
"""
files = os.listdir(d)
tpl_files = [f for f in files if f.endswith(".tpl")]
in_files = [f.replace(".tpl","") for f in tpl_files]
ins_files = [f for f in files if f.endswith(".ins")]
out_files = [f.replace(".ins","") for f in ins_files]
return tpl_files,in_files,ins_files,out_files
def pst_from_io_files(tpl_files, in_files, ins_files, out_files,
pst_filename=None, pst_path=None):
""" create a Pst instance from model interface files.
Args:
tpl_files ([`str`]): list of template file names
in_files ([`str`]): list of model input file names (pairs with template files)
ins_files ([`str`]): list of instruction file names
out_files ([`str`]): list of model output file names (pairs with instruction files)
pst_filename (`str`): name of control file to write. If None, no file is written.
Default is None
pst_path (`str`): the path to append to the template_file and in_file in the control file. If
not None, then any existing path in front of the template or in file is split off
and pst_path is prepended. If python is being run in a directory other than where the control
file will reside, it is useful to pass `pst_path` as `.`. Default is None
Returns:
`Pst`: new control file instance with parameter and observation names
        found in `tpl_files` and `ins_files`, respectively.
Note:
calls `pyemu.helpers.pst_from_io_files()`
Assigns generic values for parameter info. Tries to use INSCHEK
to set somewhat meaningful observation values
        all file paths are relative to where python is running.
TODO:
add pst_path option
make in_files and out_files optional
Example::
tpl_files = ["my.tpl"]
in_files = ["my.in"]
ins_files = ["my.ins"]
out_files = ["my.out"]
pst = pyemu.Pst.from_io_files(tpl_files,in_files,ins_files,out_files)
pst.control_data.noptmax = 0
        pst.write("my.pst")
"""
par_names = set()
if not isinstance(tpl_files,list):
tpl_files = [tpl_files]
if not isinstance(in_files,list):
in_files = [in_files]
assert len(in_files) == len(tpl_files),"len(in_files) != len(tpl_files)"
for tpl_file in tpl_files:
assert os.path.exists(tpl_file),"template file not found: "+str(tpl_file)
#new_names = [name for name in pyemu.pst_utils.parse_tpl_file(tpl_file) if name not in par_names]
#par_names.extend(new_names)
new_names = pyemu.pst_utils.parse_tpl_file(tpl_file)
par_names.update(new_names)
if not isinstance(ins_files,list):
ins_files = [ins_files]
if not isinstance(out_files,list):
out_files = [out_files]
    assert len(ins_files) == len(out_files),"len(ins_files) != len(out_files)"
obs_names = []
for ins_file in ins_files:
assert os.path.exists(ins_file),"instruction file not found: "+str(ins_file)
obs_names.extend(pyemu.pst_utils.parse_ins_file(ins_file))
new_pst = pyemu.pst_utils.generic_pst(list(par_names),list(obs_names))
if "window" in platform.platform().lower() and pst_path == ".":
pst_path = ''
new_pst.instruction_files = ins_files
new_pst.output_files = out_files
    # try to run inschek to find the observation values
pyemu.pst_utils.try_process_output_pst(new_pst)
if pst_path is None:
new_pst.template_files = tpl_files
new_pst.input_files = in_files
else:
new_pst.template_files = [os.path.join(
pst_path, os.path.split(tpl_file)[-1]) for tpl_file in tpl_files]
new_pst.input_files = [os.path.join(
pst_path, os.path.split(in_file)[-1]) for in_file in in_files]
# now set the true path location to instruction files and output files
new_pst.instruction_files = [os.path.join(
pst_path, os.path.split(ins_file)[-1]) for ins_file in ins_files]
new_pst.output_files = [os.path.join(
pst_path, os.path.split(out_file)[-1]) for out_file in out_files]
if pst_filename:
new_pst.write(pst_filename,update_regul=True)
return new_pst
wildass_guess_par_bounds_dict = {"hk":[0.01,100.0],"vka":[0.1,10.0],
"sy":[0.25,1.75],"ss":[0.1,10.0],
"cond":[0.01,100.0],"flux":[0.25,1.75],
"rech":[0.9,1.1],"stage":[0.9,1.1],
}
class PstFromFlopyModel(object):
""" a monster helper class to setup a complex PEST interface around
an existing MODFLOW-2005-family model.
Args:
model (`flopy.mbase`): a loaded flopy model instance. If model is an str, it is treated as a
MODFLOW nam file (requires org_model_ws)
new_model_ws (`str`): a directory where the new version of MODFLOW input files and PEST(++)
files will be written
org_model_ws (`str`): directory to existing MODFLOW model files. Required if model argument
is an str. Default is None
pp_props ([[`str`,[`int`]]]): pilot point multiplier parameters for grid-based properties.
A nested list of grid-scale model properties to parameterize using
name, iterable pairs. For 3D properties, the iterable is zero-based
layer indices. For example, ["lpf.hk",[0,1,2,]] would setup pilot point multiplier
parameters for layer property file horizontal hydraulic conductivity for model
layers 1,2, and 3. For time-varying properties (e.g. recharge), the
iterable is for zero-based stress period indices. For example, ["rch.rech",[0,4,10,15]]
would setup pilot point multiplier parameters for recharge for stress
period 1,5,11,and 16.
const_props ([[`str`,[`int`]]]): constant (uniform) multiplier parameters for grid-based properties.
A nested list of grid-scale model properties to parameterize using
name, iterable pairs. For 3D properties, the iterable is zero-based
layer indices. For example, ["lpf.hk",[0,1,2,]] would setup constant (uniform) multiplier
parameters for layer property file horizontal hydraulic conductivity for model
layers 1,2, and 3. For time-varying properties (e.g. recharge), the
iterable is for zero-based stress period indices. For example, ["rch.rech",[0,4,10,15]]
would setup constant (uniform) multiplier parameters for recharge for stress
period 1,5,11,and 16.
temporal_list_props ([[`str`,[`int`]]]): list-type input stress-period level multiplier parameters.
A nested list of list-type input elements to parameterize using
name, iterable pairs. The iterable is zero-based stress-period indices.
For example, to setup multipliers for WEL flux and for RIV conductance,
temporal_list_props = [["wel.flux",[0,1,2]],["riv.cond",None]] would setup
multiplier parameters for well flux for stress periods 1,2 and 3 and
would setup one single river conductance multiplier parameter that is applied
to all stress periods
spatial_list_props ([[`str`,[`int`]]]): list-type input for spatial multiplier parameters.
A nested list of list-type elements to parameterize using
            names (e.g. [["riv.cond",0],["wel.flux",1]]) to set up cell-based parameters for
each list-type element listed. These multiplier parameters are applied across
all stress periods. For this to work, there must be the same number of entries
for all stress periods. If more than one list element of the same type is in a single
cell, only one parameter is used to multiply all lists in the same cell.
grid_props ([[`str`,[`int`]]]): grid-based (every active model cell) multiplier parameters.
A nested list of grid-scale model properties to parameterize using
name, iterable pairs. For 3D properties, the iterable is zero-based
layer indices (e.g., ["lpf.hk",[0,1,2,]] would setup a multiplier
parameter for layer property file horizontal hydraulic conductivity for model
layers 1,2, and 3 in every active model cell). For time-varying properties (e.g. recharge), the
iterable is for zero-based stress period indices. For example, ["rch.rech",[0,4,10,15]]
would setup grid-based multiplier parameters in every active model cell
for recharge for stress period 1,5,11,and 16.
sfr_pars (`bool`): setup parameters for the stream flow routing modflow package.
If list is passed it defines the parameters to set up.
        temporal_sfr_pars (`bool`): flag to include stress-period level spatially-global multiplier parameters in addition to
the spatially-discrete `sfr_pars`. Requires `sfr_pars` to be passed. Default is False
grid_geostruct (`pyemu.geostats.GeoStruct`): the geostatistical structure to build the prior parameter covariance matrix
elements for grid-based parameters. If None, a generic GeoStruct is created
using an "a" parameter that is 10 times the max cell size. Default is None
pp_space (`int`): number of grid cells between pilot points. If None, use the default
in pyemu.pp_utils.setup_pilot_points_grid. Default is None
zone_props ([[`str`,[`int`]]]): zone-based multiplier parameters.
A nested list of zone-based model properties to parameterize using
name, iterable pairs. For 3D properties, the iterable is zero-based
layer indices (e.g., ["lpf.hk",[0,1,2,]] would setup a multiplier
parameter for layer property file horizontal hydraulic conductivity for model
layers 1,2, and 3 for unique zone values in the ibound array.
For time-varying properties (e.g. recharge), the iterable is for
zero-based stress period indices. For example, ["rch.rech",[0,4,10,15]]
would setup zone-based multiplier parameters for recharge for stress
period 1,5,11,and 16.
pp_geostruct (`pyemu.geostats.GeoStruct`): the geostatistical structure to use for building the prior parameter
covariance matrix for pilot point parameters. If None, a generic
GeoStruct is created using pp_space and grid-spacing information.
Default is None
par_bounds_dict (`dict`): a dictionary of model property/boundary condition name, upper-lower bound pairs.
For example, par_bounds_dict = {"hk":[0.01,100.0],"flux":[0.5,2.0]} would
set the bounds for horizontal hydraulic conductivity to
0.001 and 100.0 and set the bounds for flux parameters to 0.5 and
2.0. For parameters not found in par_bounds_dict,
`pyemu.helpers.wildass_guess_par_bounds_dict` is
used to set somewhat meaningful bounds. Default is None
        temporal_list_geostruct (`pyemu.geostats.GeoStruct`): the geostatistical structure to
build the prior parameter covariance matrix
for time-varying list-type multiplier parameters. This GeoStruct
            expresses the time correlation so that the 'a' parameter is the length of
time that boundary condition multiplier parameters are correlated across.
If None, then a generic GeoStruct is created that uses an 'a' parameter
of 3 stress periods. Default is None
        spatial_list_geostruct (`pyemu.geostats.GeoStruct`): the geostatistical structure to
build the prior parameter covariance matrix
for spatially-varying list-type multiplier parameters.
If None, a generic GeoStruct is created using an "a" parameter that
is 10 times the max cell size. Default is None.
remove_existing (`bool`): a flag to remove an existing new_model_ws directory. If False and
new_model_ws exists, an exception is raised. If True and new_model_ws
exists, the directory is destroyed - user beware! Default is False.
k_zone_dict (`dict`): a dictionary of zero-based layer index, zone array pairs.
e.g. {lay: np.2darray} Used to
override using ibound zones for zone-based parameterization. If None,
use ibound values greater than zero as zones. Alternatively a dictionary of dictionaries
can be passed to allow different zones to be defined for different parameters.
e.g. {"upw.hk" {lay: np.2darray}, "extra.rc11" {lay: np.2darray}}
or {"hk" {lay: np.2darray}, "rc11" {lay: np.2darray}}
use_pp_zones (`bool`): a flag to use ibound zones (or k_zone_dict, see above) as pilot
point zones. If False, ibound values greater than zero are treated as
a single zone for pilot points. Default is False
        obssim_smp_pairs ([[`str`,`str`]]): a list of observed-simulated PEST-type SMP file
pairs to get observations
from and include in the control file. Default is []
        external_tpl_in_pairs ([[`str`,`str`]]): a list of existing template file, model input
file pairs to parse parameters
from and include in the control file. Default is []
        external_ins_out_pairs ([[`str`,`str`]]): a list of existing instruction file,
model output file pairs to parse
observations from and include in the control file. Default is []
extra_pre_cmds ([`str`]): a list of preprocessing commands to add to the forward_run.py script
commands are executed with os.system() within forward_run.py. Default is None.
redirect_forward_output (`bool`): flag for whether to redirect forward model output to text files (True) or
allow model output to be directed to the screen (False). Default is True
extra_post_cmds ([`str`]): a list of post-processing commands to add to the forward_run.py script.
Commands are executed with os.system() within forward_run.py. Default is None.
tmp_files ([`str`]): a list of temporary files that should be removed at the start of the forward
run script. Default is [].
model_exe_name (`str`): binary name to run modflow. If None, a default from flopy is used,
which is dangerous because of the non-standard binary names
(e.g. MODFLOW-NWT_x64, MODFLOWNWT, mfnwt, etc). Default is None.
build_prior (`bool`): flag to build prior covariance matrix. Default is True
sfr_obs (`bool`): flag to include observations of flow and aquifer exchange from
the sfr ASCII output file
hfb_pars (`bool`): add HFB parameters. uses pyemu.gw_utils.write_hfb_template(). the resulting
HFB pars have parval1 equal to the values in the original file and use the
spatial_list_geostruct to build geostatistical covariates between parameters
kl_props ([[`str`,[`int`]]]): karhunen-loeve based multiplier parameters.
A nested list of KL-based model properties to parameterize using
name, iterable pairs. For 3D properties, the iterable is zero-based
layer indices (e.g., ["lpf.hk",[0,1,2,]] would setup a multiplier
parameter for layer property file horizontal hydraulic conductivity for model
layers 1,2, and 3 for unique zone values in the ibound array.
For time-varying properties (e.g. recharge), the iterable is for
zero-based stress period indices. For example, ["rch.rech",[0,4,10,15]]
would setup zone-based multiplier parameters for recharge for stress
period 1,5,11,and 16.
kl_num_eig (`int`): the number of KL-based eigenvector multiplier parameters to use for each
KL parameter set. default is 100
kl_geostruct (`pyemu.geostats.Geostruct`): the geostatistical structure
to build the prior parameter covariance matrix
elements for KL-based parameters. If None, a generic GeoStruct is created
using an "a" parameter that is 10 times the max cell size. Default is None
Note:
Setup up multiplier parameters for an existing MODFLOW model.
Does all kinds of coolness like building a
meaningful prior, assigning somewhat meaningful parameter groups and
bounds, writes a forward_run.py script with all the calls need to
implement multiplier parameters, run MODFLOW and post-process.
Works a lot better if TEMPCHEK, INSCHEK and PESTCHEK are available in the
system path variable
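    Example (a minimal sketch; the model name, workspace names and the
    parameterization choices are hypothetical)::

        m = flopy.modflow.Modflow.load("my.nam",model_ws="original_model")
        helper = pyemu.helpers.PstFromFlopyModel(m,new_model_ws="template",
                                                 pp_props=[["lpf.hk",[0,1,2]]],
                                                 temporal_list_props=[["wel.flux",[0,1,2]]],
                                                 remove_existing=True)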
"""
def __init__(self,model,new_model_ws,org_model_ws=None,pp_props=[],const_props=[],
temporal_bc_props=[],temporal_list_props=[],grid_props=[],
grid_geostruct=None,pp_space=None,
zone_props=[],pp_geostruct=None,par_bounds_dict=None,sfr_pars=False, temporal_sfr_pars=False,
temporal_list_geostruct=None,remove_existing=False,k_zone_dict=None,
mflist_waterbudget=True,mfhyd=True,hds_kperk=[],use_pp_zones=False,
obssim_smp_pairs=None,external_tpl_in_pairs=None,
external_ins_out_pairs=None,extra_pre_cmds=None,
extra_model_cmds=None,extra_post_cmds=None,redirect_forward_output=True,
tmp_files=None,model_exe_name=None,build_prior=True,
sfr_obs=False,
spatial_bc_props=[],spatial_list_props=[],spatial_list_geostruct=None,
hfb_pars=False, kl_props=None,kl_num_eig=100, kl_geostruct=None):
self.logger = pyemu.logger.Logger("PstFromFlopyModel.log")
self.log = self.logger.log
self.logger.echo = True
self.zn_suffix = "_zn"
self.gr_suffix = "_gr"
self.pp_suffix = "_pp"
self.cn_suffix = "_cn"
self.kl_suffix = "_kl"
self.arr_org = "arr_org"
self.arr_mlt = "arr_mlt"
self.list_org = "list_org"
self.list_mlt = "list_mlt"
self.forward_run_file = "forward_run.py"
self.remove_existing = remove_existing
self.external_tpl_in_pairs = external_tpl_in_pairs
self.external_ins_out_pairs = external_ins_out_pairs
self._setup_model(model, org_model_ws, new_model_ws)
self._add_external()
self.arr_mult_dfs = []
self.par_bounds_dict = par_bounds_dict
self.pp_props = pp_props
self.pp_space = pp_space
self.pp_geostruct = pp_geostruct
self.use_pp_zones = use_pp_zones
self.const_props = const_props
self.grid_props = grid_props
self.grid_geostruct = grid_geostruct
self.zone_props = zone_props
self.kl_props = kl_props
self.kl_geostruct = kl_geostruct
self.kl_num_eig = kl_num_eig
if len(temporal_bc_props) > 0:
if len(temporal_list_props) > 0:
self.logger.lraise("temporal_bc_props and temporal_list_props. "+\
"temporal_bc_props is deprecated and replaced by temporal_list_props")
self.logger.warn("temporal_bc_props is deprecated and replaced by temporal_list_props")
temporal_list_props = temporal_bc_props
if len(spatial_bc_props) > 0:
if len(spatial_list_props) > 0:
self.logger.lraise("spatial_bc_props and spatial_list_props. "+\
"spatial_bc_props is deprecated and replaced by spatial_list_props")
self.logger.warn("spatial_bc_props is deprecated and replaced by spatial_list_props")
spatial_list_props = spatial_bc_props
self.temporal_list_props = temporal_list_props
self.temporal_list_geostruct = temporal_list_geostruct
if self.temporal_list_geostruct is None:
v = pyemu.geostats.ExpVario(contribution=1.0,a=180.0) # 180 correlation length
self.temporal_list_geostruct = pyemu.geostats.GeoStruct(variograms=v,name="temporal_list_geostruct")
self.spatial_list_props = spatial_list_props
self.spatial_list_geostruct = spatial_list_geostruct
if self.spatial_list_geostruct is None:
dist = 10 * float(max(self.m.dis.delr.array.max(),
self.m.dis.delc.array.max()))
v = pyemu.geostats.ExpVario(contribution=1.0, a=dist)
self.spatial_list_geostruct = pyemu.geostats.GeoStruct(variograms=v,name="spatial_list_geostruct")
self.obssim_smp_pairs = obssim_smp_pairs
self.hds_kperk = hds_kperk
self.sfr_obs = sfr_obs
self.frun_pre_lines = []
self.frun_model_lines = []
self.frun_post_lines = []
self.tmp_files = []
self.extra_forward_imports = []
if tmp_files is not None:
if not isinstance(tmp_files,list):
tmp_files = [tmp_files]
self.tmp_files.extend(tmp_files)
if k_zone_dict is None:
self.k_zone_dict = {k: self.m.bas6.ibound[k].array for k in np.arange(self.m.nlay)}
else:
# check if k_zone_dict is a dictionary of dictionaries
if np.all([isinstance(v, dict) for v in k_zone_dict.values()]):
# loop over outer keys
for par_key in k_zone_dict.keys():
for k, arr in k_zone_dict[par_key].items():
if k not in np.arange(self.m.nlay):
self.logger.lraise("k_zone_dict for par {1}, layer index not in nlay:{0}".
format(k, par_key))
if arr.shape != (self.m.nrow, self.m.ncol):
self.logger.lraise("k_zone_dict arr for k {0} for par{2} has wrong shape:{1}".
format(k, arr.shape, par_key))
else:
for k, arr in k_zone_dict.items():
if k not in np.arange(self.m.nlay):
self.logger.lraise("k_zone_dict layer index not in nlay:{0}".
format(k))
if arr.shape != (self.m.nrow, self.m.ncol):
self.logger.lraise("k_zone_dict arr for k {0} has wrong shape:{1}".
format(k, arr.shape))
self.k_zone_dict = k_zone_dict
# add any extra commands to the forward run lines
for alist,ilist in zip([self.frun_pre_lines,self.frun_model_lines,self.frun_post_lines],
[extra_pre_cmds,extra_model_cmds,extra_post_cmds]):
if ilist is None:
continue
if not isinstance(ilist,list):
ilist = [ilist]
for cmd in ilist:
self.logger.statement("forward_run line:{0}".format(cmd))
alist.append("pyemu.os_utils.run('{0}')\n".format(cmd))
# add the model call
if model_exe_name is None:
model_exe_name = self.m.exe_name
            self.logger.warn("using flopy binary to execute the model:{0}".format(model_exe_name))
if redirect_forward_output:
line = "pyemu.os_utils.run('{0} {1} 1>{1}.stdout 2>{1}.stderr')".format(model_exe_name,self.m.namefile)
else:
line = "pyemu.os_utils.run('{0} {1} ')".format(model_exe_name, self.m.namefile)
self.logger.statement("forward_run line:{0}".format(line))
self.frun_model_lines.append(line)
self.tpl_files,self.in_files = [],[]
self.ins_files,self.out_files = [],[]
self._setup_mult_dirs()
self.mlt_files = []
self.org_files = []
self.m_files = []
self.mlt_counter = {}
self.par_dfs = {}
self.mlt_dfs = []
self._setup_list_pars()
self._setup_array_pars()
if not sfr_pars and temporal_sfr_pars:
self.logger.lraise("use of `temporal_sfr_pars` requires `sfr_pars`")
if sfr_pars:
if isinstance(sfr_pars, str):
sfr_pars = [sfr_pars]
if isinstance(sfr_pars, list):
self._setup_sfr_pars(sfr_pars, include_temporal_pars=temporal_sfr_pars)
else:
self._setup_sfr_pars(include_temporal_pars=temporal_sfr_pars)
if hfb_pars:
self._setup_hfb_pars()
self.mflist_waterbudget = mflist_waterbudget
self.mfhyd = mfhyd
self._setup_observations()
self.build_pst()
if build_prior:
self.parcov = self.build_prior()
else:
self.parcov = None
self.log("saving intermediate _setup_<> dfs into {0}".
format(self.m.model_ws))
for tag,df in self.par_dfs.items():
df.to_csv(os.path.join(self.m.model_ws,"_setup_par_{0}_{1}.csv".
format(tag.replace(" ",'_'),self.pst_name)))
for tag,df in self.obs_dfs.items():
df.to_csv(os.path.join(self.m.model_ws,"_setup_obs_{0}_{1}.csv".
format(tag.replace(" ",'_'),self.pst_name)))
self.log("saving intermediate _setup_<> dfs into {0}".
format(self.m.model_ws))
self.logger.statement("all done")
def _setup_sfr_obs(self):
"""setup sfr ASCII observations"""
if not self.sfr_obs:
return
if self.m.sfr is None:
self.logger.lraise("no sfr package found...")
org_sfr_out_file = os.path.join(self.org_model_ws,"{0}.sfr.out".format(self.m.name))
if not os.path.exists(org_sfr_out_file):
self.logger.lraise("setup_sfr_obs() error: could not locate existing sfr out file: {0}".
format(org_sfr_out_file))
new_sfr_out_file = os.path.join(self.m.model_ws,os.path.split(org_sfr_out_file)[-1])
shutil.copy2(org_sfr_out_file,new_sfr_out_file)
seg_group_dict = None
if isinstance(self.sfr_obs,dict):
seg_group_dict = self.sfr_obs
df = pyemu.gw_utils.setup_sfr_obs(new_sfr_out_file,seg_group_dict=seg_group_dict,
model=self.m,include_path=True)
if df is not None:
self.obs_dfs["sfr"] = df
self.frun_post_lines.append("pyemu.gw_utils.apply_sfr_obs()")
def _setup_sfr_pars(self, par_cols=None, include_temporal_pars=None):
"""setup multiplier parameters for sfr segment data
Adding support for reachinput (and isfropt = 1)"""
assert self.m.sfr is not None, "can't find sfr package..."
if isinstance(par_cols, str):
par_cols = [par_cols]
reach_pars = False # default to False
seg_pars = True
par_dfs = {}
df = pyemu.gw_utils.setup_sfr_seg_parameters(
self.m, par_cols=par_cols,
include_temporal_pars=include_temporal_pars) # now just pass model
# self.par_dfs["sfr"] = df
if df.empty:
warnings.warn("No sfr segment parameters have been set up", PyemuWarning)
par_dfs["sfr"] = []
seg_pars = False
else:
par_dfs["sfr"] = [df] # may need df for both segs and reaches
self.tpl_files.append("sfr_seg_pars.dat.tpl")
self.in_files.append("sfr_seg_pars.dat")
if include_temporal_pars:
self.tpl_files.append("sfr_seg_temporal_pars.dat.tpl")
self.in_files.append("sfr_seg_temporal_pars.dat")
if self.m.sfr.reachinput:
# if include_temporal_pars:
# raise NotImplementedError("temporal pars is not set up for reach data style")
df = pyemu.gw_utils.setup_sfr_reach_parameters(self.m, par_cols=par_cols)
if df.empty:
warnings.warn("No sfr reach parameters have been set up", PyemuWarning)
else:
self.tpl_files.append("sfr_reach_pars.dat.tpl")
self.in_files.append("sfr_reach_pars.dat")
reach_pars = True
par_dfs["sfr"].append(df)
if len(par_dfs["sfr"]) > 0:
            self.par_dfs["sfr"] = pd.concat(par_dfs["sfr"])
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 26 16:02:22 2018
@author: joyce
"""
import pandas as pd
import numpy as np
import pymysql
from sklearn import linear_model
import time
from functools import wraps
config = {
'host': 'magiquant.mysql.rds.aliyuncs.com',
'port': 3306,
'user':'haoamc',
'passwd':'<PASSWORD>',
'db': 'quant'
}
def timer(function):
@wraps(function)
def function_timer(*args, **kwargs):
t0 = time.time()
result = function(*args, **kwargs)
t1 = time.time()
print ("Total time running %s: %s seconds" %(function.__name__, str(round((t1-t0), 2))))
return result
return function_timer
@timer
def get_stockdata_from_sql(mode,begin,end,name):
"""
    Get stock market data from SQL, including: [Open, High, Low, Close, Pctchg, Vol,
    Amount, total_shares, free_float_shares, Vwap]
"""
try:
conn = pymysql.connect(**config)
cursor = conn.cursor()
if mode == 0:
query = "SELECT stock_id,%s FROM stock_market_data WHERE trade_date='%s';"%(name,begin)
else:
query = "SELECT trade_date,stock_id,%s FROM stock_market_data WHERE trade_date >='%s' \
AND trade_date <= '%s';"%(name,begin,end)
cursor.execute(query)
date = pd.DataFrame(list(cursor.fetchall()))
        if mode == 0:
            date.columns = ['ID', name]
            date = date.set_index('ID')
        else:
            date.columns = ['date', 'ID', name]
            date = date.set_index('ID')
            # build a ['date','ID'] MultiIndex and drop the helper date column
            date = date.set_index([date['date'], date.index], drop=True)
            del date['date']
return date
finally:
if conn:
conn.close()
@timer
def get_indexdata_from_sql(mode,begin,end,name,index):
"""
    Get index market data from SQL, including: [open, high, low, close, pctchg]
"""
try:
conn = pymysql.connect(**config)
cursor = conn.cursor()
if mode == 0:
query = "SELECT stock_id,%s FROM index_market_data WHERE trade_date='%s' AND stock_id ='%s';"%(name,begin,index)
else:
query = "SELECT trade_date,stock_id,%s FROM index_market_data WHERE trade_date >='%s' \
AND trade_date <= '%s' AND stock_id ='%s';"%(name,begin,end,index)
cursor.execute(query)
date = pd.DataFrame(list(cursor.fetchall()))
        if mode == 0:
            date.columns = ['ID', name]
            date = date.set_index('ID')
        else:
            date.columns = ['date', 'ID', name]
            date = date.set_index('ID')
            # build a ['date','ID'] MultiIndex and drop the helper date column
            date = date.set_index([date['date'], date.index], drop=True)
            del date['date']
return date
finally:
if conn:
conn.close()
@timer
def get_tradedate(begin, end):
"""
get tradedate between begin date and end date
Params:
begin:
str,eg: '1999-01-01'
end:
str,eg: '2017-12-31'
Return:
pd.DataFrame
"""
try:
conn = pymysql.connect(**config)
cursor = conn.cursor()
query = "SELECT calendar_date FROM trade_calendar WHERE is_trade_day= 1 AND \
calendar_date>='" + begin + "' AND calendar_date<='" + end + "';"
cursor.execute(query)
date = pd.DataFrame(list(cursor.fetchall()))
return date
finally:
if conn:
conn.close()
def get_fama(begin,end,name,index):
"""
get fama factor from sql
Params:
begin:
str,eg:"1990-01-01"
end:
str:eg:"2017-12-31"
index:
str, index id ,eg :'000300.SH'
name:
the name of fama factors ['SMB','HML','MKT']
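    Example (illustrative):
        smb = get_fama('2016-01-01','2016-12-31','SMB','000300.SH')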
"""
try:
conn = pymysql.connect(**config)
cursor = conn.cursor()
query = "SELECT trade_date,%s FROM fama_factor WHERE \
stock_id = '%s' AND trade_date >= '%s' AND trade_date <= '%s';"\
%(name,index,begin,end)
cursor.execute(query)
data = pd.DataFrame(list(cursor.fetchall()))
data.columns = ['date',name]
return data
finally:
if conn:
conn.close()
@timer
def Corr(df,num):
"""
Params:
        df:
pd.DataFrame,multi-index = ['date','id']
num:
int
Return:
pd.DataFrame,multi-index = ['date','id']
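    Example (illustrative; ret1 and ret2 are hypothetical one-column frames
    sharing the same ['date','id'] MultiIndex):
        df = pd.concat([ret1,ret2],axis = 1,join = 'inner')
        corr20 = Corr(df,20)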
"""
df.columns = ['r1','r2']
df1 = df['r1']
df2 = df['r2']
df1_unstack = df1.unstack()
df2_unstack = df2.unstack()
corr = df1_unstack.rolling(num).corr(df2_unstack)
corr = corr.stack()
corr = pd.DataFrame(corr)
corr.columns = ['corr']
return corr
@timer
def Cov(df,num):
"""
Params:
        df:
pd.DataFrame,multi-index = ['date','id']
num:
int
Return:
pd.DataFrame,multi-index = ['date','id']
"""
df.columns = ['r1','r2']
df1 = df['r1']
df2 = df['r2']
df1_unstack = df1.unstack()
df2_unstack = df2.unstack()
corr = df1_unstack.rolling(num).cov(df2_unstack)
corr = corr.stack()
corr = pd.DataFrame(corr)
corr.columns = ['cov']
return corr
@timer
def Delta(df,num):
"""
Params:
df:
pd.DataFrame,multi-index = ['date','ID'],columns = ['alpha']
num:
int
Return:
pd.DataFrame,multi-inde = ['date','ID']
"""
df_unstack = df.unstack()
df_temp = df_unstack.shift(num)
df_temp1 = df_unstack - df_temp
df_final = df_temp1.stack()
return df_final
@timer
def Delay(df,num):
"""
Params:
df:
pd.DataFrame,multi-index = ['date','ID'],columns = ['alpha']
num:
int
Return:
pd.DataFrame,multi-index = ['date','ID']
"""
df_unstack = df.unstack()
df_temp = df_unstack.shift(num)
df_final = df_temp.stack()
return df_final
@timer
def Rank(df):
"""
Params:
df: pd.DataFrame,multi-index = ['date','ID'],columns = ['alpha']
Return:
pd.DataFrame,multi-index = ['date','ID'],columns = ['alpha']
"""
df = df.swaplevel(0,1)
df_mod = df.unstack()
df_rank = df_mod.rank(axis = 1)
df_final_temp = df_rank.stack()
    # swap the inner and outer index levels
df_final = df_final_temp.swaplevel(0,1)
return df_final
@timer
def Cross_max(df1,df2):
"""
Params:
df1:
pd.DataFrame,multi-index = ['date','ID']
df2:
pd.DataFrame,multi-index = ['date','ID']
"""
df = pd.concat([df1,df2],axis =1 ,join = 'inner')
df_max = np.max(df,axis = 1)
return df_max
@timer
def Cross_min(df1,df2):
"""
Params:
df1:
pd.DataFrame,multi-index = ['date','ID']
df2:
pd.DataFrame,multi-index = ['date','ID']
"""
df = pd.concat([df1,df2],axis =1 ,join = 'inner')
df_min = np.min(df,axis = 1)
return df_min
@timer
def Sum(df,num):
"""
Params:
df:
pd.DataFrame,multi-index = ['date','ID']
Returns:
df:
pd.DataFrame,multi-index = ['date','ID']
"""
df_unstack = df.unstack(level = 'ID')
df_temp = df_unstack.rolling(num).sum()
df_final = df_temp.stack()
return df_final
@timer
def Mean(df,num):
"""
Params:
df:
pd.DataFrame,multi-index = ['date','ID']
Returns:
df:
pd.DataFrame,multi-index = ['date','ID']
"""
df_unstack = df.unstack()
df_temp = df_unstack.rolling(num).mean()
df_final = df_temp.stack()
return df_final
@timer
def STD(df,num):
"""
Params:
df:
pd.DataFrame,multi-index = ['date','ID']
Returns:
df:
pd.DataFrame,multi-index = ['date','ID']
"""
df_unstack = df.unstack()
df_temp = df_unstack.rolling(num).std()
df_final = df_temp.stack()
return df_final
@timer
def TsRank(df,num):
"""
Params:
df:
pd.DataFrame,multi-index = ['date','ID']
num:
int
Returns:
df:
pd.DataFrame,multi-index = ['date','ID']
"""
df = df.swaplevel(0,1)
df_unstack = df.unstack()
date = df_unstack.index.tolist()
ts_rank = pd.DataFrame([])
for i in range(num,len(date)):
df = df_unstack.iloc[i-num:i,:]
df_rank = df.rank(axis = 0)
        ts_rank_temp = pd.DataFrame(df_rank.iloc[num-1,:])
"""This module contains auxiliary functions for the creation of tables in the main notebook."""
import json
import pandas as pd
import numpy as np
import statsmodels.api as sm_api
from auxiliary.auxiliary_tables import *
def create_table1(data):
"""
Creates Table 1.
"""
table1 = pd.crosstab(data.grade_group, [data.suba, data.group], colnames=['Experiment', 'Group'], margins=True, margins_name="Total")
table11 = table1.drop(index = "Total")
table12 = pd.crosstab(data.r_be_gene, [data.suba, data.group], margins=True, margins_name="Total")
table1 = table11.append(table12).rename(index={"F": "Female", "M": "Male"}, columns={0.0: "Basic-Savings", 1.0: "Tertiary"})
table1 = table1[[('Basic-Savings','Control'),('Basic-Savings','Basic'),('Basic-Savings','Savings'),('Tertiary','Control'),('Tertiary','Tertiary'),('Total', '')]]
table1 = table1.reindex(['Grades 6-8','Grades 9-10','Grade 11','Female','Male','Total'])
return table1
def style_specific_cell(x):
"""
Creates Colors for Table 2
"""
color1 = 'background-color: lightgreen'
color2 = 'background-color: lightcoral'
color3 = 'background-color: skyblue'
df1 = pd.DataFrame('', index=x.index, columns=x.columns)
df1.iloc[0, 1] = color1
df1.iloc[18, 2] = color1
df1.iloc[2, 2] = color2
df1.iloc[2, 3] = color2
df1.iloc[6, 3] = color2
df1.iloc[14, 1] = color3
df1.iloc[24, 3] = color3
return df1
def create_table2(data1, data2):
"""
Creates Table 2.
"""
x_sancristobal = data1[['T1_treat','T2_treat']]
x_sancristobal = sm_api.add_constant(x_sancristobal)
x_suba = data2['T3_treat']
x_suba = sm_api.add_constant(x_suba)
result_sancristobal = list()
result_suba = list()
T_Test = list()
Control_avg_bs = list()
Control_avg_t = list()
for i in ['s_teneviv_int','s_utilities','s_durables','s_infraest_hh','s_age_sorteo','s_sexo_int','s_yrs','s_single','s_edadhead','s_yrshead','s_tpersona','s_num18','s_estrato','s_puntaje','s_ingtotal']:
y_sancristobal = data1[i]
y_suba = data2[i]
reg_sancristobal = sm_api.OLS(y_sancristobal, x_sancristobal).fit(cov_type='cluster', cov_kwds={'groups': data1['school_code']})
result_sancristobal.append(round(reg_sancristobal.params, 2))
result_sancristobal.append(round(reg_sancristobal.bse, 2))
T_Test.append(round(reg_sancristobal.t_test('T1_treat=T2_treat').effect[0], 2))
T_Test.append(round(reg_sancristobal.t_test('T1_treat=T2_treat').sd[0][0], 2))
reg_suba = sm_api.OLS(y_suba, x_suba).fit(cov_type='cluster', cov_kwds={'groups': data2['school_code']})
result_suba.append(round(reg_suba.params, 2))
result_suba.append(round(reg_suba.bse, 2))
Control_avg_bs.append("%.2f" % round(data1.groupby(data1['control']).mean()[i][1], 2))
Control_avg_bs.append(round(data1.groupby(data1['control']).std()[i][1], 2))
Control_avg_t.append("%.2f" % round(data2.groupby(data2['control']).mean()[i][1], 2))
Control_avg_t.append(round(data2.groupby(data2['control']).std()[i][1], 2))
table21 = pd.DataFrame(result_sancristobal, index=['Possessions','Possessions SE','Utilities','Utilities SE','Durable Goods','Durable Goods SE','Physical Infrastructure','Physical Infrastructure SE','Age','Age SE','Gender','Gender SE','Years of Education','Years of Education SE','Single Head','Single Head SE','Age of Head','Age of Head SE','Years of ed., head','Years of ed., head SE','People in Household','People in Household SE','Member under 18','Member under 18 SE','Estrato','Estrato SE','SISBEN score','SISBEN score SE','Household income (1,000 pesos)','Household income (1,000 pesos) SE'])
table21.columns = ['Control average B-S', 'Basic-Control', 'Savings-Control']
table21['Control average B-S'] = Control_avg_bs
table21['Basic-Savings'] = T_Test
table22 = pd.DataFrame(result_suba, index=['Possessions','Possessions SE','Utilities','Utilities SE','Durable Goods','Durable Goods SE','Physical Infrastructure','Physical Infrastructure SE','Age','Age SE','Gender','Gender SE','Years of Education','Years of Education SE','Single Head','Single Head SE','Age of Head','Age of Head SE','Years of ed., head','Years of ed., head SE','People in Household','People in Household SE','Member under 18','Member under 18 SE','Estrato','Estrato SE','SISBEN score','SISBEN score SE','Household income (1,000 pesos)','Household income (1,000 pesos) SE'])
table22.columns = ['Control average T', 'Tertiary-Control']
table22['Control average T'] = Control_avg_t
table2 = table21.join(table22)
#table2 = table2.style.apply(style_specific_cell, axis=None)
return table2
def create_table34(data1, data2, data3, variable):
"""
Creates Table 3 and 4.
"""
result_sancristobal = list()
result_sancristobal1 = list()
result_sancristobal2 = list()
result_sancristobal3 = list()
result_suba = list()
result_suba1 = list()
result_suba2 = list()
result_suba3 = list()
result_both = list()
r2_sancristobal = list()
r2_suba = list()
y_suba = data2[variable]
y_sancristobal = data1[variable]
for i in [['T1_treat','T2_treat'],['T1_treat','T2_treat','Rent','Own paying it','Own payed','Other condition','s_utilities','s_durables','s_infraest_hh','s_age_sorteo','s_age_sorteo2','s_years_back','s_sexo_int','Free union','Married','Widow(er)','Divorced','Single','s_single','s_edadhead','s_yrshead','s_tpersona','s_num18','estrato_0','estrato_1','estrato_2','s_puntaje','s_ingtotal','grade_6.0','grade_7.0','grade_8.0','grade_9.0','grade_10.0','grade_11.0','suba','s_over_age']]:
x_sancristobal = data1[i]
x_sancristobal = sm_api.add_constant(x_sancristobal)
reg_sancristobal = sm_api.OLS(y_sancristobal, x_sancristobal).fit(cov_type='cluster', cov_kwds={'groups': data1['school_code']})
result_sancristobal.append(round(reg_sancristobal.params[1], 3))
result_sancristobal.append(round(reg_sancristobal.bse[1], 3))
result_sancristobal.append(round(reg_sancristobal.params[2], 3))
result_sancristobal.append(round(reg_sancristobal.bse[2], 3))
result_sancristobal.append(round(reg_sancristobal.f_test('T1_treat=T2_treat').fvalue[0][0], 3))
result_sancristobal.append(round(float(reg_sancristobal.f_test('T1_treat=T2_treat').pvalue), 3))
r2_sancristobal.append(round(reg_sancristobal.rsquared, 3))
x_sancristobal = data1[['T1_treat','T2_treat','Rent','Own paying it','Own payed','Other condition','s_utilities','s_durables','s_infraest_hh','s_age_sorteo','s_age_sorteo2','s_years_back','s_sexo_int','Free union','Married','Widow(er)','Divorced','Single','s_single','s_edadhead','s_yrshead','s_tpersona','s_num18','estrato_0','estrato_1','estrato_2','s_puntaje','s_ingtotal','grade_6.0','grade_7.0','grade_8.0','grade_9.0','grade_10.0','grade_11.0','suba','s_over_age']]
x_sancristobal = sm_api.add_constant(x_sancristobal)
x_sancristobal = x_sancristobal.join(pd.get_dummies(data1['school_code']))
reg_sancristobal = sm_api.OLS(y_sancristobal, x_sancristobal).fit(cov_type='cluster', cov_kwds={'groups': data1['school_code']})
result_sancristobal.append(round(reg_sancristobal.params[[1]][0], 3))
result_sancristobal.append(round(reg_sancristobal.bse[[1]][0], 3))
result_sancristobal.append(round(reg_sancristobal.params[[2]][0], 3))
result_sancristobal.append(round(reg_sancristobal.bse[[2]][0], 3))
A = np.zeros((len(x_sancristobal.columns),), dtype=int)
A[1] = 1
A[2] = -1
result_sancristobal.append(round(reg_sancristobal.f_test(A).fvalue[0][0], 3))
result_sancristobal.append(round(float(reg_sancristobal.f_test(A).pvalue), 3))
r2_sancristobal.append(round(reg_sancristobal.rsquared, 3))
result_sancristobal1.append(result_sancristobal[0:6])
result_sancristobal2.append(result_sancristobal[6:12])
result_sancristobal3.append(result_sancristobal[12:])
i = 4
while i < 6:
result_sancristobal1[0].insert(i, '')
result_sancristobal2[0].insert(i, '')
result_sancristobal3[0].insert(i, '')
result_sancristobal1[0].append('')
result_sancristobal2[0].append('')
result_sancristobal3[0].append('')
i += 1
result_sancristobal1[0].append('')
result_sancristobal2[0].append('')
result_sancristobal3[0].append('')
result_sancristobal1[0].append(len(y_sancristobal))
result_sancristobal2[0].append(len(y_sancristobal))
result_sancristobal3[0].append(len(y_sancristobal))
result_sancristobal1[0].append(r2_sancristobal[0])
result_sancristobal2[0].append(r2_sancristobal[1])
result_sancristobal3[0].append(r2_sancristobal[2])
for i in [['T3_treat'],['T3_treat','Rent','Own paying it','Own payed','Other condition','s_utilities','s_durables','s_infraest_hh','s_age_sorteo','s_age_sorteo2','s_years_back','s_sexo_int','Free union','Married','Widow(er)','Divorced','Single','s_single','s_edadhead','s_yrshead','s_tpersona','s_num18','estrato_0','estrato_1','estrato_2','s_puntaje','s_ingtotal','grade_6.0','grade_7.0','grade_8.0','grade_9.0','grade_10.0','grade_11.0','suba','s_over_age']]:
x_suba = data2[i]
x_suba = sm_api.add_constant(x_suba, has_constant='add')
reg_suba = sm_api.OLS(y_suba, x_suba).fit(cov_type='cluster', cov_kwds={'groups': data2['school_code']})
result_suba.append(round(reg_suba.params[1], 3))
result_suba.append(round(reg_suba.bse[1], 3))
r2_suba.append(round(reg_suba.rsquared, 3))
x_suba = data2[['T3_treat','Rent','Own paying it','Own payed','Other condition','s_utilities','s_durables','s_infraest_hh','s_age_sorteo','s_age_sorteo2','s_years_back','s_sexo_int','Free union','Married','Widow(er)','Divorced','Single','s_single','s_edadhead','s_yrshead','s_tpersona','s_num18','estrato_0','estrato_1','estrato_2','s_puntaje','s_ingtotal','grade_6.0','grade_7.0','grade_8.0','grade_9.0','grade_10.0','grade_11.0','suba','s_over_age']]
x_suba = sm_api.add_constant(x_suba, has_constant='add')
x_suba = x_suba.join(pd.get_dummies(data2['school_code']))
reg_suba = sm_api.OLS(y_suba, x_suba).fit(cov_type='cluster', cov_kwds={'groups': data2['school_code']})
result_suba.append(round(reg_suba.params[[1]][0], 3))
result_suba.append(round(reg_suba.bse[[1]][0], 3))
r2_suba.append(round(reg_suba.rsquared, 3))
result_suba1.append(result_suba[0:2])
result_suba2.append(result_suba[2:4])
result_suba3.append(result_suba[4:])
i = 1
while i < 5:
result_suba1[0].insert(0, '')
result_suba2[0].insert(0, '')
result_suba3[0].insert(0, '')
result_suba1[0].append('')
result_suba2[0].append('')
result_suba3[0].append('')
i += 1
result_suba1[0].append('')
result_suba2[0].append('')
result_suba3[0].append('')
result_suba1[0].append(len(y_suba))
result_suba2[0].append(len(y_suba))
result_suba3[0].append(len(y_suba))
result_suba1[0].append(r2_suba[0])
result_suba2[0].append(r2_suba[1])
result_suba3[0].append(r2_suba[2])
y_both = data3[variable]
x_both = data3[['T1_treat','T2_treat','T3_treat','Rent','Own paying it','Own payed','Other condition','s_utilities','s_durables','s_infraest_hh','s_age_sorteo','s_age_sorteo2','s_years_back','s_sexo_int','Free union','Married','Widow(er)','Divorced','Single','s_single','s_edadhead','s_yrshead','s_tpersona','s_num18','estrato_0','estrato_1','estrato_2','s_puntaje','s_ingtotal','grade_6.0','grade_7.0','grade_8.0','grade_9.0','grade_10.0','grade_11.0','suba','s_over_age']]
x_both = sm_api.add_constant(x_both, has_constant='add')
x_both = x_both.join(pd.get_dummies(data3['school_code']))
reg_both = sm_api.OLS(y_both, x_both).fit(cov_type='cluster', cov_kwds={'groups': data3['school_code']})
i = 1
while i < 4:
result_both.append(round(reg_both.params[[i]][0], 3))
result_both.append(round(reg_both.bse[[i]][0], 3))
i += 1
A = np.zeros((len(x_both.columns),), dtype=int)
A[1] = 1
A[2] = -1
result_both.append(round(reg_both.f_test(A).fvalue[0][0], 3))
result_both.append(round(float(reg_both.f_test(A).pvalue), 3))
A = np.zeros((len(x_both.columns),), dtype=int)
A[1] = 1
A[3] = -1
result_both.append(round(reg_both.f_test(A).fvalue[0][0], 3))
result_both.append(round(float(reg_both.f_test(A).pvalue), 3))
result_both.append('')
result_both.append(len(y_both))
result_both.append(round(reg_both.rsquared, 3))
table3 = pd.DataFrame({'Basic-Savings': result_sancristobal1[0]}, index=['Basic treatment','Basic treatment SE','Savings treatment','Savings treatment SE','Tertiary treatment','Tertiary treatment SE','H0: Basic-Savings F-Stat','p-value','H0: Tertiary-Basic F-Stat','p-value','','Observations','R squared'])
table3['Basic-Savings with demographics'] = result_sancristobal2[0]
table3['Basic-Savings with demographics and school fixed effects'] = result_sancristobal3[0]
table3['Tertiary'] = result_suba1[0]
table3['Tertiary with demographics'] = result_suba2[0]
table3['Tertiary with demographics and school fixed effects'] = result_suba3[0]
table3['Both'] = result_both
return table3
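# Example usage (sketch): `sancristobal_df`, `suba_df` and the pooled `both_df` are
# hypothetical DataFrames prepared with the treatment dummies and controls used above,
# and 'attendance' stands in for whatever outcome column is being tabulated.
# table34 = create_table34(sancristobal_df, suba_df, both_df, 'attendance')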
def create_table5(data):
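    """
    Creates Table 5: graduation and tertiary-enrollment regressions on the
    follow-up sample (grade 11 students observed at follow-up), separately for
    San Cristobal, Suba and the pooled sample, with demographic controls and
    school fixed effects. Standard errors are clustered by school.
    """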
sample_fu = data.drop(data[(data.fu_observed == 0) | (data.grade != 11)].index)
sample_fu_sancristobal = sample_fu.drop(sample_fu[sample_fu.suba == 1].index)
sample_fu_suba = sample_fu.drop(sample_fu[sample_fu.suba == 0].index)
result_grad_sancristobal = list()
result_grad_suba = list()
result_grad_both = list()
result_tert_sancristobal = list()
result_tert_suba = list()
result_tert_both = list()
i = 1
while i < 5:
result_grad_suba.append('')
result_tert_suba.append('')
i += 1
x = sm_api.add_constant(sample_fu_sancristobal[['T1_treat','T2_treat','Rent','Own paying it','Own payed','Other condition','s_utilities','s_durables','s_infraest_hh','s_age_sorteo','s_age_sorteo2','s_years_back','s_sexo_int','Free union','Married','Widow(er)','Divorced','Single','s_single','s_edadhead','s_yrshead','s_tpersona','s_num18','estrato_0','estrato_1','estrato_2','s_puntaje','s_ingtotal','grade_6.0','grade_7.0','grade_8.0','grade_9.0','grade_10.0','grade_11.0','suba','s_over_age']], has_constant='add')
x = x.join(pd.get_dummies(sample_fu_sancristobal['school_code']))
y = sample_fu_sancristobal['graduated']
reg = sm_api.OLS(y, x).fit(cov_type='cluster', cov_kwds={'groups': sample_fu_sancristobal['school_code']})
result_grad_sancristobal.append(round(reg.params[[1]][0], 3))
result_grad_sancristobal.append(round(reg.bse[[1]][0], 3))
result_grad_sancristobal.append(round(reg.params[[2]][0], 3))
result_grad_sancristobal.append(round(reg.bse[[2]][0], 3))
i = 1
while i < 3:
result_grad_sancristobal.append('')
i += 1
    A = np.zeros((len(x.columns),), dtype=int)
    A[1] = 1
    A[2] = -1
result_grad_sancristobal.append(round(reg.f_test(A).fvalue[0][0], 3))
result_grad_sancristobal.append(round(float(reg.f_test(A).pvalue), 3))
i = 1
while i < 3:
result_grad_sancristobal.append('')
i += 1
result_grad_sancristobal.append('')
result_grad_sancristobal.append(len(y))
result_grad_sancristobal.append(round(reg.rsquared, 3))
sample_fu_sancristobal_tert = sample_fu_sancristobal.drop(sample_fu_sancristobal[sample_fu_sancristobal.tertiary.isnull()].index)
x = sm_api.add_constant(sample_fu_sancristobal_tert[['T1_treat','T2_treat','Rent','Own paying it','Own payed','Other condition','s_utilities','s_durables','s_infraest_hh','s_age_sorteo','s_age_sorteo2','s_years_back','s_sexo_int','Free union','Married','Widow(er)','Divorced','Single','s_single','s_edadhead','s_yrshead','s_tpersona','s_num18','estrato_0','estrato_1','estrato_2','s_puntaje','s_ingtotal','grade_6.0','grade_7.0','grade_8.0','grade_9.0','grade_10.0','grade_11.0','suba','s_over_age']], has_constant='add')
x = x.join(pd.get_dummies(sample_fu_sancristobal_tert['school_code']))
y = sample_fu_sancristobal_tert['tertiary']
reg = sm_api.OLS(y, x).fit(cov_type='cluster', cov_kwds={'groups': sample_fu_sancristobal_tert['school_code']})
result_tert_sancristobal.append(round(reg.params[[1]][0], 3))
result_tert_sancristobal.append(round(reg.bse[[1]][0], 3))
result_tert_sancristobal.append(round(reg.params[[2]][0], 3))
result_tert_sancristobal.append(round(reg.bse[[2]][0], 3))
i = 1
while i < 3:
result_tert_sancristobal.append('')
i += 1
    A = np.zeros((len(x.columns),), dtype=int)
    A[1] = 1
    A[2] = -1
result_tert_sancristobal.append(round(reg.f_test(A).fvalue[0][0], 3))
result_tert_sancristobal.append(round(float(reg.f_test(A).pvalue), 3))
i = 1
while i < 3:
result_tert_sancristobal.append('')
i += 1
result_tert_sancristobal.append('')
result_tert_sancristobal.append(len(y))
result_tert_sancristobal.append(round(reg.rsquared, 3))
x = sm_api.add_constant(sample_fu_suba[['T3_treat','Rent','Own paying it','Own payed','Other condition','s_utilities','s_durables','s_infraest_hh','s_age_sorteo','s_age_sorteo2','s_years_back','s_sexo_int','Free union','Married','Widow(er)','Divorced','Single','s_single','s_edadhead','s_yrshead','s_tpersona','s_num18','estrato_0','estrato_1','estrato_2','s_puntaje','s_ingtotal','grade_6.0','grade_7.0','grade_8.0','grade_9.0','grade_10.0','grade_11.0','suba','s_over_age']], has_constant='add')
x = x.join(pd.get_dummies(sample_fu_suba['school_code']))
y = sample_fu_suba['graduated']
reg = sm_api.OLS(y, x).fit(cov_type='cluster', cov_kwds={'groups': sample_fu_suba['school_code']})
result_grad_suba.append(round(reg.params[[1]][0], 3))
result_grad_suba.append(round(reg.bse[[1]][0], 3))
i = 1
while i < 5:
result_grad_suba.append('')
i += 1
result_grad_suba.append('')
result_grad_suba.append(len(y))
result_grad_suba.append(round(reg.rsquared, 3))
sample_fu_suba_tert = sample_fu_suba.drop(sample_fu_suba[sample_fu_suba.tertiary.isnull()].index)
x = sm_api.add_constant(sample_fu_suba_tert[['T3_treat','Rent','Own paying it','Own payed','Other condition','s_utilities','s_durables','s_infraest_hh','s_age_sorteo','s_age_sorteo2','s_years_back','s_sexo_int','Free union','Married','Widow(er)','Divorced','Single','s_single','s_edadhead','s_yrshead','s_tpersona','s_num18','estrato_0','estrato_1','estrato_2','s_puntaje','s_ingtotal','grade_6.0','grade_7.0','grade_8.0','grade_9.0','grade_10.0','grade_11.0','suba','s_over_age']], has_constant='add')
x = x.join(pd.get_dummies(sample_fu_suba_tert['school_code']))
y = sample_fu_suba_tert['tertiary']
reg = sm_api.OLS(y, x).fit(cov_type='cluster', cov_kwds={'groups': sample_fu_suba_tert['school_code']})
result_tert_suba.append(round(reg.params[[1]][0], 3))
result_tert_suba.append(round(reg.bse[[1]][0], 3))
i = 1
while i < 5:
result_tert_suba.append('')
i += 1
result_tert_suba.append('')
result_tert_suba.append(len(y))
result_tert_suba.append(round(reg.rsquared, 3))
x = sm_api.add_constant(sample_fu[['T1_treat','T2_treat','T3_treat','Rent','Own paying it','Own payed','Other condition','s_utilities','s_durables','s_infraest_hh','s_age_sorteo','s_age_sorteo2','s_years_back','s_sexo_int','Free union','Married','Widow(er)','Divorced','Single','s_single','s_edadhead','s_yrshead','s_tpersona','s_num18','estrato_0','estrato_1','estrato_2','s_puntaje','s_ingtotal','grade_6.0','grade_7.0','grade_8.0','grade_9.0','grade_10.0','grade_11.0','suba','s_over_age']], has_constant='add')
x = x.join(pd.get_dummies(sample_fu['school_code']))
y = sample_fu['graduated']
reg = sm_api.OLS(y, x).fit(cov_type='cluster', cov_kwds={'groups': sample_fu['school_code']})
result_grad_both.append(round(reg.params[[1]][0], 3))
result_grad_both.append(round(reg.bse[[1]][0], 3))
result_grad_both.append(round(reg.params[[2]][0], 3))
result_grad_both.append(round(reg.bse[[2]][0], 3))
result_grad_both.append(round(reg.params[[3]][0], 3))
result_grad_both.append(round(reg.bse[[3]][0], 3))
    A = np.zeros((len(x.columns),), dtype=int)
    A[1] = 1
    A[2] = -1
result_grad_both.append(round(reg.f_test(A).fvalue[0][0], 3))
result_grad_both.append(round(float(reg.f_test(A).pvalue), 3))
    A = np.zeros((len(x.columns),), dtype=int)
    A[1] = 1
    A[3] = -1
result_grad_both.append(round(reg.f_test(A).fvalue[0][0], 3))
result_grad_both.append(round(float(reg.f_test(A).pvalue), 3))
result_grad_both.append('')
result_grad_both.append(len(y))
result_grad_both.append(round(reg.rsquared, 3))
sample_fu_tert = sample_fu.drop(sample_fu[sample_fu.tertiary.isnull()].index)
x = sm_api.add_constant(sample_fu_tert[['T1_treat','T2_treat','T3_treat','Rent','Own paying it','Own payed','Other condition','s_utilities','s_durables','s_infraest_hh','s_age_sorteo','s_age_sorteo2','s_years_back','s_sexo_int','Free union','Married','Widow(er)','Divorced','Single','s_single','s_edadhead','s_yrshead','s_tpersona','s_num18','estrato_0','estrato_1','estrato_2','s_puntaje','s_ingtotal','grade_6.0','grade_7.0','grade_8.0','grade_9.0','grade_10.0','grade_11.0','suba','s_over_age']], has_constant='add')
x = x.join(pd.get_dummies(sample_fu_tert['school_code']))
y = sample_fu_tert['tertiary']
reg = sm_api.OLS(y, x).fit(cov_type='cluster', cov_kwds={'groups': sample_fu_tert['school_code']})
result_tert_both.append(round(reg.params[[1]][0], 3))
result_tert_both.append(round(reg.bse[[1]][0], 3))
result_tert_both.append(round(reg.params[[2]][0], 3))
result_tert_both.append(round(reg.bse[[2]][0], 3))
result_tert_both.append(round(reg.params[[3]][0], 3))
result_tert_both.append(round(reg.bse[[3]][0], 3))
    A = np.zeros((len(x.columns),), dtype=int)
    A[1] = 1
    A[2] = -1
result_tert_both.append(round(reg.f_test(A).fvalue[0][0], 3))
result_tert_both.append(round(float(reg.f_test(A).pvalue), 3))
    A = np.zeros((len(x.columns),), dtype=int)
    A[1] = 1
    A[3] = -1
result_tert_both.append(round(reg.f_test(A).fvalue[0][0], 3))
result_tert_both.append(round(float(reg.f_test(A).pvalue), 3))
result_tert_both.append('')
result_tert_both.append(len(y))
result_tert_both.append(round(reg.rsquared, 3))
table5 = | pd.DataFrame({'Graduation Basic-Savings':result_grad_sancristobal}, index=['Basic treatment','Basic treatment SE','Savings treatment','Savings treatment SE','Tertiary treatment','Tertiary treatment SE','H0: Basic-Savings F-Stat','p-value','H0: Tertiary-Basic F-Stat','p-value','','Observations','R squared']) | pandas.DataFrame |
import json
import math
import os
import pickle
import random
from multiprocessing import Pool
from pathlib import Path
import pandas as pd
import torch
import torch.nn.functional as F
import torch.utils.data
from augmentation.augmentation_methods import \
NoiseAugmentor, RirAugmentor, CodecAugmentor, \
LowpassAugmentor, HighpassAugmentor, ReverbAugmentor, \
HilbertAugmentor
from complex_data_parser import get_path_by_glob, parse_complex_data
from src.meldataset import load_wav
from textgrid_parsing import parse_textgrid
PHI = (1 + math.sqrt(5))/2
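# Golden ratio; used in __getitem__ to stride through the file list so a subsampled
# dataset still covers the full set roughly uniformly.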
MAX_WAV_VALUE = 32768.0
labels_to_use = ['speaker', 'sex', 'mic-brand']
timed_labels_to_use = ['phones']
label_groups = {
'content': ['speaker', 'sex', 'phones'],
'style': ['mic-brand']
}
augmentation_label_groups = {
'content': [],
'style': ['noise', 'rir', 'lowpass', 'highpass', 'reverb', 'codec', 'hilbert']
}
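# A minimal sketch of the expected `augmentation_config` structure (the concrete label
# values such as 'babble' are illustrative only): 'options' maps each augmentation to
# its label choices, and 'probs' holds the overall probability plus per-augmentation
# sub-probabilities consumed in __init__ below.
# augmentation_config = {
#     'options': {'noise': ['babble', 'white'], 'rir': ['small_room']},
#     'probs': {'prob': 0.5, 'sub_probs': {'noise': {'prob': 0.6}, 'rir': {'prob': 0.2}}},
# }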
class MultilabelWaveDataset(torch.utils.data.Dataset):
def __init__(self, data_dir, cache_dir, name, source, segment_length, sampling_rate, embedding_size,
augmentation_config=None, disable_wavs=False, split=True, size=None,
fine_tuning=False, deterministic=False):
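        """
        Dataset of wav segments with per-frame (timed) labels and per-file labels.
        `segment_length` is the crop length in samples, `embedding_size` is the number
        of samples per label frame, and `augmentation_config` (optional) enables the
        waveform augmentations set up below.
        """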
self.data_dir = data_dir
self.cache_dir = cache_dir
self.name = name
self.source = source
self.segment_length = segment_length
self.embedding_size = embedding_size
self.sampling_rate = sampling_rate
self.split = split
self.fine_tuning = fine_tuning
self.size = size
self.deterministic = deterministic
self.random = random.Random()
self.disable_wavs = disable_wavs
self.should_augment = augmentation_config is not None
if self.should_augment:
self.aug_options = augmentation_config['options']
self.aug_probs = augmentation_config['probs']
print('Creating [{}] dataset:'.format(self.name))
name_path = Path(os.path.join(cache_dir, name))
if not name_path.exists():
os.mkdir(name_path)
cache_path = Path(os.path.join(cache_dir, name, 'labels_cache'))
        if not cache_path.exists():
os.mkdir(cache_path)
config_path = f'**/data_configs/{source}/*.json'
self.files_with_labels = self.do_with_pickle_cache(lambda: self.get_files_with_labels(cache_dir, config_path),
os.path.join(cache_dir, name, 'files_with_labels.pickle'))
if self.size is None:
self.size = len(self.files_with_labels)
self.label_options_weights = self.do_with_pickle_cache(self.get_all_label_options_weights,
os.path.join(cache_dir, name, 'label_options_weights.pickle'))
base_prob = self.aug_probs['prob']
sub_probs = self.aug_probs['sub_probs']
for augmentation, augmentation_labels in self.aug_options.items():
sub_prob = sub_probs[augmentation]['prob']
option_prob = 1.0/len(augmentation_labels)
self.label_options_weights[augmentation] = {'none': base_prob*(1-sub_prob), **{
label: base_prob*sub_prob*option_prob for label in augmentation_labels
}}
all_label_groups = {key: [*label_groups[key], *augmentation_label_groups[key]] for key in label_groups.keys()}
self.label_options_weights_groups = {
key: {label: self.label_options_weights[label] for label in label_group}
for key, label_group in all_label_groups.items()
}
self.label_options_groups = {
key: {label: tuple(value.keys()) for label, value in label_group.items()}
for key, label_group in self.label_options_weights_groups.items()
}
self.label_options = {
key: tuple(label_group.keys())
for key, label_group in self.label_options_weights.items()
}
self.label_weights_groups = {
key: {label: tuple(value.values()) for label, value in label_group.items()}
for key, label_group in self.label_options_weights_groups.items()
}
self.label_weights = {
key: tuple(label_group.values())
for key, label_group in self.label_options_weights.items()
}
if self.should_augment:
self.aug_methods = {
'noise': NoiseAugmentor(self.data_dir, self.label_options).augment,
'rir': RirAugmentor(self.data_dir).augment,
'reverb': ReverbAugmentor(self.sampling_rate).augment,
'lowpass': LowpassAugmentor(self.sampling_rate).augment,
'highpass': HighpassAugmentor(self.sampling_rate).augment,
'codec': CodecAugmentor(self.sampling_rate).augment,
'hilbert': HilbertAugmentor(self.sampling_rate).augment
}
print('Dataset [{}] is ready!\n'.format(self.name))
@staticmethod
def do_with_pickle_cache(func, pickle_path):
pickle_path = Path(pickle_path)
if pickle_path.exists():
with open(pickle_path, 'rb') as pickle_file:
result = pickle.load(pickle_file)
else:
if not pickle_path.parent.exists():
pickle_path.parent.mkdir(parents=True, exist_ok=True)
result = func()
with open(pickle_path, 'wb') as pickle_file:
pickle.dump(result, pickle_file)
return result
@staticmethod
def create_pickle_cache(func, pickle_path):
pickle_path = Path(pickle_path)
if not pickle_path.exists():
if not pickle_path.parent.exists():
pickle_path.parent.mkdir(parents=True, exist_ok=True)
result = func()
with open(pickle_path, 'wb') as pickle_file:
pickle.dump(result, pickle_file)
def get_all_label_options_weights(self):
all_label_options = {}
for col in labels_to_use:
all_label_options[col] = dict(self.files_with_labels[col].value_counts(normalize=True))
with Pool(16) as pool:
for label in timed_labels_to_use:
all_label_options[label] = dict()
results = pool.map(self.get_timed_labels_value_counts_by_index, range(len(self)))
rows_to_remove = []
for i, result in enumerate(results):
if isinstance(result, Exception):
rows_to_remove.append(i)
else:
for label in timed_labels_to_use:
for key, value in result[label].items():
if key not in all_label_options[label]:
all_label_options[label][key] = 0
all_label_options[label][key] += value
for label in timed_labels_to_use:
for key in all_label_options[label]:
all_label_options[label][key] /= len(results)
if len(rows_to_remove) > 0:
self.files_with_labels = self.files_with_labels.drop(rows_to_remove).reset_index(drop=True)
pickle_path = os.path.join(self.cache_dir, self.source, 'files_with_labels.pickle')
with open(pickle_path, 'wb') as pickle_file:
pickle.dump(self.files_with_labels, pickle_file)
all_label_options_weights = all_label_options
return all_label_options_weights
def get_timed_labels_value_counts_by_index(self, i):
try:
labels, timed_labels = self.get_timed_labels(i)
return self.get_labels_value_counts(timed_labels)
except Exception as e:
print('Item {} failed to get timed labels: [{}]'.format(i, e))
return e
def get_labels_value_counts(self, timed_labels):
result = {}
for label in timed_labels_to_use:
result[label] = dict(timed_labels[label]['text'].value_counts(normalize=True))
return result
def get_files_with_labels(self, main_dir, config_path):
main_dir = Path(main_dir)
subdir_list = [path for path in main_dir.glob('*/')]
results = None
for subdir in subdir_list:
try:
config_files = [path for path in subdir.glob(config_path)]
for config_file in config_files:
config = config_file.read_text()
config_dict = json.loads(config)
print('Loading [{}]...'.format(config_dict['name']))
complex_data = parse_complex_data(subdir, config_dict['config'], config_dict['result'])
print('[{}] loaded successfully!'.format(config_dict['name']))
if results is None:
results = complex_data
else:
results = pd.concat([results, complex_data], axis=0, ignore_index=True)
except Exception as e:
print(e)
print('Data config was not found or invalid, moving on.')
continue
return results
def get_timed_labels(self, index):
all_labels = self.files_with_labels.iloc[[index]].squeeze()
labels = self.get_labels(index)
timed_labels = parse_textgrid(all_labels['subdir'], all_labels['textgrid'])
return labels, {key: value for key, value in timed_labels.items() if key in timed_labels_to_use}
def get_labels(self, index):
labels = self.files_with_labels[labels_to_use].iloc[[index]].squeeze()
return labels
def get_grouped_labels(self, index):
labels = self.get_labels(index)
grouped_labels = {group: labels.filter(group_labels).to_dict() for group, group_labels in label_groups.items()}
return grouped_labels
def __getitem__(self, index):
if self.deterministic:
self.random.seed(index)
if self.size < len(self.files_with_labels):
index = (int(len(self.files_with_labels) / PHI) * index) % len(self.files_with_labels)
return self.get_augmented_item(index)
def get_augmented_item(self, index):
wav, wav_path, time_labels, grouped_labels = self.get_cut_item(index)
if self.should_augment:
wav, time_labels, grouped_labels = self.augment_item(wav, time_labels, grouped_labels)
return wav, wav_path, time_labels, grouped_labels
def create_pickle_label(self, index):
return self.create_pickle_cache(
lambda: self.get_fresh_label(index),
os.path.join(self.cache_dir, self.source, 'labels_cache', '{}.pickle'.format(index))
)
def get_pickle_label(self, index):
return self.do_with_pickle_cache(
lambda: self.get_fresh_label(index),
os.path.join(self.cache_dir, self.source, 'labels_cache', '{}.pickle'.format(index))
)
def get_fresh_label(self, index):
labels, timed_labels = self.get_timed_labels(index)
segmented_timed_labels = self.get_segmented_timed_labels(timed_labels)
all_segmented_labels = self.add_segmented_labels(segmented_timed_labels, labels)
segmented_tensor = self.convert_segmented_labels_to_tensor(all_segmented_labels, label_groups)
return segmented_tensor
def __len__(self):
return min(len(self.files_with_labels), self.size)
def get_segmented_timed_labels(self, timed_labels):
return pd.concat(
[
self.get_segmented_timed_labels_for_single(label_name, timed_label)
for label_name, timed_label in timed_labels.items()
],
axis=1
)
def get_segmented_timed_labels_for_single(self, label_name, timed_label):
result_rows = []
time_interval = self.embedding_size / self.sampling_rate
current_index = 0
current_time = 0
while current_index < len(timed_label):
result_rows.append({label_name: timed_label.iloc[[current_index]].squeeze()['text']})
current_time += time_interval
if current_time > timed_label.iloc[[current_index]].squeeze()['end']:
current_index += 1
return pd.DataFrame(result_rows)
def add_segmented_labels(self, segmented_timed_labels, labels):
for col in labels.axes[0]:
segmented_timed_labels[col] = labels[col]
return segmented_timed_labels
def convert_segmented_labels_to_tensor(self, all_segmented_labels, given_label_groups):
all_tensors = {}
for key, labels in given_label_groups.items():
tensors = {}
for col in labels:
if col in all_segmented_labels:
index_tensor = torch.tensor(
all_segmented_labels[col].apply(lambda x: self.label_options[col].index(x)).tolist(),
dtype=torch.int64
)
tensors[col] = index_tensor
all_tensors[key] = tensors
return all_tensors
def get_wav(self, index):
wav_path = get_path_by_glob(self.cache_dir, self.files_with_labels.iloc[[index]].squeeze()['wav'])
if self.disable_wavs:
return torch.zeros((self.segment_length,)), str(wav_path)
audio, sampling_rate = load_wav(wav_path)
if sampling_rate != self.sampling_rate:
raise ValueError("{} SR doesn't match target {} SR".format(
sampling_rate, self.sampling_rate))
audio = torch.FloatTensor(audio)
return audio.squeeze(0), str(wav_path)
def get_cut_item(self, index):
wav, wav_path = self.get_wav(index)
pickle_label_groups = self.get_pickle_label(index)
length = wav.size(0)
embedded_segment_length = self.segment_length // self.embedding_size
embedded_length = min(length // self.embedding_size,
next(iter(next(iter(pickle_label_groups.values())).values())).size(0))
trimed_length = embedded_length * self.embedding_size
trimed_start = 0
if len(wav) > trimed_length:
wav = wav[trimed_start:trimed_start + trimed_length]
length = wav.size(0)
# print(length, self.segment_length, embedded_length, embedded_segment_length)
if length >= self.segment_length:
max_embedded_start = embedded_length - embedded_segment_length
embedded_start = self.random.randint(0, max_embedded_start)
start = embedded_start * self.embedding_size
# print('trim: ', start, embedded_start)
else:
embedded_padding = embedded_segment_length - embedded_length
prefix_embedded_padding = self.random.randint(0, embedded_padding)
postfix_embedded_padding = embedded_padding - prefix_embedded_padding
padding = embedded_padding * self.embedding_size
prefix_padding = prefix_embedded_padding * self.embedding_size
postfix_padding = postfix_embedded_padding * self.embedding_size
for key, group in pickle_label_groups.items():
for label, label_item in group.items():
label_item = label_item[0:embedded_length]
if length >= self.segment_length:
cut_label_item = label_item[embedded_start:embedded_start + embedded_segment_length]
else:
cut_label_item = torch.nn.functional.pad(label_item,
(prefix_embedded_padding, postfix_embedded_padding),
'constant')
group[label] = cut_label_item
if length >= self.segment_length:
wav = wav[start:start + self.segment_length]
else:
wav = torch.nn.functional.pad(wav, (prefix_padding, postfix_padding), 'constant')
grouped_labels = self.get_grouped_labels(index)
return wav, wav_path, pickle_label_groups, grouped_labels
def augment_item(self, cut_wav, cut_label, grouped_labels):
options = self.aug_options
probs = self.aug_probs
methods = self.aug_methods
(length,) = next(iter(next(iter(cut_label.values())).values())).size()
augmented_wav = cut_wav
augmented_label = | pd.DataFrame(['none'] * length, columns=['none']) | pandas.DataFrame |
import pandas as pd
import pytest
import featuretools as ft
from featuretools.entityset import EntitySet, Relationship
from featuretools.utils.cudf_utils import pd_to_cudf_clean
from featuretools.utils.gen_utils import import_or_none
cudf = import_or_none('cudf')
# TODO: Fix vjawa
@pytest.mark.skipif('not cudf')
def test_create_entity_from_cudf_df(pd_es):
cleaned_df = pd_to_cudf_clean(pd_es["log"].df)
log_cudf = cudf.from_pandas(cleaned_df)
print(pd_es["log"].variable_types)
cudf_es = EntitySet(id="cudf_es")
cudf_es = cudf_es.entity_from_dataframe(
entity_id="log_cudf",
dataframe=log_cudf,
index="id",
time_index="datetime",
variable_types=pd_es["log"].variable_types
)
pd.testing.assert_frame_equal(cleaned_df, cudf_es["log_cudf"].df.to_pandas(), check_like=True)
@pytest.mark.skipif('not cudf')
def test_create_entity_with_non_numeric_index(pd_es, cudf_es):
df = pd.DataFrame({"id": ["A_1", "A_2", "C", "D"],
"values": [1, 12, -34, 27]})
cudf_df = cudf.from_pandas(df)
pd_es.entity_from_dataframe(
entity_id="new_entity",
dataframe=df,
index="id")
cudf_es.entity_from_dataframe(
entity_id="new_entity",
dataframe=cudf_df,
index="id",
variable_types={"id": ft.variable_types.Id, "values": ft.variable_types.Numeric})
pd.testing.assert_frame_equal(pd_es['new_entity'].df.reset_index(drop=True), cudf_es['new_entity'].df.to_pandas())
@pytest.mark.skipif('not cudf')
def test_create_entityset_with_mixed_dataframe_types(pd_es, cudf_es):
df = pd.DataFrame({"id": [0, 1, 2, 3],
"values": [1, 12, -34, 27]})
cudf_df = cudf.from_pandas(df)
    # Test error is raised when trying to add cudf entity to entityset with existing pandas entities
err_msg = "All entity dataframes must be of the same type. " \
"Cannot add entity of type {} to an entityset with existing entities " \
"of type {}".format(type(cudf_df), type(pd_es.entities[0].df))
with pytest.raises(ValueError, match=err_msg):
pd_es.entity_from_dataframe(
entity_id="new_entity",
dataframe=cudf_df,
index="id")
    # Test error is raised when trying to add pandas entity to entityset with existing cudf entities
err_msg = "All entity dataframes must be of the same type. " \
"Cannot add entity of type {} to an entityset with existing entities " \
"of type {}".format(type(df), type(cudf_es.entities[0].df))
with pytest.raises(ValueError, match=err_msg):
cudf_es.entity_from_dataframe(
entity_id="new_entity",
dataframe=df,
index="id")
@pytest.mark.skipif('not cudf')
def test_add_last_time_indexes():
pd_es = EntitySet(id="pd_es")
cudf_es = EntitySet(id="cudf_es")
sessions = pd.DataFrame({"id": [0, 1, 2, 3],
"user": [1, 2, 1, 3],
"time": [pd.to_datetime('2019-01-10'),
| pd.to_datetime('2019-02-03') | pandas.to_datetime |
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 21 14:38:25 2020
@author: SKD-HiTMAN
"""
import pandas as pd
import numpy as np
from sklearn.metrics.pairwise import cosine_similarity
ratings = pd.read_csv('../../../Dataset/SongsDataset/ratings.csv', encoding='latin-1')
songs = pd.read_csv('../../../Dataset/NewDataset/songs.csv', encoding='latin-1')
# ratings = pd.read_csv('../Dataset/MovieLens/ml-latest-small/ratings.csv')
# songs = pd.read_csv('../Dataset/MovieLens/ml-latest-small/songs.csv', encoding='latin-1')
merged = pd.merge(ratings, songs, left_on='songId', right_on='songId', sort=True)
merged = merged[['userId', 'title', 'rating']]
songRatings = merged.pivot_table(index=['userId'], columns=['title'], values='rating')
# remove null value-> replace with 0
#songRatings.replace({np.nan:0}, regex=True, inplace=True)
songRatings = songRatings.fillna(0)
#print((songRatings[songRatings['userId'] == None]).head())
#print(songRatings.head())
# cosine similarity: pairwise similarity b/w all users and song-rating dfs
user_similarity = cosine_similarity(songRatings)
# user_similarity is a numpy array--> convert to df
user_sim_df = pd.DataFrame(user_similarity, index = songRatings.index, columns = songRatings.index)
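# Cosine similarity between two rating vectors u and v is (u . v) / (||u|| * ||v||),
# so users who rated the same songs similarly get a score close to 1.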
songRatings = songRatings.T # transpose the df to work on columns in the upcoming function
# function to take user as parameter and show the result of highest rated songs for similar user
import operator
def recommendation(user):
if user not in songRatings.columns:
return ('Oops! No data available for this user!')
# sort all the similar user for the active user basing on cosine similarity
sim_user = user_sim_df.sort_values(by = user, ascending = False).index[1:11]
best = []
for i in sim_user:
max_score = songRatings.loc[:, i].max()
best.append(songRatings[songRatings.loc[:, i] == max_score].index.tolist())
user_seen_songs = songRatings[songRatings.loc[:, user] > 0].index.tolist()
# remove the songs user has already watched
for i in range(len(best)):
for j in best[i]:
if (j in user_seen_songs):
best[i].remove(j)
most_common = {}
for i in range(len(best)):
for j in best[i]:
if j in most_common:
most_common[j] += 1
else:
most_common[j] = 1
    sorted_list = sorted(most_common.items(), key = operator.itemgetter(1), reverse = True) # sort by count (the dict value, itemgetter(1)), most frequently recommended first
return(sorted_list)
result = recommendation(45)
################################## performance evaluation
# create classes
# 1-3 ratings -> disliked; 4-5 -> liked
ratings.loc[ratings['rating'] <= 3, "rating"] = 0
ratings.loc[ratings['rating'] > 3, "rating"] = 1
merged = | pd.merge(ratings, songs, left_on='songId', right_on='songId', sort=True) | pandas.merge |
"""
MIT License
Copyright (c) 2021, <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation the
rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit
persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
------------------------------------------------------------------------------------------------------------------------
K-means++ attack
=====================
This class implements K-means++ attack from paper K-means++ vs. Behavioral Biometrics: One Loop to
Rule Them All
@inproceedings{negi2018k,
title={K-means++ vs. Behavioral Biometrics: One Loop to Rule Them All.},
author={<NAME> and <NAME> and <NAME> and <NAME>},
booktitle={NDSS},
year={2018}
}
"""
from source_code.adversaries.adversarial_attacks import Attacks
import pandas as pd
import numpy as np
class KppAttack(Attacks):
def __init__(self, data, required_attack_samples):
"""
@param required_attack_samples: Expects an integer for number of attack samples to generate
@param data: Expects a Pandas dataframe
"""
self.attack_df = data
self.attack_samples = required_attack_samples
self.attack_df_kpp = None
def generate_attack(self):
if 'user' in self.attack_df.columns:
centroid = self.attack_df.drop('user', axis=1).mean().values.reshape(1, -1)
# Using numpy arrays for more efficient usage
k_mean_ar = self.attack_df.drop('user', axis=1).to_numpy()
feat_list = self.attack_df.columns.drop('user').to_list()
else:
            centroid = self.attack_df.mean().values.reshape(1, -1)
            # Using numpy arrays for more efficient usage
            k_mean_ar = self.attack_df.to_numpy()
            feat_list = self.attack_df.columns.to_list()
# Generating attack set, first point is the mean of the attack data
init_point = centroid
self.attack_df_kpp = | pd.DataFrame(init_point, columns=feat_list) | pandas.DataFrame |
import time
import requests
import logging
import argparse
import pandas as pd
from lxml.html import fromstring
from tqdm.auto import tqdm
from bs4 import BeautifulSoup
from bs4.element import Comment
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from concurrent.futures import ThreadPoolExecutor, as_completed
logging.basicConfig(format='%(asctime)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO)
logger = logging.getLogger(__name__)
class CointelegraphCrawler(object):
SCROLL_PAUSE_TIME = 2
BUTTON_XPATH = '/html/body/div/div/div/div[1]/main/div/div/div[3]/div[1]/div/div/div/button'
UA = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/95.0.4638.69 Safari/537.36'
def __init__(self, query, driver_root_path, thread=16):
        super().__init__()
self.root_url = f'https://cointelegraph.com/tags/{query}'
self.driver_root_path = driver_root_path
self.service = Service(driver_root_path)
self.thread = thread
self.session = self.init_session()
def init_session(self):
# Create a reusable connection pool with python requests
session = requests.Session()
session.mount(
'https://',
requests.adapters.HTTPAdapter(
pool_maxsize=self.thread,
max_retries=3,
pool_block=True)
)
return session
def run(self, time_limit, save_filename=None):
driver = webdriver.Chrome(service=self.service)
driver.get(self.root_url)
WebDriverWait(driver, 5)
logger.info(f'Running selenium...')
start = time.time()
click_count = 0
last_height = driver.execute_script("return document.body.scrollHeight")
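        # Track the page height so the loop can stop once clicking/scrolling no longer loads new content.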
while True:
end = time.time()
eps = end - start
if eps >= time_limit:
break
try:
# Find the button and click
driver.execute_script(
"arguments[0].click();",
WebDriverWait(driver, 20).until(EC.element_to_be_clickable((By.XPATH, self.BUTTON_XPATH)))
)
# Scroll down to the bottom
driver.execute_script('window.scrollTo(0, document.body.scrollHeight);')
time.sleep(self.SCROLL_PAUSE_TIME)
new_height = driver.execute_script("return document.body.scrollHeight")
if new_height == last_height:
break
last_height = new_height
click_count += 1
except:
break
soup = BeautifulSoup(driver.page_source, "html.parser")
driver.quit()
logger.info(f'Start parsing information...')
info = self._parse(soup)
self._news_df = | pd.DataFrame(info, columns=['title', 'link', 'author', 'date']) | pandas.DataFrame |
# coding=utf-8
# pylint: disable-msg=E1101,W0612
from datetime import datetime, timedelta
import operator
from itertools import product, starmap
from numpy import nan, inf
import numpy as np
import pandas as pd
from pandas import (Index, Series, DataFrame, isnull, bdate_range,
NaT, date_range, timedelta_range,
_np_version_under1p8)
from pandas.tseries.index import Timestamp
from pandas.tseries.tdi import Timedelta
import pandas.core.nanops as nanops
from pandas.compat import range, zip
from pandas import compat
from pandas.util.testing import assert_series_equal, assert_almost_equal
import pandas.util.testing as tm
from .common import TestData
class TestSeriesOperators(TestData, tm.TestCase):
_multiprocess_can_split_ = True
def test_comparisons(self):
left = np.random.randn(10)
right = np.random.randn(10)
left[:3] = np.nan
result = nanops.nangt(left, right)
with np.errstate(invalid='ignore'):
expected = (left > right).astype('O')
expected[:3] = np.nan
assert_almost_equal(result, expected)
s = Series(['a', 'b', 'c'])
s2 = Series([False, True, False])
# it works!
exp = Series([False, False, False])
tm.assert_series_equal(s == s2, exp)
tm.assert_series_equal(s2 == s, exp)
def test_op_method(self):
def check(series, other, check_reverse=False):
simple_ops = ['add', 'sub', 'mul', 'floordiv', 'truediv', 'pow']
if not compat.PY3:
simple_ops.append('div')
for opname in simple_ops:
op = getattr(Series, opname)
if op == 'div':
alt = operator.truediv
else:
alt = getattr(operator, opname)
result = op(series, other)
expected = alt(series, other)
tm.assert_almost_equal(result, expected)
if check_reverse:
rop = getattr(Series, "r" + opname)
result = rop(series, other)
expected = alt(other, series)
tm.assert_almost_equal(result, expected)
check(self.ts, self.ts * 2)
check(self.ts, self.ts[::2])
check(self.ts, 5, check_reverse=True)
check(tm.makeFloatSeries(), tm.makeFloatSeries(), check_reverse=True)
def test_neg(self):
assert_series_equal(-self.series, -1 * self.series)
def test_invert(self):
assert_series_equal(-(self.series < 0), ~(self.series < 0))
def test_div(self):
with np.errstate(all='ignore'):
# no longer do integer div for any ops, but deal with the 0's
p = DataFrame({'first': [3, 4, 5, 8], 'second': [0, 0, 0, 3]})
result = p['first'] / p['second']
expected = Series(
p['first'].values.astype(float) / p['second'].values,
dtype='float64')
expected.iloc[0:3] = np.inf
assert_series_equal(result, expected)
result = p['first'] / 0
expected = Series(np.inf, index=p.index, name='first')
assert_series_equal(result, expected)
p = p.astype('float64')
result = p['first'] / p['second']
expected = Series(p['first'].values / p['second'].values)
assert_series_equal(result, expected)
p = DataFrame({'first': [3, 4, 5, 8], 'second': [1, 1, 1, 1]})
result = p['first'] / p['second']
assert_series_equal(result, p['first'].astype('float64'),
check_names=False)
self.assertTrue(result.name is None)
self.assertFalse(np.array_equal(result, p['second'] / p['first']))
# inf signing
s = Series([np.nan, 1., -1.])
result = s / 0
expected = Series([np.nan, np.inf, -np.inf])
assert_series_equal(result, expected)
# float/integer issue
# GH 7785
p = DataFrame({'first': (1, 0), 'second': (-0.01, -0.02)})
expected = Series([-0.01, -np.inf])
result = p['second'].div(p['first'])
assert_series_equal(result, expected, check_names=False)
result = p['second'] / p['first']
assert_series_equal(result, expected)
# GH 9144
s = Series([-1, 0, 1])
result = 0 / s
expected = Series([0.0, nan, 0.0])
assert_series_equal(result, expected)
result = s / 0
expected = Series([-inf, nan, inf])
assert_series_equal(result, expected)
result = s // 0
expected = Series([-inf, nan, inf])
assert_series_equal(result, expected)
def test_operators(self):
def _check_op(series, other, op, pos_only=False,
check_dtype=True):
left = np.abs(series) if pos_only else series
right = np.abs(other) if pos_only else other
cython_or_numpy = op(left, right)
python = left.combine(right, op)
tm.assert_series_equal(cython_or_numpy, python,
check_dtype=check_dtype)
def check(series, other):
simple_ops = ['add', 'sub', 'mul', 'truediv', 'floordiv', 'mod']
for opname in simple_ops:
_check_op(series, other, getattr(operator, opname))
_check_op(series, other, operator.pow, pos_only=True)
_check_op(series, other, lambda x, y: operator.add(y, x))
_check_op(series, other, lambda x, y: operator.sub(y, x))
_check_op(series, other, lambda x, y: operator.truediv(y, x))
_check_op(series, other, lambda x, y: operator.floordiv(y, x))
_check_op(series, other, lambda x, y: operator.mul(y, x))
_check_op(series, other, lambda x, y: operator.pow(y, x),
pos_only=True)
_check_op(series, other, lambda x, y: operator.mod(y, x))
check(self.ts, self.ts * 2)
check(self.ts, self.ts * 0)
check(self.ts, self.ts[::2])
check(self.ts, 5)
def check_comparators(series, other, check_dtype=True):
_check_op(series, other, operator.gt, check_dtype=check_dtype)
_check_op(series, other, operator.ge, check_dtype=check_dtype)
_check_op(series, other, operator.eq, check_dtype=check_dtype)
_check_op(series, other, operator.lt, check_dtype=check_dtype)
_check_op(series, other, operator.le, check_dtype=check_dtype)
check_comparators(self.ts, 5)
check_comparators(self.ts, self.ts + 1, check_dtype=False)
def test_operators_empty_int_corner(self):
s1 = Series([], [], dtype=np.int32)
s2 = Series({'x': 0.})
tm.assert_series_equal(s1 * s2, Series([np.nan], index=['x']))
def test_operators_timedelta64(self):
# invalid ops
self.assertRaises(Exception, self.objSeries.__add__, 1)
self.assertRaises(Exception, self.objSeries.__add__,
np.array(1, dtype=np.int64))
self.assertRaises(Exception, self.objSeries.__sub__, 1)
self.assertRaises(Exception, self.objSeries.__sub__,
np.array(1, dtype=np.int64))
        # series ops
v1 = date_range('2012-1-1', periods=3, freq='D')
v2 = date_range('2012-1-2', periods=3, freq='D')
rs = Series(v2) - Series(v1)
xp = Series(1e9 * 3600 * 24,
rs.index).astype('int64').astype('timedelta64[ns]')
assert_series_equal(rs, xp)
self.assertEqual(rs.dtype, 'timedelta64[ns]')
df = DataFrame(dict(A=v1))
td = Series([timedelta(days=i) for i in range(3)])
self.assertEqual(td.dtype, 'timedelta64[ns]')
# series on the rhs
result = df['A'] - df['A'].shift()
self.assertEqual(result.dtype, 'timedelta64[ns]')
result = df['A'] + td
self.assertEqual(result.dtype, 'M8[ns]')
# scalar Timestamp on rhs
maxa = df['A'].max()
tm.assertIsInstance(maxa, Timestamp)
resultb = df['A'] - df['A'].max()
self.assertEqual(resultb.dtype, 'timedelta64[ns]')
# timestamp on lhs
result = resultb + df['A']
values = [Timestamp('20111230'), Timestamp('20120101'),
Timestamp('20120103')]
expected = Series(values, name='A')
assert_series_equal(result, expected)
# datetimes on rhs
result = df['A'] - datetime(2001, 1, 1)
expected = Series(
[timedelta(days=4017 + i) for i in range(3)], name='A')
assert_series_equal(result, expected)
self.assertEqual(result.dtype, 'm8[ns]')
d = datetime(2001, 1, 1, 3, 4)
resulta = df['A'] - d
self.assertEqual(resulta.dtype, 'm8[ns]')
# roundtrip
resultb = resulta + d
assert_series_equal(df['A'], resultb)
# timedeltas on rhs
td = timedelta(days=1)
resulta = df['A'] + td
resultb = resulta - td
assert_series_equal(resultb, df['A'])
self.assertEqual(resultb.dtype, 'M8[ns]')
# roundtrip
td = timedelta(minutes=5, seconds=3)
resulta = df['A'] + td
resultb = resulta - td
assert_series_equal(df['A'], resultb)
self.assertEqual(resultb.dtype, 'M8[ns]')
# inplace
value = rs[2] + np.timedelta64(timedelta(minutes=5, seconds=1))
rs[2] += np.timedelta64(timedelta(minutes=5, seconds=1))
self.assertEqual(rs[2], value)
def test_operator_series_comparison_zerorank(self):
# GH 13006
result = np.float64(0) > pd.Series([1, 2, 3])
expected = 0.0 > pd.Series([1, 2, 3])
self.assert_series_equal(result, expected)
result = pd.Series([1, 2, 3]) < np.float64(0)
expected = pd.Series([1, 2, 3]) < 0.0
self.assert_series_equal(result, expected)
result = np.array([0, 1, 2])[0] > pd.Series([0, 1, 2])
expected = 0.0 > pd.Series([1, 2, 3])
self.assert_series_equal(result, expected)
def test_timedeltas_with_DateOffset(self):
# GH 4532
# operate with pd.offsets
s = Series([Timestamp('20130101 9:01'), Timestamp('20130101 9:02')])
result = s + pd.offsets.Second(5)
result2 = pd.offsets.Second(5) + s
expected = Series([Timestamp('20130101 9:01:05'), Timestamp(
'20130101 9:02:05')])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
result = s - pd.offsets.Second(5)
result2 = -pd.offsets.Second(5) + s
expected = Series([Timestamp('20130101 9:00:55'), Timestamp(
'20130101 9:01:55')])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
result = s + pd.offsets.Milli(5)
result2 = pd.offsets.Milli(5) + s
expected = Series([Timestamp('20130101 9:01:00.005'), Timestamp(
'20130101 9:02:00.005')])
| assert_series_equal(result, expected) | pandas.util.testing.assert_series_equal |
import unittest
import pandas as pd
import analysis_tools as analysis
import matplotlib.pyplot as plt
class TestStringMethods(unittest.TestCase):
def setUp(self):
xs = [10, 30, 110, -20, 50, 50, 30, 80, 99, 10]
ys = [20, 60, 110, -40, 100, 100, 60, 160, 200, 20]
ts = [0, 1000, 2000, 3000, 8000, 9000, 10000, 11000, 12000, 13000]
self.dirty_tracking_data = pd.DataFrame({"x": xs, "y": ys, "t": ts})
def test_cleaning(self):
clean_tracking_data = analysis.clean_tracking_data(self.dirty_tracking_data)
answer = self.dirty_tracking_data[[True, True, True, False, True, True, True, True, True, False]]
        self.assertTrue((answer == clean_tracking_data).all().all())
def test_frac_conv(self):
frac_coords = analysis.convert_to_frac_coords(self.dirty_tracking_data, (0, 100), (0, 200))
self.assertEqual(frac_coords["x"][0], 0.1)
self.assertEqual(frac_coords["y"][0], 0.1)
self.assertEqual(frac_coords["x"][9], 0.1)
def test_mid_splats(self):
heatmap = analysis.calc_heatmap( | pd.DataFrame([{"x": 0.5, "y": 0.5, "t": 0}]) | pandas.DataFrame |
# -*- coding: utf-8 -*-
import logging
import warnings
import weakref
from pathlib import Path
from typing import List, Union, Set, cast
from PyQt5.QtWidgets import QInputDialog
from pandas import DataFrame, Timestamp, concat
from PyQt5.QtCore import Qt
from PyQt5.QtGui import QStandardItemModel, QStandardItem
from dgp.core import OID, Icon
from dgp.core.hdf5_manager import HDF5Manager
from dgp.core.models.datafile import DataFile
from dgp.core.models.dataset import DataSet, DataSegment
from dgp.core.types.enumerations import DataType
from dgp.lib.etc import align_frames
from dgp.gui.plotting.helpers import LineUpdate
from . import controller_helpers
from .gravimeter_controller import GravimeterController
from .controller_interfaces import IFlightController, IDataSetController, VirtualBaseController
from .project_containers import ProjectFolder
from .datafile_controller import DataFileController
_log = logging.getLogger(__name__)
class DataSegmentController(VirtualBaseController):
"""Controller for :class:`DataSegment`
Implements reference tracking feature allowing the mutation of segments
representations displayed on a plot surface.
"""
def __init__(self, segment: DataSegment, project,
parent: IDataSetController = None):
super().__init__(segment, project, parent=parent)
self.update()
self._menu = [
('addAction', ('Delete', self._action_delete)),
('addAction', ('Properties', self._action_properties))
]
@property
def entity(self) -> DataSegment:
return cast(DataSegment, super().entity)
@property
def menu(self):
return self._menu
def clone(self) -> 'DataSegmentController':
clone = DataSegmentController(self.entity, self.project, self.get_parent())
self.register_clone(clone)
return clone
def _action_delete(self):
self.get_parent().remove_child(self.uid, confirm=True)
def update(self):
super().update()
self.setText(str(self.entity))
self.setToolTip(repr(self.entity))
def _action_properties(self):
warnings.warn("Properties feature not yet implemented")
class DataSetController(IDataSetController):
def __init__(self, dataset: DataSet, project, flight: IFlightController):
super().__init__(model=dataset, project=project, parent=flight)
self.setIcon(Icon.PLOT_LINE.icon())
self._grav_file = DataFileController(self.entity.gravity, self.project, self)
self._traj_file = DataFileController(self.entity.trajectory, self.project, self)
self._child_map = {DataType.GRAVITY: self._grav_file,
DataType.TRAJECTORY: self._traj_file}
self._segments = ProjectFolder("Segments", Icon.LINE_MODE.icon())
for segment in dataset.segments:
seg_ctrl = DataSegmentController(segment, project, parent=self)
self._segments.appendRow(seg_ctrl)
self.appendRow(self._grav_file)
self.appendRow(self._traj_file)
self.appendRow(self._segments)
self._sensor = None
if dataset.sensor is not None:
ctrl = self.project.get_child(dataset.sensor.uid)
if ctrl is not None:
self._sensor = ctrl.clone()
self.appendRow(self._sensor)
self._gravity: DataFrame = DataFrame()
self._trajectory: DataFrame = DataFrame()
self._dataframe: DataFrame = DataFrame()
self._channel_model = QStandardItemModel()
self._menu_bindings = [ # pragma: no cover
('addAction', ('Open', lambda: self.model().item_activated(self.index()))),
('addAction', ('Set Name', self._action_set_name)),
('addAction', (Icon.METER.icon(), 'Set Sensor',
self._action_set_sensor_dlg)),
('addSeparator', ()),
('addAction', (Icon.GRAVITY.icon(), 'Import Gravity',
lambda: self.project.load_file_dlg(DataType.GRAVITY, dataset=self))),
('addAction', (Icon.TRAJECTORY.icon(), 'Import Trajectory',
lambda: self.project.load_file_dlg(DataType.TRAJECTORY, dataset=self))),
('addAction', ('Align Data', self.align)),
('addSeparator', ()),
('addAction', ('Delete', self._action_delete)),
('addAction', ('Properties', self._action_properties))
]
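        # Clones are tracked weakly so they can be kept in sync without preventing garbage collection.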
self._clones: Set[DataSetController] = weakref.WeakSet()
def clone(self):
        clone = DataSetController(self.entity, self.project, self.get_parent())
self.register_clone(clone)
return clone
@property
def entity(self) -> DataSet:
return cast(DataSet, super().entity)
@property
def menu(self): # pragma: no cover
return self._menu_bindings
@property
def hdfpath(self) -> Path:
return self.project.hdfpath
@property
def series_model(self) -> QStandardItemModel:
if 0 == self._channel_model.rowCount():
self._update_channel_model()
return self._channel_model
@property
def segment_model(self) -> QStandardItemModel: # pragma: no cover
return self._segments.internal_model
@property
def columns(self) -> List[str]:
return [col for col in self.dataframe()]
def _update_channel_model(self):
df = self.dataframe()
self._channel_model.clear()
for col in df:
series_item = QStandardItem(col)
series_item.setData(df[col], Qt.UserRole)
self._channel_model.appendRow(series_item)
@property
def gravity(self) -> Union[DataFrame]:
if not self._gravity.empty:
return self._gravity
if self.entity.gravity is None:
return self._gravity
try:
self._gravity = HDF5Manager.load_data(self.entity.gravity, self.hdfpath)
except Exception:
_log.exception(f'Exception loading gravity from HDF')
finally:
return self._gravity
@property
def trajectory(self) -> Union[DataFrame, None]:
if not self._trajectory.empty:
return self._trajectory
if self.entity.trajectory is None:
return self._trajectory
try:
self._trajectory = HDF5Manager.load_data(self.entity.trajectory, self.hdfpath)
except Exception:
_log.exception(f'Exception loading trajectory data from HDF')
finally:
return self._trajectory
def dataframe(self) -> DataFrame:
if self._dataframe.empty:
self._dataframe: DataFrame = concat([self.gravity, self.trajectory], axis=1, sort=True)
return self._dataframe
def align(self): # pragma: no cover
"""
TODO: Utility of this is questionable, is it built into transform graphs?
"""
if self.gravity.empty or self.trajectory.empty:
_log.info(f'Gravity or Trajectory is empty, cannot align.')
return
from dgp.lib.gravity_ingestor import DGS_AT1A_INTERP_FIELDS
from dgp.lib.trajectory_ingestor import TRAJECTORY_INTERP_FIELDS
fields = DGS_AT1A_INTERP_FIELDS | TRAJECTORY_INTERP_FIELDS
n_grav, n_traj = align_frames(self._gravity, self._trajectory,
interp_only=fields)
self._gravity = n_grav
self._trajectory = n_traj
_log.info(f'DataFrame aligned.')
def add_datafile(self, datafile: DataFile) -> None:
if datafile.group is DataType.GRAVITY:
self.entity.gravity = datafile
self._grav_file.set_datafile(datafile)
self._gravity = | DataFrame() | pandas.DataFrame |
import gzip
import numpy as np
import pandas as pd
from scipy.sparse import coo_matrix
from typing import Optional, Union
from ..io import read_lasso
from ..obtain_dataset import fm_gene2GO
def find_nuclear_genes(
path: str = None, save: str = None, gene_num: Union[str, int] = "all"
) -> pd.DataFrame:
"""
Finding nuclear localized genes in slices based on GO annotations.
Parameters
----------
path: `str` (default: `None`)
Path to lasso file.
save: `str` (default: `None`)
Output filename.
    gene_num: `str` or `int` (default: `'all'`)
The number of nuclear localized genes. If gene_num is `'all'`, output all nuclear localized genes found.
Returns
-------
new_lasso: `pd.DataFrame`
"""
# load data
lasso = read_lasso(path=path)
lasso_genes = lasso["geneID"].unique().tolist()
# the GO terms for a particular gene list
go_data = fm_gene2GO(
gene=lasso_genes, gene_identifier="symbol", GO_namespace="cellular_component"
)
# go_data.to_excel("E14-16h_a_S09_cellular_component.xlsx", index=False)
# find nuclear-localized genes
nucleus_info = "chromosome|chromatin|euchromatin|heterochromatin|nuclear|nucleus|nucleoplasm|nucleolus|transcription factor"
nucleus_data = go_data[go_data["GO name"].str.contains(nucleus_info)]
nucleus_data = nucleus_data[~nucleus_data["GO name"].str.contains("mitochond")]
nucleus_genes = nucleus_data["gene symbol"].unique().tolist()
# remove pseudo positives
nucleus_filter_data = go_data[go_data["gene symbol"].isin(nucleus_genes)]
nucleus_filter_groups = nucleus_filter_data.groupby(["gene symbol"])["GO name"]
nucleus_filter_genes = []
for i, group in nucleus_filter_groups:
tf = group.str.contains(nucleus_info).unique().tolist()
if len(tf) == 1 and tf[0] is True:
nucleus_filter_genes.append(i)
new_lasso = lasso[lasso["geneID"].isin(nucleus_filter_genes)]
# determine the final number of genes obtained
    if gene_num != "all":
genes_exp = (
new_lasso[["geneID", "MIDCounts"]]
.groupby(["geneID"])["MIDCounts"]
.sum()
.to_frame("MIDCounts")
.reset_index()
)
genes_exp.sort_values(by=["MIDCounts", "geneID"], inplace=True, ascending=False)
top_num_genes = genes_exp["geneID"].head(gene_num)
new_lasso = new_lasso[new_lasso["geneID"].isin(top_num_genes)]
print(
f"The number of nuclear localized genes found is: {len(new_lasso['geneID'].unique())}."
)
# save
if save is not None:
new_lasso.to_csv(save, sep="\t", index=False)
return new_lasso
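# Example usage (sketch; the lasso path and output name are hypothetical):
# nuclear_lasso = find_nuclear_genes(path="E14-16h_a_S09.lasso.txt",
#                                    save="E14-16h_a_S09_nucleus.lasso.txt",
#                                    gene_num=1000)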
def mapping2lasso(
total_file, nucleus_file, cells_file: str = None, save: Optional[str] = None
) -> pd.DataFrame:
"""
Map cell type information to the original lasso file.
Parameters
----------
total_file: `str` (default: `None`)
Lasso file containing all genes.
nucleus_file: `str` (default: `None`)
        Lasso file containing only nuclear localized genes.
cells_file: `str` (default: `None`)
Matrix file generated by cell segmentation.
save: `str` (default: `None`)
Path to save the newly generated lasso file.
Returns
-------
total_cells: `pd.DataFrame`
The columns of the dataframe include 'geneID', 'x', 'y', 'MIDCounts', 'cell'.
"""
total_lasso = read_lasso(path=total_file)
nucleus_lasso = read_lasso(path=nucleus_file)
# cells processing
if cells_file.endswith(".gz"):
with gzip.open(cells_file, "r") as f:
mtx = coo_matrix(np.load(f))
else:
mtx = coo_matrix(np.load(cells_file))
x = pd.Series(mtx.row) + np.min(nucleus_lasso["x"])
y = pd.Series(mtx.col) + np.min(nucleus_lasso["y"])
value = pd.Series(mtx.data)
cells = pd.concat([x, y, value], axis=1)
cells.columns = ["x", "y", "cell"]
# map to the total lasso file
    total_cells = pd.merge(total_lasso, cells, on=["x", "y"], how="inner")
# ====================
# Necessary packages
# ====================
import numpy as np
import pandas as pd
import os
# ============================
# Functions for loading data
# ============================
def read_out(DIR_NAME, prefix='Rat'):
"""
Reads all MedPC .out files from the input directory and saves
as .raw.csv file in the same directory.
Note: This file does not resave files if they already exist. To rewrite
files the existing file must be deleted.
Parameters
----------
    DIR_NAME : str
directory containing .out files
prefix : str, optional (default ='Rat')
desired ID prefix. (ex. 'Rat' for Rat1, Rat2, Rat3, etc.)
Returns
-------
This function does not return variables, but instead saves the read data
into a new .raw.csv file.
"""
os.chdir(DIR_NAME)
for file in os.listdir(DIR_NAME):
if (file.endswith('.out')):
columns = _read_names(DIR_NAME, file, prefix)
names_idx = [i for i, s in enumerate(columns) if not 'DELETE' in s]
names = [columns[i] for i in names_idx]
data = _read_data(DIR_NAME, file, names_idx)
df = pd.DataFrame(data=data, columns=names)
NewCSVname = os.path.splitext(file)[0] + '.raw.csv'
if not os.path.isfile(NewCSVname):
df.to_csv(NewCSVname, index=False)
print('Saved ' + NewCSVname +'!')
else:
print('"' + NewCSVname + '" already exists. Delete to rewrite file.')
print('Finished reading .out files')
def read_rawcsv(fname, delete='Y'):
"""Reads and creates a pandas dataframe from a specified .raw.csv file created by read_out.
Can also delete specified columns so that only desired data is loaded.
Parameters
----------
fname : str
name of the desired file
delete : str, optional
        deletes columns labeled 'DELETE' in the .raw.csv file. 'y' for yes, 'n' for no. ('y' by default)
Returns
-------
rawcsv_df : pandas dataframe
dataframe containing .raw.csv data and corresponding IDs
"""
rawcsv_df = pd.read_csv(fname, sep=',')
if delete.lower() in ['y', 'yes']:
        rawcsv_df.drop(columns=rawcsv_df.filter(regex='DELETE').columns, inplace=True)
print('Dropped "DELETE" columns in ' + fname)
else:
print('Did not drop "DELETE" columns in ' + fname)
return rawcsv_df
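# Illustrative end-to-end sketch (directory and file names are hypothetical):
# convert every MedPC .out file in a session folder, then load one converted file.
def _example_read_workflow(session_dir='/path/to/session'):
    read_out(session_dir, prefix='Rat')              # writes <name>.raw.csv next to each .out file
    return read_rawcsv('Day1_Conditioning.raw.csv')  # 'DELETE' columns dropped by default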
def read_metadata(metadata_dir, sep=','):
"""Find and load .metadata.csv files from a given directory
Parameters
----------
metadata_dir : str
the directory where the .metadata.csv file is located
sep : str, optional
        the separator used to read the csv file (default: ',')
Returns
-------
metadata : pandas dataframe
dataframe containing the respective metadata
"""
os.chdir(metadata_dir) # change directory
for fname in os.listdir(metadata_dir): # search directory
if (fname.endswith('.metadata.csv')): # find metadata
metadata = pd.read_csv(os.path.join(metadata_dir, fname), sep) # open metadata
print('Metadata from ' +fname+ ' successfully loaded')
return metadata
def _read_names(DIR_NAME, file, prefix):
"""
Reads the column names from .out files and adds a prefix if none exists
Parameters
----------
    DIR_NAME : str
directory containing .out files
file : str
.out file to read
prefix : str, optional (default ='Rat')
desired ID prefix. (ex. 'Rat' for Rat1, Rat2, Rat3, etc.)
Returns
-------
names : list
list of column names
"""
# read row with names from .out
names = pd.read_csv(os.path.join(DIR_NAME, file),
engine = 'python',
skiprows = 10,
nrows = 1,
header= 0,
sep = 'Subject ID ',
skipinitialspace = True)
names = names.drop(names.columns[0], axis=1) # drop first blank column
names = names.columns.ravel() # remove brackets from numbers
# if int change to char and add prefix
if names.dtype.type is np.int_:
names = np.char.mod('%d', names)
names = [prefix + i for i in names]
# np array to list and remove whitespace
names = names.tolist()
names = [x.strip() for x in names]
return names
def _read_data(DIR_NAME, file, index):
"""
Reads the data from specified columns in .out files
Parameters
----------
    DIR_NAME : str
directory containing .out files
file : str
.out file to read
index : list
indexes of the columns containing behavioral data.
(i.e. not 'DELETE' columns)
Returns
-------
data : array
an array (matrix) of threshold data from .out files
"""
df = pd.read_csv(os.path.join(DIR_NAME, file),
delimiter='\t',
skiprows = 13,
header=None,)
df = df.drop(df.columns[0], axis=1) # drop first column (blank)
data = df.iloc[:, index].values # only get desired indexes
return data
# ==================================
# Functions for detecting freezing
# ==================================
def detect_freezing(Threshold_df, threshold = 10):
"""
Detects freezing using a threshold from raw motion (MedPC Threshold) data. This function
loops through the array one row at a time and determines if the values are < 'threshold' for
at least one second. A new array of 1s and 0s are created denoting freezing and not freezing,
respectively.
Note: This code will need to be updated with inputs defining Fs and freezing (ex. immobile for
< 1 sec or more/less) to be broadly useful outside the Maren Lab.
Parameters
----------
Threshold_df : pandas dataframe
dataframe generated from readRaw()
threshold : int, optional
desired threshold, 10 by default
Returns
-------
Behav_df : pandas dataframe
dataframe containing both freezing (key: 'Freezing') and raw motion (key: 'Threshold') data.
Freezing data is an array of 1s and 0s denoting freezing and not-freezing.
"""
Threshold_values = Threshold_df.values
rows = np.size(Threshold_values, 0)
columns = np.size(Threshold_values, 1)
Freezing_np = np.zeros((rows, columns))
for c in range(columns):
for r in range(5, rows):
data2check = Threshold_values[r-5:r, c] # 1 second of data to check
if all(n <= threshold for n in data2check):
Freezing_np[r, c] = 1
Freezing_df = pd.DataFrame(Freezing_np)
Corrected_Freezing = _correct_freezing(Freezing_df)
Corrected_Freezing.columns = Threshold_df.columns
Corrected_Freezing = Corrected_Freezing.multiply(100)
Behav_df = pd.concat([Threshold_df, Corrected_Freezing],keys=['Threshold', 'Freezing'])
return Behav_df
def _correct_freezing(Freezing_df):
"""
    Corrects freezing detected by detect_freezing(), which only defines
    freezing one sample at a time and thus cannot account for freezing onset,
    where a full second of freezing must be counted. For example, at freezing onset
    5 samples (1 sec) must be below threshold values, but detect_freezing()
    only credits 0.2 sec of freezing at a time. So, this function looks for
    freezing onset and changes that '1' to a '5' to account for this.
Note: This code will also need to be updated with Fs to be used outside the Maren Lab.
Parameters
----------
Freezing_df : pandas dataframe
        dataframe generated from detect_freezing()
Returns
-------
Corrected_Freezing_df : pandas dataframe
dataframe containing 0s, 1s, and 5s denoting not-freezing, freezing, and freezing-onset.
"""
# prep data
Freezing_values = Freezing_df.values
rows = np.size(Freezing_values,0)
columns = np.size(Freezing_values,1)
# correct freezing
Freezing_final = np.zeros((rows,columns))
for c in range(0,columns):
for r in range(4,rows):
previous = Freezing_values[r-1,c]
current = Freezing_values[r,c]
if current == 0:
Freezing_final[r, c] = 0
elif current == 1 and previous == 0:
Freezing_final[r, c] = 5
elif current == 1 and previous == 1:
Freezing_final[r, c] = 1
Corrected_Freezing_df = pd.DataFrame(Freezing_final)
return Corrected_Freezing_df
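# Minimal demonstration with synthetic data (the motion values are made up and
# 5 Hz sampling is assumed, matching the hard-coded 1-second window above).
def _example_detect_freezing():
    motion = pd.DataFrame({'Rat1': [3, 2, 1, 2, 3, 2, 50, 60, 2, 1]})
    behav = detect_freezing(motion, threshold=10)
    # rows are 0 (moving), 100 (freezing) or 500 (freezing onset)
    return behav.xs('Freezing')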
# ============================
# Functions for slicing data
# ============================
def slicedata(df, n_trials, start_time, length, ITI, fs=5, Behav='Freezing'):
"""Gets timestamps then slices and averages data accordingly
Parameters
----------
df : pandas dataframe
        dataframe generated from detect_freezing()
n_trials : int, optional
number of trials
start_time : int, optional
time of first onset of specified stimulus (CS, US, ISI, etc.)
length : int, optional
length in seconds of specified stimulus
ITI : int, optional
        length in seconds of ITI
fs : int, optional
sampling frequency (default=5 Hz)
Behav : str
desired behavioral data ('Freezing' or 'Threshold'; default='Freezing')
Returns
-------
final_data
a pandas dataframe of averaged data slices from the specified stimulus
"""
# TODO: Need to make this its own function. Would be useful for other things
# get timestamps
timestamps = np.zeros([n_trials,2],dtype=int) # initialize
for trial in range(0,n_trials): # loop through trials
timestamps[trial] = [start_time+ITI*trial, start_time+length+ITI*trial] # start, stop timestamps
# slice data with timestamps and average
final_data = np.array([]) # initialize
for (start, stop) in timestamps: # loop through timestamps
averaged_trial = df.xs(Behav)[start*fs+1:stop*fs+1].mean().values # slice and average
final_data = np.append(final_data, averaged_trial) # append
return final_data
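# Sketch of the standalone timestamp helper the TODO above asks for (the name is
# hypothetical; the arithmetic mirrors the loop inside slicedata).
def _get_stim_timestamps(n_trials, start_time, length, ITI):
    return [(start_time + ITI * t, start_time + length + ITI * t)
            for t in range(n_trials)]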
def get_averagedslices(df,Trials,BL=180,CS=10,US=2,ISI=58,fs=5,Behav='Freezing',Group=[]):
"""Slices and averages data for baseline and individual stimuli within trials
Parameters
----------
df : pandas dataframe
        dataframe generated from detect_freezing()
BL : int, optional
length of baseline period in seconds
CS : int, optional
lengths of CS in seconds
US : int, optional
length in seconds of US
ISI : int, optional
        length in seconds of ITI
Trials : int, optional
        number of trials
fs : int, optional
sampling frequency (default=5 Hz)
Behav : str
desired behavioral data ('Freezing' or 'Threshold'; default='Freezing')
Group: str
group metadata to assign. mainly useful for within-subjects data where the same subjects
have different experimental conditions. Leave as default if not within-subjects.
Returns
-------
BL_df
a pandas dataframe with the averaged baseline data
Trials_df
a pandas dataframe with averaged CS, US, and ISI data
"""
# Baseline
ID = df.xs(Behav).columns # get IDs
BL_timestamps = [0,BL*fs] # BL timestamps
BL_data = df.xs(Behav)[BL_timestamps[0]:BL_timestamps[1]].mean().values # slice and average data
dict4pandas = {'ID': ID, 'BL': BL_data} # BL dataframe
BL_df = pd.DataFrame(dict4pandas)
# Trial prep
ID_metadata = np.tile(ID,Trials) # ID metadata
Trial_metadata = [ele for ele in range(1,Trials+1) for i in range(len(ID))] # trial metadata length of n_rats
ITI = CS+US+ISI # ITI length
# CS
CS_data = slicedata(df, n_trials=Trials, start_time=BL, # slice data
length=CS, ITI=ITI, Behav=Behav)
dict4pandas = {'ID': ID_metadata, 'Trial': Trial_metadata, 'CS': CS_data} # CS dataframe
CS_df = pd.DataFrame(dict4pandas)
# US
start_time = BL + CS # start time
US_data = slicedata(df, n_trials=Trials, start_time=start_time, # slice data
length=US, ITI=ITI, Behav=Behav)
dict4pandas = {'ID': ID_metadata, 'Trial': Trial_metadata, 'US': US_data} # US dataframe
US_df = pd.DataFrame(dict4pandas)
# ISI
start_time = BL + CS + US # start time
ISI_data = slicedata(df, n_trials=Trials, start_time=start_time, # slice data
length = ISI, ITI = ITI, Behav=Behav)
dict4pandas = {'ID': ID_metadata, 'Trial': Trial_metadata, 'ISI': ISI_data} # ISI dataframe
ISI_df = pd.DataFrame(dict4pandas)
# Make Trials df
Trials_df = pd.merge(CS_df, US_df, on=['ID', 'Trial'], copy='True') # combine CS and US data
Trials_df = pd.merge(Trials_df, ISI_df, on=['ID', 'Trial'], copy='True') # add ISI data
# Add Group metadata, if any
if any(Group):
Group_metadata = [ele for ele in [Group] for i in range(len(ID))] # group metadata
dict4pandas = {'ID': ID, 'Group': Group_metadata} # group dataframe
Group_df = pd.DataFrame(dict4pandas)
# merge group df to others
        BL_df = pd.merge(Group_df,BL_df,on='ID',copy='True')
## == JHU data location == ##
path_JHUdata = '../../COVID-19/csse_covid_19_data/csse_covid_19_daily_reports/'
## == province_state translation dictionary == ##
stateTranslation = [
# ['Alberta', 'AB'],
['British Columbia', 'BC'],
# ['Manitoba', 'MB'],
# ['New Brunswick', 'NB'],
['Newfoundland and Labrador', 'NL'],
['Northwest Territories', 'NWT'],
# ['Nova Scotia', 'NS'],
# ['Nunavut', 'NU'],
['Ontario', 'ON'],
['Prince Edward Island', 'PEI'],
['Quebec', 'QC'],
# ['Saskatchewan', 'SK'],
# ['Yukon', 'YT'],
]
stateDict = {}
for el in stateTranslation:
stateDict[ el[1] ] = el[0]
## == translate inconsistent provine_state names == ##
def translateState(row):
state = str(row["Province_State"]).strip()
if ("," in state):
if state == "Calgary, Alberta" or state == "Edmonton, Alberta":
row["Province_State"] = "Alberta"
else:
stateCode = state[-2:]
if stateCode in stateDict:
row["Province_State"] = stateDict[stateCode]
return row
## == process file == ##
def processDate(date):
print(date)
# read file
df = pd.read_csv(path_JHUdata + date + ".csv")
# rename column headings for consistency
if 'Country/Region' in df:
df = df.rename(columns={
'Country/Region': 'Country_Region',
'Province/State': 'Province_State'
})
# remove cruise ship entries
df = df[ df['Province_State'].str.contains('Diamond Princess') != True ]
df = df[ df['Province_State'].str.contains('Grand Princess') != True ]
# translate province_state names
df = df.apply( translateState, axis=1 )
# group and sum country data, set province_state as empty
countrydata = df.groupby(['Country_Region']).agg('sum').reset_index()
countrydata['Province_State'] = ""
df = countrydata.sort_values(by='Country_Region')
# calculate 'active' cases if not provided
if 'Active' not in df:
df['Active'] = df['Confirmed'] - df['Recovered'] - df['Deaths']
    else:
        # recompute the whole 'Active' column if any provided value is missing or non-positive
        active = pd.to_numeric(df['Active'], errors='coerce')
        if (active.isna() | (active <= 0)).any():
            df['Active'] = df['Confirmed'] - df['Recovered'] - df['Deaths']
# select only columns used for website
df = df[ ["Country_Region", "Province_State", "Confirmed", "Recovered", "Active", "Deaths"] ]
# set date of current data
df["Date"] = datetime.strptime(date, '%m-%d-%Y')
return df
## == main process == ##
import pandas as pd
import os
import time
from datetime import datetime
# start time
startTime = time.time()
# create output storage variable
df = pd.DataFrame()
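# The driver loop is truncated here; a hedged sketch of how it likely continues
# (file iteration order and output name are assumptions, not from the source):
# for fname in sorted(os.listdir(path_JHUdata)):
#     if fname.endswith('.csv'):
#         df = pd.concat([df, processDate(fname[:-4])], ignore_index=True)
# df.to_csv('cases.csv', index=False)
# print('Finished in %.1f s' % (time.time() - startTime))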
# -*- coding: utf-8 -*-
# Copyright (c) 2018-2022, earthobservations developers.
# Distributed under the MIT License. See LICENSE for more info.
import json
import logging
from datetime import datetime
from enum import Enum
from typing import List, Optional, Tuple, Union
import pandas as pd
from numpy.distutils.misc_util import as_list
from wetterdienst.core.scalar.request import ScalarRequestCore
from wetterdienst.core.scalar.values import ScalarValuesCore
from wetterdienst.metadata.columns import Columns
from wetterdienst.metadata.datarange import DataRange
from wetterdienst.metadata.kind import Kind
from wetterdienst.metadata.period import Period, PeriodType
from wetterdienst.metadata.provider import Provider
from wetterdienst.metadata.resolution import Resolution, ResolutionType
from wetterdienst.metadata.timezone import Timezone
from wetterdienst.metadata.unit import OriginUnit, SIUnit
from wetterdienst.util.cache import CacheExpiry
from wetterdienst.util.network import download_file
from wetterdienst.util.parameter import DatasetTreeCore
log = logging.getLogger(__file__)
class EaHydrologyResolution(Enum):
MINUTE_15 = Resolution.MINUTE_15.value
HOUR_6 = Resolution.HOUR_6.value
DAILY = Resolution.DAILY.value
class EaHydrologyParameter(DatasetTreeCore):
class MINUTE_15(Enum):
FLOW = "flow"
GROUNDWATER_LEVEL = "groundwater_level"
class HOUR_6(Enum):
FLOW = "flow"
GROUNDWATER_LEVEL = "groundwater_level"
class DAILY(Enum):
FLOW = "flow"
GROUNDWATER_LEVEL = "groundwater_level"
PARAMETER_MAPPING = {"flow": "Water Flow", "groundwater_level": "Groundwater level"}
class EaHydrologyUnit(DatasetTreeCore):
class MINUTE_15(Enum):
FLOW = OriginUnit.CUBIC_METERS_PER_SECOND.value, SIUnit.CUBIC_METERS_PER_SECOND.value
GROUNDWATER_LEVEL = OriginUnit.METER.value, SIUnit.METER.value
class HOUR_6(Enum):
FLOW = OriginUnit.CUBIC_METERS_PER_SECOND.value, SIUnit.CUBIC_METERS_PER_SECOND.value
GROUNDWATER_LEVEL = OriginUnit.METER.value, SIUnit.METER.value
class DAILY(Enum):
FLOW = OriginUnit.CUBIC_METERS_PER_SECOND.value, SIUnit.CUBIC_METERS_PER_SECOND.value
GROUNDWATER_LEVEL = OriginUnit.METER.value, SIUnit.METER.value
class EaHydrologyPeriod(Enum):
HISTORICAL = Period.HISTORICAL.value
class EaHydrologyValues(ScalarValuesCore):
_base_url = "https://environment.data.gov.uk/hydrology/id/stations/{station_id}.json"
@property
def _irregular_parameters(self) -> Tuple[str]:
return ()
@property
def _string_parameters(self) -> Tuple[str]:
return ()
@property
def _date_parameters(self) -> Tuple[str]:
return ()
@property
def _data_tz(self) -> Timezone:
return Timezone.UK
def _collect_station_parameter(self, station_id: str, parameter: Enum, dataset: Enum) -> pd.DataFrame:
endpoint = self._base_url.format(station_id=station_id)
payload = download_file(endpoint, CacheExpiry.NO_CACHE)
measures_list = json.loads(payload.read())["items"]
measures_list = (
            pd.Series(measures_list)
import numpy as np
import time
from tqdm import tqdm
import pandas as pd
from drawer import NetworkDrawer
import cv2
# want to test this hypothesis:
# randomly selecting peers is inviable for producing the lowest average + max path length between any two nodes
MAX_PEERS = 8
class Peer(object):
def __init__(self, index, ttl, bandwidth=8):
self.index = index
self.peers = []
self.ttl = int(ttl)
self.alive = 0
self.bandwidth = max(4, int(bandwidth))
self.team = -1
def random_connect(self, peers_list):
if len(self.peers) == 0:
# connect first to a random peer
free_peers = list(
filter(
lambda x: x not in self.peers
and len(x.peers) < x.bandwidth
and x.index != self.index,
peers_list,
)
)
if len(free_peers):
i = np.random.randint(len(free_peers))
# print("rc connect")
self.connect(free_peers[i])
if len(self.peers) > 0 and len(self.peers) < self.bandwidth:
# now bfs through peer's peers and add more connections
queue = self.peers[:]
closed = set(self.peers + [self])
while len(queue):
v = queue.pop(0)
if len(v.peers) < v.bandwidth and v not in closed:
# found a free peer!
# print("bfs connect")
self.connect(v)
if len(self.peers) == self.bandwidth:
break
closed.add(v)
queue.extend(
list(
filter(
lambda peer: peer not in closed and peer not in queue,
v.peers,
)
)
)
for peer in self.peers:
assert self in peer.peers
def connect(self, peer):
assert len(self.peers) < self.bandwidth
assert len(peer.peers) < peer.bandwidth
assert peer not in self.peers and self not in peer.peers
peer.peers.append(self)
self.peers.append(peer)
def disconnect(self, peer):
assert peer in self.peers and self in peer.peers
peer.peers.remove(self)
self.peers.remove(peer)
def step(self, peers_list):
self.alive += 1
for peer in self.peers:
assert self in peer.peers, f"{self} {peer}"
if self.alive > self.ttl:
# disconnect from the network
for peer in self.peers[:]:
self.disconnect(peer)
peers_list.remove(self)
return
if len(self.peers) < self.bandwidth:
self.random_connect(peers_list)
def bfs(self, target):
queue = self.peers[:]
closed = set()
came_from = {}
while len(queue):
v = queue.pop(0)
if v.index == target.index:
# found target
path = [v]
while v in came_from:
v = came_from[v]
path.append(v)
return path
else:
closed.add(v)
for peer in filter(
lambda peer: peer not in closed and peer not in queue, v.peers
):
queue.append(peer)
came_from[peer] = v
def __repr__(self):
return f"<Peer {self.index} {self.ttl} {list(map(lambda x : x.index, self.peers))}>"
def simulate(n=10000, ttl_mu=20, ttl_sigma=5, neighbours_mu=8, draw=False):
network = []
drawer = NetworkDrawer()
for i in tqdm(range(n)):
# print(f"iter {i}")
# print(network)
for peer in network:
peer.step(network)
# print(network)
for peer in network:
for other in peer.peers:
assert other in network
if len(network) < 1000:
new_peer = Peer(
i,
np.random.normal(ttl_mu, ttl_sigma),
np.random.normal(neighbours_mu, 2),
)
network.append(new_peer)
new_peer.random_connect(network)
if len(new_peer.peers) == 0 and i > 0:
# new peer couldn't connect, remove them
network.remove(new_peer)
# if draw and i % 1000 == 0:
# for i in range(1):
# drawer.step(network)
total_dist = 0
    max_dist, min_dist = 0, float("inf")  # min_dist must start high, otherwise min() leaves it at 0
total_pairs = 0
no_paths = 0
# print(network)
for a in network:
for b in network:
if b.index > a.index:
total_pairs += 1
path = a.bfs(b)
if path is not None:
total_dist += len(path)
max_dist = max(max_dist, len(path))
min_dist = min(min_dist, len(path))
else:
no_paths += 1
return network, total_pairs, total_dist, max_dist, min_dist, no_paths
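# Small smoke-test sketch for simulate(); the parameter values are arbitrary and
# the run is kept short so the all-pairs BFS stays cheap.
def _example_simulate():
    net, pairs, dist, max_d, min_d, no_paths = simulate(n=200, ttl_mu=50, neighbours_mu=8)
    reachable = pairs - no_paths
    return dist / reachable if reachable else float('nan')  # mean path length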
data = {
"ttl_mu": [],
"nmax_mu": [],
"network": [],
"pairs": [],
"dist": [],
"max_dist": [],
"min_dist": [],
"no_paths": [],
"subsets": [],
}
for ttl_mu in [20, 50, 100, 200]:
for neighbours_mu in [4, 6, 8, 10, 12]:
network, pairs, dist, max_dist, min_dist, no_paths = simulate(
2000, ttl_mu=ttl_mu, neighbours_mu=neighbours_mu, draw=True
)
data["ttl_mu"].append(ttl_mu)
data["nmax_mu"].append(neighbours_mu)
data["network"].append(len(network))
data["pairs"].append(pairs)
data["dist"].append(dist)
data["max_dist"].append(max_dist)
data["min_dist"].append(min_dist)
data["no_paths"].append(no_paths)
drawer = NetworkDrawer()
subsets = drawer.connect_subsets(network)
data["subsets"].append(subsets)
df = pd.DataFrame.from_dict(data)
# -*- coding: utf-8 -*-
# Copyright (c) 2018-2021, earthobservations developers.
# Distributed under the MIT License. See LICENSE for more info.
import logging
import operator
from abc import abstractmethod
from enum import Enum
from typing import Dict, Generator, List, Tuple, Union
import numpy as np
import pandas as pd
from pint import Quantity
from pytz import timezone
from tqdm import tqdm
from wetterdienst.core.scalar.result import StationsResult, ValuesResult
from wetterdienst.metadata.columns import Columns
from wetterdienst.metadata.resolution import Resolution
from wetterdienst.metadata.timezone import Timezone
from wetterdienst.metadata.unit import REGISTRY, OriginUnit, SIUnit
from wetterdienst.util.enumeration import parse_enumeration_from_template
from wetterdienst.util.logging import TqdmToLogger
log = logging.getLogger(__name__)
class ScalarValuesCore:
""" Core for sources of point data where data is related to a station """
# Fields for type coercion, needed for separation from fields with actual data
# that have to be parsed differently when having data in tabular form
@property
def _meta_fields(self) -> List[str]:
"""
Metadata fields that are independent of actual values and should be parsed
differently
:return: list of strings representing the metadata fields/columns
"""
if not self.stations.stations.tidy:
fields = [
Columns.STATION_ID.value,
Columns.DATE.value,
]
else:
fields = [
Columns.STATION_ID.value,
Columns.DATASET.value,
Columns.PARAMETER.value,
Columns.DATE.value,
Columns.VALUE.value,
Columns.QUALITY.value,
]
return fields
# Fields for date coercion
_date_fields = [Columns.DATE.value, Columns.FROM_DATE.value, Columns.TO_DATE.value]
# TODO: add data type (forecast, observation, ...)
# @property
# @abstractmethod
# def _has_quality(self) -> bool:
# """Attribute that tells if a weather service has quality, which otherwise will
# have to be set to NaN"""
# pass
@property
def data_tz(self) -> timezone:
""" Timezone of the published data """
return timezone(self._data_tz.value)
@property
@abstractmethod
def _data_tz(self) -> Timezone:
""" Timezone enumeration of published data. """
pass
@property
@abstractmethod
def _irregular_parameters(self) -> Tuple[str]:
"""Declaration of irregular parameters which will have to be parsed differently
then others e.g. when a parameter is a date."""
pass
@property
@abstractmethod
def _integer_parameters(self) -> Tuple[str]:
""" Integer parameters that will be parsed to integers. """
pass
@property
@abstractmethod
def _string_parameters(self) -> Tuple[str]:
""" String parameters that will be parsed to integers. """
pass
@property
def _complete_dates(self) -> pd.DatetimeIndex:
"""
Complete datetime index for the requested start and end date, used for
building a complementary pandas DataFrame with the date column on which
other DataFrames can be joined on
:return: pandas.DatetimeIndex
"""
start_date, end_date = self.stations.start_date, self.stations.end_date
if self.stations.stations.resolution == Resolution.MONTHLY:
end_date += pd.Timedelta(days=31)
elif self.stations.stations.resolution == Resolution.ANNUAL:
            end_date += pd.Timedelta(days=366)
date_range = pd.date_range(
start_date,
end_date,
freq=self.stations.frequency.value,
tz=self.data_tz,
)
return date_range
@property
def _base_df(self) -> pd.DataFrame:
"""
Base dataframe which is used for creating empty dataframes if no data is
found or for merging other dataframes on the full dates
:return: pandas DataFrame with a date column with complete dates
"""
return pd.DataFrame({Columns.DATE.value: self._complete_dates})
def convert_values_to_si(self, df: pd.DataFrame, dataset) -> pd.DataFrame:
"""
Function to convert values to metric units with help of conversion factors
:param df: pandas DataFrame that should be converted to SI units
:param dataset: dataset for which the conversion factors are created
:return: pandas DataFrame with converted (SI) values
"""
def _convert_values_to_si(series):
"""
Helper function to apply conversion factors column wise to a pandas DataFrame
:param series: pandas Series that should be converted
:return: converted pandas Series
"""
op, factor = conversion_factors.get(series.name, (None, None))
if not op or not factor:
return series
return op(series, factor)
conversion_factors = self._create_conversion_factors(dataset)
df = df.apply(_convert_values_to_si, axis=0)
return df
def _create_conversion_factors(
self, dataset
) -> Dict[str, Tuple[Union[operator.add, operator.mul], float]]:
"""
Function to create conversion factors based on a given dataset
:param dataset: dataset for which conversion factors are created
:return: dictionary with conversion factors for given parameter name
"""
dataset = dataset.name
dataset_accessor = self.stations.stations._dataset_accessor
if self.stations.stations._unique_dataset:
units = self.stations.stations._unit_tree[dataset_accessor]
else:
units = self.stations.stations._unit_tree[dataset_accessor][dataset]
conversion_factors = {}
# TODO eventually we may split this into smaller functions
for parameter in units:
origin_unit, si_unit = parameter.value
# Get parameter name
parameter = parameter.name
if self.stations.stations._unique_dataset:
parameter_value = self.stations.stations._dataset_tree[
dataset_accessor
][parameter].value
else:
parameter_value = self.stations.stations._dataset_tree[
dataset_accessor
][dataset][parameter].value
if si_unit == SIUnit.KILOGRAM_PER_SQUARE_METER.value:
# Fixed conversion factors to kg / m², as it only applies
# for water with density 1 g / cm³
if origin_unit == OriginUnit.MILLIMETER.value:
conversion_factors[parameter_value] = (operator.mul, 1)
else:
raise ValueError(
"manually set conversion factor for precipitation unit"
)
elif si_unit == SIUnit.DEGREE_KELVIN.value:
# Apply offset addition to temperature measurements
# Take 0 as this is appropriate for adding on other numbers
# (just the difference)
degree_offset = Quantity(0, origin_unit).to(si_unit).magnitude
conversion_factors[parameter_value] = (operator.add, degree_offset)
elif si_unit == SIUnit.PERCENT.value:
factor = REGISTRY(str(origin_unit)).to(str(si_unit)).magnitude
conversion_factors[parameter_value] = (operator.mul, factor)
else:
# For multiplicative units we need to use 1 as quantity to apply the
# appropriate factor
conversion_factors[parameter_value] = (
operator.mul,
Quantity(1, origin_unit).to(si_unit).magnitude,
)
return conversion_factors
def __init__(self, stations: StationsResult) -> None:
self.stations = stations
@classmethod
def from_stations(cls, stations: StationsResult):
return cls(stations)
def __eq__(self, other):
""" Equal method of request object """
return (
self.stations.station_id == other.stations.station_id
and self.stations.parameter == other.stations.parameter
and self.stations.start_date == other.stations.start_date
and self.stations.end_date == other.stations.end_date
)
pass
def __str__(self):
""" Str representation of request object """
# TODO: include source
# TODO: include data type
station_ids_joined = "& ".join(
[str(station_id) for station_id in self.stations.station_id]
)
parameters_joined = "& ".join(
[
parameter.value
for parameter, parameter_set in self.stations.stations.parameter
]
)
return ", ".join(
[
f"station_ids {station_ids_joined}",
f"parameters {parameters_joined}",
str(self.stations.start_date),
str(self.stations.end_date),
]
)
pass
def _create_empty_station_parameter_df(
self, station_id: str, parameter: Enum, dataset: Enum
) -> pd.DataFrame:
"""
Function to create an empty DataFrame
:param station_id:
:param parameter:
:return:
"""
dataset_tree = self.stations.stations._dataset_tree
resolution = self.stations.stations.resolution
# if parameter is a whole dataset, take every parameter from the dataset instead
if parameter == dataset:
if self.stations.stations._unique_dataset:
parameter = [*dataset_tree[resolution.name]]
else:
parameter = [*dataset_tree[resolution.name][dataset.name]]
if self.stations.stations.tidy:
if not self.stations.stations.start_date:
return pd.DataFrame(None, columns=self._meta_fields)
data = []
for par in pd.Series(parameter):
if par.name.startswith("QUALITY"):
continue
par_df = self._base_df
par_df[Columns.PARAMETER.value] = par.value
data.append(par_df)
df = pd.concat(data)
df[Columns.STATION_ID.value] = station_id
df[Columns.DATASET.value] = dataset.name
df[Columns.VALUE.value] = pd.NA
df[Columns.QUALITY.value] = pd.NA
return df
else:
parameter = pd.Series(parameter).map(lambda x: x.value).tolist()
# Base columns
columns = [*self._meta_fields, *parameter]
if self.stations.stations.start_date:
return pd.DataFrame(None, columns=columns)
df = self._base_df
df = df.reindex(columns=columns)
df[Columns.STATION_ID.value] = station_id
return df
def _build_complete_df(
self, df: pd.DataFrame, station_id: str, parameter: Enum, dataset: Enum
) -> pd.DataFrame:
# For cases where requests are not defined by start and end date but rather by
# periods, use the returned df without modifications
# We may put a standard date range here if no data is found
if not self.stations.start_date:
return df
if parameter != dataset or not self.stations.stations.tidy:
df = pd.merge(
left=self._base_df,
right=df,
left_on=Columns.DATE.value,
right_on=Columns.DATE.value,
how="left",
)
df[Columns.STATION_ID.value] = station_id
if self.stations.tidy:
df[Columns.PARAMETER.value] = parameter.value
df[Columns.PARAMETER.value] = pd.Categorical(
df[Columns.PARAMETER.value]
)
if dataset:
df[Columns.DATASET.value] = dataset.name.lower()
df[Columns.DATASET.value] = pd.Categorical(
df[Columns.DATASET.value]
)
return df
else:
data = []
for parameter, group in df.groupby(Columns.PARAMETER.value, sort=False):
if self.stations.stations._unique_dataset:
parameter_ = parse_enumeration_from_template(
parameter,
self.stations.stations._parameter_base[
self.stations.resolution.name
],
)
else:
parameter_ = parse_enumeration_from_template(
parameter,
self.stations.stations._dataset_tree[
self.stations.resolution.name
][dataset.name],
)
df = pd.merge(
left=self._base_df,
right=group,
left_on=Columns.DATE.value,
right_on=Columns.DATE.value,
how="left",
)
df[Columns.STATION_ID.value] = station_id
df[Columns.PARAMETER.value] = parameter_.value
df[Columns.DATASET.value] = dataset.name.lower()
df[Columns.DATASET.value] = pd.Categorical(df[Columns.DATASET.value])
data.append(df)
return pd.concat(data)
def _organize_df_columns(self, df: pd.DataFrame) -> pd.DataFrame:
"""
Method to reorder index to always have the same order of columns
:param df:
:return:
"""
columns = self._meta_fields
columns.extend(df.columns.difference(columns, sort=False))
df = df.reindex(columns=columns)
return df
def query(self) -> Generator[ValuesResult, None, None]:
"""
Core method for data collection, iterating of station ids and yielding a
DataFrame for each station with all found parameters. Takes care of type
coercion of data, date filtering and humanizing of parameters.
:return:
"""
for station_id in self.stations.station_id:
# TODO: add method to return empty result with correct response string e.g.
# station id not available
station_data = []
for parameter, dataset in self.stations.parameter:
parameter_df = self._collect_station_parameter(
station_id, parameter, dataset
)
if parameter_df.empty:
continue
# Merge on full date range if values are found to ensure result
# even if no actual values exist
self._coerce_date_fields(parameter_df)
parameter_df = self._coerce_parameter_types(parameter_df)
if self.stations.stations.si_units:
parameter_df = self.convert_values_to_si(parameter_df, dataset)
if self.stations.stations.tidy:
parameter_df = self.tidy_up_df(parameter_df, dataset)
if parameter != dataset:
parameter_df = parameter_df[
parameter_df[Columns.PARAMETER.value]
== parameter.value.lower()
]
parameter_df = self._build_complete_df(
parameter_df, station_id, parameter, dataset
)
parameter_df = self._organize_df_columns(parameter_df)
station_data.append(parameter_df)
try:
station_df = pd.concat(station_data, ignore_index=True)
except ValueError:
                station_df = self._create_empty_station_parameter_df(
                    station_id, parameter, dataset
                )
station_df = self._coerce_meta_fields(station_df)
# Filter for dates range if start_date and end_date are defined
if not station_df.empty and self.stations.start_date:
station_df = station_df[
(station_df[Columns.DATE.value] >= self.stations.start_date)
& (station_df[Columns.DATE.value] <= self.stations.end_date)
]
station_df = self._coerce_parameter_types(station_df)
# Assign meaningful parameter names (humanized).
if self.stations.humanize:
station_df = self._humanize(station_df)
# Empty dataframe should be skipped
if station_df.empty:
continue
# TODO: add more meaningful metadata here
yield ValuesResult(stations=self.stations, df=station_df)
@abstractmethod
def _collect_station_parameter(
self, station_id: str, parameter: Enum, dataset: Enum
) -> pd.DataFrame:
"""
Implementation of data collection for a station id plus parameter from the
specified weather service. Takes care of the gathering of the data and putting
it in shape, either tabular with one parameter per column or tidied with a set
of station id, date, parameter, value and quality in one row.
:param station_id: station id for which the data is being collected
:param parameter: parameter for which the data is collected
:param dataset: dataset for which the data is collected
:return: pandas.DataFrame with the data for given station id and parameter
"""
pass
def tidy_up_df(self, df: pd.DataFrame, dataset: Enum) -> pd.DataFrame:
"""
Function to tidy a DataFrame
:param df:
:param dataset:
:return:
"""
df = self._tidy_up_df(df, dataset)
df[Columns.DATASET.value] = pd.Series(
dataset.name.lower(), index=df.index, dtype=str
)
df[Columns.VALUE.value] = pd.to_numeric(df[Columns.VALUE.value]).astype(float)
if Columns.QUALITY.value not in df:
df[Columns.QUALITY.value] = np.nan
df[Columns.QUALITY.value] = pd.to_numeric(df[Columns.QUALITY.value]).astype(
float
)
df.loc[df[Columns.VALUE.value].isna(), Columns.QUALITY.value] = np.NaN
return df
@abstractmethod
def _tidy_up_df(self, df: pd.DataFrame, dataset) -> pd.DataFrame:
"""
Abstract method to be implemented by services to tidy a DataFrame
:param df:
:return:
"""
pass
def _coerce_date_fields(self, df: pd.DataFrame) -> pd.DataFrame:
"""
Function for coercion of possible date fields
:param df:
:return:
"""
for column in (
Columns.DATE.value,
Columns.FROM_DATE.value,
Columns.TO_DATE.value,
):
try:
df[column] = self._coerce_dates(df[column])
except KeyError:
pass
return df
def _coerce_meta_fields(self, df: pd.DataFrame) -> pd.DataFrame:
"""
Method that coerces meta fields. Those fields are expected to be found in the
        DataFrame in a columnar shape. Those are basically the station id and the date
        fields. Furthermore, if the data is tidied, parameter and quality columns can be
        found as well. For station id, parameter and quality those columns are additionally
        coerced to categories to reduce the memory consumption of the DataFrame.
:param df: pandas.DataFrame with the "fresh" data
:return: pandas.DataFrame with meta fields being coerced
"""
df[Columns.STATION_ID.value] = self._parse_station_id(
df[Columns.STATION_ID.value]
).astype("category")
if self.stations.stations.tidy:
for column in (Columns.DATASET.value, Columns.PARAMETER.value):
df[column] = self._coerce_strings(df[column]).astype("category")
df[Columns.VALUE.value] = pd.to_numeric(df[Columns.VALUE.value]).astype(
float
)
df[Columns.QUALITY.value] = pd.to_numeric(df[Columns.QUALITY.value]).astype(
float
)
return df
def _parse_station_id(self, series: pd.Series) -> pd.Series:
"""
Dedicated method for parsing station ids, by default uses the same method as
parse_strings but could be modified by the implementation class
:param series:
:return:
"""
return self.stations.stations._parse_station_id(series)
def _coerce_dates(self, series: pd.Series) -> pd.Series:
"""
Method to parse dates in the pandas.DataFrame. Leverages the data timezone
attribute to ensure correct comparison of dates.
:param series:
:return:
"""
return pd.to_datetime(series, infer_datetime_format=True).dt.tz_localize(
self.data_tz
)
@staticmethod
def _coerce_integers(series: pd.Series) -> pd.Series:
"""
Method to parse integers for type coercion. Uses pandas.Int64Dtype() to
allow missing values.
:param series:
:return:
"""
return (
pd.to_numeric(series, errors="coerce")
.astype(pd.Float64Dtype())
.astype(pd.Int64Dtype())
)
@staticmethod
def _coerce_strings(series: pd.Series) -> pd.Series:
"""
Method to parse strings for type coercion.
:param series:
:return:
"""
return series.astype(pd.StringDtype())
@staticmethod
def _coerce_floats(series: pd.Series) -> pd.Series:
"""
Method to parse floats for type coercion.
:param series:
:return:
"""
        return pd.to_numeric(series, errors="coerce")
import pandas as pd
import bioGRID as bg
import traversalHelper as tr
import numpy as np
import os
from collections import defaultdict
from statistics import mean
from scipy import stats
def parse_uniProt_map(uniProtMapF):
df = pd.read_csv(uniProtMapF, sep='\t')
df.dropna(inplace=True)
uniProtMapping = dict(zip([i.split(";")[0] for i in df['Cross-reference (STRING)']], list(df['Gene names (primary )'])))
return uniProtMapping
def parse_STRING(ppiFile='./data/STRING/4932.protein.links.v11.0.txt'
, typeFile='./data/STRING/4932.protein.actions.v11.0.txt'
, uniProtMap='./data/UniProt/uniprot-taxonomy_559292_STRING.tab', root='./'
, wFile_GGI='./data/parsed/STRING_GGI.pkl', wFile_PPI='./data/parsed/STRING_PPI.pkl'):
ppiFile, typeFile, wFile_GGI, wFile_PPI, uniProtMap = root+ppiFile, root+typeFile, root+wFile_GGI, root+wFile_PPI, root+uniProtMap
if os.path.exists(wFile_GGI) and os.path.exists(wFile_PPI):
return pd.read_pickle(wFile_GGI), pd.read_pickle(wFile_PPI)
# Sys name (used by STRING) => gene name (used by this project)
reverseGeneMap = parse_uniProt_map(uniProtMap)
    df_STRING = pd.read_csv(ppiFile, sep=' ')
import igraph
import numpy as np
import pandas as pd
import geopandas
from shapely.geometry import LineString
from skimage.graph import MCP_Geometric, MCP
from skimage import graph
from pyproj import Transformer
from scipy import stats
def cost_tobler_hiking_function(S,symmetric=True):
"""
Applies Tobler's Hiking Function to slope data supplied in DEGREES.
From Tobler. 1993. Three Presentation on Geographical Analysis and Modeling.
Simple Example:
C = lcp.cost_tobler_hiking_function(S,symmetric=True)
Parameters:
- 'S' is an array (any dimension) of slope values in DEGREES.
    - 'symmetric' flags whether to consider slope values symmetrically. Note that this
is NOT the same as just taking the positive values. This returns an average
of the positive and negative value for the given slope.
Returns:
- 'C' a cost surface of velocity in km/hr
"""
# Convert to dz/dx
S = np.tan(np.deg2rad(S))
V = 6 * np.exp(-3.5 * np.abs(S + .05))
if symmetric:
V2 = 6 * np.exp(-3.5 * np.abs(-S + .05))
V = (V + V2) / 2
return 1 / V
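# Worked check of the cost above: on flat ground (S = 0 deg) Tobler's velocity is
# 6 * exp(-3.5 * 0.05) ~= 5.04 km/hr, so the returned cost is ~0.199 hr/km; at a
# 10 deg ascent (dz/dx ~= 0.176) velocity drops to ~2.72 km/hr (~0.37 hr/km)
# before the symmetric averaging is applied.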
def cost_rademaker(S,weight=50,pack_weight=0,terrain_coefficient=1.1,velocity=1.2):
"""
Applies Rademaker et al's model (2012) to slope values for LCP calculation.
Simple Example:
C = lcp.cost_rademaker(S,weight=50,pack_weight=0,terrain_coefficient=1.1,velocity=1.2)
Parameters:
- 'S' is an array (any dimension) of slope values in DEGREES.
- 'weight' is weight of traveler is given in kg
- 'pack_weight' is cargo weight, given in kg
- 'terrain_coefficient' is a value to introduce "friction". Values greater than
one have more than 'average' friction.
- 'velocity' is mean walking speed in meters per second
Returns:
- 'C' a cost surface of shape S.
"""
# Rademaker assumes a grade in percent (0 to 100, rather than 0 to 1):
    G = 100 * np.tan(np.deg2rad(S))  # percent grade = 100 * rise/run
W = weight
L = pack_weight
tc = terrain_coefficient
V = velocity
# Cost, in MWatts
MW = 1.5*W + 2.0 * (W + L) * ((L/W)**2) + tc * (W+L) * (1.5 * V**2 + .35 * V * G)
return MW
def cost_pingel_exponential(S,scale_factor=9.25):
"""
Applies the exponental LCP cost function described by Pingel (2010).
Simple Example:
C = lcp.cost_pingel_exponential(S,scale_factor=9.25)
Parameters:
- 'S' is an array (any dimension) of slope values in DEGREES.
- 'scale_factor' is a value in degrees that generally corresponds to the mean slope
(in degrees) of a path network. Larger values represent a larger tolerance for
steeper slopes. Smaller values will cause an LCP to avoid steeper slopes.
"""
EXP = stats.expon.pdf(0,0,scale_factor) / stats.expon.pdf(S,0,scale_factor)
return EXP
def ve(S,ve=2.3):
"""
Applies a vertical exaggeration to a slope raster and returns it. Slope raster must be in DEGREES.
Simple Example:
S_ve = lcp.ve(S,2.3)
"""
S = np.tan(np.deg2rad(S))
S = np.rad2deg(np.arctan(ve * S))
return S
def get_lists(nodes,edges):
"""
Simple Example:
start_list, end_list, ids, start_coords, end_coords = lcp.get_lists(nodes, edges)
Internal method to transform nodes and edges into lists of start coords and lists of lists of end coords.
Returns: start_list, end_list, ids, start_coords, end_coords
"""
nodes['coords'] = list(zip(nodes.iloc[:,0], nodes.iloc[:,1]))
start_list = edges.iloc[:,0].unique()
end_list = [edges.iloc[:,1].loc[edges.iloc[:,0]==item].values for item in start_list]
start_coords = []
end_coords = []
ids = []
for i, this_start in enumerate(start_list):
these_ends = end_list[i]
these_ids = [this_start + '_to_' + te for te in these_ends]
these_start_coords = nodes.loc[this_start,'coords']
these_end_coords = nodes.loc[these_ends,'coords'].values
start_coords.append(these_start_coords)
end_coords.append(these_end_coords)
ids.append(these_ids)
return start_list, end_list, ids, start_coords, end_coords
def direct_routes(nodes,edges):
"""
Returns a straight-line path between edges.
Simple Example:
gdf = lcp.direct_routes(nodes, edges)
Parameters:
- 'nodes' is a Pandas DataFrame where the first column is a unique ID, the second is
an x coordinate (e.g., longitude) and the third is a y coordinate (e.g.,
latitude).
    - 'edges' is a Pandas DataFrame where the first column is a source ID (matching a node)
               and the second column is a destination. At the moment, we assume no
               directionality / edges are symmetric.
    Output:
    - 'gdf' is a GeoPandas GeoDataFrame with fields 'ids' describing the source and target
            , 'label' set to 'direct', and a geometry field containing the straight-line
            path in shapely / WKT format.
"""
start_list, end_list, ids, start_coords, end_coords = get_lists(nodes,edges)
gdf = pd.DataFrame()
for i,this_start in enumerate(start_coords):
df = pd.DataFrame()
these_end_coords = end_coords[i]
df['ids'] = ids[i]
df['label'] = 'direct'
df['geometry'] = [LineString([this_start,this_end]) for this_end in these_end_coords]
gdf = gdf.append(df,ignore_index=True)
gdf = geopandas.GeoDataFrame(gdf,geometry=gdf['geometry'],crs=4326)
return gdf
def lcp_coordinate_conversion(start_coords,end_coords,crs,transform):
"""
Simple Example:
network = lcp.create_raster_network(array)
Parameters:
- 'start_coords' is a list of tuples (lon,lat)
- 'end_coords' is a list of lists of tuples. Each list of end points corresponds to
a start point, so len(start_coords) must equal len(end_coords), although each
list OF end points can be of any length one or greater.
- 'crs' is a Coordinate Reference System of the type returned by rasterio (or neilpy).
- 'transform' is an Affine transformation matrix as returned by rasterio (or neilpy).
Output:
- 'converted_start_coords' is a list of tuples of PIXEL coordinates.
- 'converted_end_coords' is a list of list of tupes of pixel coordiantes.
"""
converted_start_coords = []
converted_end_coords = []
for i,this_start_coord in enumerate(start_coords):
these_end_coords = end_coords[i]
# Convert from lat/lon to map coordinates
this_start_coord = coord_transform(*this_start_coord,4326,crs)
these_end_coords = [coord_transform(*item,4326,crs) for item in these_end_coords]
# Convert from map coordinates to pixel coordinates
this_start_coord = (~transform*this_start_coord)[::-1]
these_end_coords = [(~transform*item)[::-1] for item in these_end_coords]
# Round them to ints
this_start_coord = tuple(np.round(this_start_coord).astype(np.uint32))
these_end_coords = [tuple(item) for item in np.round(these_end_coords).astype(np.uint32)]
converted_start_coords.append(this_start_coord)
converted_end_coords.append(these_end_coords)
return converted_start_coords, converted_end_coords
def get_areal_routes(nodes,edges,surface,meta,label='areal'):
"""
Simple Example:
gdf = lcp.get_areal_routes(nodes, edges, array, meta, label)
Parameters:
- 'nodes' is a Pandas DataFrame where the first column is a unique ID, the second is
an x coordinate (e.g., longitude) and the third is a y coordinate (e.g.,
latitude).
    - 'edges' is a Pandas DataFrame where the first column is a source ID (matching a node)
and the second column is a destination. At the moment, we assume no
directionality / edges are symmetric.
- 'array' is a numpy array representing the cost surface.
- 'meta' is a dictionary, that must contain 'crs' and 'transform' items corresponding
to those returned by rasterio. neilpy.imread returns such a dictionary
by default.
- 'label' is used to identify the type of cost path/surface in the GeoDataFrame output
rows.
Output:
- 'gdf' is a GeoPandas GeoDataFrame with fields 'ids' describing the source and target
, 'label' corresponding to the label, and a geometry field containing the
path in shapely / WKT format.
"""
gdf = pd.DataFrame()
print('Creating surface network for',label)
m = MCP_Geometric(surface,fully_connected=True)
print('Done creating surface network.')
start_list, end_list, ids, start_coords, end_coords = get_lists(nodes,edges)
conv_start_coords, conv_end_coords = lcp_coordinate_conversion(start_coords,end_coords,meta['crs'],meta['transform'])
for i,this_start_coord in enumerate(conv_start_coords):
these_end_coords = conv_end_coords[i]
print('Calculating costs and routes.')
costs, traceback_array = m.find_costs([this_start_coord],these_end_coords,find_all_ends=True)
print('Done calculating costs and routes.')
# Pull routes and convert
routes = [m.traceback(this_end_coord) for this_end_coord in these_end_coords]
geometries= [LineString(np.vstack(meta['transform']*np.fliplr(route).T).T) for route in routes]
df = pd.DataFrame()
df['ids'] = ids[i]
df['label'] = label
df['geometry'] = geometries
gdf = gdf.append(df,ignore_index=True)
gdf = geopandas.GeoDataFrame(gdf,geometry=gdf['geometry'],crs=meta['crs'])
return gdf
def create_raster_network(X):
"""
Simple Example:
network = lcp.create_raster_network(array)
Parameters:
- 'array' is a numpy array representing the cost surface.
Output:
- 'network' is a Pandas DataFrame with fields 'source' and 'target' representing 1D
(flattened) indices, source_value and target_value for pixel data, 'distance'
which is the pixel distance (1 for orthogonal, 2**.5 for diagonal). These
should be used directly by the operator to calculate a 'weight' field
before passing to lcp.get_linear_routes()
"""
m,n = np.shape(X)
I = np.reshape(np.arange(np.size(X),dtype=np.int32),np.shape(X))
df = pd.DataFrame()
df['source'] = np.hstack((I[1:,1:].flatten(),
I[1:,:].flatten(),
I[1:,:-1].flatten(),
I[:,:-1].flatten()))
df['target'] = np.hstack((ashift(I,0)[1:,1:].flatten(),
ashift(I,1)[1:,:].flatten(),
ashift(I,2)[1:,:-1].flatten(),
ashift(I,3)[:,:-1].flatten()))
df['source_value'] = X.flatten()[df['source'].values]
df['target_value'] = X.flatten()[df['target'].values]
df['distance'] = np.hstack((2**.5*np.ones((m-1)*(n-1)),
np.ones(n*(m-1)),
2**.5*np.ones((m-1)*(n-1)),
np.ones(m*(n-1))))
return df
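# Bridging sketch: create_raster_network() deliberately leaves the 'weight' column
# to the caller; this mirrors the example given in get_linear_routes' docstring.
def _example_weighted_network(Z):
    network = create_raster_network(Z)
    network['weight'] = np.abs(network['source_value'] - network['target_value']) / network['distance']
    return network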
def get_linear_routes(nodes,edges,df,meta,label='linear'):
"""
Simple Example:
network = lcp.create_raster_network(array)
network['weight'] = np.abs(network['source_value'] - network['target_value']) / network['distance']
gdf = lcp.get_linear_routes(nodes, edges, network, meta, label)
Parameters:
- 'nodes' is a Pandas DataFrame where the first column is a unique ID, the second is
an x coordinate (e.g., longitude) and the third is a y coordinate (e.g.,
latitude).
    - 'edges' is a Pandas DataFrame where the first column is a source ID (matching a node)
and the second column is a destination. At the moment, we assume no
directionality / edges are symmetric.
- 'network' is a Pandas DataFrame created by lcp.create_raster_network(). It MUST
include a column called 'weight'.
- 'meta' is a dictionary, that must contain 'crs' and 'transform' items corresponding
to those returned by rasterio. It must also contain 'height' and
'width' items. neilpy.imread returns such a dictionary by default.
- 'label' is used to identify the type of cost path/surface in the GeoDataFrame output
rows.
Output:
- 'gdf' is a GeoPandas GeoDataFrame with fields 'ids' describing the source and target
, 'label' corresponding to the label, and a geometry field containing the
path in shapely / WKT format.
"""
img_dim = (meta['height'],meta['width'])
G = igraph.Graph()
G.add_vertices(img_dim[0] * img_dim[1])
G.add_edges(list(zip(df.source,df.target)),attributes={'weight':df.weight})
del df
    gdf = pd.DataFrame()
# -*- coding: utf-8 -*-
import datetime
import numpy as np
import os
import pandas as pd
import pandas.testing as tm
from fastparquet import ParquetFile
from fastparquet import write, parquet_thrift
from fastparquet import writer, encoding
from pandas.testing import assert_frame_equal
from pandas.api.types import CategoricalDtype
import pytest
from fastparquet.util import default_mkdirs
from .util import s3, tempdir, sql, TEST_DATA
from fastparquet import cencoding
def test_uvarint():
values = np.random.randint(0, 15000, size=100)
buf = np.zeros(30, dtype=np.uint8)
o = cencoding.NumpyIO(buf)
for v in values:
o.seek(0)
cencoding.encode_unsigned_varint(v, o)
o.seek(0)
out = cencoding.read_unsigned_var_int(o)
assert v == out
def test_bitpack():
for _ in range(10):
values = np.random.randint(0, 15000, size=np.random.randint(10, 100),
dtype=np.int32)
width = cencoding.width_from_max_int(values.max())
buf = np.zeros(900, dtype=np.uint8)
o = cencoding.NumpyIO(buf)
cencoding.encode_bitpacked(values, width, o)
o.seek(0)
head = cencoding.read_unsigned_var_int(o)
buf2 = np.zeros(300, dtype=np.int32)
out = cencoding.NumpyIO(buf2.view("uint8"))
cencoding.read_bitpacked(o, head, width, out)
assert (values == buf2[:len(values)]).all()
assert buf2[len(values):].sum() == 0 # zero padding
assert out.tell() // 8 - len(values) < 8
def test_length():
lengths = np.random.randint(0, 15000, size=100)
buf = np.zeros(900, dtype=np.uint8)
o = cencoding.NumpyIO(buf)
for l in lengths:
o.seek(0)
o.write_int(l)
o.seek(0)
out = buf.view('int32')[0]
assert l == out
def test_rle_bp():
for _ in range(10):
values = np.random.randint(0, 15000, size=np.random.randint(10, 100),
dtype=np.int32)
buf = np.empty(len(values) + 5, dtype=np.int32)
out = cencoding.NumpyIO(buf.view('uint8'))
buf2 = np.zeros(900, dtype=np.uint8)
o = cencoding.NumpyIO(buf2)
width = cencoding.width_from_max_int(values.max())
# without length
cencoding.encode_rle_bp(values, width, o)
l = o.tell()
o.seek(0)
cencoding.read_rle_bit_packed_hybrid(o, width, length=l, o=out)
assert (buf[:len(values)] == values).all()
def test_roundtrip_s3(s3):
data = pd.DataFrame({'i32': np.arange(1000, dtype=np.int32),
'i64': np.arange(1000, dtype=np.int64),
'f': np.arange(1000, dtype=np.float64),
'bhello': np.random.choice([b'hello', b'you',
b'people'], size=1000).astype("O")})
data['hello'] = data.bhello.str.decode('utf8')
data['bcat'] = data.bhello.astype('category')
data.loc[100, 'f'] = np.nan
data['cat'] = data.hello.astype('category')
noop = lambda x: True
myopen = s3.open
write(TEST_DATA+'/temp_parq', data, file_scheme='hive',
row_group_offsets=[0, 500], open_with=myopen, mkdirs=noop)
myopen = s3.open
pf = ParquetFile(TEST_DATA+'/temp_parq', open_with=myopen)
df = pf.to_pandas(categories=['cat', 'bcat'])
for col in data:
assert (df[col] == data[col])[~df[col].isnull()].all()
@pytest.mark.parametrize('scheme', ['simple', 'hive'])
@pytest.mark.parametrize('row_groups', [[0], [0, 500]])
@pytest.mark.parametrize('comp', ['SNAPPY', None, 'GZIP'])
def test_roundtrip(tempdir, scheme, row_groups, comp):
data = pd.DataFrame({'i32': np.arange(1000, dtype=np.int32),
'i64': np.arange(1000, dtype=np.int64),
'u64': np.arange(1000, dtype=np.uint64),
'f': np.arange(1000, dtype=np.float64),
'bhello': np.random.choice([b'hello', b'you',
b'people'], size=1000).astype("O")})
data['a'] = np.array([b'a', b'b', b'c', b'd', b'e']*200, dtype="S1")
data['aa'] = data['a'].map(lambda x: 2*x).astype("S2")
data['hello'] = data.bhello.str.decode('utf8')
data['bcat'] = data.bhello.astype('category')
data['cat'] = data.hello.astype('category')
fname = os.path.join(tempdir, 'test.parquet')
write(fname, data, file_scheme=scheme, row_group_offsets=row_groups,
compression=comp)
r = ParquetFile(fname)
assert r.fmd.num_rows == r.count() == 1000
df = r.to_pandas()
assert data.cat.dtype == 'category'
for col in r.columns:
assert (df[col] == data[col]).all()
# tests https://github.com/dask/fastparquet/issues/250
assert isinstance(data[col][0], type(df[col][0]))
def test_bad_coltype(tempdir):
df = pd.DataFrame({'0': [1, 2], (0, 1): [3, 4]})
fn = os.path.join(tempdir, 'temp.parq')
with pytest.raises((ValueError, TypeError)) as e:
write(fn, df)
assert "tuple" in str(e.value)
def test_bad_col(tempdir):
df = pd.DataFrame({'x': [1, 2]})
fn = os.path.join(tempdir, 'temp.parq')
with pytest.raises(ValueError) as e:
write(fn, df, has_nulls=['y'])
@pytest.mark.parametrize('scheme', ('simple', 'hive'))
def test_roundtrip_complex(tempdir, scheme,):
import datetime
data = pd.DataFrame({'ui32': np.arange(1000, dtype=np.uint32),
'i16': np.arange(1000, dtype=np.int16),
'ui8': np.array([1, 2, 3, 4]*250, dtype=np.uint8),
'f16': np.arange(1000, dtype=np.float16),
'dicts': [{'oi': 'you'}] * 1000,
't': [datetime.datetime.now()] * 1000,
'td': [datetime.timedelta(seconds=1)] * 1000,
'bool': np.random.choice([True, False], size=1000)
})
data.loc[100, 't'] = None
fname = os.path.join(tempdir, 'test.parquet')
write(fname, data, file_scheme=scheme)
r = ParquetFile(fname)
df = r.to_pandas()
for col in r.columns:
assert (df[col] == data[col])[~data[col].isnull()].all()
@pytest.mark.parametrize('df', [
    pd.util.testing.makeMixedDataFrame()
## Real Estate price predictor
import pandas as pd
import numpy as np
housing = pd.read_csv("data.csv")
import numpy as np
import pytest
import pandas as pd
from pandas import DataFrame, Series, date_range, timedelta_range
import pandas._testing as tm
class TestTimeSeries:
def test_contiguous_boolean_preserve_freq(self):
rng = date_range("1/1/2000", "3/1/2000", freq="B")
mask = np.zeros(len(rng), dtype=bool)
mask[10:20] = True
masked = rng[mask]
expected = rng[10:20]
assert expected.freq == rng.freq
tm.assert_index_equal(masked, expected)
mask[22] = True
masked = rng[mask]
assert masked.freq is None
def test_promote_datetime_date(self):
rng = date_range("1/1/2000", periods=20)
ts = Series(np.random.randn(20), index=rng)
ts_slice = ts[5:]
ts2 = ts_slice.copy()
ts2.index = [x.date() for x in ts2.index]
result = ts + ts2
result2 = ts2 + ts
expected = ts + ts[5:]
expected.index = expected.index._with_freq(None)
tm.assert_series_equal(result, expected)
tm.assert_series_equal(result2, expected)
# test asfreq
result = ts2.asfreq("4H", method="ffill")
expected = ts[5:].asfreq("4H", method="ffill")
tm.assert_series_equal(result, expected)
result = rng.get_indexer(ts2.index)
expected = rng.get_indexer(ts_slice.index)
tm.assert_numpy_array_equal(result, expected)
def test_series_map_box_timedelta(self):
# GH 11349
s = Series(timedelta_range("1 day 1 s", periods=5, freq="h"))
import os
import sys
import time
import sqlite3
import pyupbit
import pandas as pd
from PyQt5.QtCore import QThread
from pyupbit import WebSocketManager
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
from utility.setting import *
from utility.static import now, timedelta_sec, strf_time, timedelta_hour, strp_time
class TraderUpbit(QThread):
def __init__(self, windowQ, coinQ, queryQ, soundQ, cstgQ, teleQ):
super().__init__()
self.windowQ = windowQ
self.coinQ = coinQ
self.queryQ = queryQ
self.soundQ = soundQ
self.cstgQ = cstgQ
self.teleQ = teleQ
self.upbit = None  # object used to place buy/sell orders and check fills
self.buy_uuid = None  # stores a pending buy order as a list: [ticker, uuid]
self.sell_uuid = None  # stores a pending sell order as a list: [ticker, uuid]
self.websocketQ = None  # websocket queue for receiving real-time data
self.df_cj = pd.DataFrame(columns=columns_cj)  # fill (execution) list
self.df_jg = pd.DataFrame(columns=columns_jg)  # holdings list
self.df_tj = pd.DataFrame(columns=columns_tj)  # holdings valuation
self.df_td = pd.DataFrame(columns=columns_td)  # trade list
self.df_tt = pd.DataFrame(columns=columns_tt)  # realized profit/loss
self.str_today = strf_time('%Y%m%d', timedelta_hour(-9))
self.dict_jcdt = {}  # stores the latest fill time per ticker
self.dict_intg = {
'예수금': 0,
'종목당투자금': 0,  # per-ticker investment, computed as int(deposit / max number of tickers to buy)
'최대매수종목수': 10,
'업비트수수료': 0.  # Upbit fee rate; enter 0.005 for a 0.5% fee
}
self.dict_bool = {
'모의투자': True,
'알림소리': True
}
self.dict_time = {
'매수체결확인': now(),  # used to check buy fills once per second
'매도체결확인': now(),  # used to check sell fills once per second
'거래정보': now()
}
def run(self):
self.LoadDatabase()
self.GetKey()
self.GetBalances()
self.EventLoop()
def LoadDatabase(self):
"""
On startup, load today's fill list, today's trade list and the holdings list.
The fill and trade lists are refreshed right away; the holdings list is refreshed after the deposit balance has been loaded.
"""
con = sqlite3.connect(db_tradelist)
df = pd.read_sql(f"SELECT * FROM chegeollist WHERE 체결시간 LIKE '{self.str_today}%'", con)
self.df_cj = df.set_index('index').sort_values(by=['체결시간'], ascending=False)
df = pd.read_sql(f'SELECT * FROM jangolist', con)
"""
This file is inspired by the work of the third-place winner of the Rossmann
competition on Kaggle as well as this notebook by fast.ai:
https://github.com/fastai/fastai/blob/master/courses/dl1/lesson3-rossman.ipynb
The resulting csv of this notebook can be submitted on this page:
https://www.kaggle.com/c/rossmann-store-sales
The private leaderboard is the one to watch for the scoring
"""
import os
import pandas as pd
from multiprocessing import cpu_count
import numpy as np
import torch.nn.functional as F
import isoweek
import torch.optim as optim
from tqdm import tqdm
import datetime
from sklearn.preprocessing import StandardScaler
from torchlite.torch.learner import Learner
from torchlite.torch.learner.cores import ClassifierCore
import torchlite.torch.metrics as metrics
from torchlite.data.fetcher import WebFetcher
import torchlite.torch.shortcuts as shortcuts
import torchlite.pandas.date as edate
from torchlite.torch.train_callbacks import CosineAnnealingCallback
from torchlite.pandas.tabular_encoder import TreeEncoder
import torchlite.pandas.merger as tmerger
import torchlite.pandas.splitter as tsplitter
def to_csv(test_file, output_file, identifier_field, predicted_field,
predictions, read_format='csv'):
df = None
if read_format == 'csv':
df = pd.read_csv(test_file)
from __future__ import division
from __future__ import print_function
import functools
import os
import pickle
import re
import sys
import time
import warnings
import nltk
import numpy as np
import pandas as pd
from dostoevsky.models import FastTextSocialNetworkModel
from dostoevsky.tokenization import RegexTokenizer
from gensim.models import KeyedVectors
from gensim.models import Word2Vec
from nltk.translate import bleu_score, chrf_score
from scipy import spatial
from sklearn.decomposition import TruncatedSVD
from sklearn.metrics.pairwise import paired_cosine_distances
from sklearn.metrics.pairwise import paired_euclidean_distances
from utils.features_processor_variables import MORPH_FEATS, FPOS_COMBINATIONS, count_words_x, \
count_words_y, pairs_words, relations_related
from utils.synonyms_vocabulary import synonyms_vocabulary
warnings.filterwarnings('ignore')
import razdel
def tokenize(text):
result = ' '.join([tok.text for tok in razdel.tokenize(text)])
return result
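# Usage sketch (added illustration, not part of the original module): razdel.tokenize
# yields Substring objects; joining their .text fields with spaces splits punctuation
# into separate tokens, roughly:
#   tokenize("Привет, мир!")  ->  "Привет , мир !"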
class FeaturesProcessor:
CATEGORY = 'category_id'
def __init__(self,
model_dir_path: str,
verbose=0,
use_markers=True,
use_morphology=True,
embed_model_stopwords=True,
use_w2v=True,
use_sentiment=True):
"""
:param str model_dir_path: path with all the models
:param int verbose: 0 for no logging, 1 for time counts logging and 2 for all warnings
:param bool embed_model_stopwords: whether to count stopwords during w2v vectorization
:param use_w2v: whether to perform w2v vectorization
:param use_sentiment: whether to perform sentiment extraction
"""
self._model_dir_path = model_dir_path
self._verbose = verbose
self._use_markers = use_markers
self._use_morphology = use_morphology
self._embed_model_stopwords = embed_model_stopwords
self._use_w2v = use_w2v
self._use_sentiment = use_sentiment
if self._verbose:
print("Processor initialization...\t", end="", flush=True)
self.relations_related = relations_related
self.stop_words = nltk.corpus.stopwords.words('russian')
self.count_words_x = count_words_x
self.count_words_y = count_words_y
self.pairs_words = pairs_words
self.vectorizer = pickle.load(open(os.path.join(model_dir_path, 'tf_idf', 'pipeline.pkl'), 'rb'))
# preprocessing functions
self._uppercased = lambda snippet, length: sum(
[word[0].isupper() if len(word) > 0 else False for word in snippet.split()]) / length
self._start_with_uppercase = lambda snippet, length: sum(
[word[0].isupper() if len(word) > 0 else False for word in snippet.split(' ')]) / length
if self._use_w2v:
self.embed_model_path = os.path.join(model_dir_path, 'w2v', 'default', 'model.vec')
self._synonyms_vocabulary = synonyms_vocabulary
# embeddings
if self.embed_model_path[-4:] in ['.vec', '.bin']:
self.word2vec_model = KeyedVectors.load_word2vec_format(self.embed_model_path,
binary=self.embed_model_path[-4:] == '.bin')
else:
self.word2vec_model = Word2Vec.load(self.embed_model_path)
test_word = ['дерево', 'NOUN']
try:
self.word2vec_vector_length = len(self.word2vec_model.wv.get_vector(test_word[0]))
self.word2vec_tag_required = False
except KeyError:
self.word2vec_vector_length = len(self.word2vec_model.wv.get_vector('_'.join(test_word)))
self.word2vec_tag_required = True
self.word2vec_stopwords = embed_model_stopwords
self._remove_stop_words = lambda lemmatized_snippet: [word for word in lemmatized_snippet if
word not in self.stop_words]
self.fpos_combinations = FPOS_COMBINATIONS
if self._use_sentiment:
self.sentiment_model = FastTextSocialNetworkModel(tokenizer=RegexTokenizer())
# self._context_length = 3
if self._verbose:
print('[DONE]')
def _find_y(self, snippet_x, snippet_y, loc_x):
result = self.annot_text.find(snippet_y, loc_x + len(snippet_x) - 1)
if result < 1:
result = self.annot_text.find(snippet_y, loc_x + 1)
if result < 1:
result = loc_x + 1
return result
def __call__(self, df_, annot_text, annot_tokens, annot_sentences, annot_lemma, annot_morph, annot_postag,
annot_syntax_dep_tree):
df = df_[:]
df.snippet_x = df.snippet_x.replace('\n', ' ', regex=True).replace(' ', ' ', regex=True)
df.snippet_x = df.snippet_x.map(tokenize)
df.snippet_y = df.snippet_y.replace('\n', ' ', regex=True).replace(' ', ' ', regex=True)
df.snippet_y = df.snippet_y.map(tokenize)
# self.annot_text = annot_text.replace('\n', ' ').replace(' ', ' ')
self.annot_text = tokenize(annot_text)
self.annot_tokens = annot_tokens
self.annot_sentences = annot_sentences
self.annot_lemma = annot_lemma
self.annot_morph = annot_morph
self.annot_postag = annot_postag
self.annot_syntax_dep_tree = annot_syntax_dep_tree
t, t_final = None, None
if self._verbose:
t = time.time()
t_final = t
print('1\t', end="", flush=True)
df['is_broken'] = False
# map discourse units to annotations
if not 'loc_x' in df.keys():
df['loc_x'] = df.snippet_x.map(self.annot_text.find)
if not 'loc_y' in df.keys():
df['loc_y'] = df.apply(lambda row: self._find_y(row.snippet_x, row.snippet_y, row.loc_x - 1), axis=1)
df['token_begin_x'] = df.loc_x.map(self.locate_token)
df['token_begin_y'] = df.loc_y.map(self.locate_token)
try:
df['token_end_y'] = df.apply(lambda row: self.locate_token(row.loc_y + len(row.snippet_y)), axis=1) # -1
df['token_end_y'] = df['token_end_y'] + (df['token_end_y'] == df['token_begin_y']) * 1
except:
if self._verbose == 2:
print(f'Unable to locate second snippet >>> {(df.snippet_x.values, df.snippet_y.values)}',
file=sys.stderr)
print(self.annot_text, file=sys.stderr)
df['tokens_x'] = df.snippet_x.map(lambda row: row.split())
df['tokens_y'] = df.snippet_y.map(lambda row: row.split())
# df['left_context'] = ['_END_'] * self._context_length
# df['right_context'] = ['_END_'] * self._context_length
df['same_sentence'] = 0
df['is_broken'] = True
return df
# length of tokens sequence
df['len_w_x'] = df['token_begin_y'] - df['token_begin_x']
df['len_w_y'] = df['token_end_y'] - df['token_begin_y'] # +1
df['snippet_x_locs'] = df.apply(lambda row: [[pair for pair in [self.token_to_sent_word(token) for token in
range(row.token_begin_x, row.token_begin_y)]]],
axis=1)
df['snippet_x_locs'] = df.snippet_x_locs.map(lambda row: row[0])
# print(df[['snippet_x', 'snippet_y', 'snippet_x_locs']].values)
broken_pair = df[df.snippet_x_locs.map(len) < 1]
if not broken_pair.empty:
print(
f"Unable to locate first snippet >>> {df[df.snippet_x_locs.map(len) < 1][['snippet_x', 'snippet_y', 'token_begin_x', 'token_begin_y', 'loc_x', 'loc_y']].values}",
file=sys.stderr)
print(self.annot_text, file=sys.stderr)
df = df[df.snippet_x_locs.map(len) > 0]
# print(df[['snippet_x', 'snippet_y', 'token_begin_y', 'token_end_y']])
df['snippet_y_locs'] = df.apply(lambda row: [
[pair for pair in [self.token_to_sent_word(token) for token in range(row.token_begin_y, row.token_end_y)]]],
axis=1)
df['snippet_y_locs'] = df.snippet_y_locs.map(lambda row: row[0])
broken_pair = df[df.snippet_y_locs.map(len) < 1]
if not broken_pair.empty:
print(
f"Unable to locate second snippet >>> {df[df.snippet_y_locs.map(len) < 1][['snippet_x', 'snippet_y', 'token_begin_x', 'token_begin_y', 'token_end_y', 'loc_x', 'loc_y']].values}",
file=sys.stderr)
print(self.annot_text, file=sys.stderr)
df2 = df[df.snippet_y_locs.map(len) < 1]
_df2 = pd.DataFrame({
'snippet_x': df2['snippet_y'].values,
'snippet_y': df2['snippet_x'].values,
'loc_y': df2['loc_x'].values,
'token_begin_y': df2['token_begin_x'].values,
})
df2 = _df2[:]
df2['loc_x'] = df2.apply(lambda row: self.annot_text.find(row.snippet_x, row.loc_y - 3), axis=1)
df2['token_begin_x'] = df2.loc_x.map(self.locate_token)
# df2['loc_y'] = df2.apply(lambda row: self._find_y(row.snippet_x, row.snippet_y, row.loc_x), axis=1)
df2['token_end_y'] = df2.apply(lambda row: self.locate_token(row.loc_y + len(row.snippet_y)), # + 1,
axis=1) # -1
# df2['token_begin_x'] = df2['token_begin_y']
# df2['token_begin_y'] = df2.loc_y.map(self.locate_token)
df2['len_w_x'] = df2['token_begin_y'] - df2['token_begin_x']
df2['len_w_y'] = df2['token_end_y'] - df2['token_begin_y'] # +1
df2['snippet_x_locs'] = df2.apply(
lambda row: [[pair for pair in [self.token_to_sent_word(token) for token in
range(row.token_begin_x, row.token_begin_y)]]], axis=1)
df2['snippet_x_locs'] = df2.snippet_x_locs.map(lambda row: row[0])
df2['snippet_y_locs'] = df2.apply(
lambda row: [[pair for pair in [self.token_to_sent_word(token) for token in
range(row.token_begin_y, row.token_end_y)]]], axis=1)
df2['snippet_y_locs'] = df2.snippet_y_locs.map(lambda row: row[0])
broken_pair = df2[df2.snippet_y_locs.map(len) < 1]
if not broken_pair.empty:
print(
f"Unable to locate second snippet AGAIN >>> {df2[df2.snippet_y_locs.map(len) < 1][['snippet_x', 'snippet_y', 'token_begin_x', 'token_begin_y', 'token_end_y', 'loc_x', 'loc_y']].values}",
file=sys.stderr)
df = df[df.snippet_y_locs.map(len) > 0]
df2 = df2[df2.snippet_x_locs.map(len) > 0]
df = pd.concat([df, df2])
# print(df[['snippet_x', 'snippet_y', 'snippet_y_locs', 'loc_x', 'loc_y']].values)
df.drop(columns=['loc_x', 'loc_y'], inplace=True)
if self._verbose:
print(time.time() - t)
t = time.time()
print('2\t', end="", flush=True)
# define a number of sentences and whether x and y are in the same sentence
df['sentence_begin_x'] = df.snippet_x_locs.map(lambda row: row[0][0])
df['sentence_begin_y'] = df.snippet_y_locs.map(lambda row: row[0][0])
df['sentence_end_y'] = df.snippet_y_locs.map(lambda row: row[-1][0])
df['number_sents_x'] = (df['sentence_begin_y'] - df['sentence_begin_x']) | 1
df['number_sents_y'] = (df['sentence_end_y'] - df['sentence_begin_y']) | 1
df['same_sentence'] = (df['sentence_begin_x'] == df['sentence_begin_y']).astype(int)
df['same_paragraph'] = df.apply(
lambda row: annot_text.find('\n', row.sentence_begin_x, row.sentence_end_y) != -1,
axis=1).astype(int)
df['same_paragraph'] = df['same_sentence'] | df['same_paragraph']
# find the common syntax root of x and y
df['common_root'] = df.apply(lambda row: [self.locate_root(row)], axis=1)
# find its relative position in text
# df['common_root_position'] = df.common_root.map(lambda row: self.map_to_token(row[0])) / len(annot_tokens)
# define its fPOS
# df['common_root_fpos'] = df.common_root.map(lambda row: self.get_postag(row)[0])
# 1 if it is located in y
df['root_in_y'] = df.apply(
lambda row: self.map_to_token(row.common_root[0]) > row.token_begin_y, axis=1).astype(int)
df.drop(columns=['common_root'], inplace=True)
if self._verbose:
print(time.time() - t)
t = time.time()
print('3\t', end="", flush=True)
# find certain markers for various relations
if self._use_markers:
for relation in self.relations_related:
df[relation + '_count' + '_x'] = df.snippet_x.map(lambda row: self._relation_score(relation, row))
df[relation + '_count' + '_y'] = df.snippet_y.map(lambda row: self._relation_score(relation, row))
if self._verbose:
print(time.time() - t)
t = time.time()
print('4\t', end="", flush=True)
# get tokens
df['tokens_x'] = df.apply(lambda row: self.get_tokens(row.token_begin_x, row.token_begin_y), axis=1)
df['tokens_x'] = df.apply(lambda row: row.tokens_x if len(row.tokens_x) > 0 else row.snippet_x.split(), axis=1)
df['tokens_y'] = df.apply(lambda row: self.get_tokens(row.token_begin_y, row.token_end_y - 1), axis=1)
df['tokens_y'] = df.apply(lambda row: row.tokens_y if len(row.tokens_y) > 0 else row.snippet_y.split(), axis=1)
# get lemmas
df['lemmas_x'] = df.snippet_x_locs.map(self.get_lemma)
df['lemmas_y'] = df.snippet_y_locs.map(self.get_lemma)
if self._verbose:
print(time.time() - t)
t = time.time()
print('5\t', end="", flush=True)
# ratio of uppercased words
df['upper_x'] = df.tokens_x.map(lambda row: sum(token.isupper() for token in row) / (len(row) + 1e-5))
df['upper_y'] = df.tokens_y.map(lambda row: sum(token.isupper() for token in row) / (len(row) + 1e-5))
# ratio of the words starting with upper case
df['st_up_x'] = df.tokens_x.map(lambda row: sum(token[0].isupper() for token in row) / (len(row) + 1e-5))
df['st_up_y'] = df.tokens_y.map(lambda row: sum(token[0].isupper() for token in row) / (len(row) + 1e-5))
# whether DU starts with upper case
df['du_st_up_x'] = df.tokens_x.map(lambda row: row[0][0].isupper()).astype(int)
df['du_st_up_y'] = df.tokens_y.map(lambda row: row[0][0].isupper()).astype(int)
if self._verbose:
print(time.time() - t)
t = time.time()
print('6\t', end="", flush=True)
# get morphology
if self._use_morphology:
df['morph_x'] = df.snippet_x_locs.map(self.get_morph)
df['morph_y'] = df.snippet_y_locs.map(self.get_morph)
# count presence and/or quantity of various language features in the whole DUs and at the beginning/end of them
df = df.apply(lambda row: self._linguistic_features(row, tags=MORPH_FEATS), axis=1)
df = df.apply(lambda row: self._first_and_last_pair(row), axis=1)
if self._verbose:
print(time.time() - t)
t = time.time()
print('7\t', end="", flush=True)
# count various vectors similarity metrics for morphology
if self._use_morphology:
linknames_for_snippet_x = df[[name + '_x' for name in MORPH_FEATS]]
linknames_for_snippet_y = df[[name + '_y' for name in MORPH_FEATS]]
df.reset_index(inplace=True)
df['morph_vec_x'] = pd.Series(self.columns_to_vectors_(linknames_for_snippet_x))
df['morph_vec_y'] = pd.Series(self.columns_to_vectors_(linknames_for_snippet_y))
df['morph_correlation'] = df[['morph_vec_x', 'morph_vec_y']].apply(
lambda row: spatial.distance.correlation(*row), axis=1)
df['morph_hamming'] = df[['morph_vec_x', 'morph_vec_y']].apply(lambda row: spatial.distance.hamming(*row),
axis=1)
df['morph_matching'] = df[['morph_vec_x', 'morph_vec_y']].apply(
lambda row: self.get_match_between_vectors_(*row), axis=1)
df.set_index('index', drop=True, inplace=True)
df = df.drop(columns=['morph_vec_x', 'morph_vec_y'])
if self._verbose:
print(time.time() - t)
t = time.time()
print('8\t', end="", flush=True)
# detect discourse markers
if self._use_markers:
for word in self.count_words_x:
df[word + '_count' + '_x'] = df.snippet_x.map(lambda row: self.count_marker_(word, row))
for word in self.count_words_y:
df[word + '_count' + '_y'] = df.snippet_y.map(lambda row: self.count_marker_(word, row))
# count stop words in the texts
df['stopwords_x'] = df.lemmas_x.map(self._count_stop_words)
df['stopwords_y'] = df.lemmas_y.map(self._count_stop_words)
if self._verbose:
print(time.time() - t)
t = time.time()
print('9\t', end="", flush=True)
# dummy function needed for self.vectorizer (do NOT remove)
def dummy(x):
return x
df.reset_index(drop=True, inplace=True)
tf_idf_x = self.vectorizer.transform(df['tokens_x'].map(lambda row: [_token.lower() for _token in row]))
tf_idf_y = self.vectorizer.transform(df['tokens_y'].map(lambda row: [_token.lower() for _token in row]))
df['cos_tf_idf_dist'] = paired_cosine_distances(tf_idf_x, tf_idf_y)
df['ang_cos_tf_idf_sim'] = 1. - np.arccos(df['cos_tf_idf_dist']) * 2. / np.pi
tf_idf_x = pd.DataFrame(tf_idf_x).add_prefix('tf_idf_x_')
tf_idf_y = pd.DataFrame(tf_idf_y).add_prefix('tf_idf_y_')
df = pd.concat([df, tf_idf_x, tf_idf_y], axis=1)
import numpy as np
import math
import pandas as pd
import requests
import us
from adjustText import adjust_text
from matplotlib import pyplot as plt
# Calculate studentized residuals - for detecting outliers
def internally_studentized_residual(X, Y):
"""
https://stackoverflow.com/a/57155553/12366110
"""
X = np.array(X, dtype=float)
Y = np.array(Y, dtype=float)
mean_X = np.mean(X)
mean_Y = np.mean(Y)
n = len(X)
diff_mean_sqr = np.dot((X - mean_X), (X - mean_X))
beta1 = np.dot((X - mean_X), (Y - mean_Y)) / diff_mean_sqr
beta0 = mean_Y - beta1 * mean_X
y_hat = beta0 + beta1 * X
residuals = Y - y_hat
h_ii = (X - mean_X) ** 2 / diff_mean_sqr + (1 / n)
Var_e = math.sqrt(sum((Y - y_hat) ** 2) / (n - 2))
SE_regression = Var_e * ((1 - h_ii) ** 0.5)
studentized_residuals = residuals / SE_regression
return studentized_residuals
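# Usage sketch (added illustration; `x` and `y` stand for any equal-length numeric
# sequences and are not names defined in this script): points whose absolute
# studentized residual exceeds ~2 are conventionally flagged as outliers.
#   resid = internally_studentized_residual(x, y)
#   outlier_mask = np.abs(np.array(resid)) > 2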
# ----- Scraping -----
# Get 2020 per-state presidential vote counts from Dave Leip's Election Atlas
url = "https://uselectionatlas.org/RESULTS/data.php?year=2020&datatype=national&def=1&f=1&off=0&elect=0"
header = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.183 Safari/537.36",
"X-Requested-With": "XMLHttpRequest",
}
r = requests.get(url, headers=header)
dfs_2020 = pd.read_html(r.text)
# Truncate data to the 50 states (and D.C.)
df_2020 = dfs_2020[2][1:52][["State", "Biden.1", "Trump.1"]]
df_2020.columns = ["State", "2020 D", "2020 R"]
# Repeat for 2016
url = "https://uselectionatlas.org/RESULTS/data.php?year=2016&datatype=national&def=1&f=1&off=0&elect=0"
r = requests.get(url, headers=header)
dfs_2016 = pd.read_html(r.text)
df_2016 = dfs_2016[2][1:52][["State", "Clinton.1", "Trump.1"]]
df_2016.columns = ["State", "2016 D", "2016 R"]
# Get urbanization indexes from 538 article
url = "https://fivethirtyeight.com/features/how-urban-or-rural-is-your-state-and-what-does-that-mean-for-the-2020-election/"
r = requests.get(url, headers=header)
dfs_538 = pd.read_html(r.text)
urbanization_dfs = [
dfs_538[0][["State", "Urbanization Index"]],
dfs_538[0][["State.1", "Urbanization Index.1"]],
]
for df in urbanization_dfs:
df.columns = ["State", "Urbanization Index"]
urbanization_df = pd.concat(urbanization_dfs)
# Merge dataframes, we lose D.C. as 538 doesn't provide an urbanization index
df = pd.merge(df_2016, df_2020, on="State")
df = pd.merge(urbanization_df, df, on="State", how="inner")
import pandas as pd
from firebase import firebase as frb
import json
import os
from dotenv import load_dotenv
from datetime import datetime, date
from time import gmtime, time, strftime, sleep
from pytz import timezone
import schedule
from tzlocal import get_localzone
load_dotenv()
columns = ['Time', 'Price', 'Net Change', 'Sell', 'Buy', 'Trading Volume']
kospi_columns = ['Time', 'Price', 'Net Change', 'Trading Volume', 'Dollar Volume']
exg_columns = ['Inquiry', 'Standard Rate', 'Net Change', 'Cash Buy', 'Cash Sell']
def crawl_intraday_data(code, time):
intra_df = pd.DataFrame()
for i in range(1, 41):
page_df = pd.read_html(os.getenv("INTRADAY_DATA_SOURCE_ADDRESS").format(code=code, time=time, page=i))[0]
intra_df = intra_df.append(page_df)
intra_df.dropna(inplace=True)
intra_df.drop(intra_df.columns[6], axis=1, inplace=True)
intra_df.reset_index(inplace=True, drop=True)
intra_df.columns = columns
yesterday_df = pd.read_html(os.getenv("DAILY_DATA_SOURCE_ADDRESS").format(code=code))[0]
yesterday_df.dropna(inplace=True)
price_yesterday = yesterday_df[yesterday_df.columns[1]].iloc[1]
intra_df['Net Change'] = intra_df['Price'] - price_yesterday
return intra_df
def save_intraday_data(code, date, df):
firebase = frb.FirebaseApplication(os.getenv("FIREBASE_ADDRESS"), None)
df.apply(lambda r: firebase.post('/stock/{code}/{date}'.format(code=code, date=date), json.loads(r.to_json())), axis=1)
def retrieve_intraday_data(code, date):
firebase = frb.FirebaseApplication(os.getenv("FIREBASE_ADDRESS"), None)
data = firebase.get('/stock/{code}/{date}'.format(code=code, date=date), None)
result = pd.DataFrame.from_dict(data, orient='index')
result = result[columns]
result.reset_index(inplace=True, drop=True)
return result
def crawl_intraday_kospi_data(time):
kospi_df = pd.DataFrame()
for i in range(1, 3):
page_df = pd.read_html(os.getenv("INTRADAY_KOSPI_SOURCE_ADDRESS").format(time=time, page=i))[0]
kospi_df = kospi_df.append(page_df)
kospi_df.dropna(inplace=True)
kospi_df.drop(kospi_df.columns[3], axis=1, inplace=True)
kospi_df.columns = kospi_columns
yesterday_df = pd.read_html(os.getenv("DAILY_KOSPI_SOURCE_ADDRESS"))[0]
yesterday_df.dropna(inplace=True)
price_yesterday = yesterday_df[yesterday_df.columns[1]].iloc[1]
kospi_df['Net Change'] = kospi_df['Price'] - price_yesterday
kospi_df.reset_index(inplace=True, drop=True)
return kospi_df
def save_intraday_kospi_data(date, df):
firebase = frb.FirebaseApplication(os.getenv("FIREBASE_ADDRESS"), None)
df.apply(lambda r: firebase.post('/stock/kospi/{date}'.format(date=date), json.loads(r.to_json())), axis=1)
def retrieve_intraday_kospi_data(date):
firebase = frb.FirebaseApplication(os.getenv("FIREBASE_ADDRESS"), None)
data = firebase.get('/stock/kospi/{date}'.format(date=date), None)
result = pd.DataFrame.from_dict(data, orient='index')
result = result[kospi_columns]
result.reset_index(inplace=True, drop=True)
return result
def crawl_exchange_rate():
exg_df = pd.DataFrame()
for i in range(1, 41):
page_df = pd.read_html(os.getenv("EXCHANGE_RATE_DATA_SOURCE_ADDRESS").format(page=i))[0]
page_df.drop(page_df.columns[4:8], axis=1, inplace=True)
page_df.columns = exg_columns
exg_df = exg_df.append(page_df)
exg_df.sort_values(exg_df.columns[0], inplace=True)
exg_df['Inquiry'] = pd.to_numeric(exg_df['Inquiry'].str[:-1])
# --
# Load deps
import keras
import pandas as pd
import urllib2
from hashlib import md5
from bs4 import BeautifulSoup
from pprint import pprint
from matplotlib import pyplot as plt
import sys
sys.path.append('/Users/BenJohnson/projects/what-is-this/wit/')
from wit import *
pd.set_option('display.max_rows', 50)
# Copyright 2019 Verily Life Sciences LLC
#
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
import unittest
from typing import List, Tuple, Type, Union # noqa: F401
import pandas as pd
from ddt import data, ddt, unpack
from purplequery.bq_abstract_syntax_tree import (EMPTY_NODE, AbstractSyntaxTreeNode, # noqa: F401
DatasetTableContext, EvaluationContext, _EmptyNode)
from purplequery.bq_types import BQScalarType, TypedDataFrame
from purplequery.dataframe_node import TableReference
from purplequery.grammar import data_source
from purplequery.join import ConditionsType # noqa: F401
from purplequery.join import DataSource, Join
from purplequery.query_helper import apply_rule
from purplequery.tokenizer import tokenize
@ddt
class JoinTest(unittest.TestCase):
def setUp(self):
# type: () -> None
self.table_context = DatasetTableContext({
'my_project': {
'my_dataset': {
'my_table': TypedDataFrame(
pd.DataFrame([[1], [2]], columns=['a']),
types=[BQScalarType.INTEGER]
),
'my_table2': TypedDataFrame(
pd.DataFrame([[1, 2], [3, 4]], columns=['a', 'b']),
types=[BQScalarType.INTEGER, BQScalarType.INTEGER]
)
}
}
})
def test_data_source(self):
data_source = DataSource((TableReference(('my_project', 'my_dataset', 'my_table')),
EMPTY_NODE), [])
data_source_context = data_source.create_context(self.table_context)
self.assertEqual(data_source_context.table.to_list_of_lists(), [[1], [2]])
self.assertEqual(list(data_source_context.table.dataframe), ['my_table.a'])
self.assertEqual(data_source_context.table.types, [BQScalarType.INTEGER])
@data(
dict(
join_type='JOIN',
table1=[[1, 9]],
table2=[[1, 2], [3, 4]],
result=[[1, 9, 1, 2]]),
dict(
join_type='INNER JOIN',
table1=[[1, 9]],
table2=[[1, 2], [3, 4]],
result=[[1, 9, 1, 2]]),
dict(
join_type='left join',
table1=[[1, 4], [2, 5], [3, 6]],
table2=[[1, 3], [2, 4]],
result=[[1, 4, 1, 3], [2, 5, 2, 4], [3, 6, None, None]]),
dict(
join_type='LEFT OUTER JOIN',
table1=[[1, 4], [2, 5], [3, 6]],
table2=[[1, 3], [2, 4]],
result=[[1, 4, 1, 3], [2, 5, 2, 4], [3, 6, None, None]]),
dict(
join_type='RIGHT JOIN',
table1=[[1, 5]],
table2=[[1, 2], [3, 4]],
result=[[1, 5, 1, 2], [None, None, 3, 4]]),
dict(
join_type='RIGHT OUTER JOIN',
table1=[[1, 5]],
table2=[[1, 2], [3, 4]],
result=[[1, 5, 1, 2], [None, None, 3, 4]]),
dict(
join_type='FULL JOIN',
table1=[[1, 3], [2, 5]],
table2=[[1, 2], [3, 4]],
result=[[1, 3, 1, 2], [2, 5, None, None], [None, None, 3, 4]]),
dict(
join_type='FULL OUTER JOIN',
table1=[[1, 3], [2, 5]],
table2=[[1, 2], [3, 4]],
result=[[1, 3, 1, 2], [2, 5, None, None], [None, None, 3, 4]]),
dict(
join_type='CROSS JOIN',
table1=[[1, 3], [2, 5]],
table2=[[1, 2], [3, 4]],
result=[[1, 3, 1, 2],
[1, 3, 3, 4],
[2, 5, 1, 2],
[2, 5, 3, 4]]),
dict(
join_type=',',
table1=[[1, 3], [2, 5]],
table2=[[1, 2], [3, 4]],
result=[[1, 3, 1, 2],
[1, 3, 3, 4],
[2, 5, 1, 2],
[2, 5, 3, 4]]),
)
@unpack
def test_data_source_joins(self, join_type, # type: Union[_EmptyNode, str]
table1, # type: List[List[int]]
table2, # type: List[List[int]]
result # type: List[List[int]]
):
# type: (...) -> None
table_context = DatasetTableContext({
'my_project': {
'my_dataset': {
'my_table': TypedDataFrame(
pd.DataFrame(table1, columns=['a', 'b']),
types=[BQScalarType.INTEGER, BQScalarType.INTEGER]
),
'my_table2': TypedDataFrame(
pd.DataFrame(table2, columns=['a', 'c']),
types=[BQScalarType.INTEGER, BQScalarType.INTEGER]
)
}
}
})
tokens = tokenize('my_table {} my_table2 {}'.format(
join_type, 'USING (a)' if join_type not in (',', 'CROSS JOIN') else ''))
data_source_node, leftover = apply_rule(data_source, tokens)
self.assertFalse(leftover)
assert isinstance(data_source_node, DataSource)
context = data_source_node.create_context(table_context)
self.assertEqual(context.table.to_list_of_lists(), result)
self.assertEqual(list(context.table.dataframe),
['my_table.a', 'my_table.b', 'my_table2.a', 'my_table2.c'])
@data(
dict(
join_type='INNER JOIN',
result=[[1, 2]]),
dict(
join_type='CROSS JOIN', # With an on clause, CROSS functions like inner.
result=[[1, 2]]),
dict(
join_type='LEFT OUTER JOIN',
result=[[1, 2], [2, None]]),
dict(
join_type='RIGHT OUTER JOIN',
result=[[1, 2], [None, 0]]),
dict(
join_type='FULL OUTER JOIN',
result=[[1, 2], [2, None], [None, 0]]),
)
@unpack
def test_data_source_join_on_arbitrary_bool(self, join_type, # type: Union[_EmptyNode, str]
result # type: List[List[int]]
):
# type: (...) -> None
table_context = DatasetTableContext({
'my_project': {
'my_dataset': {
'my_table': TypedDataFrame(
pd.DataFrame([[1], [2]], columns=['a']),
import datetime
import os
from typing import Optional, Dict
import pandas as pd
from .. import QtCore
from ..client import Client as InstrumentClient
from . import read_config
class LoggerParameters:
"""
Holds the different parameters the logger is tracking. It holds all of the metadata as well as fresh data
:param name: Name of the parameter.
:param source_type: Specifies how to gather the data for the parameter (parameter or broadcast).
:param parameter_path: Full name with submodules of the qcodes parameter.
:param server: Location of the server, defaults to 'localhost'.
:param port: Port of the server, defaults to 5555.
:param interval: Interval of time to gather new updates in seconds,
only relevant if source_type is 'parameter'. Defaults to 1.
"""
def __init__(self, name: str,
source_type: str,
parameter_path: str,
server: Optional[str] = 'localhost',
port: Optional[int] = 5555,
interval: Optional[int] = 1):
# load values
self.name = name
self.source_type = source_type
self.parameter_path = parameter_path
self.server = server
self.port = port
self.address = f"tcp://{self.server}:{self.port}"
self.interval = interval
self.data = []
self.time = []
# locate the instrument this parameter is located
submodules = parameter_path.split('.')
self.instrument_name = submodules[0]
self.client = InstrumentClient(self.server, self.port)
self.instrument = self.client.get_instrument(self.instrument_name)
# get the name of the parameter with submodules
parameter_name = ''
for i in range(1, len(submodules)):
parameter_name = parameter_name + submodules[i]
self.parameter_name = parameter_name
# record the time the parameter was created
self.last_saved_t = datetime.datetime.now()
def update(self):
"""
Updates the parameter and save a new data point in memory
"""
# check that the source type is parameter
if self.source_type == 'parameter':
# gather new data and save it in memory
new_data = self.instrument.get(self.parameter_name)
current_time = datetime.datetime.now()
self.data.append(new_data)
self.time.append(current_time)
if self.source_type == 'broadcast':
raise NotImplementedError
class ParameterLogger(QtCore.QObject):
"""
Main class of the logger. All of the parameters are saved inside this class
:param config: The dictionary from the config file.
"""
def __init__(self, config: Dict):
super().__init__()
self.parameters = []
# read the config file
parameters, refresh, save_directory = read_config('logger', config)
# create the LoggerParameters based on the config file
for params in parameters:
self.parameters.append(LoggerParameters(name=params[0],
source_type=params[1],
parameter_path=params[2],
server=params[3],
port=params[4],
interval=params[5]))
# check if the values are none.
# if they are set the default one, if not, set the specified one in the config file
if refresh is not None:
self.refresh = refresh
else:
self.refresh = 10
if save_directory is not None:
self.save_directory = save_directory
else:
self.save_directory = os.path.join(os.getcwd(), 'dashboard_data.csv')
self.active = False
self.last_saved_t = datetime.datetime.now()
def save_data(self):
"""
Saves the data in the specified file indicated in the config dictionary.
Deletes the data from memory once it has been saved to storage.
"""
# go through the parameters and create DataFrames with their data
df_list = []
for params in self.parameters:
holder_df = pd.DataFrame({'time': params.time,
'value': params.data,
'name': params.name,
'parameter_path': params.parameter_path,
'address': params.address
})
df_list.append(holder_df)
params.data = []
params.time = []
ret = pd.concat(df_list, ignore_index=True)
import yaml
from tunetools import db_utils
import numpy as np
import pandas as pd
from scipy import stats
import json
def _singleton_dict_to_tuple(singleton_dict):
return list(singleton_dict.items())[0]
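# Illustration: _singleton_dict_to_tuple({"max": "accuracy"}) -> ("max", "accuracy")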
def _check_param(param, total_param, mark_param: set):
if param is None:
return []
new_param = []
for x in param:
if x.startswith(":"):
x = x[1:]
mark_param.add(x)
if x not in total_param:
raise ValueError("Unknown param: " + x + str(total_param))
new_param.append(x)
return new_param
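# Illustration (hypothetical parameter names): with total_param = ["lr", "epochs"],
#   _check_param([":lr", "epochs"], total_param, marks)
# returns ["lr", "epochs"] and adds "lr" to marks, since a leading ":" marks a
# parameter and the prefix is stripped before validation.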
def _parse(conn, yaml_path):
yml_dict = yaml.load(open(yaml_path), Loader=yaml.FullLoader)
total_params = [x[6:] for x in db_utils.get_columns(conn, "RESULT") if x.startswith("param_")]
target_result = {}
has_direction = False
for x in yml_dict.get("target", []):
if type(x) == dict:
cur_tuple = _singleton_dict_to_tuple(x)
target_result[cur_tuple[1]] = cur_tuple[0]
has_direction = True
else:
target_result[x] = ""
mark_params = set()
group_by_params = _check_param(yml_dict.get("group_by", []), total_params, mark_params)
find_best_params = _check_param(yml_dict.get("find_best", []), total_params, mark_params)
ignore_params = _check_param(yml_dict.get("ignore", []), total_params, set())
if not has_direction and len(find_best_params) != 0:
raise ValueError("Unknown direction for find best params: " + str(find_best_params))
if len(find_best_params) == 0:
find_best_params = [group_by_params[0]]
current_params = group_by_params + find_best_params
left_params = [x for x in total_params if x not in current_params]
where_list = yml_dict.get("where", [])
where_clauses = ["STATUS = 'TERMINATED'"]
where_clause_params = []
for where_condition in where_list:
if type(where_condition) == dict:
item = _singleton_dict_to_tuple(where_condition)
where_clauses.append(str(item[0]) + "=?")
where_clause_params.append(item[1])
elif type(where_condition) == str:
where_clauses.append(where_condition)
where_clauses_statement = " AND ".join(list(map(lambda x: "(%s)" % x, where_clauses)))
statement = "SELECT * FROM RESULT"
if len(where_clauses) != 0:
statement += " WHERE " + where_clauses_statement
cursor = db_utils.execute_sql(conn, statement, where_clause_params)
columns = [description[0] for description in cursor.description]
result = list(cursor)
data = pd.DataFrame(result, columns=columns)
# -*- coding: utf-8 -*-
"""Main module."""
import pandas as pd
from sklearn.linear_model import LinearRegression, RandomizedLasso
from sklearn.feature_selection import VarianceThreshold
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
from sklearn.preprocessing import Imputer
import numpy as np
from sklearn.pipeline import Pipeline
import logging
import warnings
warnings.filterwarnings("ignore", category=DeprecationWarning)
# TODO
# Add MAPE instead of RMSE
# setup logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
class FeatureSelect(object):
"""
Base class for feature selection
"""
def __init__(self, path, target):
self.path = path
self.target = target
self.df = pd.DataFrame()
self.selected_features = []
self.quality_report = []
self.model = ""
self.R2 = None
self.mse = None
def load_csv(self, sep='|'):
"""Function to load csv into python data fromae
Args:
path(str): path to csv file.
sep (str): seperator.
Returns:
df: DataFrame of csv file.
"""
self.df = pd.read_csv(self.path, header=None, sep=sep)
return self.df
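# Usage sketch (the argument values below are assumptions for illustration only):
# FeatureSelect(path, target) stores the csv path and the target column, and
# load_csv reads the file with the given separator.
#   fs = FeatureSelect("data.csv", target="price")
#   df = fs.load_csv(sep='|')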
def data_check(self):
"""Function for creating a data quality report of data.
Args:
df (object): DataFrame.
Returns:
quality_check: Quality report.
"""
df = self.df
data_types = pd.DataFrame(df.dtypes, columns=['dtype'])
missing_data_counts = pd.DataFrame(df.isnull().sum(), columns=['missing'])
present_data_counts = pd.DataFrame(df.count(), columns=['count'])
unique_value_counts = pd.DataFrame(columns=['unique'])
for v in list(df.columns.values):
unique_value_counts.loc[v] = [df[v].nunique()]
minimum_values = pd.DataFrame(columns=['min'])
# License: Apache-2.0
import databricks.koalas as ks
import numpy as np
import pandas as pd
import pytest
from pandas.testing import assert_frame_equal, assert_series_equal
from gators.model_building.train_test_split import TrainTestSplit
@pytest.fixture()
def data_ordered():
X = pd.DataFrame(np.arange(40).reshape(8, 5), columns=list("ABCDE"))
y_name = "TARGET"
y = pd.Series([0, 1, 2, 0, 1, 2, 0, 1], name=y_name)
test_ratio = 0.5
obj = TrainTestSplit(test_ratio=test_ratio, strategy="ordered")
X_train_expected = pd.DataFrame(
{
"A": {0: 0, 1: 5, 2: 10, 3: 15},
"B": {0: 1, 1: 6, 2: 11, 3: 16},
"C": {0: 2, 1: 7, 2: 12, 3: 17},
"D": {0: 3, 1: 8, 2: 13, 3: 18},
"E": {0: 4, 1: 9, 2: 14, 3: 19},
}
)
X_test_expected = pd.DataFrame(
{
"A": {4: 20, 5: 25, 6: 30, 7: 35},
"B": {4: 21, 5: 26, 6: 31, 7: 36},
"C": {4: 22, 5: 27, 6: 32, 7: 37},
"D": {4: 23, 5: 28, 6: 33, 7: 38},
"E": {4: 24, 5: 29, 6: 34, 7: 39},
}
)
y_train_expected = pd.Series({0: 0, 1: 1, 2: 2, 3: 0}, name=y_name)
y_test_expected = pd.Series({4: 1, 5: 2, 6: 0, 7: 1}, name=y_name)
return (
obj,
X,
y,
X_train_expected,
X_test_expected,
y_train_expected,
y_test_expected,
)
@pytest.fixture()
def data_random():
X = pd.DataFrame(np.arange(40).reshape(8, 5), columns=list("ABCDE"))
y_name = "TARGET"
y = pd.Series([0, 1, 2, 0, 1, 2, 0, 1], name=y_name)
test_ratio = 0.5
obj = TrainTestSplit(test_ratio=test_ratio, strategy="random", random_state=0)
X_train_expected = pd.DataFrame(
{
"A": {0: 0, 3: 15, 4: 20, 5: 25},
"B": {0: 1, 3: 16, 4: 21, 5: 26},
"C": {0: 2, 3: 17, 4: 22, 5: 27},
"D": {0: 3, 3: 18, 4: 23, 5: 28},
"E": {0: 4, 3: 19, 4: 24, 5: 29},
}
)
X_test_expected = pd.DataFrame(
{
"A": {6: 30, 2: 10, 1: 5, 7: 35},
"B": {6: 31, 2: 11, 1: 6, 7: 36},
"C": {6: 32, 2: 12, 1: 7, 7: 37},
"D": {6: 33, 2: 13, 1: 8, 7: 38},
"E": {6: 34, 2: 14, 1: 9, 7: 39},
}
)
y_train_expected = pd.Series({0: 0, 3: 0, 4: 1, 5: 2}, name=y_name)
y_test_expected = pd.Series({6: 0, 2: 2, 1: 1, 7: 1}, name=y_name)
return (
obj,
X,
y,
X_train_expected,
X_test_expected,
y_train_expected,
y_test_expected,
)
@pytest.fixture()
def data_stratified():
X = pd.DataFrame(np.arange(40).reshape(8, 5), columns=list("ABCDE"))
y_name = "TARGET"
y = pd.Series([0, 1, 2, 0, 1, 2, 0, 1], name=y_name)
test_ratio = 0.5
obj = TrainTestSplit(test_ratio=test_ratio, strategy="stratified", random_state=0)
X_train_expected = pd.DataFrame(
{
"A": {0: 0, 1: 5, 2: 10},
"B": {0: 1, 1: 6, 2: 11},
"C": {0: 2, 1: 7, 2: 12},
"D": {0: 3, 1: 8, 2: 13},
"E": {0: 4, 1: 9, 2: 14},
}
)
X_test_expected = pd.DataFrame(
{
"A": {6: 30, 3: 15, 7: 35, 4: 20, 5: 25},
"B": {6: 31, 3: 16, 7: 36, 4: 21, 5: 26},
"C": {6: 32, 3: 17, 7: 37, 4: 22, 5: 27},
"D": {6: 33, 3: 18, 7: 38, 4: 23, 5: 28},
"E": {6: 34, 3: 19, 7: 39, 4: 24, 5: 29},
}
)
y_train_expected = pd.Series({0: 0, 1: 1, 2: 2}, name=y_name)
y_test_expected = pd.Series({6: 0, 3: 0, 7: 1, 4: 1, 5: 2}, name=y_name)
return (
obj,
X,
y,
X_train_expected,
X_test_expected,
y_train_expected,
y_test_expected,
)
@pytest.fixture()
def data_ordered_ks():
X = ks.DataFrame(np.arange(40).reshape(8, 5), columns=list("ABCDE"))
y_name = "TARGET"
y = ks.Series([0, 1, 2, 0, 1, 2, 0, 1], name=y_name)
test_ratio = 0.5
obj = TrainTestSplit(test_ratio=test_ratio, strategy="ordered")
return obj, X, y
@pytest.fixture()
def data_random_ks():
X = ks.DataFrame(np.arange(40).reshape(8, 5), columns=list("ABCDE"))
y_name = "TARGET"
y = ks.Series([0, 1, 2, 0, 1, 2, 0, 1], name=y_name)
test_ratio = 0.5
obj = TrainTestSplit(test_ratio=test_ratio, strategy="random", random_state=0)
return obj, X, y
@pytest.fixture()
def data_stratified_ks():
X = ks.DataFrame(np.arange(40).reshape(8, 5), columns=list("ABCDE"))
y_name = "TARGET"
y = ks.Series([0, 1, 2, 0, 1, 2, 0, 1], name=y_name)
test_ratio = 0.5
obj = TrainTestSplit(test_ratio=test_ratio, strategy="stratified", random_state=0)
return obj, X, y
def test_ordered(data_ordered):
(
obj,
X,
y,
X_train_expected,
X_test_expected,
y_train_expected,
y_test_expected,
) = data_ordered
X_train, X_test, y_train, y_test = obj.transform(X, y)
assert_frame_equal(X_train, X_train_expected)
assert_frame_equal(X_test, X_test_expected)
assert_series_equal(y_train, y_train_expected)
from datetime import (
datetime,
timedelta,
)
import re
import numpy as np
import pytest
from pandas._libs import iNaT
from pandas.errors import InvalidIndexError
import pandas.util._test_decorators as td
from pandas.core.dtypes.common import is_integer
import pandas as pd
from pandas import (
Categorical,
DataFrame,
DatetimeIndex,
Index,
MultiIndex,
Series,
Timestamp,
date_range,
isna,
notna,
)
import pandas._testing as tm
import pandas.core.common as com
# We pass through a TypeError raised by numpy
_slice_msg = "slice indices must be integers or None or have an __index__ method"
class TestDataFrameIndexing:
def test_getitem(self, float_frame):
# Slicing
sl = float_frame[:20]
assert len(sl.index) == 20
# Column access
for _, series in sl.items():
assert len(series.index) == 20
assert tm.equalContents(series.index, sl.index)
for key, _ in float_frame._series.items():
assert float_frame[key] is not None
assert "random" not in float_frame
with pytest.raises(KeyError, match="random"):
float_frame["random"]
def test_getitem2(self, float_frame):
df = float_frame.copy()
df["$10"] = np.random.randn(len(df))
ad = np.random.randn(len(df))
df["@awesome_domain"] = ad
with pytest.raises(KeyError, match=re.escape("'df[\"$10\"]'")):
df.__getitem__('df["$10"]')
res = df["@awesome_domain"]
tm.assert_numpy_array_equal(ad, res.values)
def test_setitem_list(self, float_frame):
float_frame["E"] = "foo"
data = float_frame[["A", "B"]]
float_frame[["B", "A"]] = data
tm.assert_series_equal(float_frame["B"], data["A"], check_names=False)
tm.assert_series_equal(float_frame["A"], data["B"], check_names=False)
msg = "Columns must be same length as key"
with pytest.raises(ValueError, match=msg):
data[["A"]] = float_frame[["A", "B"]]
newcolumndata = range(len(data.index) - 1)
msg = (
rf"Length of values \({len(newcolumndata)}\) "
rf"does not match length of index \({len(data)}\)"
)
with pytest.raises(ValueError, match=msg):
data["A"] = newcolumndata
def test_setitem_list2(self):
df = DataFrame(0, index=range(3), columns=["tt1", "tt2"], dtype=np.int_)
df.loc[1, ["tt1", "tt2"]] = [1, 2]
result = df.loc[df.index[1], ["tt1", "tt2"]]
expected = Series([1, 2], df.columns, dtype=np.int_, name=1)
tm.assert_series_equal(result, expected)
df["tt1"] = df["tt2"] = "0"
df.loc[df.index[1], ["tt1", "tt2"]] = ["1", "2"]
result = df.loc[df.index[1], ["tt1", "tt2"]]
expected = Series(["1", "2"], df.columns, name=1)
tm.assert_series_equal(result, expected)
def test_getitem_boolean(self, mixed_float_frame, mixed_int_frame, datetime_frame):
# boolean indexing
d = datetime_frame.index[10]
indexer = datetime_frame.index > d
indexer_obj = indexer.astype(object)
subindex = datetime_frame.index[indexer]
subframe = datetime_frame[indexer]
tm.assert_index_equal(subindex, subframe.index)
with pytest.raises(ValueError, match="Item wrong length"):
datetime_frame[indexer[:-1]]
subframe_obj = datetime_frame[indexer_obj]
tm.assert_frame_equal(subframe_obj, subframe)
with pytest.raises(ValueError, match="Boolean array expected"):
datetime_frame[datetime_frame]
# test that Series work
indexer_obj = Series(indexer_obj, datetime_frame.index)
subframe_obj = datetime_frame[indexer_obj]
tm.assert_frame_equal(subframe_obj, subframe)
# test that Series indexers reindex
# we are producing a warning that since the passed boolean
# key is not the same as the given index, we will reindex
# not sure this is really necessary
with tm.assert_produces_warning(UserWarning):
indexer_obj = indexer_obj.reindex(datetime_frame.index[::-1])
subframe_obj = datetime_frame[indexer_obj]
tm.assert_frame_equal(subframe_obj, subframe)
# test df[df > 0]
for df in [
datetime_frame,
mixed_float_frame,
mixed_int_frame,
]:
data = df._get_numeric_data()
bif = df[df > 0]
bifw = DataFrame(
{c: np.where(data[c] > 0, data[c], np.nan) for c in data.columns},
index=data.index,
columns=data.columns,
)
# add back other columns to compare
for c in df.columns:
if c not in bifw:
bifw[c] = df[c]
bifw = bifw.reindex(columns=df.columns)
tm.assert_frame_equal(bif, bifw, check_dtype=False)
for c in df.columns:
if bif[c].dtype != bifw[c].dtype:
assert bif[c].dtype == df[c].dtype
def test_getitem_boolean_casting(self, datetime_frame):
# don't upcast if we don't need to
df = datetime_frame.copy()
df["E"] = 1
df["E"] = df["E"].astype("int32")
df["E1"] = df["E"].copy()
df["F"] = 1
df["F"] = df["F"].astype("int64")
df["F1"] = df["F"].copy()
casted = df[df > 0]
result = casted.dtypes
expected = Series(
[np.dtype("float64")] * 4
+ [np.dtype("int32")] * 2
+ [np.dtype("int64")] * 2,
index=["A", "B", "C", "D", "E", "E1", "F", "F1"],
)
tm.assert_series_equal(result, expected)
# int block splitting
df.loc[df.index[1:3], ["E1", "F1"]] = 0
casted = df[df > 0]
result = casted.dtypes
expected = Series(
[np.dtype("float64")] * 4
+ [np.dtype("int32")]
+ [np.dtype("float64")]
+ [np.dtype("int64")]
+ [np.dtype("float64")],
index=["A", "B", "C", "D", "E", "E1", "F", "F1"],
)
tm.assert_series_equal(result, expected)
def test_getitem_boolean_list(self):
df = DataFrame(np.arange(12).reshape(3, 4))
def _checkit(lst):
result = df[lst]
expected = df.loc[df.index[lst]]
tm.assert_frame_equal(result, expected)
_checkit([True, False, True])
_checkit([True, True, True])
_checkit([False, False, False])
def test_getitem_boolean_iadd(self):
arr = np.random.randn(5, 5)
df = DataFrame(arr.copy(), columns=["A", "B", "C", "D", "E"])
df[df < 0] += 1
arr[arr < 0] += 1
tm.assert_almost_equal(df.values, arr)
def test_boolean_index_empty_corner(self):
# #2096
blah = DataFrame(np.empty([0, 1]), columns=["A"], index=DatetimeIndex([]))
# both of these should succeed trivially
k = np.array([], bool)
blah[k]
blah[k] = 0
def test_getitem_ix_mixed_integer(self):
df = DataFrame(
np.random.randn(4, 3), index=[1, 10, "C", "E"], columns=[1, 2, 3]
)
result = df.iloc[:-1]
expected = df.loc[df.index[:-1]]
tm.assert_frame_equal(result, expected)
result = df.loc[[1, 10]]
expected = df.loc[Index([1, 10])]
tm.assert_frame_equal(result, expected)
def test_getitem_ix_mixed_integer2(self):
# 11320
df = DataFrame(
{
"rna": (1.5, 2.2, 3.2, 4.5),
-1000: [11, 21, 36, 40],
0: [10, 22, 43, 34],
1000: [0, 10, 20, 30],
},
columns=["rna", -1000, 0, 1000],
)
result = df[[1000]]
expected = df.iloc[:, [3]]
tm.assert_frame_equal(result, expected)
result = df[[-1000]]
expected = df.iloc[:, [1]]
tm.assert_frame_equal(result, expected)
def test_getattr(self, float_frame):
tm.assert_series_equal(float_frame.A, float_frame["A"])
msg = "'DataFrame' object has no attribute 'NONEXISTENT_NAME'"
with pytest.raises(AttributeError, match=msg):
float_frame.NONEXISTENT_NAME
def test_setattr_column(self):
df = DataFrame({"foobar": 1}, index=range(10))
df.foobar = 5
assert (df.foobar == 5).all()
def test_setitem(self, float_frame):
# not sure what else to do here
series = float_frame["A"][::2]
float_frame["col5"] = series
assert "col5" in float_frame
assert len(series) == 15
assert len(float_frame) == 30
exp = np.ravel(np.column_stack((series.values, [np.nan] * 15)))
exp = Series(exp, index=float_frame.index, name="col5")
tm.assert_series_equal(float_frame["col5"], exp)
series = float_frame["A"]
float_frame["col6"] = series
tm.assert_series_equal(series, float_frame["col6"], check_names=False)
# set ndarray
arr = np.random.randn(len(float_frame))
float_frame["col9"] = arr
assert (float_frame["col9"] == arr).all()
float_frame["col7"] = 5
assert (float_frame["col7"] == 5).all()
float_frame["col0"] = 3.14
assert (float_frame["col0"] == 3.14).all()
float_frame["col8"] = "foo"
assert (float_frame["col8"] == "foo").all()
# this is partially a view (e.g. some blocks are view)
# so raise/warn
smaller = float_frame[:2]
msg = r"\nA value is trying to be set on a copy of a slice from a DataFrame"
with pytest.raises(com.SettingWithCopyError, match=msg):
smaller["col10"] = ["1", "2"]
assert smaller["col10"].dtype == np.object_
assert (smaller["col10"] == ["1", "2"]).all()
def test_setitem2(self):
# dtype changing GH4204
df = DataFrame([[0, 0]])
df.iloc[0] = np.nan
expected = DataFrame([[np.nan, np.nan]])
tm.assert_frame_equal(df, expected)
df = DataFrame([[0, 0]])
df.loc[0] = np.nan
tm.assert_frame_equal(df, expected)
def test_setitem_boolean(self, float_frame):
df = float_frame.copy()
values = float_frame.values
df[df["A"] > 0] = 4
values[values[:, 0] > 0] = 4
tm.assert_almost_equal(df.values, values)
# test that column reindexing works
series = df["A"] == 4
series = series.reindex(df.index[::-1])
df[series] = 1
values[values[:, 0] == 4] = 1
tm.assert_almost_equal(df.values, values)
df[df > 0] = 5
values[values > 0] = 5
tm.assert_almost_equal(df.values, values)
df[df == 5] = 0
values[values == 5] = 0
tm.assert_almost_equal(df.values, values)
# a df that needs alignment first
df[df[:-1] < 0] = 2
np.putmask(values[:-1], values[:-1] < 0, 2)
tm.assert_almost_equal(df.values, values)
# indexed with same shape but rows-reversed df
df[df[::-1] == 2] = 3
values[values == 2] = 3
tm.assert_almost_equal(df.values, values)
msg = "Must pass DataFrame or 2-d ndarray with boolean values only"
with pytest.raises(TypeError, match=msg):
df[df * 0] = 2
# index with DataFrame
mask = df > np.abs(df)
expected = df.copy()
df[df > np.abs(df)] = np.nan
expected.values[mask.values] = np.nan
tm.assert_frame_equal(df, expected)
# set from DataFrame
expected = df.copy()
df[df > np.abs(df)] = df * 2
np.putmask(expected.values, mask.values, df.values * 2)
tm.assert_frame_equal(df, expected)
def test_setitem_cast(self, float_frame):
float_frame["D"] = float_frame["D"].astype("i8")
assert float_frame["D"].dtype == np.int64
# #669, should not cast?
# this is now set to int64, which means a replacement of the column to
# the value dtype (and nothing to do with the existing dtype)
float_frame["B"] = 0
assert float_frame["B"].dtype == np.int64
# cast if pass array of course
float_frame["B"] = np.arange(len(float_frame))
assert issubclass(float_frame["B"].dtype.type, np.integer)
float_frame["foo"] = "bar"
float_frame["foo"] = 0
assert float_frame["foo"].dtype == np.int64
float_frame["foo"] = "bar"
float_frame["foo"] = 2.5
assert float_frame["foo"].dtype == np.float64
float_frame["something"] = 0
assert float_frame["something"].dtype == np.int64
float_frame["something"] = 2
assert float_frame["something"].dtype == np.int64
float_frame["something"] = 2.5
assert float_frame["something"].dtype == np.float64
def test_setitem_corner(self, float_frame):
# corner case
df = DataFrame({"B": [1.0, 2.0, 3.0], "C": ["a", "b", "c"]}, index=np.arange(3))
del df["B"]
df["B"] = [1.0, 2.0, 3.0]
assert "B" in df
assert len(df.columns) == 2
df["A"] = "beginning"
df["E"] = "foo"
df["D"] = "bar"
df[datetime.now()] = "date"
df[datetime.now()] = 5.0
# what to do when empty frame with index
dm = DataFrame(index=float_frame.index)
dm["A"] = "foo"
dm["B"] = "bar"
assert len(dm.columns) == 2
assert dm.values.dtype == np.object_
# upcast
dm["C"] = 1
assert dm["C"].dtype == np.int64
dm["E"] = 1.0
assert dm["E"].dtype == np.float64
# set existing column
dm["A"] = "bar"
assert "bar" == dm["A"][0]
dm = DataFrame(index=np.arange(3))
dm["A"] = 1
dm["foo"] = "bar"
del dm["foo"]
dm["foo"] = "bar"
assert dm["foo"].dtype == np.object_
dm["coercible"] = ["1", "2", "3"]
assert dm["coercible"].dtype == np.object_
def test_setitem_corner2(self):
data = {
"title": ["foobar", "bar", "foobar"] + ["foobar"] * 17,
"cruft": np.random.random(20),
}
df = DataFrame(data)
ix = df[df["title"] == "bar"].index
df.loc[ix, ["title"]] = "foobar"
df.loc[ix, ["cruft"]] = 0
assert df.loc[1, "title"] == "foobar"
assert df.loc[1, "cruft"] == 0
def test_setitem_ambig(self):
# Difficulties with mixed-type data
from decimal import Decimal
# Created as float type
dm = DataFrame(index=range(3), columns=range(3))
coercable_series = Series([Decimal(1) for _ in range(3)], index=range(3))
uncoercable_series = Series(["foo", "bzr", "baz"], index=range(3))
dm[0] = np.ones(3)
assert len(dm.columns) == 3
dm[1] = coercable_series
assert len(dm.columns) == 3
dm[2] = uncoercable_series
assert len(dm.columns) == 3
assert dm[2].dtype == np.object_
def test_setitem_None(self, float_frame):
# GH #766
float_frame[None] = float_frame["A"]
tm.assert_series_equal(
float_frame.iloc[:, -1], float_frame["A"], check_names=False
)
tm.assert_series_equal(
float_frame.loc[:, None], float_frame["A"], check_names=False
)
tm.assert_series_equal(float_frame[None], float_frame["A"], check_names=False)
repr(float_frame)
def test_loc_setitem_boolean_mask_allfalse(self):
# GH 9596
df = DataFrame(
{"a": ["1", "2", "3"], "b": ["11", "22", "33"], "c": ["111", "222", "333"]}
)
result = df.copy()
result.loc[result.b.isna(), "a"] = result.a
tm.assert_frame_equal(result, df)
def test_getitem_fancy_slice_integers_step(self):
df = DataFrame(np.random.randn(10, 5))
# this is OK
result = df.iloc[:8:2] # noqa
df.iloc[:8:2] = np.nan
assert isna(df.iloc[:8:2]).values.all()
def test_getitem_setitem_integer_slice_keyerrors(self):
df = DataFrame(np.random.randn(10, 5), index=range(0, 20, 2))
# this is OK
cp = df.copy()
cp.iloc[4:10] = 0
assert (cp.iloc[4:10] == 0).values.all()
# so is this
cp = df.copy()
cp.iloc[3:11] = 0
assert (cp.iloc[3:11] == 0).values.all()
result = df.iloc[2:6]
result2 = df.loc[3:11]
expected = df.reindex([4, 6, 8, 10])
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(result2, expected)
# non-monotonic, raise KeyError
df2 = df.iloc[list(range(5)) + list(range(5, 10))[::-1]]
with pytest.raises(KeyError, match=r"^3$"):
df2.loc[3:11]
with pytest.raises(KeyError, match=r"^3$"):
df2.loc[3:11] = 0
@td.skip_array_manager_invalid_test # already covered in test_iloc_col_slice_view
def test_fancy_getitem_slice_mixed(self, float_frame, float_string_frame):
sliced = float_string_frame.iloc[:, -3:]
assert sliced["D"].dtype == np.float64
# get view with single block
# setting it triggers setting with copy
sliced = float_frame.iloc[:, -3:]
assert np.shares_memory(sliced["C"]._values, float_frame["C"]._values)
msg = r"\nA value is trying to be set on a copy of a slice from a DataFrame"
with pytest.raises(com.SettingWithCopyError, match=msg):
sliced.loc[:, "C"] = 4.0
assert (float_frame["C"] == 4).all()
def test_getitem_setitem_non_ix_labels(self):
df = tm.makeTimeDataFrame()
start, end = df.index[[5, 10]]
result = df.loc[start:end]
result2 = df[start:end]
expected = df[5:11]
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(result2, expected)
result = df.copy()
result.loc[start:end] = 0
result2 = df.copy()
result2[start:end] = 0
expected = df.copy()
expected[5:11] = 0
tm.assert_frame_equal(result, expected)
tm.assert_frame_equal(result2, expected)
def test_ix_multi_take(self):
df = DataFrame(np.random.randn(3, 2))
rs = df.loc[df.index == 0, :]
xp = df.reindex([0])
tm.assert_frame_equal(rs, xp)
# GH#1321
df = DataFrame(np.random.randn(3, 2))
rs = df.loc[df.index == 0, df.columns == 1]
xp = df.reindex(index=[0], columns=[1])
tm.assert_frame_equal(rs, xp)
def test_getitem_fancy_scalar(self, float_frame):
f = float_frame
ix = f.loc
# individual value
for col in f.columns:
ts = f[col]
for idx in f.index[::5]:
assert ix[idx, col] == ts[idx]
@td.skip_array_manager_invalid_test # TODO(ArrayManager) rewrite not using .values
def test_setitem_fancy_scalar(self, float_frame):
f = float_frame
expected = float_frame.copy()
ix = f.loc
# individual value
for j, col in enumerate(f.columns):
ts = f[col] # noqa
for idx in f.index[::5]:
i = f.index.get_loc(idx)
val = np.random.randn()
expected.values[i, j] = val
ix[idx, col] = val
tm.assert_frame_equal(f, expected)
def test_getitem_fancy_boolean(self, float_frame):
f = float_frame
ix = f.loc
expected = f.reindex(columns=["B", "D"])
result = ix[:, [False, True, False, True]]
tm.assert_frame_equal(result, expected)
expected = f.reindex(index=f.index[5:10], columns=["B", "D"])
result = ix[f.index[5:10], [False, True, False, True]]
tm.assert_frame_equal(result, expected)
boolvec = f.index > f.index[7]
expected = f.reindex(index=f.index[boolvec])
result = ix[boolvec]
tm.assert_frame_equal(result, expected)
result = ix[boolvec, :]
tm.assert_frame_equal(result, expected)
result = ix[boolvec, f.columns[2:]]
expected = f.reindex(index=f.index[boolvec], columns=["C", "D"])
tm.assert_frame_equal(result, expected)
@td.skip_array_manager_invalid_test # TODO(ArrayManager) rewrite not using .values
def test_setitem_fancy_boolean(self, float_frame):
# from 2d, set with booleans
frame = float_frame.copy()
expected = float_frame.copy()
mask = frame["A"] > 0
frame.loc[mask] = 0.0
expected.values[mask.values] = 0.0
tm.assert_frame_equal(frame, expected)
frame = float_frame.copy()
expected = float_frame.copy()
frame.loc[mask, ["A", "B"]] = 0.0
expected.values[mask.values, :2] = 0.0
tm.assert_frame_equal(frame, expected)
def test_getitem_fancy_ints(self, float_frame):
result = float_frame.iloc[[1, 4, 7]]
expected = float_frame.loc[float_frame.index[[1, 4, 7]]]
tm.assert_frame_equal(result, expected)
result = float_frame.iloc[:, [2, 0, 1]]
expected = float_frame.loc[:, float_frame.columns[[2, 0, 1]]]
tm.assert_frame_equal(result, expected)
def test_getitem_setitem_boolean_misaligned(self, float_frame):
# boolean index misaligned labels
mask = float_frame["A"][::-1] > 1
result = float_frame.loc[mask]
expected = float_frame.loc[mask[::-1]]
tm.assert_frame_equal(result, expected)
cp = float_frame.copy()
expected = float_frame.copy()
cp.loc[mask] = 0
expected.loc[mask] = 0
tm.assert_frame_equal(cp, expected)
def test_getitem_setitem_boolean_multi(self):
df = DataFrame(np.random.randn(3, 2))
# get
k1 = np.array([True, False, True])
k2 = np.array([False, True])
result = df.loc[k1, k2]
expected = df.loc[[0, 2], [1]]
tm.assert_frame_equal(result, expected)
expected = df.copy()
df.loc[np.array([True, False, True]), np.array([False, True])] = 5
expected.loc[[0, 2], [1]] = 5
tm.assert_frame_equal(df, expected)
def test_getitem_setitem_float_labels(self):
index = Index([1.5, 2, 3, 4, 5])
df = DataFrame(np.random.randn(5, 5), index=index)
result = df.loc[1.5:4]
expected = df.reindex([1.5, 2, 3, 4])
tm.assert_frame_equal(result, expected)
assert len(result) == 4
result = df.loc[4:5]
expected = df.reindex([4, 5]) # reindex with int
tm.assert_frame_equal(result, expected, check_index_type=False)
assert len(result) == 2
result = df.loc[4:5]
expected = df.reindex([4.0, 5.0]) # reindex with float
tm.assert_frame_equal(result, expected)
assert len(result) == 2
# loc_float changes this to work properly
result = df.loc[1:2]
expected = df.iloc[0:2]
tm.assert_frame_equal(result, expected)
df.loc[1:2] = 0
result = df[1:2]
assert (result == 0).all().all()
# #2727
index = Index([1.0, 2.5, 3.5, 4.5, 5.0])
df = DataFrame(np.random.randn(5, 5), index=index)
# positional slicing only via iloc!
msg = (
"cannot do positional indexing on Float64Index with "
r"these indexers \[1.0\] of type float"
)
with pytest.raises(TypeError, match=msg):
df.iloc[1.0:5]
result = df.iloc[4:5]
expected = df.reindex([5.0])
tm.assert_frame_equal(result, expected)
assert len(result) == 1
cp = df.copy()
with pytest.raises(TypeError, match=_slice_msg):
cp.iloc[1.0:5] = 0
with pytest.raises(TypeError, match=msg):
result = cp.iloc[1.0:5] == 0
assert result.values.all()
assert (cp.iloc[0:1] == df.iloc[0:1]).values.all()
cp = df.copy()
cp.iloc[4:5] = 0
assert (cp.iloc[4:5] == 0).values.all()
assert (cp.iloc[0:4] == df.iloc[0:4]).values.all()
# float slicing
result = df.loc[1.0:5]
expected = df
tm.assert_frame_equal(result, expected)
assert len(result) == 5
result = df.loc[1.1:5]
expected = df.reindex([2.5, 3.5, 4.5, 5.0])
tm.assert_frame_equal(result, expected)
assert len(result) == 4
result = df.loc[4.51:5]
expected = df.reindex([5.0])
tm.assert_frame_equal(result, expected)
assert len(result) == 1
result = df.loc[1.0:5.0]
expected = df.reindex([1.0, 2.5, 3.5, 4.5, 5.0])
tm.assert_frame_equal(result, expected)
assert len(result) == 5
cp = df.copy()
cp.loc[1.0:5.0] = 0
result = cp.loc[1.0:5.0]
assert (result == 0).values.all()
def test_setitem_single_column_mixed_datetime(self):
df = DataFrame(
np.random.randn(5, 3),
index=["a", "b", "c", "d", "e"],
columns=["foo", "bar", "baz"],
)
df["timestamp"] = Timestamp("20010102")
# check our dtypes
result = df.dtypes
expected = Series(
[np.dtype("float64")] * 3 + [np.dtype("datetime64[ns]")],
index=["foo", "bar", "baz", "timestamp"],
)
tm.assert_series_equal(result, expected)
# GH#16674 iNaT is treated as an integer when given by the user
df.loc["b", "timestamp"] = iNaT
assert not isna(df.loc["b", "timestamp"])
assert df["timestamp"].dtype == np.object_
assert df.loc["b", "timestamp"] == iNaT
# allow this syntax (as of GH#3216)
df.loc["c", "timestamp"] = np.nan
assert isna(df.loc["c", "timestamp"])
# allow this syntax
df.loc["d", :] = np.nan
assert not isna(df.loc["c", :]).all()
def test_setitem_mixed_datetime(self):
# GH 9336
expected = DataFrame(
{
"a": [0, 0, 0, 0, 13, 14],
"b": [
datetime(2012, 1, 1),
1,
"x",
"y",
datetime(2013, 1, 1),
datetime(2014, 1, 1),
],
}
)
df = DataFrame(0, columns=list("ab"), index=range(6))
df["b"] = pd.NaT
df.loc[0, "b"] = datetime(2012, 1, 1)
df.loc[1, "b"] = 1
df.loc[[2, 3], "b"] = "x", "y"
A = np.array(
[
[13, np.datetime64("2013-01-01T00:00:00")],
[14, np.datetime64("2014-01-01T00:00:00")],
]
)
df.loc[[4, 5], ["a", "b"]] = A
tm.assert_frame_equal(df, expected)
def test_setitem_frame_float(self, float_frame):
piece = float_frame.loc[float_frame.index[:2], ["A", "B"]]
float_frame.loc[float_frame.index[-2] :, ["A", "B"]] = piece.values
result = float_frame.loc[float_frame.index[-2:], ["A", "B"]].values
expected = piece.values
tm.assert_almost_equal(result, expected)
def test_setitem_frame_mixed(self, float_string_frame):
# GH 3216
# already aligned
f = float_string_frame.copy()
piece = DataFrame(
[[1.0, 2.0], [3.0, 4.0]], index=f.index[0:2], columns=["A", "B"]
)
key = (f.index[slice(None, 2)], ["A", "B"])
f.loc[key] = piece
tm.assert_almost_equal(f.loc[f.index[0:2], ["A", "B"]].values, piece.values)
def test_setitem_frame_mixed_rows_unaligned(self, float_string_frame):
# GH#3216 rows unaligned
f = float_string_frame.copy()
piece = DataFrame(
[[1.0, 2.0], [3.0, 4.0], [5.0, 6.0], [7.0, 8.0]],
index=list(f.index[0:2]) + ["foo", "bar"],
columns=["A", "B"],
)
key = (f.index[slice(None, 2)], ["A", "B"])
f.loc[key] = piece
tm.assert_almost_equal(
f.loc[f.index[0:2:], ["A", "B"]].values, piece.values[0:2]
)
def test_setitem_frame_mixed_key_unaligned(self, float_string_frame):
# GH#3216 key is unaligned with values
f = float_string_frame.copy()
piece = f.loc[f.index[:2], ["A"]]
piece.index = f.index[-2:]
key = (f.index[slice(-2, None)], ["A", "B"])
f.loc[key] = piece
piece["B"] = np.nan
tm.assert_almost_equal(f.loc[f.index[-2:], ["A", "B"]].values, piece.values)
def test_setitem_frame_mixed_ndarray(self, float_string_frame):
# GH#3216 ndarray
f = float_string_frame.copy()
piece = float_string_frame.loc[f.index[:2], ["A", "B"]]
key = (f.index[slice(-2, None)], ["A", "B"])
f.loc[key] = piece.values
tm.assert_almost_equal(f.loc[f.index[-2:], ["A", "B"]].values, piece.values)
def test_setitem_frame_upcast(self):
# needs upcasting
df = DataFrame([[1, 2, "foo"], [3, 4, "bar"]], columns=["A", "B", "C"])
df2 = df.copy()
df2.loc[:, ["A", "B"]] = df.loc[:, ["A", "B"]] + 0.5
expected = df.reindex(columns=["A", "B"])
expected += 0.5
expected["C"] = df["C"]
tm.assert_frame_equal(df2, expected)
def test_setitem_frame_align(self, float_frame):
piece = float_frame.loc[float_frame.index[:2], ["A", "B"]]
piece.index = float_frame.index[-2:]
piece.columns = ["A", "B"]
float_frame.loc[float_frame.index[-2:], ["A", "B"]] = piece
result = float_frame.loc[float_frame.index[-2:], ["A", "B"]].values
expected = piece.values
tm.assert_almost_equal(result, expected)
def test_getitem_setitem_ix_duplicates(self):
# #1201
df = DataFrame(np.random.randn(5, 3), index=["foo", "foo", "bar", "baz", "bar"])
result = df.loc["foo"]
expected = df[:2]
tm.assert_frame_equal(result, expected)
result = df.loc["bar"]
expected = df.iloc[[2, 4]]
tm.assert_frame_equal(result, expected)
result = df.loc["baz"]
expected = df.iloc[3]
tm.assert_series_equal(result, expected)
def test_getitem_ix_boolean_duplicates_multiple(self):
# #1201
df = DataFrame(np.random.randn(5, 3), index=["foo", "foo", "bar", "baz", "bar"])
result = df.loc[["bar"]]
exp = df.iloc[[2, 4]]
tm.assert_frame_equal(result, exp)
result = df.loc[df[1] > 0]
exp = df[df[1] > 0]
tm.assert_frame_equal(result, exp)
result = df.loc[df[0] > 0]
exp = df[df[0] > 0]
tm.assert_frame_equal(result, exp)
@pytest.mark.parametrize("bool_value", [True, False])
def test_getitem_setitem_ix_bool_keyerror(self, bool_value):
# #2199
df = DataFrame({"a": [1, 2, 3]})
message = f"{bool_value}: boolean label can not be used without a boolean index"
with pytest.raises(KeyError, match=message):
df.loc[bool_value]
msg = "cannot use a single bool to index into setitem"
with pytest.raises(KeyError, match=msg):
df.loc[bool_value] = 0
# TODO: rename? remove?
def test_single_element_ix_dont_upcast(self, float_frame):
float_frame["E"] = 1
assert issubclass(float_frame["E"].dtype.type, (int, np.integer))
result = float_frame.loc[float_frame.index[5], "E"]
assert is_integer(result)
# GH 11617
df = DataFrame({"a": [1.23]})
df["b"] = 666
result = df.loc[0, "b"]
assert is_integer(result)
expected = Series([666], [0], name="b")
result = df.loc[[0], "b"]
tm.assert_series_equal(result, expected)
def test_iloc_row(self):
df = DataFrame(np.random.randn(10, 4), index=range(0, 20, 2))
result = df.iloc[1]
exp = df.loc[2]
tm.assert_series_equal(result, exp)
result = df.iloc[2]
exp = df.loc[4]
tm.assert_series_equal(result, exp)
# slice
result = df.iloc[slice(4, 8)]
expected = df.loc[8:14]
tm.assert_frame_equal(result, expected)
# list of integers
result = df.iloc[[1, 2, 4, 6]]
expected = df.reindex(df.index[[1, 2, 4, 6]])
tm.assert_frame_equal(result, expected)
def test_iloc_row_slice_view(self, using_array_manager):
df = DataFrame(np.random.randn(10, 4), index=range(0, 20, 2))
original = df.copy()
# verify slice is view
# setting it makes it raise/warn
subset = df.iloc[slice(4, 8)]
assert np.shares_memory(df[2], subset[2])
msg = r"\nA value is trying to be set on a copy of a slice from a DataFrame"
with pytest.raises(com.SettingWithCopyError, match=msg):
subset.loc[:, 2] = 0.0
exp_col = original[2].copy()
# TODO(ArrayManager) verify it is expected that the original didn't change
if not using_array_manager:
exp_col[4:8] = 0.0
tm.assert_series_equal(df[2], exp_col)
def test_iloc_col(self):
df = DataFrame(np.random.randn(4, 10), columns=range(0, 20, 2))
result = df.iloc[:, 1]
exp = df.loc[:, 2]
tm.assert_series_equal(result, exp)
result = df.iloc[:, 2]
exp = df.loc[:, 4]
tm.assert_series_equal(result, exp)
# slice
result = df.iloc[:, slice(4, 8)]
expected = df.loc[:, 8:14]
tm.assert_frame_equal(result, expected)
# list of integers
result = df.iloc[:, [1, 2, 4, 6]]
expected = df.reindex(columns=df.columns[[1, 2, 4, 6]])
tm.assert_frame_equal(result, expected)
def test_iloc_col_slice_view(self, using_array_manager):
df = DataFrame(np.random.randn(4, 10), columns=range(0, 20, 2))
original = df.copy()
subset = df.iloc[:, slice(4, 8)]
if not using_array_manager:
# verify slice is view
assert np.shares_memory(df[8]._values, subset[8]._values)
# and that we are setting a copy
msg = r"\nA value is trying to be set on a copy of a slice from a DataFrame"
with pytest.raises(com.SettingWithCopyError, match=msg):
subset.loc[:, 8] = 0.0
assert (df[8] == 0).all()
else:
# TODO(ArrayManager) verify this is the desired behaviour
subset[8] = 0.0
# subset changed
assert (subset[8] == 0).all()
# but df itself did not change (setitem replaces full column)
tm.assert_frame_equal(df, original)
def test_loc_duplicates(self):
# gh-17105
# insert a duplicate element to the index
trange = date_range(
start=Timestamp(year=2017, month=1, day=1),
end=Timestamp(year=2017, month=1, day=5),
)
trange = trange.insert(loc=5, item=Timestamp(year=2017, month=1, day=5))
df = DataFrame(0, index=trange, columns=["A", "B"])
bool_idx = np.array([False, False, False, False, False, True])
# assignment
df.loc[trange[bool_idx], "A"] = 6
expected = DataFrame(
{"A": [0, 0, 0, 0, 6, 6], "B": [0, 0, 0, 0, 0, 0]}, index=trange
)
tm.assert_frame_equal(df, expected)
# in-place
df = DataFrame(0, index=trange, columns=["A", "B"])
df.loc[trange[bool_idx], "A"] += 6
tm.assert_frame_equal(df, expected)
def test_setitem_with_unaligned_tz_aware_datetime_column(self):
# GH 12981
# Assignment of unaligned offset-aware datetime series.
# Make sure timezone isn't lost
column = Series(date_range("2015-01-01", periods=3, tz="utc"), name="dates")
df = DataFrame({"dates": column})
df["dates"] = column[[1, 0, 2]]
tm.assert_series_equal(df["dates"], column)
df = DataFrame({"dates": column})
df.loc[[0, 1, 2], "dates"] = column[[1, 0, 2]]
tm.assert_series_equal(df["dates"], column)
def test_loc_setitem_datetimelike_with_inference(self):
# GH 7592
# assignment of timedeltas with NaT
one_hour = timedelta(hours=1)
df = DataFrame(index=date_range("20130101", periods=4))
df["A"] = np.array([1 * one_hour] * 4, dtype="m8[ns]")
df.loc[:, "B"] = np.array([2 * one_hour] * 4, dtype="m8[ns]")
df.loc[df.index[:3], "C"] = np.array([3 * one_hour] * 3, dtype="m8[ns]")
df.loc[:, "D"] = np.array([4 * one_hour] * 4, dtype="m8[ns]")
df.loc[df.index[:3], "E"] = np.array([5 * one_hour] * 3, dtype="m8[ns]")
df["F"] = np.timedelta64("NaT")
df.loc[df.index[:-1], "F"] = np.array([6 * one_hour] * 3, dtype="m8[ns]")
df.loc[df.index[-3] :, "G"] = date_range("20130101", periods=3)
df["H"] = np.datetime64("NaT")
result = df.dtypes
expected = Series(
[np.dtype("timedelta64[ns]")] * 6 + [np.dtype("datetime64[ns]")] * 2,
index=list("ABCDEFGH"),
)
tm.assert_series_equal(result, expected)
def test_getitem_boolean_indexing_mixed(self):
df = DataFrame(
{
0: {35: np.nan, 40: np.nan, 43: np.nan, 49: np.nan, 50: np.nan},
1: {
35: np.nan,
40: 0.32632316859446198,
43: np.nan,
49: 0.32632316859446198,
50: 0.39114724480578139,
},
2: {
35: np.nan,
40: np.nan,
43: 0.29012581014105987,
49: np.nan,
50: np.nan,
},
3: {35: np.nan, 40: np.nan, 43: np.nan, 49: np.nan, 50: np.nan},
4: {
35: 0.34215328467153283,
40: np.nan,
43: np.nan,
49: np.nan,
50: np.nan,
},
"y": {35: 0, 40: 0, 43: 0, 49: 0, 50: 1},
}
)
# mixed int/float ok
df2 = df.copy()
df2[df2 > 0.3] = 1
expected = df.copy()
expected.loc[40, 1] = 1
expected.loc[49, 1] = 1
expected.loc[50, 1] = 1
expected.loc[35, 4] = 1
tm.assert_frame_equal(df2, expected)
df["foo"] = "test"
msg = "not supported between instances|unorderable types"
with pytest.raises(TypeError, match=msg):
df[df > 0.3] = 1
def test_type_error_multiindex(self):
# See gh-12218
mi = MultiIndex.from_product([["x", "y"], [0, 1]], names=[None, "c"])
dg = DataFrame(
[[1, 1, 2, 2], [3, 3, 4, 4]], columns=mi, index=Index([0, 1], name="i")
)
with pytest.raises(InvalidIndexError, match="slice"):
dg[:, 0]
index = Index(range(2), name="i")
columns = MultiIndex(
levels=[["x", "y"], [0, 1]], codes=[[0, 1], [0, 0]], names=[None, "c"]
)
expected = DataFrame([[1, 2], [3, 4]], columns=columns, index=index)
result = dg.loc[:, (slice(None), 0)]
tm.assert_frame_equal(result, expected)
name = ("x", 0)
index = Index(range(2), name="i")
expected = Series([1, 3], index=index, name=name)
result = dg["x", 0]
tm.assert_series_equal(result, expected)
def test_getitem_interval_index_partial_indexing(self):
# GH#36490
df = DataFrame(
np.ones((3, 4)), columns=pd.IntervalIndex.from_breaks(np.arange(5))
)
expected = df.iloc[:, 0]
res = df[0.5]
tm.assert_series_equal(res, expected)
res = df.loc[:, 0.5]
tm.assert_series_equal(res, expected)
def test_setitem_array_as_cell_value(self):
# GH#43422
df = DataFrame(columns=["a", "b"], dtype=object)
df.loc[0] = {"a": np.zeros((2,)), "b": np.zeros((2, 2))}
expected = DataFrame({"a": [np.zeros((2,))], "b": [np.zeros((2, 2))]})
tm.assert_frame_equal(df, expected)
# with AM goes through split-path, loses dtype
@td.skip_array_manager_not_yet_implemented
def test_iloc_setitem_nullable_2d_values(self):
df = DataFrame({"A": [1, 2, 3]}, dtype="Int64")
orig = df.copy()
df.loc[:] = df.values[:, ::-1]
tm.assert_frame_equal(df, orig)
df.loc[:] = pd.core.arrays.PandasArray(df.values[:, ::-1])
tm.assert_frame_equal(df, orig)
df.iloc[:] = df.iloc[:, :]
tm.assert_frame_equal(df, orig)
@pytest.mark.parametrize(
"null", [pd.NaT, pd.NaT.to_numpy("M8[ns]"), pd.NaT.to_numpy("m8[ns]")]
)
def test_setting_mismatched_na_into_nullable_fails(
self, null, any_numeric_ea_dtype
):
# GH#44514 don't cast mismatched nulls to pd.NA
df = | DataFrame({"A": [1, 2, 3]}, dtype=any_numeric_ea_dtype) | pandas.DataFrame |
# This script runs the RDD models for a paper on the impact of COVID-19 on academic publishing
# Importing required modules
import pandas as pd
import datetime
import numpy as np
import statsmodels.api as stats
from matplotlib import pyplot as plt
import gender_guesser.detector as gender
from ToTeX import restab
# Defining a helper function for identifying COVID-19 related papers
def covid(papers, row):
string = str(papers.Title[row]) + str(papers.Abstract[row]) + str(papers.Keywords[row])
if 'covid' in string.lower():
return 1
else:
return 0
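# Illustrative check of the flag (hypothetical single-row frame, not the real MDPI data):
# any mention of "covid" in the title, abstract, or keywords — in any casing — yields 1.
_demo = pd.DataFrame({'Title': ['COVID-19 and peer review'], 'Abstract': ['...'], 'Keywords': ['publishing']})
assert covid(_demo, 0) == 1
del _demo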
# Defining a helper function for isolating the name of the first author
def first_name(auths):
a = auths.index("'")
try:
b = auths[a+1:].index(' ')
except:
b = auths[a+1:].index("'")
return auths[a+1:b+2]
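# Minimal sketch of the expected behaviour (hypothetical author string): the slice arithmetic
# assumes the field starts with "['", so the first given name sits between the opening quote
# and the first space.
assert first_name("['Jane Doe', 'John Smith']") == 'Jane'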
# Defining a helper function for isolating the national affiliation of the first author
def first_nationality(affils):
if str(affils) == 'nan':
affils = ''
else:
try:
a = affils.index("',")
except:
a = len(affils) - 2
c = affils[:a].count(', ')
for j in range(c):
b = affils[:a].index(', ')
affils = affils[b+2:a]
return affils
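# Hedged example (hypothetical affiliation string): the loop strips the leading comma-separated
# parts of the first affiliation, leaving the trailing country name of the first author.
assert first_nationality("['Economics, Some University, USA', 'Physics, Bolivia']") == 'USA'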
# Reading in the data
print('Reading in the data.......')
papers = pd.read_csv('C:/Users/User/Documents/Data/COVID-19/MDPI_data.csv')
# Control for COVID-19 related papers
# Creating the list
print('Creating a flag for COVID-19 related papers.......')
c19 = [covid(papers, row) for row in range(len(papers))]
# Adding COVID data to data set
print('Adding COVID-19 flag to the data set.......')
c19 = pd.Series(c19, name = 'COVID')
papers = pd.concat([papers, c19], axis = 1)
# Checking the number of COVID-19 related papers after the time cut-off as an anecdote:
# Note that this stat does not reflect dropping certain papers due to being published in unestablished journals
post_study_papers = ['lol' for i in range(len(papers)) if datetime.datetime.strptime(papers.Submitted[i], '%Y-%m-%d') > datetime.datetime.strptime('2020-06-30', '%Y-%m-%d')]
poststudy_covid = ['lol' for i in range(len(papers)) if datetime.datetime.strptime(papers.Submitted[i], '%Y-%m-%d') > datetime.datetime.strptime('2020-06-30', '%Y-%m-%d') and papers.COVID[i] == 1]
# Create a list of journals which will be included in the study - those with pubs prior to 2020
print('Removing papers from journals first published post 2020-01-01.......')
journals = []
for journal in papers.Journal.unique():
j = papers[papers.Journal == journal].reset_index()
if datetime.datetime.strptime(min(j.Accepted), '%Y-%m-%d') < datetime.datetime.strptime('2020-01-01', '%Y-%m-%d') and datetime.datetime.strptime(max(j.Accepted), '%Y-%m-%d') > datetime.datetime.strptime('2019-01-01', '%Y-%m-%d'):
journals.append(j.Journal[0])
# Subset data based on journals
df = papers[papers.Journal.isin(journals)].reset_index(drop = True)
# Subset data based on submission date
print('Removing papers from outside of the study time frame.......')
post1812 = [int(datetime.datetime.strptime(df.Submitted[i], '%Y-%m-%d') > datetime.datetime.strptime('2018-12-31', '%Y-%m-%d')) for i in range(len(df))]
pre2007 = [int(datetime.datetime.strptime(df.Submitted[i], '%Y-%m-%d') < datetime.datetime.strptime('2020-07-01', '%Y-%m-%d')) for i in range(len(df))]
study = pd.Series([post1812[i] * pre2007[i] for i in range(len(post1812))], name = 'Study')
df = pd.concat([df, study], axis = 1)
df = df[df.Study == 1].reset_index(drop = True)
# Computing the number of authors
print('Computing the number of authors for each paper.......')
numb_authors = [df.Authors[i].count(',') + 1 for i in range(len(df))]
numb_authors = pd.Series(numb_authors, name = 'Author_Count')
df = pd.concat([df, numb_authors], axis = 1)
# Predict perceived gender of the first author only
print('Predicting the perceived gender of first authors for each paper.......')
gd = gender.Detector()
first_author_gender = [gd.get_gender(first_name(df.Authors[i])) for i in range(len(df))]
first_author_gender = pd.Series(first_author_gender, name = 'Gender')
df = pd.concat([df, first_author_gender], axis = 1)
# Finding the nationality of the first author
print('Finding the nationality of the first author for each paper.......')
first_nat = [first_nationality(df.Affiliations[i]) for i in range(len(df))]
first_nat = pd.Series(first_nat, name = 'Nationality')
df = pd.concat([df, first_nat], axis = 1)
# Estimating the percentage of male / female authors for each paper
# Defining a helper function for the main function below
def inp_trimmer(inp):
a = inp.index("'") # mimic first_name
try:
b = inp[a+1:].index(' ') # mimic first_name
except:
b = inp[a+1:].index("'") # mimic first_name
inp = inp[b+3:] # shorten inp
try:
c = inp.index("',") # find next name or end of inp
inp = inp[c+3:]
except:
inp = ']'
return inp
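# Sketch of how inp_trimmer advances through an author list (hypothetical input): successive
# calls map "['Jane Doe', 'John Smith']" -> "'John Smith']" -> "]", so all_auths below can walk
# the list one author at a time until the terminating "]".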
# Defining a function to parse names and run them through the existing function for first author names
def all_auths(inp,nu):
    if nu % 100 == 0: # Just a visual cue because this isn't particularly fast
print('Working on records ' + str(nu+1) + ' through ' + str(nu+101) + ' of 167,703.......')
gd = gender.Detector()
listicle = []
while inp != ']':
listicle.append(gd.get_gender(first_name(inp)))
inp = inp_trimmer(inp)
return listicle
# Applying this function to predict the perceived genders of all authors
# This is currently commented out because it takes quite a long time to run and too many authors are categorized as 'unknown'
#all_genders = [all_auths(df.Authors[i].replace('"',"'"),i) for i in range(len(df))]
# Below are lists of countries categorized by the World Bank Analytical Classification quartiles
high = ['Andorra', 'Antigua and Barbuda', 'Aruba', 'Australia', 'Austria', 'The Bahamas', 'Bahrain',
'Barbados', 'Belgium', 'Bermuda', 'Brunei', 'Canada', 'The Cayman Islands', 'Channel Islands',
'Croatia', 'Cyprus', 'Czech Republic', 'Denmark', 'Equatorial Guinea', 'Estonia', 'Faeroe Islands',
'Finland', 'France', 'French Polynesia', 'Germany', 'Greece', 'Greenland', 'Hong Kong', 'Hungary',
'Iceland', 'Ireland', 'Isle of Man', 'Israel', 'Italy', 'Japan', 'Korea', 'Kuwait', 'Liechtenstein',
'Luxembourg', 'Macao', 'Malta', 'Monaco', 'The Netherlands', 'New Caledonia', 'New Zealand',
'Northern Mariana Islands', 'Norway', 'Oman', 'Portugal', 'Qatar', 'San Marino', 'Saudi Arabia',
'Singapore', 'Slovakia', 'Slovenia', 'Spain', 'Sweden', 'Switzerland', 'Taiwan', 'Trinidad and Tobago',
'United Arab Emirates', 'UK', 'USA']
upper_mid = ['Algeria', 'American Samoa', 'Argentina', 'Belarus', 'Bosnia and Herzegovina', 'Botswana', 'Brazil',
'Bulgaria', 'Chile', 'Colombia', 'Costa Rica', 'Cuba', 'Dominica', 'Dominican Republic', 'Fiji',
'Gabon', 'Grenada', 'Jamaica', 'Kazakhstan', 'Latvia', 'Lebanon', 'Libya', 'Lithuania', 'Macedonia',
'Malaysia', 'Mauritius', 'Mexico', 'Montenegro', 'Namibia', 'Palau', 'Panama', 'Peru', 'Poland',
'Romania', 'Russia', 'Serbia', 'Seychelles', 'South Africa', 'Saint Kitts and Nevis', 'Saint Lucia',
'Saint Vincent and the Grenadines', 'Suriname', 'Turkey', 'Uruguay', 'Venezuela']
lower_mid = ['Albania', 'Angola', 'Armenia', 'Azerbaijan', 'Belize', 'Bhutan', 'Bolivia', 'Cabo Verde', 'Cameroon',
'China', 'Republic of the Congo', 'Ivory Coast', 'Djibouti', 'Ecuador', 'Egypt', 'El Salvador', 'Georgia',
'Guatemala', 'Guyana', 'Honduras', 'India', 'Indonesia', 'Iran', 'Iraq', 'Jordan', 'Kiribati',
'Kosovo', 'Lesotho', 'Maldives', 'Marshall Islands', 'Micronesia', 'Moldova', 'Mongolia', 'Morocco',
'Nicaragua', 'Nigeria', 'Pakistan', 'Papua New Guinea', 'Paraguay', 'Philippines', 'Samoa',
'Sao Tome and Principe', 'Solomon Islands', 'Sri Lanka', 'Sudan', 'Eswatini', 'Syria', 'Palestine',
'Thailand', 'Timor-Leste', 'Tonga', 'Tunisia', 'Turkmenistan', 'Ukraine', 'Vanuatu', 'West Bank and Gaza']
low = ['Afghanistan', 'Bangladesh', 'Benin', 'Burkina Faso', 'Burundi', 'Cambodia', 'Central African Republic',
'Chad', 'Comoros', 'Democratic Republic of the Congo', 'Eritrea', 'Ethiopia', 'The Gambia', 'Ghana', 'Guinea',
'Guinea-Bissau', 'Haiti', 'Kenya', 'Korea, Dem. Rep.', 'Kyrgyzstan', 'Laos', 'Liberia', 'Madagascar', 'Malawi',
'Mali', 'Mauritania', 'Mozambique', 'Myanmar', 'Nepal', 'Niger', 'Rwanda', 'Senegal', 'Sierra Leone', 'Somalia',
'Tajikistan', 'Tanzania', 'Togo', 'Uganda', 'Uzbekistan', 'Vietnam', 'Yemen', 'Zambia', 'Zimbabwe']
# Defining a dictionary for determining the WBAC quartile
qh = {h:'q1' for h in high}
qu = {h:'q2' for h in upper_mid}
qm = {h:'q3' for h in lower_mid}
ql = {h:'q4' for h in low}
qd = {**qh, **qu, **qm, **ql}
# Defining a function for determining the quartile of the first author's nationality
def f_quart(inp):
try:
res = qd[inp]
except:
res = ''
return res
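# Quick sanity check using values from the WBAC lists above: 'USA' is in the top quartile and
# an unrecognised country name falls through to the empty string.
assert f_quart('USA') == 'q1'
assert f_quart('Atlantis') == ''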
# Determining the quartile of the affiliation of the first author
fq = [f_quart(x) for x in df.Nationality]
fq = pd.Series(fq, name = 'First_Quartile')
df = pd.concat([df, fq], axis = 1)
# Defining a function to determine the 'top quartile' for each paper
def quart(inp,nu):
    if nu % 100 == 0: # Just a visual cue because this isn't particularly fast
print('Working on records ' + str(nu+1) + ' through ' + str(nu+101) + ' of 167,703.......')
listicle = []
while inp != ']':
try:
listicle.append(f_quart(first_nationality(inp)))
inp = inp_trimmer(inp)
except:
inp = ']'
if 'q1' in listicle:
res = 'q1'
elif 'q2' in listicle:
res = 'q2'
elif 'q3' in listicle:
res = 'q3'
else:
res = 'q4'
return res
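# Hedged example (hypothetical two-affiliation paper, nu chosen so nothing is printed): with one
# US and one Bolivian affiliation the paper is assigned the highest quartile present, 'q1'.
assert quart("['Economics, USA', 'Physics, Bolivia']", 1) == 'q1'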
# Determining the 'top quartile' present in each paper
print('Determining the top WBAC quartile present in each paper.......')
quarts = [quart(df.Affiliations[i],i) for i in range(len(df.Affiliations))]
# An indicator variable for whether or not a Q1 (high) nation contributed
q1 = [1 if q == 'q1' else 0 for q in quarts]
# Appending these two lists to the main df
quarts = pd.Series(quarts, name = 'Top_Quartile')
q1 = pd.Series(q1, name = 'Q1')
df = pd.concat([df, quarts, q1], axis = 1)
# 5443 of 167,703 papers had no discernible nationality and are dropped here
df = df[df.First_Quartile != ''].reset_index(drop = True)
# Checking the number of COVID-19 related papers after the time cut-off as an anecdote:
# Note that this stat does now reflect dropping certain papers due to being published in unestablished journals
post_study_papers2 = ['lol' for i in range(len(papers)) if datetime.datetime.strptime(papers.Submitted[i], '%Y-%m-%d') > datetime.datetime.strptime('2020-06-30', '%Y-%m-%d')]
poststudy_covid2 = ['lol' for i in range(len(papers)) if datetime.datetime.strptime(papers.Submitted[i], '%Y-%m-%d') > datetime.datetime.strptime('2020-06-30', '%Y-%m-%d') and papers.COVID[i] == 1]
# Determining if the journal uses single blind or double blind peer review
print('Determining if the journal uses single blind or double blind peer review.......')
# Lists of journals with a double blind peer review policy
db_journals = ['Adm. Sci.', 'AgriEngineering', 'Arts', 'Buildings',
'Economies', 'Educ. Sci.', 'Games', 'Genealogy', 'Humanities',
'J. Intell.', 'J. Open Innov. Technol. Mark. Complex.',
'Journal. Media.', 'Languages', 'Laws', 'Psych', 'Religions',
'Soc. Sci.', 'Societies', 'Toxins']
db = [1 if j in db_journals else 0 for j in df.Journal]
db = pd.Series(db, name = 'Double_Blind')
df = pd.concat([df, db], axis = 1)
# Computing the distances
print('Calculating distances from thresholds.......')
# Distance from March 16 (middle of March)
XX = [datetime.datetime.strptime(df.Submitted[i], '%Y-%m-%d') - datetime.datetime.strptime('2020-03-16', '%Y-%m-%d') for i in range(len(df))]
XX = [x.days for x in XX]
XX = pd.Series(XX, name = 'X-c')
df = pd.concat([df, XX], axis = 1)
# Squared distance from March 16 (middle of March)
XX2 = df['X-c']*df['X-c']
XX2 = pd.Series(XX2, name = '(X-c)^2')
df = pd.concat([df, XX2], axis = 1)
# Cubed distance from March 16 (middle of March)
XX3 = df['X-c']*df['X-c']*df['X-c']
XX3 = pd.Series(XX3, name = '(X-c)^3')
df = pd.concat([df, XX3], axis = 1)
# Distance from surrounding days to serve as robustness checks
# One week prior to March 16
XX01 = [datetime.datetime.strptime(df.Submitted[i], '%Y-%m-%d') - datetime.datetime.strptime('2020-03-17', '%Y-%m-%d') for i in range(len(df))]
XX02 = [datetime.datetime.strptime(df.Submitted[i], '%Y-%m-%d') - datetime.datetime.strptime('2020-03-18', '%Y-%m-%d') for i in range(len(df))]
XX03 = [datetime.datetime.strptime(df.Submitted[i], '%Y-%m-%d') - datetime.datetime.strptime('2020-03-19', '%Y-%m-%d') for i in range(len(df))]
XX04 = [datetime.datetime.strptime(df.Submitted[i], '%Y-%m-%d') - datetime.datetime.strptime('2020-03-20', '%Y-%m-%d') for i in range(len(df))]
XX05 = [datetime.datetime.strptime(df.Submitted[i], '%Y-%m-%d') - datetime.datetime.strptime('2020-03-21', '%Y-%m-%d') for i in range(len(df))]
XX06 = [datetime.datetime.strptime(df.Submitted[i], '%Y-%m-%d') - datetime.datetime.strptime('2020-03-22', '%Y-%m-%d') for i in range(len(df))]
XX07 = [datetime.datetime.strptime(df.Submitted[i], '%Y-%m-%d') - datetime.datetime.strptime('2020-03-23', '%Y-%m-%d') for i in range(len(df))]
XX01 = [x.days for x in XX01]
XX02 = [x.days for x in XX02]
XX03 = [x.days for x in XX03]
XX04 = [x.days for x in XX04]
XX05 = [x.days for x in XX05]
XX06 = [x.days for x in XX06]
XX07 = [x.days for x in XX07]
XX01 = pd.Series(XX01, name = 'X-1-c')
XX02 = pd.Series(XX02, name = 'X-2-c')
XX03 = pd.Series(XX03, name = 'X-3-c')
XX04 = pd.Series(XX04, name = 'X-4-c')
XX05 = pd.Series(XX05, name = 'X-5-c')
XX06 = pd.Series(XX06, name = 'X-6-c')
XX07 = pd.Series(XX07, name = 'X-7-c')
df = pd.concat([df, XX01, XX02, XX03, XX04, XX05, XX06, XX07], axis = 1)
# One week post March 16
XX11 = [datetime.datetime.strptime(df.Submitted[i], '%Y-%m-%d') - datetime.datetime.strptime('2020-03-15', '%Y-%m-%d') for i in range(len(df))]
XX12 = [datetime.datetime.strptime(df.Submitted[i], '%Y-%m-%d') - datetime.datetime.strptime('2020-03-14', '%Y-%m-%d') for i in range(len(df))]
XX13 = [datetime.datetime.strptime(df.Submitted[i], '%Y-%m-%d') - datetime.datetime.strptime('2020-03-13', '%Y-%m-%d') for i in range(len(df))]
XX14 = [datetime.datetime.strptime(df.Submitted[i], '%Y-%m-%d') - datetime.datetime.strptime('2020-03-12', '%Y-%m-%d') for i in range(len(df))]
XX15 = [datetime.datetime.strptime(df.Submitted[i], '%Y-%m-%d') - datetime.datetime.strptime('2020-03-11', '%Y-%m-%d') for i in range(len(df))]
XX16 = [datetime.datetime.strptime(df.Submitted[i], '%Y-%m-%d') - datetime.datetime.strptime('2020-03-10', '%Y-%m-%d') for i in range(len(df))]
XX17 = [datetime.datetime.strptime(df.Submitted[i], '%Y-%m-%d') - datetime.datetime.strptime('2020-03-09', '%Y-%m-%d') for i in range(len(df))]
XX11 = [x.days for x in XX11]
XX12 = [x.days for x in XX12]
XX13 = [x.days for x in XX13]
XX14 = [x.days for x in XX14]
XX15 = [x.days for x in XX15]
XX16 = [x.days for x in XX16]
XX17 = [x.days for x in XX17]
XX11 = pd.Series(XX11, name = 'X+1-c')
XX12 = pd.Series(XX12, name = 'X+2-c')
XX13 = pd.Series(XX13, name = 'X+3-c')
XX14 = pd.Series(XX14, name = 'X+4-c')
XX15 = pd.Series(XX15, name = 'X+5-c')
XX16 = pd.Series(XX16, name = 'X+6-c')
XX17 = pd.Series(XX17, name = 'X+7-c')
df = pd.concat([df, XX11, XX12, XX13, XX14, XX15, XX16, XX17], axis = 1)
# Adding the post-effect variables for the main regression
D = [1 if df['X-c'][i] >= 0 else 0 for i in range(len(df))]
D = pd.Series(D, name = 'D')
DXc = D*df['X-c']
DXc2 = D*df['X-c']*df['X-c']
DXc3 = D*df['X-c']*df['X-c']*df['X-c']
DXc = pd.Series(DXc, name = 'D(X-c)')
DXc2 = pd.Series(DXc2, name = 'D(X-c)^2')
DXc3 = pd.Series(DXc3, name = 'D(X-c)^3')
df = pd.concat([df, D, DXc, DXc2, DXc3], axis = 1)
# Adding the post-effect variables for the robustness checks
D01 = [1 if df['X-1-c'][i] >= 0 else 0 for i in range(len(df))]
D02 = [1 if df['X-2-c'][i] >= 0 else 0 for i in range(len(df))]
D03 = [1 if df['X-3-c'][i] >= 0 else 0 for i in range(len(df))]
D04 = [1 if df['X-4-c'][i] >= 0 else 0 for i in range(len(df))]
D05 = [1 if df['X-5-c'][i] >= 0 else 0 for i in range(len(df))]
D06 = [1 if df['X-6-c'][i] >= 0 else 0 for i in range(len(df))]
D07 = [1 if df['X-7-c'][i] >= 0 else 0 for i in range(len(df))]
D01 = pd.Series(D01, name = 'D-1')
D02 = pd.Series(D02, name = 'D-2')
D03 = pd.Series(D03, name = 'D-3')
D04 = pd.Series(D04, name = 'D-4')
D05 = pd.Series(D05, name = 'D-5')
D06 = pd.Series(D06, name = 'D-6')
D07 = pd.Series(D07, name = 'D-7')
D11 = [1 if df['X+1-c'][i] >= 0 else 0 for i in range(len(df))]
D12 = [1 if df['X+2-c'][i] >= 0 else 0 for i in range(len(df))]
D13 = [1 if df['X+3-c'][i] >= 0 else 0 for i in range(len(df))]
D14 = [1 if df['X+4-c'][i] >= 0 else 0 for i in range(len(df))]
D15 = [1 if df['X+5-c'][i] >= 0 else 0 for i in range(len(df))]
D16 = [1 if df['X+6-c'][i] >= 0 else 0 for i in range(len(df))]
D17 = [1 if df['X+7-c'][i] >= 0 else 0 for i in range(len(df))]
D11 = pd.Series(D11, name = 'D+1')
D12 = pd.Series(D12, name = 'D+2')
D13 = pd.Series(D13, name = 'D+3')
D14 = pd.Series(D14, name = 'D+4')
D15 = pd.Series(D15, name = 'D+5')
D16 = pd.Series(D16, name = 'D+6')
D17 = pd.Series(D17, name = 'D+7')
df = pd.concat([df, D01, D02, D03, D04, D05, D06, D07, D11, D12, D13, D14, D15, D16, D17], axis = 1)
DXc01 = D01*df['X-1-c']
DXc02 = D02*df['X-2-c']
DXc03 = D03*df['X-3-c']
DXc04 = D04*df['X-4-c']
DXc05 = D05*df['X-5-c']
DXc06 = D06*df['X-6-c']
DXc07 = D07*df['X-7-c']
DXc11 = D11*df['X+1-c']
DXc12 = D12*df['X+2-c']
DXc13 = D13*df['X+3-c']
DXc14 = D14*df['X+4-c']
DXc15 = D15*df['X+5-c']
DXc16 = D16*df['X+6-c']
DXc17 = D17*df['X+7-c']
DXc01 = pd.Series(DXc01, name = 'D-1(X-c)')
DXc02 = pd.Series(DXc02, name = 'D-2(X-c)')
DXc03 = pd.Series(DXc03, name = 'D-3(X-c)')
DXc04 = pd.Series(DXc04, name = 'D-4(X-c)')
DXc05 = pd.Series(DXc05, name = 'D-5(X-c)')
DXc06 = pd.Series(DXc06, name = 'D-6(X-c)')
DXc07 = pd.Series(DXc07, name = 'D-7(X-c)')
DXc11 = pd.Series(DXc11, name = 'D+1(X-c)')
DXc12 = pd.Series(DXc12, name = 'D+2(X-c)')
DXc13 = pd.Series(DXc13, name = 'D+3(X-c)')
DXc14 = pd.Series(DXc14, name = 'D+4(X-c)')
DXc15 = pd.Series(DXc15, name = 'D+5(X-c)')
DXc16 = pd.Series(DXc16, name = 'D+6(X-c)')
DXc17 = pd.Series(DXc17, name = 'D+7(X-c)')
df = pd.concat([df, DXc01, DXc02, DXc03, DXc04, DXc05, DXc06, DXc07, DXc11, DXc12, DXc13, DXc14, DXc15, DXc16, DXc17], axis = 1)
# Calculating a total author time to add to the data set as a potential dependent variable
A = [df.Total[i] - df.Editor[i] for i in range(len(df))]
A = pd.Series(A, name = 'Author')
df = pd.concat([df, A], axis = 1)
# Adding natural logarithm transformed arXiv data
ln_arXiv7 = pd.Series(np.log(df.arXiv7.values), name = 'ln_arXiv7')
ln_arXiv14 = pd.Series(np.log(df.arXiv14.values), name = 'ln_arXiv14')
ln_arXiv30 = pd.Series(np.log(df.arXiv30.values), name = 'ln_arXiv30')
ln_new7 = pd.Series(np.log(df.new7.values), name = 'ln_new7')
ln_new14 = pd.Series(np.log(df.new14.values), name = 'ln_new14')
ln_new30 = pd.Series(np.log(df.new30.values), name = 'ln_new30')
df = pd.concat([df, ln_arXiv7, ln_arXiv14, ln_arXiv30, ln_new7, ln_new14, ln_new30], axis = 1)
# Two papers had a bad date resulting in an infeasible (negative) value for Stage1, so they are dropped here
df = df[df.Stage1 >= 0].reset_index(drop = True)
# Defining a function for adding a month dummy
def month(m):
md = {'01':'JAN', '02':'FEB', '03':'MAR', '04':'APR', '05':'MAY', '06':'JUN',
'07':'JUL', '08':'AUG', '09':'SEP', '10':'OCT', '11':'NOV', '12':'DEC', } # a month dictionary
s = m[5:7] # the month as a number stored as a string
mon = md[s]# getting the month from the dictionary
return mon
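# Small check of the month-dummy helper, using the 'YYYY-MM-DD' format of df.Submitted:
assert month('2020-03-16') == 'MAR'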
# Add a month dummy using the function
months = [month(m) for m in df.Submitted]
months = pd.Series(months, name = 'Month')
df = pd.concat([df, months], axis = 1)
# Prepping the data for the regressions
Stage1 = np.log(df.Stage1.values)
Stage2 = np.log(df.Stage2.values)
Stage3 = np.log(df.Stage3.values)
Total = np.log(df.Total.values)
Editor = np.log(df.Editor.values)
XX = stats.add_constant(df[['X-c', '(X-c)^2', '(X-c)^3', 'D', 'D(X-c)', 'D(X-c)^2', 'D(X-c)^3',
'COVID', 'Double_Blind', 'Author_Count', 'ln_arXiv14']])
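# For reference, a sketch of the cubic RDD specification assembled above (notation mine, not the
# paper's): with c = 2020-03-16 and D_i = 1[X_i >= c],
#   ln(y_i) = b0 + b1*(X_i-c) + b2*(X_i-c)^2 + b3*(X_i-c)^3
#             + tau*D_i + g1*D_i*(X_i-c) + g2*D_i*(X_i-c)^2 + g3*D_i*(X_i-c)^3
#             + controls (COVID, Double_Blind, Author_Count, ln_arXiv14) + fixed effects + e_i
# where tau is the discontinuity of interest at the threshold.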
# Creating the fixed effects
dG = | pd.get_dummies(df['Gender']) | pandas.get_dummies |
"""
This is the streamlit web-app
"""
import streamlit as st
import os
import matplotlib.pyplot as plt
import numpy as np
from pathlib import Path
import pandas as pd
from gluonts.model.predictor import Predictor
from gluonts.dataset.common import ListDataset
from gluonts.transform import FieldName
from gluonts.evaluation.backtest import make_evaluation_predictions
import autodraft.visualization as viz
import autodraft.gluonts as glu
# @st.cache
def load_arima(path='../data/output/arima_results_m3.p'):
data = | pd.read_pickle(path) | pandas.read_pickle |
import os
import tempfile
import pandas as pd
import pytest
from pandas.util import testing as pdt
from .. import simulation as sim
from ...utils.testing import assert_frames_equal
def setup_function(func):
sim.clear_sim()
sim.enable_cache()
def teardown_function(func):
sim.clear_sim()
sim.enable_cache()
@pytest.fixture
def df():
return pd.DataFrame(
{'a': [1, 2, 3],
'b': [4, 5, 6]},
index=['x', 'y', 'z'])
def test_tables(df):
wrapped_df = sim.add_table('test_frame', df)
@sim.table()
def test_func(test_frame):
return test_frame.to_frame() / 2
assert set(sim.list_tables()) == {'test_frame', 'test_func'}
table = sim.get_table('test_frame')
assert table is wrapped_df
assert table.columns == ['a', 'b']
assert table.local_columns == ['a', 'b']
assert len(table) == 3
pdt.assert_index_equal(table.index, df.index)
pdt.assert_series_equal(table.get_column('a'), df.a)
pdt.assert_series_equal(table.a, df.a)
pdt.assert_series_equal(table['b'], df['b'])
table = sim._TABLES['test_func']
assert table.index is None
assert table.columns == []
    assert len(table) == 0
pdt.assert_frame_equal(table.to_frame(), df / 2)
pdt.assert_frame_equal(table.to_frame(columns=['a']), df[['a']] / 2)
pdt.assert_index_equal(table.index, df.index)
pdt.assert_series_equal(table.get_column('a'), df.a / 2)
pdt.assert_series_equal(table.a, df.a / 2)
pdt.assert_series_equal(table['b'], df['b'] / 2)
assert len(table) == 3
assert table.columns == ['a', 'b']
def test_table_func_cache(df):
sim.add_injectable('x', 2)
@sim.table(cache=True)
def table(variable='x'):
return df * variable
pdt.assert_frame_equal(sim.get_table('table').to_frame(), df * 2)
sim.add_injectable('x', 3)
pdt.assert_frame_equal(sim.get_table('table').to_frame(), df * 2)
sim.get_table('table').clear_cached()
pdt.assert_frame_equal(sim.get_table('table').to_frame(), df * 3)
sim.add_injectable('x', 4)
pdt.assert_frame_equal(sim.get_table('table').to_frame(), df * 3)
sim.clear_cache()
pdt.assert_frame_equal(sim.get_table('table').to_frame(), df * 4)
sim.add_injectable('x', 5)
pdt.assert_frame_equal(sim.get_table('table').to_frame(), df * 4)
sim.add_table('table', table)
pdt.assert_frame_equal(sim.get_table('table').to_frame(), df * 5)
def test_table_func_cache_disabled(df):
sim.add_injectable('x', 2)
@sim.table('table', cache=True)
def asdf(x):
return df * x
sim.disable_cache()
pdt.assert_frame_equal(sim.get_table('table').to_frame(), df * 2)
sim.add_injectable('x', 3)
pdt.assert_frame_equal(sim.get_table('table').to_frame(), df * 3)
sim.enable_cache()
sim.add_injectable('x', 4)
pdt.assert_frame_equal(sim.get_table('table').to_frame(), df * 3)
def test_table_copy(df):
sim.add_table('test_frame_copied', df, copy_col=True)
sim.add_table('test_frame_uncopied', df, copy_col=False)
sim.add_table('test_func_copied', lambda: df, copy_col=True)
sim.add_table('test_func_uncopied', lambda: df, copy_col=False)
@sim.table(copy_col=True)
def test_funcd_copied():
return df
@sim.table(copy_col=False)
def test_funcd_uncopied():
return df
@sim.table(copy_col=True)
def test_funcd_copied2(test_frame_copied):
# local returns original, but it is copied by copy_col.
return test_frame_copied.local
@sim.table(copy_col=True)
def test_funcd_copied3(test_frame_uncopied):
# local returns original, but it is copied by copy_col.
return test_frame_uncopied.local
@sim.table(copy_col=False)
def test_funcd_uncopied2(test_frame_copied):
# local returns original.
return test_frame_copied.local
@sim.table(copy_col=False)
def test_funcd_uncopied3(test_frame_uncopied):
# local returns original.
return test_frame_uncopied.local
sim.add_table('test_cache_copied', lambda: df, cache=True, copy_col=True)
sim.add_table(
'test_cache_uncopied', lambda: df, cache=True, copy_col=False)
@sim.table(cache=True, copy_col=True)
def test_cached_copied():
return df
@sim.table(cache=True, copy_col=False)
def test_cached_uncopied():
return df
# Create tables with computed columns.
sim.add_table('test_copied_columns', pd.DataFrame(index=df.index),
copy_col=True)
sim.add_table('test_uncopied_columns', pd.DataFrame(index=df.index),
copy_col=False)
for column_name in ['a', 'b']:
label = "test_frame_uncopied.{}".format(column_name)
func = lambda col=label: col
for table_name in ['test_copied_columns', 'test_uncopied_columns']:
sim.add_column(table_name, column_name, func)
for name in ['test_frame_uncopied', 'test_func_uncopied',
'test_funcd_uncopied', 'test_funcd_uncopied2',
'test_funcd_uncopied3', 'test_cache_uncopied',
'test_cached_uncopied', 'test_uncopied_columns',
'test_frame_copied', 'test_func_copied',
'test_funcd_copied', 'test_funcd_copied2',
'test_funcd_copied3', 'test_cache_copied',
'test_cached_copied', 'test_copied_columns']:
table = sim.get_table(name)
table2 = sim.get_table(name)
# to_frame will always return a copy.
pdt.assert_frame_equal(table.to_frame(), df)
assert table.to_frame() is not df
pdt.assert_frame_equal(table.to_frame(), table.to_frame())
assert table.to_frame() is not table.to_frame()
pdt.assert_series_equal(table.to_frame()['a'], df['a'])
assert table.to_frame()['a'] is not df['a']
pdt.assert_series_equal(table.to_frame()['a'],
table.to_frame()['a'])
assert table.to_frame()['a'] is not table.to_frame()['a']
if 'uncopied' in name:
pdt.assert_series_equal(table['a'], df['a'])
assert table['a'] is df['a']
pdt.assert_series_equal(table['a'], table2['a'])
assert table['a'] is table2['a']
else:
pdt.assert_series_equal(table['a'], df['a'])
assert table['a'] is not df['a']
pdt.assert_series_equal(table['a'], table2['a'])
assert table['a'] is not table2['a']
def test_columns_for_table():
sim.add_column(
'table1', 'col10', pd.Series([1, 2, 3], index=['a', 'b', 'c']))
sim.add_column(
'table2', 'col20', pd.Series([10, 11, 12], index=['x', 'y', 'z']))
@sim.column('table1')
def col11():
return pd.Series([4, 5, 6], index=['a', 'b', 'c'])
@sim.column('table2', 'col21')
def asdf():
return pd.Series([13, 14, 15], index=['x', 'y', 'z'])
t1_col_names = sim._list_columns_for_table('table1')
assert set(t1_col_names) == {'col10', 'col11'}
t2_col_names = sim._list_columns_for_table('table2')
assert set(t2_col_names) == {'col20', 'col21'}
t1_cols = sim._columns_for_table('table1')
assert 'col10' in t1_cols and 'col11' in t1_cols
t2_cols = sim._columns_for_table('table2')
assert 'col20' in t2_cols and 'col21' in t2_cols
def test_columns_and_tables(df):
sim.add_table('test_frame', df)
@sim.table()
def test_func(test_frame):
return test_frame.to_frame() / 2
sim.add_column('test_frame', 'c', pd.Series([7, 8, 9], index=df.index))
@sim.column('test_func', 'd')
def asdf(test_func):
return test_func.to_frame(columns=['b'])['b'] * 2
@sim.column('test_func')
def e(column='test_func.d'):
return column + 1
test_frame = sim.get_table('test_frame')
assert set(test_frame.columns) == set(['a', 'b', 'c'])
assert_frames_equal(
test_frame.to_frame(),
pd.DataFrame(
{'a': [1, 2, 3],
'b': [4, 5, 6],
'c': [7, 8, 9]},
index=['x', 'y', 'z']))
assert_frames_equal(
test_frame.to_frame(columns=['a', 'c']),
pd.DataFrame(
{'a': [1, 2, 3],
'c': [7, 8, 9]},
index=['x', 'y', 'z']))
test_func_df = sim._TABLES['test_func']
assert set(test_func_df.columns) == set(['d', 'e'])
assert_frames_equal(
test_func_df.to_frame(),
pd.DataFrame(
{'a': [0.5, 1, 1.5],
'b': [2, 2.5, 3],
'c': [3.5, 4, 4.5],
'd': [4., 5., 6.],
'e': [5., 6., 7.]},
index=['x', 'y', 'z']))
assert_frames_equal(
test_func_df.to_frame(columns=['b', 'd']),
pd.DataFrame(
{'b': [2, 2.5, 3],
'd': [4., 5., 6.]},
index=['x', 'y', 'z']))
assert set(test_func_df.columns) == set(['a', 'b', 'c', 'd', 'e'])
assert set(sim.list_columns()) == {('test_frame', 'c'), ('test_func', 'd'),
('test_func', 'e')}
def test_column_cache(df):
sim.add_injectable('x', 2)
series = pd.Series([1, 2, 3], index=['x', 'y', 'z'])
key = ('table', 'col')
@sim.table()
def table():
return df
@sim.column(*key, cache=True)
def column(variable='x'):
return series * variable
c = lambda: sim._COLUMNS[key]
pdt.assert_series_equal(c()(), series * 2)
sim.add_injectable('x', 3)
pdt.assert_series_equal(c()(), series * 2)
c().clear_cached()
pdt.assert_series_equal(c()(), series * 3)
sim.add_injectable('x', 4)
pdt.assert_series_equal(c()(), series * 3)
sim.clear_cache()
pdt.assert_series_equal(c()(), series * 4)
sim.add_injectable('x', 5)
pdt.assert_series_equal(c()(), series * 4)
sim.get_table('table').clear_cached()
pdt.assert_series_equal(c()(), series * 5)
sim.add_injectable('x', 6)
pdt.assert_series_equal(c()(), series * 5)
sim.add_column(*key, column=column, cache=True)
pdt.assert_series_equal(c()(), series * 6)
def test_column_cache_disabled(df):
sim.add_injectable('x', 2)
series = pd.Series([1, 2, 3], index=['x', 'y', 'z'])
key = ('table', 'col')
@sim.table()
def table():
return df
@sim.column(*key, cache=True)
def column(x):
return series * x
c = lambda: sim._COLUMNS[key]
sim.disable_cache()
pdt.assert_series_equal(c()(), series * 2)
sim.add_injectable('x', 3)
pdt.assert_series_equal(c()(), series * 3)
sim.enable_cache()
sim.add_injectable('x', 4)
pdt.assert_series_equal(c()(), series * 3)
def test_update_col(df):
wrapped = sim.add_table('table', df)
wrapped.update_col('b', pd.Series([7, 8, 9], index=df.index))
pdt.assert_series_equal(wrapped['b'], pd.Series([7, 8, 9], index=df.index))
wrapped.update_col_from_series('a', pd.Series([]))
| pdt.assert_series_equal(wrapped['a'], df['a']) | pandas.util.testing.assert_series_equal |
import datetime
from datetime import timedelta
from distutils.version import LooseVersion
from io import BytesIO
import os
import re
from warnings import catch_warnings, simplefilter
import numpy as np
import pytest
from pandas.compat import is_platform_little_endian, is_platform_windows
import pandas.util._test_decorators as td
from pandas.core.dtypes.common import is_categorical_dtype
import pandas as pd
from pandas import (
Categorical,
CategoricalIndex,
DataFrame,
DatetimeIndex,
Index,
Int64Index,
MultiIndex,
RangeIndex,
Series,
Timestamp,
bdate_range,
concat,
date_range,
isna,
timedelta_range,
)
from pandas.tests.io.pytables.common import (
_maybe_remove,
create_tempfile,
ensure_clean_path,
ensure_clean_store,
safe_close,
safe_remove,
tables,
)
import pandas.util.testing as tm
from pandas.io.pytables import (
ClosedFileError,
HDFStore,
PossibleDataLossError,
Term,
read_hdf,
)
from pandas.io import pytables as pytables # noqa: E402 isort:skip
from pandas.io.pytables import TableIterator # noqa: E402 isort:skip
_default_compressor = "blosc"
ignore_natural_naming_warning = pytest.mark.filterwarnings(
"ignore:object name:tables.exceptions.NaturalNameWarning"
)
@pytest.mark.single
class TestHDFStore:
def test_format_kwarg_in_constructor(self, setup_path):
# GH 13291
with ensure_clean_path(setup_path) as path:
with pytest.raises(ValueError):
HDFStore(path, format="table")
def test_context(self, setup_path):
path = create_tempfile(setup_path)
try:
with HDFStore(path) as tbl:
raise ValueError("blah")
except ValueError:
pass
finally:
safe_remove(path)
try:
with HDFStore(path) as tbl:
tbl["a"] = tm.makeDataFrame()
with HDFStore(path) as tbl:
assert len(tbl) == 1
assert type(tbl["a"]) == DataFrame
finally:
safe_remove(path)
def test_conv_read_write(self, setup_path):
path = create_tempfile(setup_path)
try:
def roundtrip(key, obj, **kwargs):
obj.to_hdf(path, key, **kwargs)
return read_hdf(path, key)
o = tm.makeTimeSeries()
tm.assert_series_equal(o, roundtrip("series", o))
o = tm.makeStringSeries()
tm.assert_series_equal(o, roundtrip("string_series", o))
o = tm.makeDataFrame()
tm.assert_frame_equal(o, roundtrip("frame", o))
# table
df = DataFrame(dict(A=range(5), B=range(5)))
df.to_hdf(path, "table", append=True)
result = read_hdf(path, "table", where=["index>2"])
tm.assert_frame_equal(df[df.index > 2], result)
finally:
safe_remove(path)
def test_long_strings(self, setup_path):
# GH6166
df = DataFrame(
{"a": tm.rands_array(100, size=10)}, index=tm.rands_array(100, size=10)
)
with ensure_clean_store(setup_path) as store:
store.append("df", df, data_columns=["a"])
result = store.select("df")
tm.assert_frame_equal(df, result)
def test_api(self, setup_path):
# GH4584
# API issue when to_hdf doesn't accept append AND format args
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.iloc[:10].to_hdf(path, "df", append=True, format="table")
df.iloc[10:].to_hdf(path, "df", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "df"), df)
# append to False
df.iloc[:10].to_hdf(path, "df", append=False, format="table")
df.iloc[10:].to_hdf(path, "df", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "df"), df)
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.iloc[:10].to_hdf(path, "df", append=True)
df.iloc[10:].to_hdf(path, "df", append=True, format="table")
tm.assert_frame_equal(read_hdf(path, "df"), df)
# append to False
df.iloc[:10].to_hdf(path, "df", append=False, format="table")
df.iloc[10:].to_hdf(path, "df", append=True)
tm.assert_frame_equal(read_hdf(path, "df"), df)
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
df.to_hdf(path, "df", append=False, format="fixed")
tm.assert_frame_equal(read_hdf(path, "df"), df)
df.to_hdf(path, "df", append=False, format="f")
tm.assert_frame_equal(read_hdf(path, "df"), df)
df.to_hdf(path, "df", append=False)
tm.assert_frame_equal(read_hdf(path, "df"), df)
df.to_hdf(path, "df")
tm.assert_frame_equal(read_hdf(path, "df"), df)
with ensure_clean_store(setup_path) as store:
path = store._path
df = tm.makeDataFrame()
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=True, format="table")
store.append("df", df.iloc[10:], append=True, format="table")
tm.assert_frame_equal(store.select("df"), df)
# append to False
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=False, format="table")
store.append("df", df.iloc[10:], append=True, format="table")
tm.assert_frame_equal(store.select("df"), df)
# formats
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=False, format="table")
store.append("df", df.iloc[10:], append=True, format="table")
tm.assert_frame_equal(store.select("df"), df)
_maybe_remove(store, "df")
store.append("df", df.iloc[:10], append=False, format="table")
store.append("df", df.iloc[10:], append=True, format=None)
tm.assert_frame_equal(store.select("df"), df)
with ensure_clean_path(setup_path) as path:
# Invalid.
df = tm.makeDataFrame()
with pytest.raises(ValueError):
df.to_hdf(path, "df", append=True, format="f")
with pytest.raises(ValueError):
df.to_hdf(path, "df", append=True, format="fixed")
with pytest.raises(TypeError):
df.to_hdf(path, "df", append=True, format="foo")
with pytest.raises(TypeError):
df.to_hdf(path, "df", append=False, format="bar")
# File path doesn't exist
path = ""
with pytest.raises(FileNotFoundError):
read_hdf(path, "df")
def test_api_default_format(self, setup_path):
# default_format option
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
pd.set_option("io.hdf.default_format", "fixed")
_maybe_remove(store, "df")
store.put("df", df)
assert not store.get_storer("df").is_table
with pytest.raises(ValueError):
store.append("df2", df)
pd.set_option("io.hdf.default_format", "table")
_maybe_remove(store, "df")
store.put("df", df)
assert store.get_storer("df").is_table
_maybe_remove(store, "df2")
store.append("df2", df)
assert store.get_storer("df").is_table
pd.set_option("io.hdf.default_format", None)
with ensure_clean_path(setup_path) as path:
df = tm.makeDataFrame()
pd.set_option("io.hdf.default_format", "fixed")
df.to_hdf(path, "df")
with HDFStore(path) as store:
assert not store.get_storer("df").is_table
with pytest.raises(ValueError):
df.to_hdf(path, "df2", append=True)
pd.set_option("io.hdf.default_format", "table")
df.to_hdf(path, "df3")
with HDFStore(path) as store:
assert store.get_storer("df3").is_table
df.to_hdf(path, "df4", append=True)
with HDFStore(path) as store:
assert store.get_storer("df4").is_table
pd.set_option("io.hdf.default_format", None)
def test_keys(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeStringSeries()
store["c"] = tm.makeDataFrame()
assert len(store) == 3
expected = {"/a", "/b", "/c"}
assert set(store.keys()) == expected
assert set(store) == expected
def test_keys_ignore_hdf_softlink(self, setup_path):
# GH 20523
# Puts a softlink into HDF file and rereads
with ensure_clean_store(setup_path) as store:
df = DataFrame(dict(A=range(5), B=range(5)))
store.put("df", df)
assert store.keys() == ["/df"]
store._handle.create_soft_link(store._handle.root, "symlink", "df")
# Should ignore the softlink
assert store.keys() == ["/df"]
def test_iter_empty(self, setup_path):
with ensure_clean_store(setup_path) as store:
# GH 12221
assert list(store) == []
def test_repr(self, setup_path):
with ensure_clean_store(setup_path) as store:
repr(store)
store.info()
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeStringSeries()
store["c"] = tm.makeDataFrame()
df = tm.makeDataFrame()
df["obj1"] = "foo"
df["obj2"] = "bar"
df["bool1"] = df["A"] > 0
df["bool2"] = df["B"] > 0
df["bool3"] = True
df["int1"] = 1
df["int2"] = 2
df["timestamp1"] = Timestamp("20010102")
df["timestamp2"] = Timestamp("20010103")
df["datetime1"] = datetime.datetime(2001, 1, 2, 0, 0)
df["datetime2"] = datetime.datetime(2001, 1, 3, 0, 0)
df.loc[3:6, ["obj1"]] = np.nan
df = df._consolidate()._convert(datetime=True)
with catch_warnings(record=True):
simplefilter("ignore", pd.errors.PerformanceWarning)
store["df"] = df
# make a random group in hdf space
store._handle.create_group(store._handle.root, "bah")
assert store.filename in repr(store)
assert store.filename in str(store)
store.info()
# storers
with ensure_clean_store(setup_path) as store:
df = tm.makeDataFrame()
store.append("df", df)
s = store.get_storer("df")
repr(s)
str(s)
@ignore_natural_naming_warning
def test_contains(self, setup_path):
with ensure_clean_store(setup_path) as store:
store["a"] = tm.makeTimeSeries()
store["b"] = tm.makeDataFrame()
store["foo/bar"] = tm.makeDataFrame()
assert "a" in store
assert "b" in store
assert "c" not in store
assert "foo/bar" in store
assert "/foo/bar" in store
assert "/foo/b" not in store
assert "bar" not in store
# gh-2694: tables.NaturalNameWarning
with catch_warnings(record=True):
store["node())"] = | tm.makeDataFrame() | pandas.util.testing.makeDataFrame |