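# Streamlit app for MLB DFS lineup data: it pulls pre-built DraftKings and FanDuel
# seed lineup frames from MongoDB plus projection baselines from Google Sheets,
# lets the user filter by team and stack size, and exports the result as a CSV.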
import streamlit as st
st.set_page_config(layout="wide")
import numpy as np
import pandas as pd
import gspread
import pymongo
@st.cache_resource(ttl=600)
def init_conn():
    scope = ['https://spreadsheets.google.com/feeds', 'https://www.googleapis.com/auth/drive']
    credentials = {
"type": "service_account",
"project_id": "model-sheets-connect",
"private_key_id": "0e0bc2fdef04e771172fe5807392b9d6639d945e",
"private_key": "-----BEGIN PRIVATE KEY-----\nMIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDiu1v/e6KBKOcK\ncx0KQ23nZK3ZVvADYy8u/RUn/EDI82QKxTd/DizRLIV81JiNQxDJXSzgkbwKYEDm\n48E8zGvupU8+Nk76xNPakrQKy2Y8+VJlq5psBtGchJTuUSHcXU5Mg2JhQsB376PJ\nsCw552K6Pw8fpeMDJDZuxpKSkaJR6k9G5Dhf5q8HDXnC5Rh/PRFuKJ2GGRpX7n+2\nhT/sCax0J8jfdTy/MDGiDfJqfQrOPrMKELtsGHR9Iv6F4vKiDqXpKfqH+02E9ptz\nBk+MNcbZ3m90M8ShfRu28ebebsASfarNMzc3dk7tb3utHOGXKCf4tF8yYKo7x8BZ\noO9X4gSfAgMBAAECggEAU8ByyMpSKlTCF32TJhXnVJi/kS+IhC/Qn5JUDMuk4LXr\naAEWsWO6kV/ZRVXArjmuSzuUVrXumISapM9Ps5Ytbl95CJmGDiLDwRL815nvv6k3\nUyAS8EGKjz74RpoIoH6E7EWCAzxlnUgTn+5oP9Flije97epYk3H+e2f1f5e1Nn1d\nYNe8U+1HqJgILcxA1TAUsARBfoD7+K3z/8DVPHI8IpzAh6kTHqhqC23Rram4XoQ6\nzj/ZdVBjvnKuazETfsD+Vl3jGLQA8cKQVV70xdz3xwLcNeHsbPbpGBpZUoF73c65\nkAXOrjYl0JD5yAk+hmYhXr6H9c6z5AieuZGDrhmlFQKBgQDzV6LRXmjn4854DP/J\nI82oX2GcI4eioDZPRukhiQLzYerMQBmyqZIRC+/LTCAhYQSjNgMa+ZKyvLqv48M0\n/x398op/+n3xTs+8L49SPI48/iV+mnH7k0WI/ycd4OOKh8rrmhl/0EWb9iitwJYe\nMjTV/QxNEpPBEXfR1/mvrN/lVQKBgQDuhomOxUhWVRVH6x03slmyRBn0Oiw4MW+r\nrt1hlNgtVmTc5Mu+4G0USMZwYuOB7F8xG4Foc7rIlwS7Ic83jMJxemtqAelwOLdV\nXRLrLWJfX8+O1z/UE15l2q3SUEnQ4esPHbQnZowHLm0mdL14qSVMl1mu1XfsoZ3z\nJZTQb48CIwKBgEWbzQRtKD8lKDupJEYqSrseRbK/ax43DDITS77/DWwHl33D3FYC\nMblUm8ygwxQpR4VUfwDpYXBlklWcJovzamXpSnsfcYVkkQH47NuOXPXPkXQsw+w+\nDYcJzeu7F/vZqk9I7oBkWHUrrik9zPNoUzrfPvSRGtkAoTDSwibhoc5dAoGBAMHE\nK0T/ANeZQLNuzQps6S7G4eqjwz5W8qeeYxsdZkvWThOgDd/ewt3ijMnJm5X05hOn\ni4XF1euTuvUl7wbqYx76Wv3/1ZojiNNgy7ie4rYlyB/6vlBS97F4ZxJdxMlabbCW\n6b3EMWa4EVVXKoA1sCY7IVDE+yoQ1JYsZmq45YzPAoGBANWWHuVueFGZRDZlkNlK\nh5OmySmA0NdNug3G1upaTthyaTZ+CxGliwBqMHAwpkIRPwxUJpUwBTSEGztGTAxs\nWsUOVWlD2/1JaKSmHE8JbNg6sxLilcG6WEDzxjC5dLL1OrGOXj9WhC9KX3sq6qb6\nF/j9eUXfXjAlb042MphoF3ZC\n-----END PRIVATE KEY-----\n",
"client_email": "[email protected]",
"client_id": "100369174533302798535",
"auth_uri": "https://accounts.google.com/o/oauth2/auth",
"token_uri": "https://oauth2.googleapis.com/token",
"auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs",
"client_x509_cert_url": "https://www.googleapis.com/robot/v1/metadata/x509/gspread-connection%40model-sheets-connect.iam.gserviceaccount.com"
    }

    # MongoDB connection holding the pre-built DK and FD seed lineup frames.
    uri = "mongodb+srv://multichem:[email protected]/?retryWrites=true&w=majority&appName=TestCluster"
    client = pymongo.MongoClient(uri, retryWrites=True, serverSelectionTimeoutMS=100000)
    db = client["testing_db"]

    # Pre-built DraftKings seed lineup frame.
    collection = db["DK_MLB_seed_frame"]
    cursor = collection.find()
    raw_display = pd.DataFrame(list(cursor))
    raw_display = raw_display[['SP1', 'SP2', 'C', '1B', '2B', '3B', 'SS', 'OF1', 'OF2', 'OF3', 'salary', 'proj', 'Team', 'Team_count', 'Secondary', 'Secondary_count']]
    DK_seed = raw_display.to_numpy()

    # Pre-built FanDuel seed lineup frame.
    collection = db["FD_MLB_seed_frame"]
    cursor = collection.find()
    raw_display = pd.DataFrame(list(cursor))
    raw_display = raw_display[['P', 'C_1B', '2B', '3B', 'SS', 'OF1', 'OF2', 'OF3', 'UTIL', 'salary', 'proj', 'Team', 'Team_count', 'Secondary', 'Secondary_count']]
    FD_seed = raw_display.to_numpy()

    # Google Sheet holding the projection baselines, read later in init_baselines().
    MLB_Data = 'https://docs.google.com/spreadsheets/d/1f42Ergav8K1VsOLOK9MUn7DM_MLMvv4GR2Fy7EfnZTc/edit#gid=340831852'
    gc_con = gspread.service_account_from_dict(credentials, scope)

    # The seed frames are fully materialized above, so the Mongo connection can be closed before returning.
    client.close()
    return gc_con, client, db, DK_seed, FD_seed, MLB_Data
gcservice_account, client, db, DK_seed, FD_seed, MLB_Data = init_conn()
percentages_format = {'Exposure': '{:.2%}'}
dk_columns = ['SP1', 'SP2', 'C', '1B', '2B', '3B', 'SS', 'OF1', 'OF2', 'OF3', 'salary', 'proj', 'Team', 'Team_count', 'Secondary', 'Secondary_count']
fd_columns = ['P', 'C_1B', '2B', '3B', 'SS', 'OF1', 'OF2', 'OF3', 'UTIL', 'salary', 'proj', 'Team', 'Team_count', 'Secondary', 'Secondary_count']
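
# The column orders above mirror the Mongo seed frames; the positional filters in the
# Data Export tab depend on them (DK: index 12 = Team, 13 = Team_count; FD: index 11 = Team,
# 12 = Team_count), so these lists must stay in sync with the seed frame layout.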
@st.cache_data(ttl=60)
def init_baselines():
    # Read the DK and FD projection baselines from the Google Sheet; rows without
    # a Median projection are dropped.
    sh = gcservice_account.open_by_url(MLB_Data)

    worksheet = sh.worksheet('DK_Projections')
    load_display = pd.DataFrame(worksheet.get_all_records())
    load_display.replace('', np.nan, inplace=True)
    dk_raw = load_display.dropna(subset=['Median'])

    worksheet = sh.worksheet('FD_Projections')
    load_display = pd.DataFrame(worksheet.get_all_records())
    load_display.replace('', np.nan, inplace=True)
    fd_raw = load_display.dropna(subset=['Median'])

    return dk_raw, fd_raw
@st.cache_data
def convert_df(array):
    # Relies on the module-level column_names selected in the Data Export tab.
    array = pd.DataFrame(array, columns=column_names)
    return array.to_csv().encode('utf-8')
@st.cache_data
def calculate_DK_value_frequencies(np_array):
    # Exposure = share of lineups containing each player across the ten DK roster slots.
    unique, counts = np.unique(np_array[:, :10], return_counts=True)
    frequencies = counts / len(np_array)  # Normalize by the number of lineups (rows)
    return pd.DataFrame({'Player': unique, 'Exposure': frequencies})

@st.cache_data
def calculate_FD_value_frequencies(np_array):
    # Exposure = share of lineups containing each player across the nine FD roster slots.
    unique, counts = np.unique(np_array[:, :9], return_counts=True)
    frequencies = counts / len(np_array)  # Normalize by the number of lineups (rows)
    return pd.DataFrame({'Player': unique, 'Exposure': frequencies})
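
# Worked example (hypothetical two-lineup frame): a player appearing in both lineups
# has Exposure 1.00, a player appearing in one of the two lineups has Exposure 0.50.
# The 'Exposure' column is rendered with percentages_format in the Data Export tab.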
dk_raw, fd_raw = init_baselines()
tab1, tab2 = st.tabs(['Data Export', 'Contest Sims'])
with tab1:
    col1, col2 = st.columns([1, 7])
    with col1:
        if st.button("Load/Reset Data", key='reset1'):
            st.cache_data.clear()
            # Copy the keys first so session state isn't mutated while iterating.
            for key in list(st.session_state.keys()):
                del st.session_state[key]
            dk_raw, fd_raw = init_baselines()
        slate_var1 = st.radio("Which data are you loading?", ('Main Slate', 'Other Main Slate'))
        site_var1 = st.radio("What site are you working with?", ('Draftkings', 'Fanduel'))
        if site_var1 == 'Draftkings':
            raw_baselines = dk_raw
            column_names = dk_columns
            team_var1 = st.radio("Do you want a frame with specific teams?", ('Full Slate', 'Specific Teams'), key='team_var1')
            if team_var1 == 'Specific Teams':
                team_var2 = st.multiselect('Which teams do you want?', options=dk_raw['Team'].unique())
            elif team_var1 == 'Full Slate':
                team_var2 = dk_raw.Team.values.tolist()
            stack_var1 = st.radio("Do you want a frame with specific stack sizes?", ('Full Slate', 'Specific Stack Sizes'), key='stack_var1')
            if stack_var1 == 'Specific Stack Sizes':
                stack_var2 = st.multiselect('Which stack sizes do you want?', options=[5, 4, 3, 2, 1, 0])
            elif stack_var1 == 'Full Slate':
                stack_var2 = [5, 4, 3, 2, 1, 0]
        elif site_var1 == 'Fanduel':
            raw_baselines = fd_raw
            column_names = fd_columns
            team_var1 = st.radio("Do you want a frame with specific teams?", ('Full Slate', 'Specific Teams'), key='team_var1')
            if team_var1 == 'Specific Teams':
                team_var2 = st.multiselect('Which teams do you want?', options=fd_raw['Team'].unique())
            elif team_var1 == 'Full Slate':
                team_var2 = fd_raw.Team.values.tolist()
            stack_var1 = st.radio("Do you want a frame with specific stack sizes?", ('Full Slate', 'Specific Stack Sizes'), key='stack_var1')
            if stack_var1 == 'Specific Stack Sizes':
                stack_var2 = st.multiselect('Which stack sizes do you want?', options=[4, 3, 2, 1, 0])
            elif stack_var1 == 'Full Slate':
                stack_var2 = [4, 3, 2, 1, 0]
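
    # col2: apply the selected team / stack-size filters to the pre-built seed frame,
    # then keep the filtered frame, a 1,000-row preview, and the exposure table in
    # session state so they persist across reruns.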
    with col2:
        if site_var1 == 'Draftkings':
            st.session_state.working_seed = DK_seed.copy()
            st.session_state.working_seed = st.session_state.working_seed[np.isin(st.session_state.working_seed[:, 12], team_var2)]
            st.session_state.working_seed = st.session_state.working_seed[np.isin(st.session_state.working_seed[:, 13], stack_var2)]
            st.session_state.data_export_display = pd.DataFrame(st.session_state.working_seed[0:1000], columns=column_names)
            st.session_state.data_export_freq = calculate_DK_value_frequencies(st.session_state.working_seed)
        elif site_var1 == 'Fanduel':
            st.session_state.working_seed = FD_seed.copy()
            st.session_state.working_seed = st.session_state.working_seed[np.isin(st.session_state.working_seed[:, 11], team_var2)]
            st.session_state.working_seed = st.session_state.working_seed[np.isin(st.session_state.working_seed[:, 12], stack_var2)]
            st.session_state.data_export_display = pd.DataFrame(st.session_state.working_seed[0:1000], columns=column_names)
            st.session_state.data_export_freq = calculate_FD_value_frequencies(st.session_state.working_seed)
        with st.container():
            if 'data_export_display' in st.session_state:
                st.dataframe(st.session_state.data_export_display.style.format(precision=2), height=500, use_container_width=True)

        with st.container():
            if 'data_export_freq' in st.session_state:
                st.dataframe(st.session_state.data_export_freq.style.format(percentages_format, precision=2), height=500, use_container_width=True)
if st.button("Prepare data export", key='data_export'):
data_export = st.session_state.working_seed.copy()
st.download_button(
label="Export optimals set",
data=convert_df(st.session_state.data_export),
file_name='MLB_optimals_export.csv',
mime='text/csv',
)
with tab2:
    col1, col2 = st.columns([1, 7])
    with col1:
        if st.button("Load/Reset Data", key='reset2'):
            st.cache_data.clear()
            # Copy the keys first so session state isn't mutated while iterating.
            for key in list(st.session_state.keys()):
                del st.session_state[key]
            # init_baselines() only returns the projection frames; the seed frames
            # come from the cached init_conn() call above.
            dk_raw, fd_raw = init_baselines()
    with col2:
        st.write("Things will go here")