James McCool committed on
Commit
5bc4b14
·
1 Parent(s): 3182a0d

Add Streamlit NBA DFS pivot analysis app with Google Sheets integration

Browse files
Files changed (3) hide show
  1. app.py +367 -0
  2. app.yaml +10 -0
  3. requirements.txt +9 -0
app.py ADDED
@@ -0,0 +1,367 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ import pandas as pd
3
+ import streamlit as st
4
+ import gspread
5
+
6
+ st.set_page_config(layout="wide")
7
+
8
@st.cache_resource
def init_conn():
    """Build the two authenticated gspread clients used by this app.

    Returns:
        tuple: ``(gc, gc2, NBA_Data)`` -- gspread clients for the primary and
        fallback service accounts, plus the URL of the source spreadsheet
        (read from Streamlit secrets).

    SECURITY: the original revision committed both service-account private
    keys verbatim in this file.  They are now read from ``st.secrets``, the
    same way the key ids already were.  Add
    'model_sheets_connect_private_key' and 'sheets_api_connect_private_key'
    to the deployment secrets, and revoke/rotate the leaked keys.
    (The unused ``scope`` list from the original was removed; gspread's
    default scopes cover Sheets + Drive access.)
    """
    credentials = {
        "type": "service_account",
        "project_id": "model-sheets-connect",
        "private_key_id": st.secrets['model_sheets_connect_pk'],
        "private_key": st.secrets['model_sheets_connect_private_key'],
        # NOTE(review): this email appears redacted by the hosting platform
        # ("[email protected]") -- restore the real service-account address.
        "client_email": "[email protected]",
        "client_id": "100369174533302798535",
        "auth_uri": "https://accounts.google.com/o/oauth2/auth",
        "token_uri": "https://oauth2.googleapis.com/token",
        "auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs",
        "client_x509_cert_url": "https://www.googleapis.com/robot/v1/metadata/x509/gspread-connection%40model-sheets-connect.iam.gserviceaccount.com"
    }

    credentials2 = {
        "type": "service_account",
        "project_id": "sheets-api-connect-378620",
        "private_key_id": st.secrets['sheets_api_connect_pk'],
        "private_key": st.secrets['sheets_api_connect_private_key'],
        "client_email": "gspread-connection@sheets-api-connect-378620.iam.gserviceaccount.com",
        "client_id": "106625872877651920064",
        "auth_uri": "https://accounts.google.com/o/oauth2/auth",
        "token_uri": "https://oauth2.googleapis.com/token",
        "auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs",
        "client_x509_cert_url": "https://www.googleapis.com/robot/v1/metadata/x509/gspread-connection%40sheets-api-connect-378620.iam.gserviceaccount.com"
    }

    # URL of the Google Sheet that holds the NBA projection data.
    NBA_Data = st.secrets['NBA_Data']

    gc = gspread.service_account_from_dict(credentials)
    gc2 = gspread.service_account_from_dict(credentials2)

    return gc, gc2, NBA_Data
44
+
45
# Shared gspread clients and the source-sheet URL, created once per session.
gcservice_account, gcservice_account2, NBA_Data = init_conn()

# Styler format map for the results table: every probability column is
# rendered as a two-decimal percentage.
player_roo_format = {'Top_finish': '{:.2%}','Top_5_finish': '{:.2%}', 'Top_10_finish': '{:.2%}', '20+%': '{:.2%}', '4x%': '{:.2%}', '5x%': '{:.2%}',
                     '6x%': '{:.2%}','GPP%': '{:.2%}'}
49
+
50
@st.cache_resource(ttl = 300)
def init_stat_load():
    """Load the 'Player_Level_ROO' worksheet and return cleaned projections.

    Returns:
        tuple: ``(proj_raw, timestamp)`` -- the projection DataFrame sorted by
        Median (descending) and the timestamp string from its first row.

    Raises:
        RuntimeError: if the sheet cannot be read with either service account.

    The original duplicated the whole load inside a bare ``except:``; the two
    accounts are tried in order here (presumably to dodge per-account API
    rate limits -- TODO confirm), and only real load failures trigger the
    fallback.
    """
    raw_display = None
    last_err = None
    for client in (gcservice_account, gcservice_account2):
        try:
            sh = client.open_by_url(NBA_Data)
            worksheet = sh.worksheet('Player_Level_ROO')
            raw_display = pd.DataFrame(worksheet.get_all_records())
            break
        except Exception as err:  # narrowed from the original bare except
            last_err = err
    if raw_display is None:
        raise RuntimeError("Unable to load 'Player_Level_ROO' with either service account") from last_err

    raw_display = raw_display.rename(columns={"Minutes Proj": "Minutes"})
    raw_display = raw_display[['Player', 'Salary', 'Position', 'Team', 'Minutes', 'Median', 'Own', 'site', 'slate', 'timestamp']]

    # Sentinel-out blanks, then drop rows with no player name or a
    # non-positive projection.
    raw_display.replace("", 'Welp', inplace=True)
    raw_display = raw_display.loc[raw_display['Player'] != 'Welp']
    raw_display = raw_display.loc[raw_display['Median'] > 0]

    # Best-effort numeric conversion per column -- equivalent to the
    # deprecated ``pd.to_numeric(..., errors='ignore')`` the original used.
    for col in raw_display.columns:
        try:
            raw_display[col] = pd.to_numeric(raw_display[col])
        except (ValueError, TypeError):
            pass

    proj_raw = raw_display.sort_values(by='Median', ascending=False)
    timestamp = proj_raw['timestamp'].iloc[0]

    return proj_raw, timestamp
73
+
74
@st.cache_data
def convert_df_to_csv(df):
    """Serialize *df* to CSV (index included) as UTF-8 bytes for st.download_button."""
    csv_text = df.to_csv()
    return csv_text.encode('utf-8')
77
+
78
# Pull the cached projections once per run; t_stamp is shown in the sidebar
# so users can see how fresh the sheet data is.
proj_raw, timestamp = init_stat_load()
# Fixed: the original concatenated two placeholder-free f-strings with str().
t_stamp = f"Last Update: {timestamp} CST"

tab1, tab2 = st.tabs(['Pivot Finder', 'Uploads and Info'])
82
+
83
def _read_projections(uploaded_file):
    """Parse an uploaded projections file into a DataFrame.

    Tries CSV first, then falls back to Excel (same order as the original
    inline code, which duplicated the cleanup in both branches).  After
    parsing, commas are stripped from every cell (thousands separators) and
    Salary is coerced to int; that cleanup is best-effort and any failure
    leaves the frame as parsed.
    """
    try:
        frame = pd.read_csv(uploaded_file)
    except Exception:  # not a readable CSV -- retry as an Excel workbook
        frame = pd.read_excel(uploaded_file)
    try:
        frame = frame.replace(',','', regex=True)
        frame['Salary'] = frame['Salary'].astype(int)
    except Exception:
        # e.g. missing 'Salary' column or non-numeric salaries: keep raw values.
        pass
    return frame


with tab2:
    st.info("The Projections file can have any columns in any order, but must contain columns explicitly named: 'Player', 'Salary', 'Position', 'Team', 'Minutes', 'Median', 'Own'.")
    col1, col2 = st.columns([1, 5])

    with col1:
        proj_file = st.file_uploader("Upload Projections File", key = 'proj_uploader')
        if proj_file is not None:
            proj_dataframe = _read_projections(proj_file)

    with col2:
        # Preview the upload with the same gradient styling as the results tab.
        if proj_file is not None:
            st.dataframe(proj_dataframe.style.background_gradient(axis=0).background_gradient(cmap='RdYlGn').format(precision=2), use_container_width = True)
110
def _simulate_pivot_scores(comp_pool, own_dict, team_dict, min_dict, total_sims):
    """Monte-carlo score a pool of comparable players; return pivot metrics.

    Args:
        comp_pool: DataFrame with at least Player/Position/Salary/Median/Minutes
            columns -- the players considered viable pivots for one anchor.
        own_dict / team_dict / min_dict: Player -> Own / Team / Minutes lookups.
        total_sims: number of simulated slates to draw.

    Returns:
        DataFrame, one row per pool player, carrying Floor/Median/Ceiling,
        finish-rate and salary-multiple probabilities, Own, 'Minutes Proj',
        Team, LevX and ValX.

    Fixes vs. the original inline code (which was duplicated verbatim in both
    the 'Single Player' and 'Top X Owned' branches):
      * Top_5_finish / Top_10_finish are computed from the pure rank matrix.
        The old version appended 'Average_Rank' and the earlier metric columns
        onto the same frame before taking the <=5 / <=10 counts, so those
        extra columns leaked into the counts and inflated the rates.
      * The 1000 one-column-at-a-time DataFrame inserts are replaced with a
        single vectorized normal draw (statistically identical, far faster).
    """
    base = comp_pool[['Player', 'Position', 'Salary', 'Median', 'Minutes']].copy()
    base['Floor'] = (base['Median'] * .25) + (base['Minutes'] * .25)
    base['Ceiling'] = base['Median'] + 10 + (base['Minutes'] * .25)
    base['STD'] = base['Median'] / 4
    base = base[['Player', 'Position', 'Salary', 'Floor', 'Median', 'Ceiling', 'STD']]

    # One N(Median, STD) fantasy-score draw per player per simulated slate.
    sims = pd.DataFrame(
        np.random.normal(
            base['Median'].to_numpy()[:, None],
            base['STD'].to_numpy()[:, None],
            size=(len(base), total_sims),
        ),
        index=base.index,
    )

    # Rank players within each simulated slate (1 = best score; average ties,
    # matching the original Series.rank default).
    ranks = sims.rank(axis=0, ascending=False)

    # Salary in thousands, as in the original salary_file.div(1000).
    sal = base['Salary'] / 1000.0

    out = base.copy()
    out['Top_finish'] = ranks.eq(1).sum(axis=1) / total_sims
    out['Top_5_finish'] = ranks.le(5).sum(axis=1) / total_sims
    out['Top_10_finish'] = ranks.le(10).sum(axis=1) / total_sims
    out['20+%'] = sims.ge(20).sum(axis=1) / total_sims
    # "Nx%" = share of sims where the score beat N * (salary/1000) by >= 1.
    out['4x%'] = sims.sub(sal * 4, axis=0).ge(1).sum(axis=1) / total_sims
    out['5x%'] = sims.sub(sal * 5, axis=0).ge(1).sum(axis=1) / total_sims
    out['6x%'] = sims.sub(sal * 6, axis=0).ge(1).sum(axis=1) / total_sims
    # The original counted salary_4x_check cells under the gpp mask; both
    # frames are NaN-free and same-shaped, so counting the mask directly is
    # the same number.
    out['GPP%'] = sims.sub(sal * 5 + 10, axis=0).ge(1).sum(axis=1) / total_sims

    out['Own'] = out['Player'].map(own_dict).astype('float')
    out['Minutes Proj'] = out['Player'].map(min_dict)
    out['Team'] = out['Player'].map(team_dict)

    # LevX: percentile gap between projected finish rate and ownership.
    out['Projection Rank'] = out['Top_finish'].rank(pct = True)
    out['Own Rank'] = out['Own'].rank(pct = True)
    out['LevX'] = (out['Projection Rank'] - out['Own Rank']) * 100
    # ValX blends value probability (mean of 4x/5x) with leverage, clamped
    # to [0, 100] as in the original np.where pair.
    out['ValX'] = (out[['4x%', '5x%']].mean(axis=1) * 100) + out['LevX']
    out['ValX'] = out['ValX'].clip(lower=0, upper=100)
    return out


with tab1:
    col1, col2 = st.columns([1, 9])
    with col1:
        st.info(t_stamp)
        if st.button("Load/Reset Data", key='reset1'):
            # Drop cached sheet data and all widget state so the next rerun
            # re-reads the spreadsheet from scratch.
            st.cache_data.clear()
            proj_raw, timestamp = init_stat_load()
            t_stamp = f"Last Update: {timestamp} CST"
            for key in st.session_state.keys():
                del st.session_state[key]

        data_var1 = st.radio("Which data are you loading?", ('Paydirt', 'User'), key='data_var1')
        site_var1 = st.radio("What site are you working with?", ('Draftkings', 'Fanduel'), key='site_var1')
        slate_var1 = st.radio("What slate are you working with?", ('Main Slate', 'Secondary Slate'), key='slate_var1')

        # The original duplicated this whole branch per site; the radio values
        # equal the 'site' column values, so one filter covers both.  As in
        # the original, user uploads skip the site/slate filtering entirely.
        if data_var1 == 'User':
            raw_baselines = proj_dataframe
        else:
            raw_baselines = proj_raw[proj_raw['site'] == site_var1]
            raw_baselines = raw_baselines[raw_baselines['slate'] == slate_var1]
            raw_baselines = raw_baselines.sort_values(by='Own', ascending=False)

        check_seq = st.radio("Do you want to check a single player or the top 10 in ownership?", ('Single Player', 'Top X Owned'), key='check_seq')
        if check_seq == 'Single Player':
            player_check = st.selectbox('Select player to create comps', options = raw_baselines['Player'].unique(), key='dk_player')
        elif check_seq == 'Top X Owned':
            top_x_var = st.number_input('How many players would you like to check?', min_value = 1, max_value = 10, value = 5, step = 1)

        Salary_var = st.number_input('Acceptable +/- Salary range', min_value = 0, max_value = 1000, value = 300, step = 100)
        Median_var = st.number_input('Acceptable +/- Median range', min_value = 0, max_value = 10, value = 3, step = 1)

        pos_var1 = st.radio("Compare to all positions or specific positions?", ('All Positions', 'Specific Positions'), key='pos_var1')
        if pos_var1 == 'Specific Positions':
            pos_var_list = st.multiselect('Which positions would you like to include?', options = ['PG', 'SG', 'SF', 'PF', 'C'], key='pos_var_list')
        elif pos_var1 == 'All Positions':
            pos_var_list = ['PG', 'SG', 'SF', 'PF', 'C']

        split_var1 = st.radio("Are you running the full slate or certain games?", ('Full Slate Run', 'Specific Games'), key='split_var1')
        if split_var1 == 'Specific Games':
            team_var1 = st.multiselect('Which teams would you like to include?', options = raw_baselines['Team'].unique(), key='team_var1')
        elif split_var1 == 'Full Slate Run':
            team_var1 = raw_baselines.Team.values.tolist()

    with col2:
        placeholder = st.empty()
        displayholder = st.empty()

        if st.button('Simulate appropriate pivots'):
            with placeholder:
                # Copy before the in-place replace: the original aliased
                # raw_baselines here and silently mutated it.
                working_roo = raw_baselines.copy()
                working_roo.replace('', 0, inplace=True)

                own_dict = dict(zip(working_roo.Player, working_roo.Own))
                team_dict = dict(zip(working_roo.Player, working_roo.Team))
                min_dict = dict(zip(working_roo.Player, working_roo.Minutes))
                total_sims = 1000

                if check_seq == 'Single Player':
                    anchor = working_roo.loc[working_roo['Player'] == player_check].reset_index()
                    # Comp pool: matching position(s), selected teams, and
                    # within the salary/median windows around the anchor.
                    pool = working_roo[working_roo['Position'].apply(lambda x: any(pos in x.split('/') for pos in pos_var_list))]
                    pool = pool[pool['Team'].isin(team_var1)]
                    pool = pool.loc[(pool['Salary'] >= anchor['Salary'][0] - Salary_var) & (pool['Salary'] <= anchor['Salary'][0] + Salary_var)]
                    pool = pool.loc[(pool['Median'] >= anchor['Median'][0] - Median_var) & (pool['Median'] <= anchor['Median'][0] + Median_var)]

                    final_Proj = _simulate_pivot_scores(pool, own_dict, team_dict, min_dict, total_sims)
                    final_Proj = final_Proj[['Player', 'Minutes Proj', 'Position', 'Team', 'Salary', 'Floor', 'Median', 'Ceiling', 'Top_finish', 'Top_5_finish', 'Top_10_finish', '20+%', '4x%', '5x%', '6x%', 'GPP%', 'Own', 'LevX', 'ValX']]
                    final_Proj = final_Proj.set_index('Player')
                    st.session_state.final_Proj = final_Proj.sort_values(by='Top_finish', ascending=False)

                elif check_seq == 'Top X Owned':
                    # Position filter narrows only the anchor candidates, not
                    # the comp pools (matches the original behaviour).
                    if pos_var1 == 'Specific Positions':
                        raw_baselines = raw_baselines[raw_baselines['Position'].apply(lambda x: any(pos in x.split('/') for pos in pos_var_list))]
                    player_check = raw_baselines['Player'].head(top_x_var).tolist()
                    st.write(player_check)

                    final_proj_list = []
                    for players in player_check:
                        anchor = working_roo.loc[working_roo['Player'] == players].reset_index()
                        # Unlike Single Player, only the team filter applies
                        # to the pool here (as in the original).
                        pool = working_roo[working_roo['Team'].isin(team_var1)]
                        pool = pool.loc[(pool['Salary'] >= anchor['Salary'][0] - Salary_var) & (pool['Salary'] <= anchor['Salary'][0] + Salary_var)]
                        pool = pool.loc[(pool['Median'] >= anchor['Median'][0] - Median_var) & (pool['Median'] <= anchor['Median'][0] + Median_var)]

                        final_Proj = _simulate_pivot_scores(pool, own_dict, team_dict, min_dict, total_sims)
                        final_Proj['Pivot_source'] = players
                        final_Proj = final_Proj[['Player', 'Pivot_source', 'Position', 'Team', 'Salary', 'Floor', 'Median', 'Ceiling', 'Top_finish', 'Top_5_finish', 'Top_10_finish', '20+%', '4x%', '5x%', '6x%', 'GPP%', 'Own', 'LevX', 'ValX']]
                        final_proj_list.append(final_Proj.sort_values(by='Top_finish', ascending=False))
                        st.write(f'finished run for {players}')

                    # Stack the per-anchor tables, best leverage first, and
                    # drop each anchor's own row from its comp list.
                    final_Proj_combined = pd.concat(final_proj_list)
                    final_Proj_combined = final_Proj_combined.sort_values(by='LevX', ascending=False)
                    final_Proj_combined = final_Proj_combined[final_Proj_combined['Player'] != final_Proj_combined['Pivot_source']]
                    st.session_state.final_Proj = final_Proj_combined.reset_index(drop=True)

            placeholder.empty()

        with displayholder.container():
            if 'final_Proj' in st.session_state:
                st.dataframe(st.session_state.final_Proj.style.background_gradient(axis=0).background_gradient(cmap='RdYlGn').format(player_roo_format, precision=2), use_container_width = True)

                st.download_button(
                    label="Export Tables",
                    data=convert_df_to_csv(st.session_state.final_Proj),
                    file_name='NBA_pivot_export.csv',
                    mime='text/csv',
                )
            else:
                st.write("Run some pivots my dude/dudette")
app.yaml ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
runtime: python
env: flex

runtime_config:
  python_version: 3

# Fixed: the original entrypoint pointed at streamlit-app.py, but the file
# committed in this change is app.py.
entrypoint: streamlit run app.py --server.port $PORT

automatic_scaling:
  max_num_instances: 200
requirements.txt ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ streamlit
2
+ gspread
3
+ openpyxl
4
+ matplotlib
5
+ streamlit-aggrid
6
+ pulp
7
+ docker
8
+ plotly
9
+ scipy