import streamlit as st
# set_page_config must be the first Streamlit call in the script.
st.set_page_config(layout="wide")
import numpy as np
import pandas as pd
import time
# NOTE(review): `np`, `time`, and `process` appear unused in this file --
# confirm nothing else imports them transitively before removing.
from fuzzywuzzy import process
from collections import Counter

## import global functions (project-local helpers for loading files,
## reconciling player names, and building exposure summary frames)
from global_func.load_contest_file import load_contest_file
from global_func.load_file import load_file
from global_func.find_name_mismatches import find_name_mismatches
from global_func.create_player_exposures import create_player_exposures
from global_func.create_stack_exposures import create_stack_exposures
from global_func.create_stack_size_exposures import create_stack_size_exposures
from global_func.create_general_exposures import create_general_exposures

# Styler format map: render every exposure bucket column as a percentage
# with two decimal places.
# NOTE(review): not referenced elsewhere in this file -- may be used by a
# sibling page; confirm before removing.
player_exposure_format = {
    f'Exposure {bucket}': '{:.2%}'
    for bucket in ('Overall', 'Top 1%', 'Top 5%', 'Top 10%', 'Top 20%')
}

tab1, tab2 = st.tabs(["Data Load", "Contest Analysis"])
with tab1:
    # Wipe everything so a fresh pair of files can be loaded from scratch.
    if st.button('Clear data', key='reset1'):
        st.session_state.clear()

    # Sport / game-type selectors; type_var drives the Classic vs Showdown
    # lineup math in the analysis tab.
    col1, col2 = st.columns(2)
    with col1:
        sport_select = st.selectbox("Select Sport", ['MLB', 'NBA', 'NFL'])
    with col2:
        type_var = st.selectbox("Select Game Type", ['Classic', 'Showdown'])

    # Side-by-side uploaders for the contest and projections files.
    col1, col2 = st.columns(2)

    with col1:
        st.subheader("Contest File")
        st.info("Go ahead and upload a Contest file here. Only include player columns and an optional 'Stack' column if you are playing MLB.")
        Contest_file = st.file_uploader("Upload Contest File (CSV or Excel)", type=['csv', 'xlsx', 'xls'])
        # Drop any stale parsed copy so each rerun reflects the current upload.
        if 'Contest' in st.session_state:
            del st.session_state['Contest']

        # Only (re)parse while the name-matched copy hasn't been built yet.
        if Contest_file and 'Adj_Contest' not in st.session_state:
            st.session_state['Contest'], st.session_state['ownership_df'], st.session_state['actual_df'], st.session_state['entry_list'] = load_contest_file(Contest_file, sport_select)
            # BUG FIX: check for a failed load *before* touching the frame.
            # The original called .dropna() first, which raised AttributeError
            # whenever load_contest_file returned None.
            if st.session_state['Contest'] is not None:
                st.session_state['Contest'] = st.session_state['Contest'].dropna(how='all').reset_index(drop=True)
                st.success('Contest file loaded successfully!')
                st.dataframe(st.session_state['Contest'].head(10))

    with col2:
        st.subheader("Projections File")
        st.info("upload a projections file that has 'player_names', 'salary', 'median', 'ownership', and 'captain ownership' (Needed for Showdown) columns. Note that the salary for showdown needs to be the FLEX salary, not the captain salary.")

        # Uploader on the left, a downloadable blank template on the right.
        upload_col, template_col = st.columns([3, 1])

        with upload_col:
            projections_file = st.file_uploader("Upload Projections File (CSV or Excel)", type=['csv', 'xlsx', 'xls'])
            # Drop any stale parsed copy so each rerun reflects the current upload.
            if 'projections_df' in st.session_state:
                del st.session_state['projections_df']

        with template_col:
            # Header-only CSV users can fill in with the required columns.
            required_columns = ['player_names', 'position', 'team', 'salary', 'median', 'ownership', 'captain ownership']
            template_csv = pd.DataFrame(columns=required_columns).to_csv(index=False)
            st.download_button(
                label="Template",
                data=template_csv,
                file_name="projections_template.csv",
                mime="text/csv"
            )

        # Only (re)parse while the name-matched copy hasn't been built yet.
        if projections_file and 'Adj_projections_df' not in st.session_state:
            export_projections, st.session_state['projections_df'] = load_file(projections_file)
            if st.session_state['projections_df'] is not None:
                st.success('Projections file loaded successfully!')
                st.dataframe(st.session_state['projections_df'].head(10))
        
    if Contest_file and projections_file:
        st.subheader("Name Matching functions")
        # Reconcile player-name spellings between the contest file and the
        # projections file; results are cached under the Adj_* keys.
        if 'Adj_Contest' not in st.session_state:
            try:
                st.session_state['Adj_Contest'], st.session_state['Adj_projections_df'], st.session_state['Adj_ownership_df'], st.session_state['Adj_actual_df'] = find_name_mismatches(st.session_state['Contest'], st.session_state['projections_df'], st.session_state['ownership_df'], st.session_state['actual_df'])
            except Exception:
                # BUG FIX: was a bare `except:`, which also swallowed
                # SystemExit/KeyboardInterrupt and Streamlit's own
                # control-flow exceptions.
                st.warning('Please manage name matching to move forward')
        # BUG FIX: only post-process once matching actually succeeded; the
        # original ran these lines unconditionally and raised KeyError on
        # 'Adj_projections_df' whenever the except branch above fired.
        if 'Adj_projections_df' in st.session_state:
            # Normalize salary strings such as "10,500" into plain ints.
            st.session_state['Adj_projections_df']['salary'] = (st.session_state['Adj_projections_df']['salary'].astype(str).str.replace(',', '').astype(float).astype(int))
            st.session_state['ownership_dict'] = dict(zip(st.session_state['Adj_ownership_df']['Player'], st.session_state['Adj_ownership_df']['Own']))
            st.session_state['actual_dict'] = dict(zip(st.session_state['Adj_actual_df']['Player'], st.session_state['Adj_actual_df']['FPTS']))


with tab2:
    excluded_cols = ['BaseName', 'EntryCount']
    if 'Adj_Contest' in st.session_state and 'Adj_projections_df' in st.session_state:
        # Hoist the repeated session_state lookups into locals; both aliases
        # refer to the same stored frames, so mutations persist as before.
        contest_df = st.session_state['Adj_Contest']
        proj_df = st.session_state['Adj_projections_df']

        # Every non-metadata column holds a player name; coerce to str so the
        # lookup dicts below key consistently.
        player_columns = [c for c in contest_df.columns if c not in excluded_cols]
        for c in player_columns:
            contest_df[c] = contest_df[c].astype(str)

        names = proj_df['player_names']

        def _name_map(values):
            """Build a player_name -> value dict for one projections column."""
            return dict(zip(names, values))

        map_dict = {
            'pos_map': _name_map(proj_df['position']),
            'team_map': _name_map(proj_df['team']),
            'salary_map': _name_map(proj_df['salary']),
            'proj_map': _name_map(proj_df['median']),
            'own_map': _name_map(proj_df['ownership']),
            'own_percent_rank': _name_map(proj_df['ownership'].rank(pct=True)),
            # NOTE(review): captain salary reuses the FLEX salary here while the
            # captain projection is boosted 1.5x -- confirm that is intentional.
            'cpt_salary_map': _name_map(proj_df['salary']),
            'cpt_proj_map': _name_map(proj_df['median'] * 1.5),
            'cpt_own_map': _name_map(proj_df['captain ownership'])
        }

        # Work on a copy so session state keeps the raw contest rows intact.
        working_df = contest_df.copy()

        if type_var == 'Classic':
            def _modal_team(row):
                """Return (team, count) for the most common team among the
                lineup's player slots, or ('', '') when no slot maps to a team.
                Player slots are assumed to start at column 4 -- TODO confirm
                against the contest file layout."""
                # FIX: use .iloc for positional slicing; bare `row[4:]` relied
                # on the deprecated positional fallback for label indexes.
                teams = [map_dict['team_map'].get(p, '') for p in row.iloc[4:]]
                teams = [t for t in teams if t]
                return Counter(teams).most_common(1)[0] if teams else ('', '')

            # FIX: compute the per-row team Counter once instead of running two
            # full apply passes (one for 'stack', one for 'stack_size').
            stack_pairs = working_df.apply(_modal_team, axis=1)
            working_df['stack'] = [pair[0] for pair in stack_pairs]
            working_df['stack_size'] = [pair[1] for pair in stack_pairs]

            # Lineup aggregates; every lookup defaults to 0, so non-player
            # cells (e.g. BaseName) contribute nothing to the sums.
            working_df['salary'] = working_df.apply(lambda row: sum(map_dict['salary_map'].get(player, 0) for player in row), axis=1)
            working_df['median'] = working_df.apply(lambda row: sum(map_dict['proj_map'].get(player, 0) for player in row), axis=1)
            working_df['actual_fpts'] = working_df.apply(lambda row: sum(st.session_state['actual_dict'].get(player, 0) for player in row), axis=1)
            working_df['Own'] = working_df.apply(lambda row: sum(map_dict['own_map'].get(player, 0) for player in row), axis=1)
            working_df['actual_own'] = working_df.apply(lambda row: sum(st.session_state['ownership_dict'].get(player, 0) for player in row), axis=1)

            # Duplicate-lineup count: identical sorted player sets share a key.
            working_df['sorted'] = working_df[player_columns].apply(
                lambda row: ','.join(sorted(row.values)),
                axis=1
            )
            # Name the column explicitly rather than transforming the whole frame.
            working_df['dupes'] = working_df.groupby('sorted')['sorted'].transform('size')

            # Contest files are assumed ordered by finish, so the positional
            # index doubles as the finishing position -- TODO confirm ordering.
            working_df = working_df.reset_index()
            working_df['percentile_finish'] = working_df['index'].rank(pct=True)
            working_df['finish'] = working_df['index']
            working_df = working_df.drop(['sorted', 'index'], axis=1)
        elif type_var == 'Showdown':
            working_df['stack'] = working_df.apply(
                lambda row: Counter(
                    map_dict['team_map'].get(player, '') for player in row
                    if map_dict['team_map'].get(player, '') != ''
                ).most_common(1)[0][0] if any(map_dict['team_map'].get(player, '') for player in row) else '',
                axis=1
            )
            working_df['stack_size'] = working_df.apply(
                lambda row: Counter(
                    map_dict['team_map'].get(player, '') for player in row
                    if map_dict['team_map'].get(player, '') != ''
                ).most_common(1)[0][1] if any(map_dict['team_map'].get(player, '') for player in row) else '',
                axis=1
            )
            working_df['salary'] = working_df.apply(
                lambda row: map_dict['cpt_salary_map'].get(row.iloc[0], 0) + 
                            sum(map_dict['salary_map'].get(player, 0) for player in row.iloc[1:]),
                axis=1
            )
            working_df['median'] = working_df.apply(
                lambda row: map_dict['cpt_proj_map'].get(row.iloc[0], 0) + 
                            sum(map_dict['proj_map'].get(player, 0) for player in row.iloc[1:]),
                axis=1
            )
            working_df['Own'] = working_df.apply(
                lambda row: map_dict['cpt_own_map'].get(row.iloc[0], 0) + 
                            sum(map_dict['own_map'].get(player, 0) for player in row.iloc[1:]),
                axis=1
            )
            working_df['sorted'] = working_df[player_columns].apply(
                lambda row: row[0] + '|' + ','.join(sorted(row[1:].values)),
                axis=1
            )
            working_df['dupes'] = working_df.groupby('sorted').transform('size')
            working_df = working_df.reset_index()
            working_df['percentile_finish'] = working_df['index'].rank(pct=True)
            working_df['finish'] = working_df['index']
            working_df = working_df.drop(['sorted', 'index'], axis=1)
        # "Field" exposures cover the full, unfiltered contest pool; they are
        # computed before the entry filter below so they always reflect everyone.
        st.session_state['field_player_frame'] = create_player_exposures(working_df, player_columns)
        st.session_state['field_stack_frame'] = create_stack_exposures(working_df)

        with st.expander("Info and filters"):
            if st.button('Clear data', key='reset3'):
                st.session_state.clear()
            with st.form(key='filter_form'):
                # entry_parse_var / entry_names are read again further down when
                # building the per-tab exposure frames.
                entry_parse_var = st.selectbox("Do you want to view a specific player(s) or a group of players?", ['All', 'Specific'])
                entry_names = st.multiselect("Select players", options=st.session_state['entry_list'], default=[])
                submitted = st.form_submit_button("Submit")
                if submitted:
                    # Drop cached frames so they are rebuilt from the filtered rows.
                    if 'player_frame' in st.session_state:
                        del st.session_state['player_frame']
                    if 'stack_frame' in st.session_state:
                        del st.session_state['stack_frame']
                    # Apply entry name filter if specific entries are selected
                    # NOTE(review): the filter only takes effect on the rerun
                    # triggered by the submit click; later reruns fall back to
                    # the unfiltered frame -- confirm this is intended.
                    if entry_parse_var == 'Specific' and entry_names:
                        working_df = working_df[working_df['BaseName'].isin(entry_names)]
            
        # Initialize pagination state on first run.
        if 'current_page' not in st.session_state:
            st.session_state.current_page = 1

        rows_per_page = 500
        total_rows = len(working_df)
        # Ceiling division; keep at least one page so clamping below is valid
        # even when working_df is empty.
        total_pages = max(1, (total_rows + rows_per_page - 1) // rows_per_page)

        # Pagination controls centered in a single row.
        pagination_cols = st.columns([4, 1, 1, 1, 4])
        with pagination_cols[1]:
            if st.button("Previous Page"):
                # FIX: decrement with a floor of 1. The original's else-branch
                # only cleared the cached frames when already on page 1, making
                # the two buttons behave inconsistently.
                st.session_state.current_page = max(1, st.session_state.current_page - 1)
                st.session_state.pop('player_frame', None)
                st.session_state.pop('stack_frame', None)

        with pagination_cols[3]:
            if st.button("Next Page"):
                # FIX: clamp to total_pages; the original incremented without
                # bound and could walk past the last page into empty tables.
                st.session_state.current_page = min(total_pages, st.session_state.current_page + 1)
                st.session_state.pop('player_frame', None)
                st.session_state.pop('stack_frame', None)

        # Slice the current page and render with color gradients.
        start_idx = (st.session_state.current_page - 1) * rows_per_page
        end_idx = min(st.session_state.current_page * rows_per_page, total_rows)
        st.dataframe(
            working_df.iloc[start_idx:end_idx].style
            .background_gradient(axis=0)
            .background_gradient(cmap='RdYlGn')
            .format(precision=2), 
            height=500,
            use_container_width=True,
            hide_index=True
        )

        with st.container():
            def _show_exposures(frame):
                """Render an exposure frame sorted by overall exposure, with
                percent formatting applied to its numeric columns."""
                numeric_cols = frame.iloc[:, 1:].select_dtypes(include=['number']).columns
                st.dataframe(
                    frame.sort_values(by='Exposure Overall', ascending=False)
                         .style.background_gradient(cmap='RdYlGn')
                         .format(formatter='{:.2%}', subset=numeric_cols),
                    hide_index=True)

            # FIX: renamed from tab1..tab4, which shadowed the top-level tabs.
            exp_tab1, exp_tab2, exp_tab3, exp_tab4 = st.tabs(['Player Used Info', 'Stack Used Info', 'Stack Size Info', 'General Info'])

            with exp_tab1:
                # Player exposure, optionally restricted to selected entrants.
                if entry_parse_var == 'All':
                    st.session_state['player_frame'] = create_player_exposures(working_df, player_columns)
                else:
                    st.session_state['player_frame'] = create_player_exposures(working_df, player_columns, entry_names)
                _show_exposures(st.session_state['player_frame'])

            with exp_tab2:
                # Team-stack exposure.
                if entry_parse_var == 'All':
                    st.session_state['stack_frame'] = create_stack_exposures(working_df)
                else:
                    st.session_state['stack_frame'] = create_stack_exposures(working_df, entry_names)
                _show_exposures(st.session_state['stack_frame'])

            with exp_tab3:
                # Stack-size exposure.
                if entry_parse_var == 'All':
                    st.session_state['stack_size_frame'] = create_stack_size_exposures(working_df)
                else:
                    st.session_state['stack_size_frame'] = create_stack_size_exposures(working_df, entry_names)
                _show_exposures(st.session_state['stack_size_frame'])

            with exp_tab4:
                # General info uses a row-wise gradient and plain two-decimal
                # formatting, so it keeps its own rendering path.
                if entry_parse_var == 'All':
                    st.session_state['general_frame'] = create_general_exposures(working_df)
                else:
                    st.session_state['general_frame'] = create_general_exposures(working_df, entry_names)
                st.dataframe(st.session_state['general_frame'].style.background_gradient(cmap='RdYlGn', axis=1).format(precision=2), hide_index=True)