# Gradio app: chat with an uploaded document using LlamaParse for parsing,
# LlamaIndex for indexing/retrieval, and the Hugging Face Inference API as the LLM.
import os

from dotenv import load_dotenv
import gradio as gr
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
from llama_index.llms.huggingface_api import HuggingFaceInferenceAPI
from llama_parse import LlamaParse

import markdowm as md  # local helper module holding the UI's markdown copy (description, guide)

# Load environment variables
load_dotenv()
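# Expected .env entries (names taken from this file):
#   LLAMA_INDEX_API - LlamaCloud API key used by LlamaParse
#   TOKEN          - Hugging Face Inference API token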

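# Maps Hugging Face repo IDs (sent to the Inference API) to the display
# labels shown in the "Select LLM" dropdown; set_llm_model() inverts this mapping.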
llm_models = {
    "tiiuae/falcon-7b-instruct": "HundAI-7B-S",
    "mistralai/Mixtral-8x7B-Instruct-v0.1": "Mixtral-8x7B",
    "meta-llama/Meta-Llama-3-8B-Instruct": "Meta-Llama-8B",
    "mistralai/Mistral-7B-Instruct-v0.2": "Mistral-7B",
}

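# Embedding models offered in the "Select Embedding" dropdown
# (approximate parameter counts noted inline).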
embed_models = [
    "BAAI/bge-small-en-v1.5",  # 33.4M
    "NeuML/pubmedbert-base-embeddings",
    "BAAI/llm-embedder",  # 109M
    "BAAI/bge-large-en",  # 335M
]

# Globals shared between the UI callbacks
selected_llm_model_name = list(llm_models.keys())[0]  # default to the first model in the dict
vector_index = None  # built by load_files(); queried by respond()

# LlamaParse converts uploaded files to markdown (requires a LlamaCloud API key)
parser = LlamaParse(api_key=os.getenv("LLAMA_INDEX_API"), result_type='markdown')

# Route every supported file type through LlamaParse
file_extractor = {
    '.pdf': parser,
    '.docx': parser,
    '.txt': parser,
    '.csv': parser,
    '.xlsx': parser,
    '.pptx': parser,
    '.html': parser,
    '.jpg': parser,
    '.jpeg': parser,
    '.png': parser,
    '.webp': parser,
    '.svg': parser,
}


# Build a vector index from the uploaded file so respond() can query it
def load_files(file_path: str, embed_model_name: str):
    global vector_index  # respond() reads this module-level index
    try:
        if not file_path:
            return "Please upload a file first."
        documents = SimpleDirectoryReader(input_files=[file_path], file_extractor=file_extractor).load_data()
        embed_model = HuggingFaceEmbedding(model_name=embed_model_name)
        vector_index = VectorStoreIndex.from_documents(documents, embed_model=embed_model)

        filename = os.path.basename(file_path)
        return f"Ready to answer questions about {filename}"
    except Exception as e:
        return f"An error occurred: {e}"


# Map the display label chosen in the dropdown back to its HF repo ID
def set_llm_model(selected_model):
    global selected_llm_model_name
    selected_llm_model_name = next(
        (repo_id for repo_id, label in llm_models.items() if label == selected_model),
        selected_llm_model_name,  # keep the current model if nothing matches
    )



# Chat handler: answer the user's question against the indexed document
def respond(message, history):
    # The index exists only after a document has been uploaded and processed
    if vector_index is None:
        return "Please upload a file."
    try:
        # llama-index expects snake_case parameters; the original camelCase
        # kwargs (contextWindow, maxTokens, topP, frequency/presence penalties)
        # do not match its fields and were effectively ignored. temperature is
        # assumed to be accepted by the installed version of
        # llama-index-llms-huggingface-api; drop it if your version rejects it.
        llm = HuggingFaceInferenceAPI(
            model_name=selected_llm_model_name,
            context_window=8192,
            num_output=1024,
            temperature=0.3,
            token=os.getenv("TOKEN"),
        )

        query_engine = vector_index.as_query_engine(llm=llm)
        bot_message = query_engine.query(message)
        return f"{llm_models[selected_llm_model_name]}:\n{bot_message}"
    except Exception as e:
        return f"An error occurred: {e}"


# UI Setup
with gr.Blocks(theme='Hev832/Applio', css='footer {visibility: hidden}') as demo:
    with gr.Tabs():
        with gr.TabItem("Introduction"):
            gr.Markdown(md.description)

        with gr.TabItem("Chatbot"):
            with gr.Accordion("IMPORTANT: READ ME FIRST", open=False):
                gr.Markdown(md.guide)
            with gr.Row():
                with gr.Column(scale=1):
                    file_input = gr.File(file_count="single", type='filepath', label="Upload document")
                    embed_model_dropdown = gr.Dropdown(embed_models, value=embed_models[0], label="Select Embedding", interactive=True)

                    with gr.Row():
                        btn = gr.Button("Submit", variant='primary')
                        clear = gr.ClearButton()
                    output = gr.Text(label='Vector Index')
                    llm_model_dropdown = gr.Dropdown(list(llm_models.values()), value=llm_models[selected_llm_model_name], label="Select LLM", interactive=True)
                with gr.Column(scale=3):
                    gr.ChatInterface(
                        fn=respond,
                        chatbot=gr.Chatbot(height=500),
                        theme="soft",
                        textbox=gr.Textbox(placeholder="Ask me any questions on the uploaded document!", container=False),
                    )

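    # Event wiring: choosing an LLM updates the global model name, Submit builds
    # the vector index from the uploaded file, and Clear resets the inputs.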
    llm_model_dropdown.change(fn=set_llm_model, inputs=llm_model_dropdown)
    btn.click(fn=load_files, inputs=[file_input, embed_model_dropdown], outputs=output)
    clear.click(lambda: [None] * 3, outputs=[file_input, embed_model_dropdown, output])


if __name__ == "__main__":
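    # launch() starts the local server; pass share=True for a public Gradio link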
    demo.launch()