import logging
import sys
import streamlit as st
from llama_index import VectorStoreIndex, SimpleDirectoryReader, ServiceContext
from llama_index.llms import LlamaCPP
from llama_index.llms.llama_utils import messages_to_prompt, completion_to_prompt
from langchain.embeddings.huggingface import HuggingFaceEmbeddings
from llama_index.embeddings import LangchainEmbedding

# Set up logging
logging.basicConfig(stream=sys.stdout, level=logging.INFO)

def configure_llama_model():
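    # Download a 4-bit quantized Mistral-7B-Instruct GGUF model and run it locally
    # through llama.cpp. n_gpu_layers=-1 offloads all layers to the GPU when one is
    # available; the two prompt helpers format chat messages into the Llama/Mistral
    # instruction template.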
    model_url = 'https://huggingface.co/TheBloke/Mistral-7B-Instruct-v0.1-GGUF/resolve/main/mistral-7b-instruct-v0.1.Q4_K_M.gguf'
    llm = LlamaCPP(
        model_url=model_url,
        temperature=0.1,
        max_new_tokens=256,
        context_window=3900,
        model_kwargs={"n_gpu_layers": -1},
        messages_to_prompt=messages_to_prompt,
        completion_to_prompt=completion_to_prompt,
        verbose=True,
    )
    return llm

def configure_embeddings():
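    # Wrap the sentence-transformers all-MiniLM-L6-v2 model (384-dimensional, runs
    # locally) so llama_index can use it as the embedding backend.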
    embed_model = LangchainEmbedding(
        HuggingFaceEmbeddings(model_name='sentence-transformers/all-MiniLM-L6-v2')
    )
    return embed_model


def configure_service_context(llm, embed_model):
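    # Bundle the LLM and embedding model; chunk_size=256 keeps retrieved chunks small
    # so several of them fit inside the 3900-token context window set above.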
    return ServiceContext.from_defaults(chunk_size=256, llm=llm, embed_model=embed_model)

def initialize_vector_store_index(data_path, service_context):
    # Read every supported file under data_path (e.g. Cloudflare.pdf) and embed it
    # into an in-memory vector index.
    documents = SimpleDirectoryReader(data_path).load_data()
    index = VectorStoreIndex.from_documents(documents, service_context=service_context)
    return index
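
# Optional: persist the index so it does not have to be rebuilt on every run.
# A minimal sketch, assuming the pre-0.10 llama_index storage API used above:
#
#   index.storage_context.persist(persist_dir="./storage")
#   # ...later, instead of rebuilding:
#   from llama_index import StorageContext, load_index_from_storage
#   storage_context = StorageContext.from_defaults(persist_dir="./storage")
#   index = load_index_from_storage(storage_context, service_context=service_context)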

def main():
    st.title("Cloudflare RAG")

    # User input
    user_input = st.text_input("Enter your message:")

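    # Note: Streamlit re-executes this script on every interaction, so the models and
    # index below are reloaded each time. Wrapping this setup in a helper decorated
    # with st.cache_resource (Streamlit >= 1.18) would avoid the repeated work.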
    # Configure and initialize components
    llm = configure_llama_model()
    embed_model = configure_embeddings()
    service_context = configure_service_context(llm, embed_model)
    index = initialize_vector_store_index("./", service_context)

    if user_input:
        # Generate response
        query_engine = index.as_query_engine()
        response = query_engine.query(user_input)
        # Display response
        st.text_area("Response:", str(response), height=400)

if __name__ == "__main__":
    main()