# fair-plai / app.py
import os
import json
import gradio as gr
from llama_index.core import (
    VectorStoreIndex,
    download_loader,
    StorageContext,
)
from dotenv import load_dotenv, find_dotenv
import chromadb
from llama_index.llms.mistralai import MistralAI
from llama_index.embeddings.mistralai import MistralAIEmbedding
from llama_index.vector_stores.chroma import ChromaVectorStore
from llama_index.core.indices.service_context import ServiceContext
from pathlib import Path
TITLE = "RIZOA-AUCHAN Chatbot Demo"
DESCRIPTION = "Example of an assistant built with Gradio, coupled with function calling and the Mistral AI API"
PLACEHOLDER = (
    "Vous pouvez me poser une question sur ce contexte, appuyez sur Entrée pour valider"
)
PLACEHOLDER_URL = "Extract text from this url"
llm_model = "mistral-medium"
load_dotenv()
env_api_key = os.environ.get("MISTRAL_API_KEY")
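# Optional sanity check (sketch): fail early if MISTRAL_API_KEY is missing rather than
# at the first API call.
# if env_api_key is None:
#     raise RuntimeError("MISTRAL_API_KEY is not set; define it in the environment or a .env file.")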
query_engine = None
# Define LLMs
llm = MistralAI(api_key=env_api_key, model=llm_model)
embed_model = MistralAIEmbedding(model_name="mistral-embed", api_key=env_api_key)
# create client and a new collection
db = chromadb.PersistentClient(path="./chroma_db")
chroma_collection = db.get_or_create_collection("quickstart")
# set up ChromaVectorStore and load in data
vector_store = ChromaVectorStore(chroma_collection=chroma_collection)
storage_context = StorageContext.from_defaults(vector_store=vector_store)
service_context = ServiceContext.from_defaults(
    chunk_size=1024, llm=llm, embed_model=embed_model
)
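# Note: recent llama-index releases deprecate ServiceContext in favour of the global
# Settings object. A minimal equivalent (sketch, assuming llama-index >= 0.10):
# from llama_index.core import Settings
# Settings.llm = llm
# Settings.embed_model = embed_model
# Settings.chunk_size = 1024
# (the VectorStoreIndex below would then be built without service_context)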
#PDFReader = download_loader("PDFReader")
#loader = PDFReader()
index = VectorStoreIndex(
    [], service_context=service_context, storage_context=storage_context
)
query_engine = index.as_query_engine(similarity_top_k=5)
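# Sketch of a document-loading helper for the PDF upload UI that is commented out further
# down. Assumptions: SimpleDirectoryReader (llama_index.core) is used instead of the
# PDFReader loader, and `input_file` is a local file path as given by gr.File(type="filepath").
# def load_document(input_file):
#     from llama_index.core import SimpleDirectoryReader
#     documents = SimpleDirectoryReader(input_files=[input_file]).load_data()
#     for document in documents:
#         index.insert(document)
#     return f"{len(documents)} document(s) loaded"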
FILE = Path(__file__).resolve()
BASE_PATH = FILE.parents[0]
'''
image = os.path.join(BASE_PATH,"img","logo_rizoa_auchan.jpg")
print(f"Chemin de l'image : {image}")
image = os.path.join("img","logo_rizoa_auchan.jpg")
print(f"chemin 2 : {image}")
image = os.path.abspath(os.path.join("img", "logo_rizoa_auchan.jpg"))
print(f"Image 3 : {image}")
image = os.path.join("https://huggingface.co/spaces/rizoa-auchan-hack/hack/blob/main/img/logo_rizoa_auchan.jpg")
print(f"Image 4 : {image}")
'''
image = "logo_rizoa_auchan.jpg"
print(f"Chemin : {image}")
if os.path.exists(image):
    print("Image existe")
else:
    print("Image n'existe pas")
# PLACEHOLDER = image  # leftover debug assignment; would replace the textbox prompt with the image path
with gr.Blocks() as demo:
    with gr.Row():
        with gr.Column(scale=1):
            '''
            gr.Image(
                #value=os.path.join(BASE_PATH,"img","logo_rizoa_auchan.jpg"),
                #value=os.path.join("img","logo_rizoa_auchan.jpg"),
                value="logo_rizoa_auchan.jpg",
                height=250,
                width=250,
                container=False,
                show_download_button=False
            )
            '''
            gr.HTML(
                value='<img src="https://huggingface.co/spaces/rizoa-auchan-hack/hack/resolve/main/logo_rizoa_auchan.jpg">'
            )
        with gr.Column(scale=4):
            gr.Markdown(
                """
                # Bienvenue au Chatbot FAIR-PLAI

                Ce chatbot est un assistant numérique, médiateur entre vendeurs et acheteurs
                """
            )
# gr.Markdown(""" ### 1 / Extract data from PDF """)
# with gr.Row():
# with gr.Column():
# input_file = gr.File(
# label="Load a pdf",
# file_types=[".pdf"],
# file_count="single",
# type="filepath",
# interactive=True,
# )
# file_msg = gr.Textbox(
# label="Loaded documents:", container=False, visible=False
# )
# input_file.upload(
# fn=load_document,
# inputs=[
# input_file,
# ],
# outputs=[file_msg],
# concurrency_limit=20,
# )
# file_btn = gr.Button(value="Encode file ✅", interactive=True)
# btn_msg = gr.Textbox(container=False, visible=False)
# with gr.Row():
# db_list = gr.Markdown(value=get_documents_in_db)
# delete_btn = gr.Button(value="Empty db 🗑️", interactive=True, scale=0)
# file_btn.click(
# load_file,
# inputs=[input_file],
# outputs=[file_msg, btn_msg, db_list],
# show_progress="full",
# )
# delete_btn.click(empty_db, outputs=[db_list], show_progress="minimal")
gr.Markdown(""" ### Ask a question """)
chatbot = gr.Chatbot()
msg = gr.Textbox(placeholder=PLACEHOLDER)
clear = gr.ClearButton([msg, chatbot])
def respond(message, chat_history):
response = query_engine.query(message)
chat_history.append((message, str(response)))
return chat_history
msg.submit(respond, [msg, chatbot], [chatbot])
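    # Optional variant (sketch): clear the textbox after each question by returning an
    # empty string for `msg` and listing it as an output, e.g.
    #     def respond(message, chat_history):
    #         response = query_engine.query(message)
    #         chat_history.append((message, str(response)))
    #         return "", chat_history
    #     msg.submit(respond, [msg, chatbot], [msg, chatbot])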
demo.title = TITLE
if __name__ == "__main__":
    demo.launch(allowed_paths=['/home/user/app/img/', './img/', '.'])
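# Local usage (sketch, assuming a .env file providing MISTRAL_API_KEY):
#     pip install gradio chromadb python-dotenv llama-index \
#         llama-index-llms-mistralai llama-index-embeddings-mistralai \
#         llama-index-vector-stores-chroma
#     python app.py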