import os
import gradio as gr
from llama_index.core import VectorStoreIndex, ServiceContext
from llama_index.readers.web import SimpleWebPageReader
from llama_index.llms.mistralai import MistralAI
from llama_index.embeddings.mistralai import MistralAIEmbedding
title = "Gaia Mistral 8x7b Chat RAG URL Demo"
description = "Example of an assistant with Gradio, RAG from url and Mistral AI via its API"
placeholder = "Vous pouvez me posez une question sur ce contexte, appuyer sur Entrée pour valider"
placeholder_url = "Extract text from this url"
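# Chat model: Mixtral 8x7B as served by the Mistral API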
llm_model = 'open-mixtral-8x7b'
# choose api_key from .env or from input field
# placeholder_api_key = "API key"
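# A .env file can also be loaded with python-dotenv before reading the variable,
# e.g. (assuming python-dotenv is installed and .env defines MISTRAL_API_KEY):
#   from dotenv import load_dotenv
#   load_dotenv()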
env_api_key = os.environ.get("MISTRAL_API_KEY")
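
# Populated by setup_with_url once a URL has been processed; the chat callback
# below reads it to answer questions.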
query_engine = None
with gr.Blocks() as demo:
gr.Markdown(""" ### Welcome to Gaia Level 2 Demo
Add an URL at the bottom of the interface before interacting with the Chat.
This demo allows you to interact with a webpage and then ask questions to Mistral APIs.
Mistral will answer with the context extracted from the webpage.
""")
# with gr.Row():
# api_key_text_box = gr.Textbox(placeholder=placeholder_api_key, container=False, scale=7)
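
    # Build the RAG pipeline for a given URL: fetch the page, embed it with
    # mistral-embed, index it, and keep a query engine for the chat below.
    # (ServiceContext here is the pre-0.10 llama-index API; newer releases
    # configure the LLM and embeddings via llama_index.core.Settings instead.)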
    def setup_with_url(url):
        global query_engine
        # Set up the Mistral clients: chat LLM and embedding model
        llm = MistralAI(api_key=env_api_key, model=llm_model)
        embed_model = MistralAIEmbedding(model_name='mistral-embed', api_key=env_api_key)
        service_context = ServiceContext.from_defaults(chunk_size=1024, llm=llm, embed_model=embed_model)
        # Fetch the page, build an in-memory vector index, and create the query engine
        documents = SimpleWebPageReader(html_to_text=True).load_data([url])
        index = VectorStoreIndex.from_documents(documents, service_context=service_context)
        query_engine = index.as_query_engine(similarity_top_k=15)
        return placeholder
gr.Markdown(""" ### 1 / Extract data from URL """)
with gr.Row():
url_msg = gr.Textbox(placeholder=placeholder_url, container=False, scale=7)
url_btn = gr.Button(value="Process url ✅", interactive=True)
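
    # Clicking the button builds the index; the returned chat placeholder replaces
    # the URL box text as a simple "ready" signal.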
    url_btn.click(setup_with_url, [url_msg], url_msg, show_progress="full")
gr.Markdown(""" ### 2 / Ask a question about this context """)
chatbot = gr.Chatbot()
msg = gr.Textbox(placeholder=placeholder)
clear = gr.ClearButton([msg, chatbot])
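
    # Answer each chat message by querying the index built for the current URL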
    def respond(message, chat_history):
        # Guard against questions asked before a URL has been processed
        if query_engine is None:
            chat_history.append((message, "Please process a URL first."))
            return chat_history
        response = query_engine.query(message)
        chat_history.append((message, str(response)))
        return chat_history

    msg.submit(respond, [msg, chatbot], [chatbot])
demo.title = title
demo.launch()