import os
import gradio as gr
from llama_index import VectorStoreIndex, ServiceContext
from llama_index.readers import SimpleWebPageReader
from llama_index.llms import MistralAI
from llama_index.embeddings import MistralAIEmbedding

title = "Gaia Mistral Chat RAG URL Demo"
description = "Example of an assistant built with Gradio, RAG from a URL, and Mistral AI via its API"
placeholder = "Posez moi une question sur l'agriculture"
placeholder_url = "Extract text from this url"
llm_model = 'mistral-small'
# choose api_key from .env or from input field
# placeholder_api_key = "API key"
env_api_key = os.environ.get("MISTRAL_API_KEY")

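# Global query engine, initialised in setup_with_url() once a URL has been processed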
query_engine = None

with gr.Blocks() as demo:

    gr.Markdown(""" ### Welcome to Gaia Level 2 Demo 
    Add a URL at the bottom of the interface before interacting with the chat.
    This demo lets you load a webpage and then ask questions through the Mistral API.
    Mistral answers using the context extracted from the webpage.
    """)
    chatbot = gr.Chatbot()
    msg = gr.Textbox()
    clear = gr.ClearButton([msg, chatbot])

    # with gr.Row():
        # api_key_text_box = gr.Textbox(placeholder=placeholder_api_key, container=False, scale=7)

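    # Build the Mistral clients and the vector index for the given URL, then expose it
    # through the module-level query_engine used by respond()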
    def setup_with_url(url):
        global query_engine

        # Set up the Mistral LLM and embedding clients
        llm = MistralAI(api_key=env_api_key, model=llm_model)
        embed_model = MistralAIEmbedding(model_name='mistral-embed', api_key=env_api_key)
        service_context = ServiceContext.from_defaults(chunk_size=1024, llm=llm, embed_model=embed_model)

        # Fetch the webpage, convert it to text and build the vector index
        documents = SimpleWebPageReader(html_to_text=True).load_data([url])
        index = VectorStoreIndex.from_documents(documents, service_context=service_context)
        query_engine = index.as_query_engine(similarity_top_k=15)
        return placeholder

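    # URL input and button: clicking the button runs setup_with_url on the entered URL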
    with gr.Row():
        url_msg = gr.Textbox(placeholder=placeholder_url, container=False, scale=7)
        url_btn = gr.Button(value="Set-up API and process url ✅", interactive=True)
        url_btn.click(setup_with_url, [url_msg], msg, show_progress="full")

    def respond(message, chat_history):
        # Guard: the index is only available after a URL has been processed
        if query_engine is None:
            chat_history.append((message, "Please process a URL first using the field below."))
            return chat_history
        response = query_engine.query(message)
        chat_history.append((message, str(response)))
        return chat_history

    msg.submit(respond, [msg, chatbot], [chatbot])

demo.title = title

demo.launch()
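
# Note: a minimal way to run this demo, assuming the file is saved as app.py:
#   export MISTRAL_API_KEY=...   # your Mistral API key
#   python app.py                # Gradio serves the UI on http://127.0.0.1:7860 by default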