update app.py
app.py CHANGED
@@ -1,19 +1,61 @@
 import gradio as gr
+from mistralai.client import MistralClient
+from mistralai.models.chat_completion import ChatMessage
+import faiss
+import os
+import numpy as np
+from dotenv import load_dotenv
+
+# Load environment variables
+load_dotenv()
+api_key = os.getenv('API_KEY')
+
+# Initialize Mistral client
+client = MistralClient(api_key=api_key)
+
+# Assuming your embeddings and FAISS index are preloaded or initialized elsewhere
+# For demonstration, these steps are not included here
+# Please replace `index` and `chunks` with your actual data structures
+index = None  # Your FAISS index
+chunks = []  # Your preprocessed text chunks
+
+def get_text_embedding(input_text):
+    """Retrieve text embeddings from Mistral."""
+    embeddings_batch_response = client.embeddings(
+        model="mistral-embed",
+        input=[input_text]
+    )
+    return embeddings_batch_response.data[0].embedding
 
 def answer_question(question):
-# [13 removed lines: the previous answer_question body is not shown in this view]
+    """Generate an answer to the agriculture-related question using Mistral."""
+    # Embed the question
+    question_embedding = np.array([get_text_embedding(question)], dtype=np.float32)  # FAISS expects float32
+
+    # Perform a search for the closest chunks
+    distances, indices = index.search(question_embedding, k=5)  # Adjust `k` as needed
+
+    # Retrieve and format the relevant chunks as context
+    retrieved_chunks = " ".join([chunks[i] for i in indices.flatten()])
+    prompt = f"""
+    Context information is below.
+    ---------------------
+    {retrieved_chunks}
+    ---------------------
+    Given the context information and not prior knowledge, answer the query.
+    Query: {question}
+    Answer:
+    """
+
+    # Generate response using Mistral with the formatted prompt
+    response = run_mistral(prompt)
+    return response
+
+def run_mistral(user_message, model="mistral-medium"):
+    """Interact with Mistral using chat."""
+    messages = [ChatMessage(role="user", content=user_message)]
+    chat_response = client.chat(model=model, messages=messages)
+    return chat_response.choices[0].message.content
 
 app = gr.Interface(fn=answer_question,
                    inputs=gr.inputs.Textbox(lines=2, placeholder="Ask a question about agriculture..."),
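
The hunk ends mid-call, so the remaining gr.Interface arguments and the launch step sit outside the diff shown above. A hypothetical completion, purely illustrative (the outputs component is an assumption, not taken from the commit):

app = gr.Interface(fn=answer_question,
                   inputs=gr.inputs.Textbox(lines=2, placeholder="Ask a question about agriculture..."),
                   outputs="text")  # hypothetical: the actual outputs spec is outside the hunk
app.launch()

Note that gr.inputs.Textbox is the legacy Gradio 3 namespace; on current Gradio versions the equivalent is gr.Textbox(...) passed directly to inputs.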
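
Because the app calls load_dotenv() and reads API_KEY, a .env file (or an environment variable of the same name, such as a Space secret) must be present at startup. An illustrative .env:

# .env (illustrative): the value below is a placeholder, not a real key
API_KEY=your_mistral_api_key_here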
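
As the in-line comments note, index = None and chunks = [] are placeholders, and answer_question will fail on index.search until both are populated. A minimal sketch of one way to build them inside this file, assuming a plain-text corpus in a hypothetical docs.txt; the fixed-size character chunking and the IndexFlatL2 choice are illustrative, not part of the commit:

# Illustrative index construction (not part of this commit).
# Assumes a plain-text corpus in docs.txt; the 512-character chunks are arbitrary.
with open("docs.txt", encoding="utf-8") as f:
    text = f.read()

chunk_size = 512
chunks = [text[i:i + chunk_size] for i in range(0, len(text), chunk_size)]

# Embed every chunk with the same mistral-embed model used at query time
embeddings = np.array([get_text_embedding(chunk) for chunk in chunks],
                      dtype=np.float32)  # FAISS requires float32 vectors

index = faiss.IndexFlatL2(embeddings.shape[1])  # exact L2 search over the embeddings
index.add(embeddings)

IndexFlatL2 performs exact nearest-neighbor search, which is fine at small corpus sizes; for cosine similarity, normalize the embeddings and use faiss.IndexFlatIP instead.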