Ferrxni committed on
Commit 8f27bd5 · 1 Parent(s): 8967f63

update app.py

Files changed (1)
  1. app.py +54 -13
app.py CHANGED
@@ -1,19 +1,60 @@
 import gradio as gr

 def answer_question(question):
-    # This is a placeholder function. You should implement your model inference logic here.
-    # For demonstration purposes, we'll return a generic answer.
-    answers = {
-        "how to detect crop disease": "To detect crop diseases, use image recognition models trained on datasets of diseased and healthy crops.",
-        "best time to plant wheat": "The best time to plant wheat depends on your region. In temperate regions, it's usually early autumn.",
-        "improving soil fertility": "Improving soil fertility can be achieved by rotating crops, using compost, and avoiding overuse of chemical fertilizers.",
-    }
-    # Find the closest question and return the answer
-    question = question.lower()
-    for key in answers:
-        if key in question:
-            return answers[key]
-    return "I'm not sure how to answer that. Can you ask something else?"

 app = gr.Interface(fn=answer_question,
                    inputs=gr.inputs.Textbox(lines=2, placeholder="Ask a question about agriculture..."),
 
 import gradio as gr
+from mistralai.client import MistralClient
+from mistralai.models.chat_completion import ChatMessage
+import faiss
+import os
+import numpy as np
+from dotenv import load_dotenv
+
+# Load environment variables
+load_dotenv()
+api_key = os.getenv('API_KEY')
+
+# Initialize the Mistral client
+client = MistralClient(api_key=api_key)
+
+# Assuming the embeddings and FAISS index are preloaded or initialized elsewhere;
+# those steps are not included here. Replace `index` and `chunks` with your
+# actual data structures.
+index = None  # Your FAISS index
+chunks = []   # Your preprocessed text chunks
+
+def get_text_embedding(input_text):
+    """Retrieve a text embedding from Mistral."""
+    embeddings_batch_response = client.embeddings(
+        model="mistral-embed",
+        input=[input_text]
+    )
+    return embeddings_batch_response.data[0].embedding

 def answer_question(question):
+    """Generate an answer to an agriculture-related question using Mistral."""
+    # Embed the question; FAISS expects float32 vectors
+    question_embedding = np.array([get_text_embedding(question)], dtype=np.float32)
+
+    # Search the index for the closest chunks
+    distances, indices = index.search(question_embedding, k=5)  # Adjust `k` as needed
+
+    # Retrieve and format the relevant chunks as context
+    retrieved_chunks = " ".join([chunks[i] for i in indices.flatten()])
+    prompt = f"""
+    Context information is below.
+    ---------------------
+    {retrieved_chunks}
+    ---------------------
+    Given the context information and not prior knowledge, answer the query.
+    Query: {question}
+    Answer:
+    """
+
+    # Generate a response from Mistral with the formatted prompt
+    response = run_mistral(prompt)
+    return response
+
+def run_mistral(user_message, model="mistral-medium"):
+    """Send a single-turn chat request to Mistral."""
+    messages = [ChatMessage(role="user", content=user_message)]
+    chat_response = client.chat(model=model, messages=messages)
+    return chat_response.choices[0].message.content

 app = gr.Interface(fn=answer_question,
                    inputs=gr.inputs.Textbox(lines=2, placeholder="Ask a question about agriculture..."),
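
The commit leaves index = None and chunks = [] as placeholders. A minimal sketch of one way to fill them in, reusing get_text_embedding from app.py; the file name docs.txt and the 512-character chunk size are assumptions for illustration, not part of the commit:

import faiss
import numpy as np

# Hypothetical knowledge-base file; replace with your actual corpus
with open("docs.txt", encoding="utf-8") as f:
    text = f.read()

# Naive fixed-size chunking (512 characters is an arbitrary choice)
chunk_size = 512
chunks = [text[i:i + chunk_size] for i in range(0, len(text), chunk_size)]

# Embed every chunk with the same mistral-embed model used for queries;
# FAISS expects float32 vectors
embeddings = np.array([get_text_embedding(c) for c in chunks], dtype=np.float32)

# Exact L2-distance index whose dimension is taken from the embeddings
index = faiss.IndexFlatL2(embeddings.shape[1])
index.add(embeddings)

IndexFlatL2 performs exact search, which matches the index.search(question_embedding, k=5) call in answer_question; for a large corpus an approximate index such as IndexIVFFlat would be the usual substitution.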
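
With the index built and API_KEY set in .env, a quick end-to-end check of the retrieve-then-generate path (the question string is just an example):

print(answer_question("How can I improve soil fertility?"))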