Update app.py
app.py CHANGED

--- app.py (before)
@@ -1,180 +1,161 @@
 import os
 import logging
-import asyncio
 import gradio as gr
 from huggingface_hub import InferenceClient
 from langchain.embeddings import HuggingFaceEmbeddings
 from langchain.vectorstores import FAISS
 from langchain.schema import Document
 from duckduckgo_search import DDGS

 # Configure logging
-logging.basicConfig(level=logging.INFO, format=

 # Environment variables and configurations

 MODELS = [
     "mistralai/Mistral-7B-Instruct-v0.3",
     "mistralai/Mixtral-8x7B-Instruct-v0.1",
     "mistralai/Mistral-Nemo-Instruct-2407",
     "meta-llama/Meta-Llama-3.1-8B-Instruct",
-    "meta-llama/Meta-Llama-3.1-70B-Instruct"
 ]
-logging.info(f"Models list initialized with {len(MODELS)} models.")

 def get_embeddings():
-    logging.info("Loading HuggingFace embeddings model.")
     return HuggingFaceEmbeddings(model_name="sentence-transformers/stsb-roberta-large")

-def duckduckgo_search(query):
-    logging.info(f"Initiating DuckDuckGo search for query: {query}")
-    try:
-        with DDGS() as ddgs:
-            results = ddgs.text(query, max_results=10)
-        logging.info(f"Search completed, found {len(results)} results.")
-        return results
-    except Exception as e:
-        logging.error(f"Error during DuckDuckGo search: {str(e)}")
-        return []
-
-async def rephrase_query(query, context, model):
-    # Log the original query for debugging
-    logging.info(f"Original query: {query}")
-
-    prompt = f"""You are a highly intelligent conversational chatbot. Your task is to analyze the given context and new query, then decide whether to rephrase the query with or without incorporating the context. Follow these steps:
-1. Determine if the new query is a continuation of the previous conversation or an entirely new topic.
-2. If it's a continuation, rephrase the query by incorporating relevant information from the context to make it more specific and contextual.
-3. If it's a new topic, rephrase the query to make it more appropriate for a web search, focusing on clarity and accuracy without using the previous context.
-4. Provide ONLY the rephrased query without any additional explanation or reasoning.
-
-Context: {context}
-
-New query: {query}
-
-Rephrased query:"""
-
-    client = InferenceClient(model, token=huggingface_token)
-
-    try:
-        response = await asyncio.to_thread(
-            client.text_generation,
-            prompt=prompt,
-            max_new_tokens=100,
-            temperature=0.2,
-        )
-
-        # Extract the rephrased query
-        rephrased_query = response.strip()
-
-        # Log the rephrased query
-        logging.info(f"Rephrased query: {rephrased_query}")
-
-        return rephrased_query
-
-    except Exception as e:
-        logging.error(f"Error in rephrase_query: {str(e)}")
-        return query  # Fallback to the original query if there's an error

 def create_web_search_vectors(search_results):
-    logging.info(f"Creating web search vectors from {len(search_results)} search results.")
     embed = get_embeddings()
-    documents = [
     return FAISS.from_documents(documents, embed)

-    search_results =

     if not search_results:

     if use_embeddings:
-        logging.info("Using embeddings to retrieve relevant documents.")
         web_search_database = create_web_search_vectors(search_results)
         retriever = web_search_database.as_retriever(search_kwargs={"k": 5})
         relevant_docs = retriever.get_relevant_documents(query)
         context = "\n".join([doc.page_content for doc in relevant_docs])
     else:
-        context = "\n".join([f"{result['title']}\n{result['body']}\nSource: {result['href']}" for result in search_results])

-Reason through the query inside <thinking> tags, and then provide your final response inside <output> tags.
-Providing comprehensive and accurate information based on web search results is essential.
-Your goal is to synthesize the given context into a coherent and detailed response that directly addresses the user's query.
-Please ensure that your response is well-structured, factual, and cites sources where appropriate.
-If you detect that you made a mistake in your reasoning at any point, correct yourself inside <reflection> tags."""

     user_message = f"""Using the following context from web search results:
 {context}

-Write a detailed and complete research document that fulfills the following user request: '{query}'
-After writing the document, please provide a list of sources used in your response."""

-    client = InferenceClient(model, token=huggingface_token)
     full_response = ""
     try:
         for _ in range(num_calls):
-                {"role": "system", "content": system_message},
                 {"role": "user", "content": user_message}
             ],
-            max_tokens
-            temperature
-        )
     except Exception as e:

     if not full_response:

     try:
-        yield f"Rephrased Query: {rephrased_query}\n\nSearching the web...\n\n"
-
-        async for main_content, sources in get_response_with_search(rephrased_query, model, use_embeddings, num_calls=num_calls, temperature=temperature):
-            response = f"{main_content}\n\n{sources}"
-            yield response
-    except asyncio.CancelledError:
-        logging.warning("Operation cancelled by user.")
-        yield "The operation was cancelled. Please try again."
     except Exception as e:

 css = """
 /* Fine-tune chatbox size */
@@ -188,14 +169,14 @@ css = """
 }
 """

-# Gradio interface setup
 def create_gradio_interface():
-    logging.info("Setting up Gradio interface.")
     custom_placeholder = "Enter your question here for web search."

     demo = gr.ChatInterface(
-        respond,
         additional_inputs=[
             gr.Dropdown(choices=MODELS, label="Select Model", value=MODELS[3]),
             gr.Slider(minimum=0.1, maximum=1.0, value=0.2, step=0.1, label="Temperature"),
             gr.Slider(minimum=1, maximum=5, value=1, step=1, label="Number of API Calls"),
@@ -225,18 +206,17 @@ def create_gradio_interface():
         gr.Markdown("""
         ## How to use
         1. Enter your question in the chat interface.
-        2.
-        3.
-        4.
-        5.
-        6.
-        7.
         """)

-    logging.info("Gradio interface ready.")
     return demo

 if __name__ == "__main__":
-    logging.info("Launching Gradio application.")
     demo = create_gradio_interface()
     demo.launch(share=True)

+++ app.py (after)
 import os
 import logging
+import json
 import gradio as gr
 from huggingface_hub import InferenceClient
 from langchain.embeddings import HuggingFaceEmbeddings
 from langchain.vectorstores import FAISS
 from langchain.schema import Document
 from duckduckgo_search import DDGS
+from dotenv import load_dotenv
+from functools import lru_cache
+
+# Load environment variables
+load_dotenv()

 # Configure logging
+logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
+logger = logging.getLogger(__name__)

 # Environment variables and configurations
+HUGGINGFACE_TOKEN = os.getenv("HUGGINGFACE_TOKEN")
+logger.info(f"Using Hugging Face token: {HUGGINGFACE_TOKEN[:4]}...{HUGGINGFACE_TOKEN[-4:] if HUGGINGFACE_TOKEN else 'Not Set'}")

 MODELS = [
     "mistralai/Mistral-7B-Instruct-v0.3",
     "mistralai/Mixtral-8x7B-Instruct-v0.1",
     "mistralai/Mistral-Nemo-Instruct-2407",
     "meta-llama/Meta-Llama-3.1-8B-Instruct",
+    "meta-llama/Meta-Llama-3.1-70B-Instruct",
+    "google/gemma-2-9b-it",
+    "google/gemma-2-27b-it"
 ]

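One catch in the token log line above: only the second interpolation is guarded, so HUGGINGFACE_TOKEN[:4] raises a TypeError at import time whenever the variable is unset. A guarded variant (a sketch, not part of this commit) would be:

if HUGGINGFACE_TOKEN:
    logger.info(f"Using Hugging Face token: {HUGGINGFACE_TOKEN[:4]}...{HUGGINGFACE_TOKEN[-4:]}")
else:
    # Sketch: fail soft instead of crashing on the unguarded slice above.
    logger.warning("HUGGINGFACE_TOKEN is not set; Hugging Face API calls will fail.")
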
+FALLBACK_MODEL = "mistralai/Mistral-7B-Instruct-v0.3"
+
+DEFAULT_SYSTEM_PROMPT = """You are a world-class financial AI assistant, capable of complex reasoning and reflection.
+Reason through the query inside <thinking> tags, and then provide your final response inside <output> tags.
+Providing comprehensive and accurate information based on web search results is essential.
+Your goal is to synthesize the given context into a coherent and detailed response that directly addresses the user's query.
+Please ensure that your response is well-structured and factual.
+If you detect that you made a mistake in your reasoning at any point, correct yourself inside <reflection> tags."""

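The prompt asks the model to wrap reasoning in <thinking> tags and the answer in <output> tags, but nothing in this commit strips those tags before the text reaches the chat window. A minimal post-processing sketch (extract_output is a hypothetical helper, not part of the commit):

import re

def extract_output(raw: str) -> str:
    # Prefer the <output> section when the model followed the prompt.
    match = re.search(r"<output>(.*?)</output>", raw, re.DOTALL)
    if match:
        return match.group(1).strip()
    # Otherwise strip any <thinking>/<reflection> blocks and return the rest.
    return re.sub(r"<(thinking|reflection)>.*?</\1>", "", raw, flags=re.DOTALL).strip()
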
+class WebSearcher:
+    def __init__(self):
+        self.ddgs = DDGS()
+
+    @lru_cache(maxsize=100)
+    def search(self, query, max_results=5):
+        try:
+            results = list(self.ddgs.text(query, max_results=max_results))
+            logger.info(f"Search completed for query: {query}")
+            return results
+        except Exception as e:
+            logger.error(f"Error during DuckDuckGo search: {str(e)}")
+            return []

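Worth flagging: lru_cache on a bound method hashes self as part of the cache key, and get_response_with_search below builds a fresh WebSearcher per request, so this cache can never hit across requests (and each entry pins its WebSearcher in memory). A module-level cached function, sketched here as an alternative (the name cached_search is illustrative, not from the commit):

@lru_cache(maxsize=100)
def cached_search(query, max_results=5):
    # Cache key is just (query, max_results); one DDGS session per miss.
    with DDGS() as ddgs:
        # Return a tuple so the cached value is immutable.
        return tuple(ddgs.text(query, max_results=max_results))
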
+@lru_cache(maxsize=1)
 def get_embeddings():
     return HuggingFaceEmbeddings(model_name="sentence-transformers/stsb-roberta-large")

 def create_web_search_vectors(search_results):
     embed = get_embeddings()
+    documents = [
+        Document(
+            page_content=f"{result['title']}\n{result['body']}\nSource: {result['href']}",
+            metadata={"source": result['href']}
+        )
+        for result in search_results if 'body' in result
+    ]
+    logger.info(f"Created vectors for {len(documents)} search results.")
     return FAISS.from_documents(documents, embed)

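If every result is filtered out by the 'body' check, documents comes back empty and FAISS.from_documents will raise instead of returning an empty index. A guarded variant of the same function (a sketch; the None return and the caller-side fallback are assumptions, not part of the commit):

def create_web_search_vectors(search_results):
    embed = get_embeddings()
    documents = [
        Document(
            page_content=f"{r['title']}\n{r['body']}\nSource: {r['href']}",
            metadata={"source": r['href']}
        )
        for r in search_results if 'body' in r
    ]
    if not documents:
        return None  # caller can fall back to the plain-text context path
    return FAISS.from_documents(documents, embed)
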
+def get_response_with_search(query, system_prompt, model, use_embeddings, num_calls=3, temperature=0.2):
+    searcher = WebSearcher()
+    search_results = searcher.search(query)

     if not search_results:
+        logger.warning(f"No web search results found for query: {query}")
+        return "No web search results available. Please try again.", ""
+
+    sources = [result['href'] for result in search_results if 'href' in result]
+    source_list_str = "\n".join(sources)

     if use_embeddings:
         web_search_database = create_web_search_vectors(search_results)
         retriever = web_search_database.as_retriever(search_kwargs={"k": 5})
         relevant_docs = retriever.get_relevant_documents(query)
         context = "\n".join([doc.page_content for doc in relevant_docs])
     else:
+        context = "\n".join([f"{result['title']}\n{result['body']}" for result in search_results])

+    logger.info(f"Context created for query: {query}")

     user_message = f"""Using the following context from web search results:
 {context}

+Write a detailed and complete research document that fulfills the following user request: '{query}'."""

+    client = InferenceClient(model, token=HUGGINGFACE_TOKEN)
     full_response = ""
     try:
         for _ in range(num_calls):
+            api_params = {
+                "messages": [
+                    {"role": "system", "content": system_prompt},
                     {"role": "user", "content": user_message}
                 ],
+                "max_tokens": 3000,
+                "temperature": temperature,
+                "top_p": 0.8,
+            }
+            logger.info(f"Sending request to API with params: {json.dumps(api_params, indent=2, default=str)}")
+            response = client.chat_completion(**api_params)
+            logger.info(f"Raw response from model: {response}")
+
+            if isinstance(response, dict):
+                if 'generated_text' in response:
+                    full_response += response['generated_text']
+                elif 'choices' in response and len(response['choices']) > 0:
+                    if isinstance(response['choices'][0], dict) and 'message' in response['choices'][0]:
+                        full_response += response['choices'][0]['message'].get('content', '')
+                    elif isinstance(response['choices'][0], str):
+                        full_response += response['choices'][0]
+            elif hasattr(response, 'generated_text'):
+                full_response += response.generated_text
+            elif hasattr(response, 'content'):
+                full_response += response.content
+            else:
+                logger.error(f"Unexpected response format from the model: {type(response)}")
+                return "Unexpected response format from the model. Please try again.", ""
     except Exception as e:
+        logger.error(f"Error in get_response_with_search: {str(e)}")
+        logger.info(f"Attempting fallback to {FALLBACK_MODEL}")
+        client = InferenceClient(FALLBACK_MODEL, token=HUGGINGFACE_TOKEN)
+        # Retry with fallback model (you can implement retry logic here)
+        return f"An error occurred while processing your request: {str(e)}", ""

     if not full_response:
+        logger.warning("No response generated from the model")
+        return "No response generated from the model.", ""
+    else:
+        return f"{full_response}\n\nSources:\n{source_list_str}", ""

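The isinstance branches above are defensive parsing for several possible return shapes. In recent huggingface_hub releases, InferenceClient.chat_completion returns a ChatCompletionOutput object, so the usual accessor is the two-liner below (hedged on the installed huggingface_hub version; verify before simplifying):

response = client.chat_completion(**api_params)
full_response += response.choices[0].message.content or ""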
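Relatedly, the except branch constructs a fallback client but, as its own comment notes, never retries; the request still ends in an error string. A minimal retry sketch (illustrative; it reuses names from the commit):

try:
    response = client.chat_completion(**api_params)
except Exception as primary_error:
    logger.error(f"{model} failed ({primary_error}); retrying with {FALLBACK_MODEL}")
    fallback_client = InferenceClient(FALLBACK_MODEL, token=HUGGINGFACE_TOKEN)
    response = fallback_client.chat_completion(**api_params)
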
+def respond(message, system_prompt, history, model, temperature, num_calls, use_embeddings):
+    logger.info(f"Respond function called with message: {message}")
+    logger.info(f"User Query: {message}")
+    logger.info(f"Model Used: {model}")
+    logger.info(f"Temperature: {temperature}")
+    logger.info(f"Number of API Calls: {num_calls}")
+    logger.info(f"Use Embeddings: {use_embeddings}")
+    logger.info(f"System Prompt: {system_prompt}")

     try:
+        main_content, sources = get_response_with_search(message, system_prompt, model, use_embeddings, num_calls=num_calls, temperature=temperature)
+        return main_content
     except Exception as e:
+        logger.error(f"Error in respond function: {str(e)}")
+        return f"An error occurred: {str(e)}"

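One thing to double-check against the wiring below: gr.ChatInterface invokes its fn as fn(message, history, *additional_inputs), so with the System Prompt textbox first in additional_inputs, this respond signature would receive the chat history in its system_prompt parameter. If that is not intended, reordering the parameters (a sketch, assuming the additional_inputs order shown below) lines everything up:

def respond(message, history, system_prompt, model, temperature, num_calls, use_embeddings):
    # history arrives second under ChatInterface's calling convention;
    # the remaining parameters follow the order of additional_inputs.
    ...
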
 css = """
 /* Fine-tune chatbox size */
@@ -188,14 +169,14 @@ css = """
 }
 """

 def create_gradio_interface():
     custom_placeholder = "Enter your question here for web search."

     demo = gr.ChatInterface(
+        fn=respond,
+        additional_inputs_accordion=gr.Accordion(label="⚙️ Parameters", open=True, render=False),
         additional_inputs=[
+            gr.Textbox(value=DEFAULT_SYSTEM_PROMPT, lines=6, label="System Prompt", placeholder="Enter your system prompt here"),
             gr.Dropdown(choices=MODELS, label="Select Model", value=MODELS[3]),
             gr.Slider(minimum=0.1, maximum=1.0, value=0.2, step=0.1, label="Temperature"),
             gr.Slider(minimum=1, maximum=5, value=1, step=1, label="Number of API Calls"),
@@ -225,18 +206,17 @@ def create_gradio_interface():
         gr.Markdown("""
         ## How to use
         1. Enter your question in the chat interface.
+        2. Optionally, modify the System Prompt to guide the AI's behavior.
+        3. Select the model you want to use from the dropdown.
+        4. Adjust the Temperature to control the randomness of the response.
+        5. Set the Number of API Calls to determine how many times the model will be queried.
+        6. Check or uncheck the "Use Embeddings" box to toggle between using embeddings or direct text summarization.
+        7. Press Enter or click the submit button to get your answer.
+        8. Use the provided examples or ask your own questions.
         """)

     return demo

 if __name__ == "__main__":
     demo = create_gradio_interface()
     demo.launch(share=True)