"""AI-powered web search assistant (Gradio app).

Searches DuckDuckGo, optionally ranks the results with FAISS embeddings,
and asks a Hugging Face chat model to synthesize an answer.
"""

import json
import logging
import os
import re
import time
from functools import lru_cache

import gradio as gr
from dotenv import load_dotenv
from duckduckgo_search import DDGS
from huggingface_hub import InferenceClient
# The langchain.* paths for these are deprecated; current releases expose
# them from langchain_community / langchain_core instead.
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.vectorstores import FAISS
from langchain_core.documents import Document
from tenacity import retry, stop_after_attempt, wait_fixed
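
# Rough dependency list implied by the imports (usual PyPI names; this file
# does not pin exact versions): gradio, huggingface_hub, python-dotenv,
# tenacity, duckduckgo_search, langchain-community, langchain-core,
# faiss-cpu, sentence-transformers.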

load_dotenv()

logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)

HUGGINGFACE_TOKEN = os.getenv("HUGGINGFACE_TOKEN")
# Log only a masked fragment of the token, and guard against it being unset:
# slicing None would raise a TypeError.
if HUGGINGFACE_TOKEN:
    logger.info(f"Using Hugging Face token: {HUGGINGFACE_TOKEN[:4]}...{HUGGINGFACE_TOKEN[-4:]}")
else:
    logger.warning("HUGGINGFACE_TOKEN is not set; Inference API calls will fail.")

MODELS = [
    "mistralai/Mistral-7B-Instruct-v0.3",
    "mistralai/Mixtral-8x7B-Instruct-v0.1",
    "mistralai/Mistral-Nemo-Instruct-2407",
    "meta-llama/Meta-Llama-3.1-8B-Instruct",
    "meta-llama/Meta-Llama-3.1-70B-Instruct",
    "google/gemma-2-9b-it",
    "google/gemma-2-27b-it",
]

FALLBACK_MODEL = "mistralai/Mistral-7B-Instruct-v0.3"

DEFAULT_SYSTEM_PROMPT = """You are a world-class financial AI assistant, capable of complex reasoning and reflection.
Reason through the query inside <thinking> tags, and then provide your final response inside <output> tags.
Providing comprehensive and accurate information based on web search results is essential.
Your goal is to synthesize the given context into a coherent and detailed response that directly addresses the user's query.
Please ensure that your response is well-structured and factual.
If you detect that you made a mistake in your reasoning at any point, correct yourself inside <reflection> tags."""
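

# The system prompt asks the model to wrap its final answer in <output> tags,
# but the raw model text (including any <thinking>/<reflection> sections) is
# returned to the user as-is below. A minimal sketch of a parser for that tag
# convention, should only the final answer be wanted (not wired into the UI;
# the function name is ours, and it uses the standard-library `re` module
# imported above):
def extract_output_section(text: str) -> str:
    """Return the <output>...</output> body if present, else the full text."""
    match = re.search(r"<output>(.*?)</output>", text, re.DOTALL)
    return match.group(1).strip() if match else text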


class WebSearcher:
    """Thin wrapper around DuckDuckGo text search, with result caching."""

    def __init__(self):
        self.ddgs = DDGS()

    # lru_cache on a method includes `self` in the cache key, so the cache
    # only pays off when a single instance is reused; the module-level
    # `searcher` below is shared by all requests for exactly that reason.
    @lru_cache(maxsize=100)
    def search(self, query, max_results=5):
        try:
            results = list(self.ddgs.text(query, max_results=max_results))
            logger.info(f"Search completed for query: {query}")
            return results
        except Exception as e:
            logger.error(f"Error during DuckDuckGo search: {str(e)}")
            return []


# Shared instance so the search cache survives across requests.
searcher = WebSearcher()


@lru_cache(maxsize=1)
def get_embeddings():
    # Cached so the embedding model is loaded only once per process.
    return HuggingFaceEmbeddings(model_name="sentence-transformers/stsb-roberta-large")


def create_web_search_vectors(search_results):
    """Build an in-memory FAISS index over the search results."""
    embed = get_embeddings()
    documents = [
        Document(
            page_content=f"{result['title']}\n{result['body']}\nSource: {result['href']}",
            metadata={"source": result['href']}
        )
        for result in search_results if 'body' in result
    ]
    logger.info(f"Created vectors for {len(documents)} search results.")
    return FAISS.from_documents(documents, embed)


@retry(stop=stop_after_attempt(3), wait=wait_fixed(2))
def make_api_call(client, api_params):
    # Retry transient Inference API failures: up to 3 attempts, 2 s apart.
    return client.chat_completion(**api_params)
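

def extract_response_text(response):
    """Pull the generated text out of the several shapes a chat_completion
    response can take (dict-like payloads as well as attribute-style
    objects). Returns None if nothing recognizable is found.

    Shared by the main request loop and the fallback path in
    get_response_with_search().
    """
    if isinstance(response, dict):
        if 'generated_text' in response:
            return response['generated_text']
        if 'choices' in response and len(response['choices']) > 0:
            choice = response['choices'][0]
            if isinstance(choice, dict) and 'message' in choice:
                return choice['message'].get('content', '')
            if isinstance(choice, str):
                return choice
    if hasattr(response, 'generated_text'):
        return response.generated_text
    if hasattr(response, 'content'):
        return response.content
    return None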


def get_response_with_search(query, system_prompt, model, use_embeddings, history, num_calls=3, temperature=0.2):
    search_results = searcher.search(query)

    if not search_results:
        logger.warning(f"No web search results found for query: {query}")
        return "No web search results available. Please try again.", ""

    sources = [result['href'] for result in search_results if 'href' in result]
    source_list_str = "\n".join(sources)

    if use_embeddings:
        # Rank the results by semantic similarity to the query and keep the
        # five closest chunks as context.
        web_search_database = create_web_search_vectors(search_results)
        retriever = web_search_database.as_retriever(search_kwargs={"k": 5})
        relevant_docs = retriever.invoke(query)  # invoke() supersedes the deprecated get_relevant_documents()
        context = "\n".join([doc.page_content for doc in relevant_docs])
    else:
        # Without embeddings, concatenate every result verbatim.
        context = "\n".join([f"{result['title']}\n{result['body']}" for result in search_results])

    logger.info(f"Context created for query: {query}")

    chat_history = "\n".join([f"Human: {h[0]}\nAI: {h[1]}" for h in history])
    user_message = f"""Chat history:
{chat_history}

Using the following context from web search results:
{context}

Write a detailed and complete research document that fulfills the following user request: '{query}'."""

    client = InferenceClient(model, token=HUGGINGFACE_TOKEN)
    full_response = ""
    api_params = {
        "messages": [
            {"role": "system", "content": system_prompt},
            {"role": "user", "content": user_message}
        ],
        "max_tokens": 3000,
        "temperature": temperature,
        "top_p": 0.8,
    }

    try:
        for _ in range(num_calls):
            logger.info(f"Sending request to API with params: {json.dumps(api_params, indent=2, default=str)}")
            response = make_api_call(client, api_params)
            logger.info(f"Raw response from model: {response}")

            text = extract_response_text(response)
            if text is None:
                logger.error(f"Unexpected response format from the model: {type(response)}")
                return "Unexpected response format from the model. Please try again.", ""
            full_response += text

            time.sleep(1)  # brief pause between calls to stay clear of rate limits
    except Exception as e:
        logger.error(f"Error in get_response_with_search: {str(e)}")
        logger.info(f"Attempting fallback to {FALLBACK_MODEL}")
        try:
            # Retry once with the fallback model before giving up.
            client = InferenceClient(FALLBACK_MODEL, token=HUGGINGFACE_TOKEN)
            response = make_api_call(client, api_params)
            full_response += extract_response_text(response) or ""
        except Exception as fallback_error:
            logger.error(f"Fallback model failed as well: {str(fallback_error)}")
            return f"An error occurred while processing your request: {str(e)}", ""

    if not full_response:
        logger.warning("No response generated from the model")
        return "No response generated from the model.", ""
    return f"{full_response}\n\nSources:\n{source_list_str}", ""


def respond(message, history, system_prompt, model, temperature, num_calls, use_embeddings):
    # gr.ChatInterface calls fn(message, history, *additional_inputs), so
    # `history` must come second, before the additional inputs.
    logger.info(f"User Query: {message}")
    logger.info(f"Model Used: {model}")
    logger.info(f"Temperature: {temperature}")
    logger.info(f"Number of API Calls: {num_calls}")
    logger.info(f"Use Embeddings: {use_embeddings}")
    logger.info(f"System Prompt: {system_prompt}")
    logger.info(f"History: {history}")

    try:
        main_content, sources = get_response_with_search(
            message, system_prompt, model, use_embeddings, history,
            num_calls=num_calls, temperature=temperature
        )
        return main_content
    except Exception as e:
        logger.error(f"Error in respond function: {str(e)}")
        return f"An error occurred: {str(e)}"


css = """
/* Fine-tune chatbox size */
.chatbot-container {
    height: 600px !important;
    width: 100% !important;
}
.chatbot-container > div {
    height: 100%;
    width: 100%;
}
"""


def create_gradio_interface():
    custom_placeholder = "Enter your question here for web search."

    demo = gr.ChatInterface(
        fn=respond,
        additional_inputs_accordion=gr.Accordion(label="⚙️ Parameters", open=True, render=False),
        additional_inputs=[
            gr.Textbox(value=DEFAULT_SYSTEM_PROMPT, lines=6, label="System Prompt", placeholder="Enter your system prompt here"),
            gr.Dropdown(choices=MODELS, label="Select Model", value=MODELS[3]),
            gr.Slider(minimum=0.1, maximum=1.0, value=0.2, step=0.1, label="Temperature"),
            gr.Slider(minimum=1, maximum=5, value=1, step=1, label="Number of API Calls"),
            gr.Checkbox(label="Use Embeddings", value=False),
        ],
        title="AI-powered Web Search Assistant",
        description="Use web search to answer questions or generate summaries.",
        theme=gr.Theme.from_hub("allenai/gradio-theme"),
        css=css,
        examples=[
            ["What are the latest developments in artificial intelligence?"],
            ["Explain the concept of quantum computing."],
            ["What are the environmental impacts of renewable energy?"]
        ],
        cache_examples=False,
        analytics_enabled=False,
        textbox=gr.Textbox(placeholder=custom_placeholder, container=False, scale=7),
        chatbot=gr.Chatbot(
            show_copy_button=True,
            likeable=True,
            layout="bubble",
            height=400,
        )
    )

    with demo:
        gr.Markdown("""
        ## How to use
        1. Enter your question in the chat interface.
        2. Optionally, modify the System Prompt to guide the AI's behavior.
        3. Select the model you want to use from the dropdown.
        4. Adjust the Temperature to control the randomness of the response.
        5. Set the Number of API Calls to determine how many times the model will be queried.
        6. Check or uncheck the "Use Embeddings" box to toggle between embedding-based retrieval and direct text summarization.
        7. Press Enter or click the submit button to get your answer.
        8. Use the provided examples or ask your own questions.
        """)

    return demo


if __name__ == "__main__":
    demo = create_gradio_interface()
    # share=True exposes a temporary public URL in addition to localhost.
    demo.launch(share=True)