import os
import logging
import asyncio
import random
from typing import AsyncGenerator, Tuple

import gradio as gr
from huggingface_hub import InferenceClient
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.vectorstores import FAISS
from langchain.schema import Document
from duckduckgo_search import DDGS

logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')


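# Hugging Face API tokens are read from the environment (HUGGINGFACE_TOKEN_1..3).
# A token is picked at random per request as a simple way to spread load across
# accounts; unset variables are filtered out so only valid tokens are used.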
huggingface_tokens = [
    token for token in (
        os.environ.get("HUGGINGFACE_TOKEN_1"),
        os.environ.get("HUGGINGFACE_TOKEN_2"),
        os.environ.get("HUGGINGFACE_TOKEN_3"),
    )
    if token
]


def get_random_token():
    if not huggingface_tokens:
        raise RuntimeError("No Hugging Face API tokens found in the environment.")
    return random.choice(huggingface_tokens)


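# Chat models offered in the UI; all are assumed to be reachable through the
# Hugging Face Inference API with the tokens configured above.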
MODELS = [
    "mistralai/Mistral-7B-Instruct-v0.3",
    "mistralai/Mixtral-8x7B-Instruct-v0.1",
    "mistralai/Mistral-Nemo-Instruct-2407",
    "meta-llama/Meta-Llama-3.1-8B-Instruct",
    "meta-llama/Meta-Llama-3.1-70B-Instruct",
    "google/gemma-2-9b-it",
    "google/gemma-2-27b-it",
]


DEFAULT_SYSTEM_PROMPT = """You are a world-class financial AI assistant, capable of complex reasoning and reflection.
Reason through the query inside <thinking> tags, and then provide your final response inside <output> tags.
Providing comprehensive and accurate information based on web search results is essential.
Your goal is to synthesize the given context into a coherent and detailed response that directly addresses the user's query.
Please ensure that your response is well-structured and factual.
If you detect that you made a mistake in your reasoning at any point, correct yourself inside <reflection> tags."""


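# Embedding model used to index web search results for retrieval. The
# sentence-transformers checkpoint is downloaded the first time the embeddings
# object is created, so the initial call may be slow.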
def get_embeddings():
    return HuggingFaceEmbeddings(model_name="sentence-transformers/stsb-roberta-large")


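# Query DuckDuckGo for up to five text results. Each result is a dict with
# "title", "href", and "body" keys; an empty list is returned on failure.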
def duckduckgo_search(query):
    try:
        with DDGS() as ddgs:
            # Materialize to a list: some duckduckgo_search versions return a
            # generator, and the results are iterated more than once downstream.
            results = list(ddgs.text(query, max_results=5))
        logging.info(f"Search completed for query: {query}")
        return results
    except Exception as e:
        logging.error(f"Error during DuckDuckGo search: {str(e)}")
        return []


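# Embed the search results and build an in-memory FAISS index so the snippets
# most relevant to the user's query can be retrieved.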
def create_web_search_vectors(search_results):
    embed = get_embeddings()
    documents = []
    for result in search_results:
        if 'body' in result:
            content = f"{result['title']}\n{result['body']}\nSource: {result['href']}"
            documents.append(Document(page_content=content, metadata={"source": result['href']}))
    logging.info(f"Created vectors for {len(documents)} search results.")
    return FAISS.from_documents(documents, embed)


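# Build the context string passed to the model: either the top-k snippets
# retrieved from the FAISS index, or all search results concatenated directly.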
def create_context(search_results, use_embeddings, query):
    if use_embeddings:
        web_search_database = create_web_search_vectors(search_results)
        retriever = web_search_database.as_retriever(search_kwargs={"k": 5})
        relevant_docs = retriever.get_relevant_documents(query)
        return "\n".join([doc.page_content for doc in relevant_docs])
    else:
        return "\n".join([f"{result['title']}\n{result['body']}" for result in search_results])


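# Core pipeline: run the web search, assemble the context, and stream the
# model's answer back to the UI. The Inference API call is retried up to
# num_calls times, and the source URLs are appended to the final answer.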
async def get_response_with_search(query: str, system_prompt: str, model: str, use_embeddings: bool, history=None, num_calls: int = 3, temperature: float = 0.2) -> AsyncGenerator[Tuple[str, str], None]:
    search_results = duckduckgo_search(query)

    if not search_results:
        logging.warning(f"No web search results found for query: {query}")
        yield "No web search results available. Please try again.", ""
        return

    sources = [result['href'] for result in search_results if 'href' in result]
    source_list_str = "\n".join(sources)

    context = create_context(search_results, use_embeddings, query)
    logging.info(f"Context created for query: {query}")

    user_message = f"""Using the following context from web search results:
{context}

Write a detailed and complete research document that fulfills the following user request: '{query}'."""

    # Keep the system prompt first, then any prior conversation, then the new request.
    messages = [{"role": "system", "content": system_prompt}]
    if history:
        messages += history
    messages.append({"role": "user", "content": user_message})

    token = get_random_token()
    client = InferenceClient(model, token=token)
    full_response = ""

    for call in range(num_calls):
        try:
            response = await asyncio.to_thread(
                client.chat_completion,
                messages=messages,
                max_tokens=6000,
                temperature=temperature,
                top_p=0.8,
            )

            if response is None or not getattr(response, "choices", None):
                logging.error(f"API call {call + 1} returned an invalid response: {response}")
                if call == num_calls - 1:
                    yield "The API returned an invalid response. Please try again later.", ""
                continue

            new_content = response.choices[0].message.content
            full_response += new_content
            yield full_response, ""

            if full_response:
                break

        except Exception as e:
            logging.error(f"Error in API call {call + 1}: {str(e)}")
            if call == num_calls - 1:
                yield f"An error occurred during API calls: {str(e)}. Please try again later.", ""

        # Brief pause before retrying a failed or empty call.
        await asyncio.sleep(1)

    if not full_response:
        logging.warning("No response generated from the model")
        yield "No response generated from the model. Please try again.", ""
    else:
        yield f"{full_response}\n\nSources:\n{source_list_str}", ""


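# Normalise the Gradio chat history into OpenAI-style message dicts. Gradio's
# default tuple format gives (user, assistant) pairs; plain strings are also handled.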
def process_history(history):
    chat_history = []
    if isinstance(history, str):
        chat_history.append({"role": "system", "content": history})
    elif isinstance(history, list):
        for entry in history:
            if isinstance(entry, (list, tuple)) and len(entry) == 2:
                human, assistant = entry
                chat_history.append({"role": "user", "content": human})
                if assistant:
                    chat_history.append({"role": "assistant", "content": assistant})
            elif isinstance(entry, str):
                chat_history.append({"role": "user", "content": entry})
    return chat_history


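# Gradio ChatInterface callback. ChatInterface calls fn(message, history, *additional_inputs),
# so the parameters after `history` must match the order of the additional inputs
# declared below: system prompt, model, temperature, number of API calls, embeddings toggle.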
async def respond(message, history, system_prompt, model, temperature, num_calls, use_embeddings):
    logging.info(f"User Query: {message}")
    logging.info(f"Model Used: {model}")
    logging.info(f"Temperature: {temperature}")
    logging.info(f"Number of API Calls: {num_calls}")
    logging.info(f"Use Embeddings: {use_embeddings}")
    logging.info(f"System Prompt: {system_prompt}")
    logging.info(f"History: {history}")

    chat_history = process_history(history)

    try:
        async for main_content, sources in get_response_with_search(
            message,
            system_prompt,
            model,
            use_embeddings,
            history=chat_history,
            num_calls=num_calls,
            temperature=temperature
        ):
            # Each yield replaces the currently displayed message, so any
            # separately reported sources are appended to the main content.
            if sources:
                yield f"{main_content}\n\nSources:\n{sources}"
            else:
                yield main_content

    except asyncio.CancelledError:
        logging.warning("The operation was cancelled.")
        yield "The operation was cancelled. Please try again."
    except Exception as e:
        logging.error(f"Error in respond function: {str(e)}")
        yield f"An error occurred: {str(e)}"


css = """ |
|
/* Fine-tune chatbox size */ |
|
.chatbot-container { |
|
height: 600px !important; |
|
width: 100% !important; |
|
} |
|
.chatbot-container > div { |
|
height: 100%; |
|
width: 100%; |
|
} |
|
""" |
|
|
|
|
|
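# Assemble the Gradio ChatInterface: chat window, parameter controls (system
# prompt, model, temperature, call count, embeddings toggle), and usage notes.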
def create_gradio_interface():
    custom_placeholder = "Enter your question here for web search."

    demo = gr.ChatInterface(
        fn=respond,
        additional_inputs_accordion=gr.Accordion(label="⚙️ Parameters", open=True, render=False),
        additional_inputs=[
            gr.Textbox(value=DEFAULT_SYSTEM_PROMPT, lines=6, label="System Prompt", placeholder="Enter your system prompt here"),
            gr.Dropdown(choices=MODELS, label="Select Model", value=MODELS[3]),
            gr.Slider(minimum=0.1, maximum=1.0, value=0.2, step=0.1, label="Temperature"),
            gr.Slider(minimum=1, maximum=5, value=1, step=1, label="Number of API Calls"),
            gr.Checkbox(label="Use Embeddings", value=False),
        ],
        title="AI-powered Web Search Assistant",
        description="Use web search to answer questions or generate summaries.",
        theme=gr.Theme.from_hub("allenai/gradio-theme"),
        css=css,
        examples=[
            ["What are the latest developments in artificial intelligence?"],
            ["Explain the concept of quantum computing."],
            ["What are the environmental impacts of renewable energy?"]
        ],
        cache_examples=False,
        analytics_enabled=False,
        textbox=gr.Textbox(placeholder=custom_placeholder, container=False, scale=7),
        chatbot=gr.Chatbot(
            show_copy_button=True,
            likeable=True,
            layout="bubble",
            height=400,
        )
    )

    with demo:
        gr.Markdown("""
        ## How to use
        1. Enter your question in the chat interface.
        2. Optionally, modify the System Prompt to guide the AI's behavior.
        3. Select the model you want to use from the dropdown.
        4. Adjust the Temperature to control the randomness of the response.
        5. Set the Number of API Calls to determine how many times the model will be queried.
        6. Check or uncheck the "Use Embeddings" box to toggle between using embeddings or direct text summarization.
        7. Press Enter or click the submit button to get your answer.
        8. Use the provided examples or ask your own questions.
        """)

    return demo


if __name__ == "__main__":
    demo = create_gradio_interface()
    demo.launch(share=True)