import os
import logging
from typing import List

import gradio as gr
from duckduckgo_search import DDGS
from pydantic import BaseModel, Field
from langchain_community.vectorstores import FAISS
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_core.documents import Document
from huggingface_hub import InferenceClient

# Set up basic configuration for logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')

# Environment variables and configurations
huggingface_token = os.environ.get("HUGGINGFACE_TOKEN")

MODELS = [
    "mistralai/Mistral-7B-Instruct-v0.3",
    "mistralai/Mixtral-8x7B-Instruct-v0.1",
    "mistralai/Mistral-Nemo-Instruct-2407",
    "meta-llama/Meta-Llama-3.1-8B-Instruct",
    "meta-llama/Meta-Llama-3.1-70B-Instruct",
]

MODEL_TOKEN_LIMITS = {
    "mistralai/Mistral-7B-Instruct-v0.3": 32768,
    "mistralai/Mixtral-8x7B-Instruct-v0.1": 32768,
    "mistralai/Mistral-Nemo-Instruct-2407": 32768,
    "meta-llama/Meta-Llama-3.1-8B-Instruct": 8192,
    "meta-llama/Meta-Llama-3.1-70B-Instruct": 8192,
}

def get_embeddings():
    return HuggingFaceEmbeddings(model_name="sentence-transformers/stsb-roberta-large")

def duckduckgo_search(query):
    with DDGS() as ddgs:
        # Materialize results before the DDGS session closes
        results = list(ddgs.text(query, max_results=5))
    return results

class CitingSources(BaseModel):
    sources: List[str] = Field(
        ...,
        description="List of sources to cite. Each entry should be the URL of the source."
    )

def chatbot_interface(message, history, model, temperature, num_calls):
    if not message.strip():
        # Nothing to do for an empty message; echo the history unchanged
        # (a bare `return value` inside a generator would be silently dropped)
        yield history
        return
    history = history + [(message, "")]
    try:
        for response in respond(message, history, model, temperature, num_calls):
            history[-1] = (message, response)
            yield history
    except gr.CancelledError:
        yield history
    except Exception as e:
        logging.error(f"Unexpected error in chatbot_interface: {str(e)}")
        history[-1] = (message, f"An unexpected error occurred: {str(e)}")
        yield history

def retry_last_response(history, model, temperature, num_calls):
    if not history:
        return history
    last_user_msg = history[-1][0]
    history = history[:-1]  # Remove the last response
    return chatbot_interface(last_user_msg, history, model, temperature, num_calls)

def respond(message, history, model, temperature, num_calls):
    logging.info(f"User Query: {message}")
    logging.info(f"Model Used: {model}")
    try:
        for main_content, sources in get_response_with_search(message, model, num_calls=num_calls, temperature=temperature):
            response = f"{main_content}\n\n{sources}"
            yield response
    except Exception as e:
        logging.error(f"Error with {model}: {str(e)}")
        yield f"An error occurred with the {model} model: {str(e)}. Please try again or select a different model."

def create_web_search_vectors(search_results):
    embed = get_embeddings()
    documents = []
    for result in search_results:
        if 'body' in result:
            content = f"{result['title']}\n{result['body']}\nSource: {result['href']}"
            documents.append(Document(page_content=content, metadata={"source": result['href']}))
    if not documents:
        # FAISS.from_documents raises on an empty list; return None so the caller can bail out
        return None
    return FAISS.from_documents(documents, embed)

def get_response_with_search(query, model, num_calls=3, temperature=0.2):
    search_results = duckduckgo_search(query)
    web_search_database = create_web_search_vectors(search_results)
    if not web_search_database:
        yield "No web search results available. Please try again.", ""
        return
    retriever = web_search_database.as_retriever(search_kwargs={"k": 5})
    relevant_docs = retriever.get_relevant_documents(query)
    context = "\n".join([doc.page_content for doc in relevant_docs])

    prompt = f"""Using the following context from web search results:
{context}

Write a detailed and complete research document that fulfills the following user request: '{query}'
After writing the document, please provide a list of sources used in your response."""

    # Use the Hugging Face Inference API
    client = InferenceClient(model, token=huggingface_token)

    # Calculate input tokens (rough whitespace split; this is an approximation only)
    input_tokens = len(prompt.split())
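    # A more accurate count would use the model's own tokenizer. A minimal
    # sketch, assuming `transformers` is installed and the model repo ships a
    # tokenizer (left commented out here to avoid the extra download):
    #
    #     from transformers import AutoTokenizer
    #     tokenizer = AutoTokenizer.from_pretrained(model, token=huggingface_token)
    #     input_tokens = len(tokenizer.encode(prompt))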
Please try again.", "" return retriever = web_search_database.as_retriever(search_kwargs={"k": 5}) relevant_docs = retriever.get_relevant_documents(query) context = "\n".join([doc.page_content for doc in relevant_docs]) prompt = f"""Using the following context from web search results: {context} Write a detailed and complete research document that fulfills the following user request: '{query}' After writing the document, please provide a list of sources used in your response.""" # Use Hugging Face API client = InferenceClient(model, token=huggingface_token) # Calculate input tokens (this is an approximation, you might need a more accurate method) input_tokens = len(prompt.split()) # Get the token limit for the current model model_token_limit = MODEL_TOKEN_LIMITS.get(model, 8192) # Default to 8192 if model not found # Calculate max_new_tokens max_new_tokens = min(model_token_limit - input_tokens, 4096) # Cap at 4096 to be safe main_content = "" for i in range(num_calls): for message in client.chat_completion( messages=[{"role": "user", "content": prompt}], max_new_tokens=max_new_tokens, temperature=temperature, stream=False, ): if message.choices and message.choices[0].delta and message.choices[0].delta.content: chunk = message.choices[0].delta.content main_content += chunk yield main_content, "" # Yield partial main content without sources def vote(data: gr.LikeData): if data.liked: print(f"You upvoted this response: {data.value}") else: print(f"You downvoted this response: {data.value}") css = """ /* Fine-tune chatbox size */ """ def initial_conversation(): return [ (None, "Welcome! I'm your AI assistant for web search. Here's how you can use me:\n\n" "1. Ask me any question, and I'll search the web for information.\n" "2. You can adjust the model, temperature, and number of API calls for fine-tuned responses.\n" "3. For any queries, feel free to reach out @desai.shreyas94@gmail.com or discord - shreyas094\n\n" "To get started, ask me a question!") ] demo = gr.ChatInterface( respond, additional_inputs=[ gr.Dropdown(choices=MODELS, label="Select Model", value=MODELS[2]), gr.Slider(minimum=0.1, maximum=1.0, value=0.2, step=0.1, label="Temperature"), gr.Slider(minimum=1, maximum=5, value=1, step=1, label="Number of API Calls"), ], title="AI-powered Web Search Assistant", description="Ask questions and get answers from web search results.", theme=gr.themes.Soft( primary_hue="orange", secondary_hue="amber", neutral_hue="gray", font=[gr.themes.GoogleFont("Exo"), "ui-sans-serif", "system-ui", "sans-serif"] ).set( body_background_fill_dark="#0c0505", block_background_fill_dark="#0c0505", block_border_width="1px", block_title_background_fill_dark="#1b0f0f", input_background_fill_dark="#140b0b", button_secondary_background_fill_dark="#140b0b", border_color_accent_dark="#1b0f0f", border_color_primary_dark="#1b0f0f", background_fill_secondary_dark="#0c0505", color_accent_soft_dark="transparent", code_background_fill_dark="#140b0b" ), css=css, examples=[ ["What are the latest developments in artificial intelligence?"], ["Can you explain the basics of quantum computing?"], ["What are the current global economic trends?"] ], cache_examples=False, analytics_enabled=False, textbox=gr.Textbox(placeholder="Ask a question", container=False, scale=7), chatbot = gr.Chatbot( show_copy_button=True, likeable=True, layout="bubble", height=400, value=initial_conversation() ) ) if __name__ == "__main__": demo.launch(share=True)