import os
import logging
import asyncio
import gradio as gr
from huggingface_hub import InferenceClient
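# NOTE: these import paths target legacy LangChain; newer releases expose
# HuggingFaceEmbeddings and FAISS via the langchain_community package instead.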
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.vectorstores import FAISS
from langchain.schema import Document
from duckduckgo_search import DDGS
# Environment variables and configurations
huggingface_token = os.environ.get("HUGGINGFACE_TOKEN")
MODELS = [
"mistralai/Mistral-7B-Instruct-v0.3",
"mistralai/Mixtral-8x7B-Instruct-v0.1",
"mistralai/Mistral-Nemo-Instruct-2407",
"meta-llama/Meta-Llama-3.1-8B-Instruct",
"meta-llama/Meta-Llama-3.1-70B-Instruct"
]
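# Sentence-transformer model used to embed web search snippets for FAISS retrieval.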
def get_embeddings():
return HuggingFaceEmbeddings(model_name="sentence-transformers/stsb-roberta-large")
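# Query DuckDuckGo and return up to 10 results as dicts with 'title', 'body', and 'href'.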
def duckduckgo_search(query):
    # DDGS can fail (rate limits, network errors); return [] so the caller can recover.
    try:
        with DDGS() as ddgs:
            return list(ddgs.text(query, max_results=10))
    except Exception as e:
        logging.error(f"DuckDuckGo search failed: {str(e)}")
        return []
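# Build an in-memory FAISS index over the search snippets so the most
# relevant ones can be retrieved as prompt context.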
def create_web_search_vectors(search_results):
    embed = get_embeddings()
    documents = []
    for result in search_results:
        if 'body' in result:
            content = f"{result['title']}\n{result['body']}\nSource: {result['href']}"
            documents.append(Document(page_content=content, metadata={"source": result['href']}))
    if not documents:
        return None  # FAISS.from_documents raises on an empty list
    return FAISS.from_documents(documents, embed)
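# Stream a web-grounded answer from the selected model, yielding
# (partial_response, sources) tuples as tokens arrive.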
async def get_response_with_search(query, model, use_embeddings, num_calls=3, temperature=0.2):
    search_results = duckduckgo_search(query)
    if not search_results:
        yield "No web search results available. Please try again.", ""
        return
    if use_embeddings:
        web_search_database = create_web_search_vectors(search_results)
        if web_search_database is None:
            yield "No usable web search results available. Please try again.", ""
            return
        retriever = web_search_database.as_retriever(search_kwargs={"k": 5})
        relevant_docs = retriever.get_relevant_documents(query)
        context = "\n".join(doc.page_content for doc in relevant_docs)
    else:
        context = "\n".join(f"{result['title']}\n{result['body']}\nSource: {result['href']}" for result in search_results)
prompt = f"""Using the following context from web search results:
{context}
Write a detailed and complete research document that fulfills the following user request: '{query}'
After writing the document, please provide a list of sources used in your response."""
    # Stream the answer from the Hugging Face Inference API.
    client = InferenceClient(model, token=huggingface_token)
    full_response = ""
    try:
        for _ in range(num_calls):
            for response in client.chat_completion(
                messages=[{"role": "user", "content": prompt}],
                max_tokens=6000,
                temperature=temperature,
                stream=True,
                top_p=0.8,
            ):
                if isinstance(response, dict) and "choices" in response:
                    for choice in response["choices"]:
                        chunk = choice.get("delta", {}).get("content")
                        if chunk:  # the final stream chunk carries no content
                            full_response += chunk
                            yield full_response, ""
                else:
                    logging.error("Unexpected response format or missing attributes in the response object.")
                    break
    except Exception as e:
        logging.error(f"Error in get_response_with_search: {str(e)}")
        yield f"An error occurred while processing your request: {str(e)}", ""
    if not full_response:
        logging.warning("No response generated from the model")
        yield "No response generated from the model.", ""
async def respond(message, history, model, temperature, num_calls, use_embeddings):
logging.info(f"User Query: {message}")
logging.info(f"Model Used: {model}")
logging.info(f"Temperature: {temperature}")
logging.info(f"Number of API Calls: {num_calls}")
logging.info(f"Use Embeddings: {use_embeddings}")
try:
async for main_content, sources in get_response_with_search(message, model, use_embeddings, num_calls=num_calls, temperature=temperature):
response = f"{main_content}\n\n{sources}"
yield response
except asyncio.CancelledError:
yield "The operation was cancelled. Please try again."
except Exception as e:
logging.error(f"Error in respond function: {str(e)}")
yield f"An error occurred: {str(e)}"
css = """
/* Fine-tune chatbox size */
.chatbot-container {
height: 600px !important;
width: 100% !important;
}
.chatbot-container > div {
height: 100%;
width: 100%;
}
"""
# Gradio interface setup
def create_gradio_interface():
custom_placeholder = "Enter your question here for web search."
demo = gr.ChatInterface(
respond,
additional_inputs=[
gr.Dropdown(choices=MODELS, label="Select Model", value=MODELS[2]),
gr.Slider(minimum=0.1, maximum=1.0, value=0.2, step=0.1, label="Temperature"),
gr.Slider(minimum=1, maximum=5, value=1, step=1, label="Number of API Calls"),
gr.Checkbox(label="Use Embeddings", value=True),
],
title="AI-powered Web Search Assistant",
description="Use web search to answer questions or generate summaries.",
theme=gr.Theme.from_hub("allenai/gradio-theme"),
css=css,
examples=[
["What are the latest developments in artificial intelligence?"],
["Explain the concept of quantum computing."],
["What are the environmental impacts of renewable energy?"]
],
cache_examples=False,
analytics_enabled=False,
textbox=gr.Textbox(placeholder=custom_placeholder, container=False, scale=7),
chatbot=gr.Chatbot(
show_copy_button=True,
likeable=True,
layout="bubble",
height=400,
)
)
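    # Render usage instructions beneath the chat interface.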
with demo:
        gr.Markdown("""
        ## How to use
        1. Enter your question in the chat interface.
        2. Select the model you want to use from the dropdown.
        3. Adjust the Temperature to control the randomness of the response.
        4. Set the Number of API Calls to determine how many times the model will be queried.
        5. Check or uncheck the "Use Embeddings" box to toggle between retrieving only the most relevant search results via FAISS embeddings and passing all search results directly as context.
        6. Press Enter or click the submit button to get your answer.
        7. Use the provided examples or ask your own questions.
        """)
return demo
if __name__ == "__main__":
demo = create_gradio_interface()
demo.launch(share=True)
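# A minimal sketch of exercising the search pipeline without the UI. This is
# an assumption-laden example: it needs network access and a valid
# HUGGINGFACE_TOKEN, and would run in place of demo.launch() above.
#
#   async def main():
#       async for partial, _ in get_response_with_search(
#           "What is FAISS?", MODELS[2], use_embeddings=True, num_calls=1
#       ):
#           print(partial)
#
#   asyncio.run(main())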