import os
import streamlit as st
import requests
from transformers import AutoModelForCausalLM, AutoTokenizer
from huggingface_hub import login  # Import the login function
# Authenticate with Hugging Face using a token read from the environment
# (never hardcode secrets in the source)
hf_token = os.environ.get("HF_TOKEN")
login(hf_token)  # Log in to Hugging Face with the provided token
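# On a Hugging Face Space, define HF_TOKEN under Settings -> "Variables and secrets";
# secrets set there are exposed to the app as environment variables.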
# Load the Llama model and tokenizer. "meta-llama/Llama-3.2" is not a complete repo id;
# use a concrete checkpoint such as "meta-llama/Llama-3.2-1B" (gated, hence the login above)
model_name = "meta-llama/Llama-3.2-1B"  # Adjust to the model you have access to
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)
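# Note: in a long-running app it is worth wrapping the two from_pretrained calls in a
# function decorated with @st.cache_resource, so the weights are loaded once per process
# rather than on every Streamlit rerun.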
def search_api_call(query: str):
    # Read the Serper.dev key from the environment rather than embedding it in the code
    api_key = os.environ.get("SERPER_API_KEY")
    # Serper.dev expects a POST with the key in the X-API-KEY header, not a GET query parameter
    url = "https://google.serper.dev/search"
    headers = {"X-API-KEY": api_key, "Content-Type": "application/json"}
    try:
        response = requests.post(url, headers=headers, json={"q": query}, timeout=10)
        response.raise_for_status()
        data = response.json()
        return data.get("organic")  # Serper returns its web results under the "organic" key
    except Exception as e:
        st.error("Error fetching from the API: " + str(e))
        return None
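# Each entry in "organic" is a dict shaped roughly like
# {"title": ..., "link": ..., "snippet": ..., "position": ...},
# which is what summarize_search_results below relies on.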
def is_llm_insufficient(llm_response: str) -> bool:
    # Crude heuristic: treat an empty reply or an explicit "I don't know" as insufficient
    return "I don't know" in llm_response or llm_response.strip() == ""
def summarize_search_results(results):
    summary = ""
    for result in results:
        summary += f"- {result['title']} ({result['link']})\n"
    return summary
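# Produces a markdown bullet list, one line per result, e.g. (hypothetical):
# - Example page title (https://example.com/page)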
def generate_llm_response(user_query: str) -> str:
    inputs = tokenizer(user_query, return_tensors="pt")
    # max_new_tokens bounds the generated text only; max_length would also count prompt tokens
    outputs = model.generate(**inputs, max_new_tokens=150)
    return tokenizer.decode(outputs[0], skip_special_tokens=True)
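# generate() defaults to greedy decoding; pass do_sample=True (with temperature, top_p,
# etc.) if more varied answers are wanted.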
# Streamlit user interface
st.title("Real-Time Factual Information Fetcher")
user_query = st.text_input("Enter your query:")
if st.button("Submit"):
    if user_query:
        llm_response = generate_llm_response(user_query)
        # Fall back to web search when the model's answer looks insufficient
        # or the query explicitly asks about recent information
        if is_llm_insufficient(llm_response) or 'recent' in user_query.lower():
            search_results = search_api_call(user_query)
            if search_results:
                search_summary = summarize_search_results(search_results)
                combined_response = f"{llm_response}\n\nHere are some recent findings:\n{search_summary.strip()}"
            else:
                combined_response = llm_response  # Fall back to the original LLM response
        else:
            combined_response = llm_response
        st.markdown("### Response:")
        st.markdown(combined_response)
    else:
        st.warning("Please enter a query.")