import os
import streamlit as st
import requests
import feedparser
from duckduckgo_search import DDGS
from dotenv import load_dotenv

load_dotenv()
OPENROUTER_API_KEY = os.getenv("OPENROUTER_API_KEY")  # secure access: key comes from the environment, not source
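# NOTE: OPENROUTER_API_KEY must be set in the environment or a local .env file;
# without it, OpenRouter returns an error payload and call_llm() below raises.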

# --- LLM Wrapper ---
def call_llm(messages, model="deepseek/deepseek-chat-v3-0324:free", max_tokens=2048, temperature=0.7):
    """Send a chat-completion request to OpenRouter and return the reply text."""
    url = "https://openrouter.ai/api/v1/chat/completions"
    headers = {
        "Authorization": f"Bearer {OPENROUTER_API_KEY}",
        "Content-Type": "application/json",
        "X-Title": "Autonomous Research Agent",
    }
    data = {
        "model": model,
        "messages": messages,
        "max_tokens": max_tokens,
        "temperature": temperature,
    }
    response = requests.post(url, headers=headers, json=data, timeout=120)
    result = response.json()
    if "choices" not in result:
        # Surface API errors (bad key, rate limits) instead of crashing on a KeyError.
        raise RuntimeError(f"LLM returned invalid response: {result}")
    return result["choices"][0]["message"]["content"]
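
# A minimal smoke test (assumes a valid key is configured; the prompt is illustrative):
#   print(call_llm([{"role": "user", "content": "Say hello in one word."}]))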

# --- Research Source Functions ---
def get_arxiv_papers(query, max_results=3):
    """Fetch matching arXiv papers via the public Atom API."""
    from urllib.parse import quote_plus
    url = f"http://export.arxiv.org/api/query?search_query=all:{quote_plus(query)}&start=0&max_results={max_results}"
    feed = feedparser.parse(url)
    papers = []
    for entry in feed.entries:
        # Not every link entry carries a "type" attribute, so use .get() to avoid AttributeError.
        pdf = next((link.href for link in entry.links if link.get("type") == "application/pdf"), "")
        papers.append({"title": entry.title, "summary": entry.summary[:300], "url": pdf})
    return papers

def get_semantic_scholar_papers(query, max_results=3):
    """Search the Semantic Scholar Graph API for matching papers."""
    url = "https://api.semanticscholar.org/graph/v1/paper/search"
    params = {"query": query, "limit": max_results, "fields": "title,abstract,url"}
    response = requests.get(url, params=params, timeout=30)
    results = response.json().get("data", [])
    # "abstract" is frequently present but null, so guard with `or` before slicing.
    return [
        {"title": p["title"], "summary": (p.get("abstract") or "N/A")[:300], "url": p.get("url", "")}
        for p in results
    ]

def search_duckduckgo_snippets(query, max_results=3):
    with DDGS() as ddgs:
        return [
            {"title": r["title"], "snippet": r["body"], "url": r["href"]}
            for r in ddgs.text(query, max_results=max_results)
        ]

def get_image_urls(query, max_images=1):
    with DDGS() as ddgs:
        return [img["image"] for img in ddgs.images(query, max_results=max_images)]
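
# NOTE: the duckduckgo_search package's API has shifted across releases (the
# project has since been renamed "ddgs"); the result keys used above ("title",
# "body", "href", "image") match the versions that expose DDGS.text()/DDGS.images().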

# --- Research Agent ---
def autonomous_research_agent(topic):
    """Gather sources from arXiv, Semantic Scholar, and the web, then have the LLM synthesize them."""
    arxiv = get_arxiv_papers(topic)
    scholar = get_semantic_scholar_papers(topic)
    web = search_duckduckgo_snippets(topic)
    images = get_image_urls(topic)
    prompt = f"Topic: {topic}\n\n"
    if images:
        # Embed the first retrieved image as inline markdown so it renders in the final report.
        prompt += f"![{topic}]({images[0]})\n\n"
prompt += "## ArXiv:\n" + "\n".join(f"- [{p['title']}]({p['url']})\n> {p['summary']}..." for p in arxiv) + "\n\n" | |
prompt += "## Semantic Scholar:\n" + "\n".join(f"- [{p['title']}]({p['url']})\n> {p['summary']}..." for p in scholar) + "\n\n" | |
prompt += "## Web:\n" + "\n".join(f"- [{w['title']}]({w['url']})\n> {w['snippet']}" for w in web) + "\n\n" | |
prompt += ( | |
"Now synthesize all this into:\n" | |
"1. Research gap\n" | |
"2. Proposed research direction\n" | |
"3. A full academic narrative (markdown format, formal tone)" | |
) | |
return call_llm([{"role": "user", "content": prompt}], max_tokens=3000) | |

# --- Streamlit UI ---
st.set_page_config(page_title="Autonomous Research Agent", layout="wide")
st.title("🤖 Autonomous AI Research Assistant")

if "chat_history" not in st.session_state:
    st.session_state.chat_history = []

topic = st.text_input("Enter a research topic:")
if st.button("Run Agent"):
    if not topic:
        st.warning("Please enter a topic first.")
    else:
        with st.spinner("Researching..."):
            try:
                output = autonomous_research_agent(topic)
                st.session_state.chat_history.append({"role": "user", "content": topic})
                st.session_state.chat_history.append({"role": "assistant", "content": output})
                st.markdown(output)
            except Exception as e:
                st.error(f"Error: {e}")

# --- Follow-up Chat ---
st.divider()
st.subheader("💬 Ask Follow-up Questions")

followup = st.text_input("Ask something based on the previous research:")
if st.button("Send"):
    if followup:
        chat = st.session_state.chat_history + [{"role": "user", "content": followup}]
        with st.spinner("Thinking..."):
            try:
                response = call_llm(chat, max_tokens=1500)
                st.session_state.chat_history.append({"role": "user", "content": followup})
                st.session_state.chat_history.append({"role": "assistant", "content": response})
                st.markdown(response)
            except Exception as e:
                st.error(f"Follow-up failed: {e}")