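"""Swarm-based multi-agent web content analyzer (Streamlit app).

Three agents cooperate: a Scraper Agent fetches a website's text, a Research
Agent analyzes it, and a Writer Agent turns the analysis into a final report.
The OpenAI API key is read from Hugging Face Spaces secrets via st.secrets.
"""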
import streamlit as st
from swarm import Swarm, Agent
from bs4 import BeautifulSoup
import requests
import os

# Function to fetch OpenAI API key from Hugging Face secrets
def fetch_openai_api_key():
    try:
        # Fetch the OpenAI API key using Streamlit's secrets
        secret_key = st.secrets.get("OPENAI_API_KEY", "")
        if secret_key:
            os.environ['OPENAI_API_KEY'] = secret_key
            st.success("OpenAI API Key retrieved and set successfully!")
        else:
            st.error("Could not retrieve the OpenAI API Key. Please check your Hugging Face secrets configuration.")
    except Exception as e:
        st.error(f"Error retrieving OpenAI API Key: {str(e)}")

# Initialize the Swarm client
def initialize_swarm_client():
    """Creates a Swarm client; assumes OPENAI_API_KEY is already set in the environment."""
    return Swarm()

# Define the scraping function
def scrape_website(url):
    """Scrapes the text content of the website at the given URL."""
    try:
        response = requests.get(url, timeout=15)  # timeout prevents the app from hanging on slow sites
        response.raise_for_status()
        soup = BeautifulSoup(response.text, 'html.parser')
        return soup.get_text()  # Return the text content from the HTML
    except requests.exceptions.RequestException as e:
        return f"Error during scraping: {str(e)}"

# Scraper Agent
scraper_agent = Agent(
    name="Scraper Agent",
    instructions="You are an agent that scrapes content from websites.",
    functions=[scrape_website]
)
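
# Note: Swarm exposes each function listed in `functions` as a tool the agent's
# model can call during client.run(); the same pattern applies to the agents below.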

# Define the analysis function
def analyze_content(content):
    """Analyzes the scraped content for key points (placeholder: excerpts the first 200 characters)."""
    summary = f"Summary of content: {content[:200]}..."  # A simple placeholder summarization
    return summary

# Research Agent
research_agent = Agent(
    name="Research Agent",
    instructions="You are an agent that analyzes content and extracts key insights.",
    functions=[analyze_content]
)

# Define the writing function
def write_summary(context_variables):
    """Writes a summary based on the analysis."""
    # Swarm populates `context_variables` from the dict passed to client.run()
    analysis = context_variables.get('analysis', '')
    summary = f"Here's a detailed report based on the research: {analysis}"
    return summary

# Writer Agent
writer_agent = Agent(
    name="Writer Agent",
    instructions="You are an agent that writes summaries of research.",
    functions=[write_summary]
)

# Orchestrate the workflow
def orchestrate_workflow(client, url):
    """Runs the scrape -> analyze -> write pipeline and returns the final report."""
    # Step 1: Scrape the website
    scrape_result = client.run(
        agent=scraper_agent,
        messages=[{"role": "user", "content": f"Scrape the following website: {url}"}]
    )
    scraped_content = scrape_result.messages[-1]["content"]

    # Check for any error during scraping
    if "Error during scraping" in scraped_content:
        return scraped_content

    # Step 2: Analyze the scraped content
    research_result = client.run(
        agent=research_agent,
        messages=[{"role": "user", "content": f"Analyze the following content: {scraped_content}"}]
    )
    analysis_summary = research_result.messages[-1]["content"]

    # Step 3: Write the summary based on the analysis
    writer_result = client.run(
        agent=writer_agent,
        messages=[{"role": "user", "content": f"Write a summary based on this analysis: {analysis_summary}"}],
        context_variables={"analysis": analysis_summary}
    )

    final_summary = writer_result.messages[-1]["content"]
    return final_summary
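
# Illustrative usage (not executed by the app): calling the pipeline directly
# from a script would look roughly like this, assuming OPENAI_API_KEY is set:
#
#   client = Swarm()
#   report = orchestrate_workflow(client, "https://example.com")
#   print(report)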

# Streamlit App UI
st.title("🌐 Swarm-based Multi-Agent Web Content Analyzer")
st.caption("""
**Effortlessly extract, analyze, and summarize information from any website!**  
This app leverages a **multi-agent system** built on OpenAI's Swarm framework to:
- **Scrape content** from websites.
- **Analyze and extract key insights** from the scraped data.
- **Generate concise summaries** tailored to your needs.  
Simply provide a URL, and let the agents do the rest!
""")

# Fetch OpenAI API Key from Hugging Face secrets
st.subheader("πŸ”‘ OpenAI API Key Setup")
fetch_openai_api_key()

# Initialize the Swarm client only after the API key is set
if 'OPENAI_API_KEY' in os.environ and os.environ['OPENAI_API_KEY']:
    client = initialize_swarm_client()

    # Input field for the website URL
    st.subheader("🌍 Enter the Website URL")
    url = st.text_input("Enter the URL of the website you want to scrape", placeholder="https://example.com")

    # Run Workflow button
    if st.button("🚀 Run Workflow"):
        if url:
            with st.spinner("Running the multi-agent workflow... This may take a moment."):
                final_report = orchestrate_workflow(client, url)
            st.success("βœ… Workflow complete!")
            st.write("### πŸ“œ Final Report:")
            st.write(final_report)
        else:
            st.error("❌ Please enter a valid URL.")
else:
    st.warning("⚠️ OpenAI API Key not set. Please ensure it's properly configured in Hugging Face secrets.")

# Footer with credits
st.write("---")
st.markdown("""
### Acknowledgement: 
""")