import streamlit as st
from swarm import Swarm, Agent
from bs4 import BeautifulSoup
import requests
import os
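
# Streamlit app: a three-agent Swarm pipeline (scrape -> analyze -> write)
# that turns a URL into a short report.
#
# To run locally (assumed setup, adjust to your environment):
#   pip install streamlit requests beautifulsoup4 git+https://github.com/openai/swarm.git
#   export OPENAI_API_KEY=sk-...   # or configure it as a Hugging Face secret
#   streamlit run app.py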

# Function to fetch OpenAI API key
def fetch_openai_api_key():
    """Fetch the OpenAI API key from Hugging Face secrets."""
    try:
        secret_key = st.secrets.get("OPENAI_API_KEY", "")
        if secret_key:
            os.environ['OPENAI_API_KEY'] = secret_key
        else:
            st.warning("⚠️ OpenAI API Key is missing! Please check your Hugging Face secrets configuration.")
    except Exception as e:
        st.error(f"Error retrieving OpenAI API Key: {str(e)}")

# Initialize the Swarm client
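# Swarm() defaults to the standard OpenAI client, which reads OPENAI_API_KEY
# from the environment, so fetch_openai_api_key() must run before this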
def initialize_swarm_client():
    return Swarm()

# Define the scraping function
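# NOTE: get_text() returns all visible page text, including navigation and
# footer boilerplate; a production scraper would filter to the main content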
def scrape_website(url):
    """Scrapes the content of the website."""
    try:
        response = requests.get(url, timeout=10)  # timeout keeps the app from hanging on slow hosts
        response.raise_for_status()
        soup = BeautifulSoup(response.text, 'html.parser')
        return soup.get_text(separator=" ", strip=True)  # plain text from the HTML, whitespace-normalized
    except requests.exceptions.RequestException as e:
        return f"Error during scraping: {str(e)}"

# Scraper Agent
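# Each callable listed in `functions` is exposed to the model as a tool it can
# choose to invoke during client.run()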
scraper_agent = Agent(
    name="Scraper Agent",
    instructions="You are an agent that scrapes content from websites.",
    functions=[scrape_website]
)

# Define the analysis function
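# Placeholder tool: the model does the actual analysis in its reply; this just
# echoes the first 200 characters if it is invoked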
def analyze_content(content):
    """Analyzes the scraped content for key points."""
    summary = f"Summary of content: {content[:200]}..."  # A simple placeholder summarization
    return summary

# Research Agent
research_agent = Agent(
    name="Research Agent",
    instructions="You are an agent that analyzes content and extracts key insights.",
    functions=[analyze_content]
)

# Define the writing function
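# Swarm injects `context_variables` into a tool call automatically when the
# function signature declares a parameter with that exact name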
def write_summary(context_variables):
    """Writes a summary based on the analysis."""
    analysis = context_variables.get('analysis', '')
    summary = f"Here's a detailed report based on the research: {analysis}"
    return summary

# Writer Agent
writer_agent = Agent(
    name="Writer Agent",
    instructions="You are an agent that writes summaries of research.",
    functions=[write_summary]
)

# Orchestrate the workflow
def orchestrate_workflow(client, url):
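    """Run the scrape -> analyze -> write pipeline and return the final report."""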
    # Step 1: Scrape the website
    scrape_result = client.run(
        agent=scraper_agent,
        messages=[{"role": "user", "content": f"Scrape the following website: {url}"}]
    )
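    # client.run() returns a Response object; .messages holds the whole turn,
    # and the last entry carries the agent's final reply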
    scraped_content = scrape_result.messages[-1]["content"]

    # Check for any error during scraping
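    # (best-effort string match on the tool's error text; the model may
    # paraphrase it, so this check is not airtight)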
    if "Error during scraping" in scraped_content:
        return scraped_content

    # Step 2: Analyze the scraped content
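    # (NOTE: very long pages can exceed the model's context window; truncating
    # scraped_content, e.g. scraped_content[:8000], is a pragmatic guard and
    # the limit here is illustrative)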
    research_result = client.run(
        agent=research_agent,
        messages=[{"role": "user", "content": f"Analyze the following content: {scraped_content}"}]
    )
    analysis_summary = research_result.messages[-1]["content"]

    # Step 3: Write the summary based on the analysis
    writer_result = client.run(
        agent=writer_agent,
        messages=[{"role": "user", "content": f"Write a summary based on this analysis: {analysis_summary}"}],
        context_variables={"analysis": analysis_summary}
    )

    final_summary = writer_result.messages[-1]["content"]
    return final_summary

# Streamlit App UI
st.markdown(
    """
    <style>
    .title { text-align: center; font-size: 2.5rem; font-weight: bold; }
    .description { text-align: center; font-size: 1.1rem; color: #555; }
    .button-container { text-align: center; }
    .ack { font-size: 0.8rem; color: #888; text-align: center; }
    </style>
    """,
    unsafe_allow_html=True,
)

st.markdown('<div class="title">πŸ”Ž Swarm-based Web Content Analyzer</div>', unsafe_allow_html=True)
st.markdown('<div class="description">Effortlessly extract, analyze, and summarize web content.</div>', unsafe_allow_html=True)

st.write("")
st.write("")

fetch_openai_api_key()

# Initialize Swarm client only after API key is set
if 'OPENAI_API_KEY' in os.environ and os.environ['OPENAI_API_KEY']:
    client = initialize_swarm_client()

    # Input field for the website URL
    st.subheader("🌍 Enter the Website URL")
    url = st.text_input("Enter the URL of the website you want to scrape", placeholder="https://example.com")

    # Run Workflow button
    st.write("")

    if st.button("Run Workflow"):
        if url:
            with st.spinner("Running the multi-agent workflow... This may take a moment."):
                final_report = orchestrate_workflow(client, url)
            st.success("βœ… Workflow complete!")
            st.write("### πŸ“œ Final Report:")
            st.write(final_report)
        else:
            st.error("❌ Please enter a valid URL.")
else:
    st.sidebar.warning("⚠️ OpenAI API Key not set. Please check your Hugging Face secrets configuration.")

# Footer with credits
st.divider()
st.markdown('<div class="ack">Acknowledgement: </div>', unsafe_allow_html=True)