Upload 7 files
- agents.py +153 -0
- app.py +94 -0
- generator.py +93 -0
- model.py +31 -0
- requirements.txt +10 -0
- response_model.py +23 -0
- utils.py +29 -0
agents.py
ADDED
@@ -0,0 +1,153 @@
from textwrap import dedent

from agno.agent import Agent
from agno.tools.duckduckgo import DuckDuckGoTools
from agno.tools.newspaper4k import Newspaper4kTools

from model import Model
from response_model import ScrapedArticle, SearchResults


class BlogAgents:
    """Agents for blog post generation workflow"""

    def __init__(self, llm: Model):
        """Initialize the agents for blog post generation workflow"""
        self.llm = llm.get()
        self.searcher_agent = self._create_searcher_agent()
        self.article_scraper_agent = self._create_article_scraper_agent()
        self.writer_agent = self._create_writer_agent()

    # Search Agent: Handles intelligent web searching and source gathering
    def _create_searcher_agent(self) -> Agent:
        """Create the search agent for finding relevant articles"""
        return Agent(
            model=self.llm,
            tools=[DuckDuckGoTools()],
            description=dedent("""\
                You are BlogResearch-X, an elite research assistant specializing in discovering
                high-quality sources for compelling blog content. Your expertise includes:

                - Finding authoritative and trending sources
                - Evaluating content credibility and relevance
                - Identifying diverse perspectives and expert opinions
                - Discovering unique angles and insights
                - Ensuring comprehensive topic coverage\
            """),
            instructions=dedent("""\
                1. Search Strategy 🔍
                   - Find 10-15 relevant sources and select the 5-7 best ones
                   - Prioritize recent, authoritative content
                   - Look for unique angles and expert insights
                2. Source Evaluation 📊
                   - Verify source credibility and expertise
                   - Check publication dates for timeliness
                   - Assess content depth and uniqueness
                3. Diversity of Perspectives 🌐
                   - Include different viewpoints
                   - Gather both mainstream and expert opinions
                   - Find supporting data and statistics\
            """),
            response_model=SearchResults,
        )

    # Content Scraper: Extracts and processes article content
    def _create_article_scraper_agent(self) -> Agent:
        """Create the article scraper agent for extracting content from articles"""
        return Agent(
            model=self.llm,
            tools=[Newspaper4kTools()],
            description=dedent("""\
                You are ContentBot-X, a specialist in extracting and processing digital content
                for blog creation. Your expertise includes:

                - Efficient content extraction
                - Smart formatting and structuring
                - Key information identification
                - Quote and statistic preservation
                - Maintaining source attribution\
            """),
            instructions=dedent("""\
                1. Content Extraction 📑
                   - Extract content from the article
                   - Preserve important quotes and statistics
                   - Maintain proper attribution
                   - Handle paywalls gracefully
                2. Content Processing 🔄
                   - Format text in clean markdown
                   - Preserve key information
                   - Structure content logically
                3. Quality Control ✅
                   - Verify content relevance
                   - Ensure accurate extraction
                   - Maintain readability\
            """),
            response_model=ScrapedArticle,
        )

    # Content Writer Agent: Crafts engaging blog posts from research
    def _create_writer_agent(self) -> Agent:
        """Create the content writer agent for generating blog posts"""
        return Agent(
            model=self.llm,
            description=dedent("""\
                You are BlogMaster-X, an elite content creator combining journalistic excellence
                with digital marketing expertise. Your strengths include:

                - Crafting viral-worthy headlines
                - Writing engaging introductions
                - Structuring content for digital consumption
                - Incorporating research seamlessly
                - Optimizing for SEO while maintaining quality
                - Creating shareable conclusions\
            """),
            instructions=dedent("""\
                1. Content Strategy 📝
                   - Craft attention-grabbing headlines
                   - Write compelling introductions
                   - Structure content for engagement
                   - Include relevant subheadings
                   - 800-1200 words per post
                2. Writing Excellence ✍️
                   - Balance expertise with accessibility
                   - Use clear, engaging language
                   - Include relevant examples
                   - Incorporate statistics naturally
                3. Source Integration 🔍
                   - Cite sources properly
                   - Include expert quotes
                   - Maintain factual accuracy
                4. Digital Optimization 💻
                   - Structure for scannability
                   - Include shareable takeaways
                   - Optimize for SEO
                   - Add engaging subheadings\
            """),
            expected_output=dedent("""\
                # {Viral-Worthy Headline}

                ## Introduction
                {Engaging hook and context}

                ## {Compelling Section 1}
                {Key insights and analysis}
                {Expert quotes and statistics}

                ## {Engaging Section 2}
                {Deeper exploration}
                {Real-world examples}

                ## {Practical Section 3}
                {Actionable insights}
                {Expert recommendations}

                ## Key Takeaways
                - {Shareable insight 1}
                - {Practical takeaway 2}
                - {Notable finding 3}

                ## Sources
                {Properly attributed sources with links}\
            """),
            markdown=True,
        )
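A minimal usage sketch, not part of the upload: because each agent is created with a response_model, its run() call returns typed content rather than raw text. The API key below is a placeholder.

# Sketch only: assumes a valid Gemini API key.
from agents import BlogAgents
from model import Model

agents = BlogAgents(Model("Gemini", "gemini-2.0-flash", "YOUR_API_KEY"))
response = agents.searcher_agent.run("open-source LLM evaluation")
print(type(response.content))  # SearchResults, per response_model=SearchResults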
app.py
ADDED
@@ -0,0 +1,94 @@
import gradio as gr
import markdown

from agents import BlogAgents
from generator import BlogPostGenerator
from model import Model
from utils import custom_css, example_prompts, get_default_llm


def generate_blog(llm_provider, llm_name, api_key, user_topic):
    # Abort early on missing inputs so we never call the provider without credentials
    if not api_key:
        gr.Warning(f"Please enter your {llm_provider} API key.")
        return gr.update(), ""
    if not llm_name or llm_name.strip() == "":
        gr.Warning("Please enter a model name.")
        return gr.update(), ""
    url_safe_topic = user_topic.lower().replace(" ", "-")
    llm = Model(llm_provider, llm_name, api_key)
    blog_agents = BlogAgents(llm)
    generate_blog_post = BlogPostGenerator(
        blog_agents=blog_agents,
        session_id=f"generate-blog-post-on-{url_safe_topic}",
        debug_mode=True,
    )
    blog_post = generate_blog_post.run(topic=user_topic)
    final_output = ""
    sources = set()
    for response in blog_post:
        if hasattr(response, "content") and response.content:
            final_output += str(response.content) + "\n"
        if hasattr(response, "sources") and response.sources:
            if isinstance(response.sources, (list, set)):
                sources.update(response.sources)
            else:
                sources.add(str(response.sources))

    # Format sources into HTML
    sources_html = ""
    if sources:
        sources_html = "<h3>Sources:</h3><ul>" + "".join(f"<li>{src}</li>" for src in sources) + "</ul>"
    # Convert Markdown to HTML
    html_body = markdown.markdown(final_output)
    # Combine both and return to gr.HTML
    html_content = f"<div>{html_body}{sources_html}</div>"
    return gr.update(value=html_content, visible=True), ""


with gr.Blocks(title="Blog Generator", css=custom_css) as demo:
    gr.Markdown("# AI Blog Generator", elem_classes="center-text")
    with gr.Row():
        with gr.Column(scale=1):
            llm_provider = gr.Radio(
                label="Select LLM Provider",
                choices=["OpenAI", "Gemini", "Claude", "Grok"],
                value="Gemini",
            )

            # Update the model-name textbox when the provider changes
            def update_llm_name(provider):
                return get_default_llm(provider)

            llm_name = gr.Textbox(
                label="Enter LLM Name",
                value=get_default_llm(llm_provider.value),
                info="Specify the model name based on the provider.",
            )
            llm_provider.change(fn=update_llm_name, inputs=llm_provider, outputs=llm_name)

            api_key = gr.Textbox(label="Enter API Key", type="password")
            selected_prompt = gr.Radio(
                label="Select an example or enter your own topic below:",
                choices=example_prompts,
                value=example_prompts[0],
            )
            user_topic = gr.Textbox(label="Enter your own blog topic", value=example_prompts[0])
            generate_btn = gr.Button("Generate Blog")
        with gr.Column(scale=2):
            output = gr.HTML(
                label="Generated Post",
                visible=True,
            )
            warning = gr.Textbox(label="Warning", visible=False)

    def sync_topic(selected, current):
        return selected

    selected_prompt.change(sync_topic, [selected_prompt, user_topic], user_topic)
    generate_btn.click(
        generate_blog,
        inputs=[llm_provider, llm_name, api_key, user_topic],
        outputs=[output, warning],
    )

demo.launch(ssr_mode=False, show_error=True, show_api=False)
generator.py
ADDED
@@ -0,0 +1,93 @@
import json
from textwrap import dedent
from typing import Dict, Iterator, Optional

from agno.utils.log import logger
from agno.workflow import RunEvent, RunResponse, Workflow

from response_model import ScrapedArticle, SearchResults


class BlogPostGenerator(Workflow):
    """Advanced workflow for generating professional blog posts with proper research and citations."""

    description: str = dedent("""\
        An intelligent blog post generator that creates engaging, well-researched content.
        This workflow orchestrates multiple AI agents to research, analyze, and craft
        compelling blog posts that combine journalistic rigor with engaging storytelling.
        The system excels at creating content that is both informative and optimized for
        digital consumption.
    """)

    def __init__(self, blog_agents, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.searcher = blog_agents.searcher_agent
        self.article_scraper = blog_agents.article_scraper_agent
        self.writer = blog_agents.writer_agent

    def get_search_results(self, topic: str, num_attempts: int = 3) -> Optional[SearchResults]:
        # Use the searcher to find the latest articles, retrying on failure
        for attempt in range(num_attempts):
            try:
                searcher_response: RunResponse = self.searcher.run(topic)
                if (
                    searcher_response is not None
                    and searcher_response.content is not None
                    and isinstance(searcher_response.content, SearchResults)
                ):
                    article_count = len(searcher_response.content.articles)
                    logger.info(f"Found {article_count} articles on attempt {attempt + 1}")
                    return searcher_response.content
                else:
                    logger.warning(f"Attempt {attempt + 1}/{num_attempts} failed: Invalid response type")
            except Exception as e:
                logger.warning(f"Attempt {attempt + 1}/{num_attempts} failed: {str(e)}")

        logger.error(f"Failed to get search results after {num_attempts} attempts")
        return None

    def scrape_articles(self, topic: str, search_results: SearchResults) -> Dict[str, ScrapedArticle]:
        scraped_articles: Dict[str, ScrapedArticle] = {}
        for article in search_results.articles:
            # Skip duplicate URLs already scraped in this run
            if article.url in scraped_articles:
                logger.info(f"Article already scraped: {article.url}")
                continue

            article_scraper_response: RunResponse = self.article_scraper.run(article.url)
            if (
                article_scraper_response is not None
                and article_scraper_response.content is not None
                and isinstance(article_scraper_response.content, ScrapedArticle)
            ):
                scraped_articles[article_scraper_response.content.url] = article_scraper_response.content
                logger.info(f"Scraped article: {article_scraper_response.content.url}")
        return scraped_articles

    def run(
        self,
        topic: str,
    ) -> Iterator[RunResponse]:
        """Run the blog post generation workflow."""
        logger.info(f"Generating a blog post on: {topic}")

        # Search the web for articles on the topic
        search_results: Optional[SearchResults] = self.get_search_results(topic)
        # If no search results are found for the topic, end the workflow
        if search_results is None or len(search_results.articles) == 0:
            yield RunResponse(
                event=RunEvent.workflow_completed,
                content=f"Sorry, could not find any articles on the topic: {topic}",
            )
            return

        # Scrape the search results
        scraped_articles: Dict[str, ScrapedArticle] = self.scrape_articles(topic, search_results)

        # Prepare the input for the writer
        writer_input = {
            "topic": topic,
            "articles": [v.model_dump() for v in scraped_articles.values()],
        }

        # Run the writer and yield the response
        yield from self.writer.run(json.dumps(writer_input, indent=4), stream=True)
model.py
ADDED
@@ -0,0 +1,31 @@
from agno.models.anthropic import Claude
from agno.models.google import Gemini
from agno.models.openai import OpenAIChat
from agno.models.xai import xAI


class Model:
    """Thin wrapper that maps a provider name to the corresponding agno model."""

    def __init__(self, provider: str, model_name: str, api_key: str):
        self.provider = provider
        self.api_key = api_key
        if not model_name:
            raise ValueError("Model name must be provided.")
        self.model = self._get_model(model_name)

    def _get_model(self, model_name):
        try:
            if self.provider == "OpenAI":
                return OpenAIChat(api_key=self.api_key, id=model_name)
            elif self.provider == "Gemini":
                return Gemini(api_key=self.api_key, id=model_name)
            elif self.provider == "Claude":
                return Claude(api_key=self.api_key, id=model_name)
            elif self.provider == "Grok":
                return xAI(api_key=self.api_key, id=model_name)
            else:
                raise ValueError(f"Unsupported provider: {self.provider}")
        except Exception as e:
            raise ValueError(f"Failed to initialize {self.provider} model: {e}") from e

    def get(self):
        return self.model
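A quick sketch of the wrapper in isolation; the provider string must match one of the four branches above, and the key is a placeholder.

from model import Model

llm = Model(provider="Claude", model_name="claude-3-5-sonnet-20241022", api_key="YOUR_API_KEY")
claude = llm.get()  # the underlying agno Claude instance, ready to pass to an Agent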
requirements.txt
ADDED
@@ -0,0 +1,10 @@
python-dotenv
newspaper4k
duckduckgo-search
lxml_html_clean
agno
openai
google-genai
anthropic
gradio
markdown
response_model.py
ADDED
@@ -0,0 +1,23 @@
from typing import Optional

from pydantic import BaseModel, Field


class NewsArticle(BaseModel):
    title: str = Field(..., description="Title of the article.")
    url: str = Field(..., description="Link to the article.")
    summary: Optional[str] = Field(..., description="Summary of the article if available.")


class SearchResults(BaseModel):
    articles: list[NewsArticle]


class ScrapedArticle(BaseModel):
    title: str = Field(..., description="Title of the article.")
    url: str = Field(..., description="Link to the article.")
    summary: Optional[str] = Field(..., description="Summary of the article if available.")
    content: Optional[str] = Field(
        ...,
        description="Full article content in markdown format. None if content is unavailable.",
    )
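Note that summary and content are declared Optional[...] but with Field(...), so they are required-but-nullable: the model must emit the field, but None is accepted. A small sketch with made-up data:

from response_model import NewsArticle, SearchResults

results = SearchResults(
    articles=[
        NewsArticle(
            title="Example article",
            url="https://example.com/post",
            summary=None,  # must be passed explicitly, but None is valid
        )
    ]
)
print(results.model_dump_json(indent=2))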
utils.py
ADDED
@@ -0,0 +1,29 @@
def get_default_llm(provider: str) -> str:
    """Get the default LLM name based on the provider"""
    if provider == "OpenAI":
        return "gpt-4o"
    elif provider == "Gemini":
        return "gemini-2.0-flash"
    elif provider == "Claude":
        return "claude-3-5-sonnet-20241022"
    elif provider == "Grok":
        return "grok-beta"
    else:
        raise ValueError(f"Unsupported provider: {provider}")


example_prompts = [
    "How Generative AI is Changing the Way We Work",
    "The Science Behind Why Pizza Tastes Better at 2 AM",
    "How Rubber Ducks Revolutionized Software Development",
    "Why Dogs Think We're Bad at Smelling Things",
    "What Your Browser Tabs Say About You",
]

custom_css = """
.center-text h1, .center-text {
    text-align: center;
    font-size: 36px !important;
    font-weight: bold;
}
"""