Phoenix21 committed on
Commit e86d87c · verified · 1 parent: 2db37b9

Create app.py

Files changed (1): app.py +623 -0
app.py ADDED
@@ -0,0 +1,623 @@
from langchain_groq import ChatGroq
from langgraph.graph import StateGraph, START, END
from typing_extensions import TypedDict
from langgraph.constants import Send
from langchain_core.messages import HumanMessage, SystemMessage
from langchain_community.tools.tavily_search import TavilySearchResults
import os
from typing import Annotated, List, Dict, Any
import operator
from pydantic import BaseModel, Field
from datetime import datetime
import requests
from bs4 import BeautifulSoup
import re
import json
import gradio as gr
from langdetect import detect

# Global LLM handle; initialized in generate_ai_news_blog() once the API keys are set.
llm = None

# Define models for structured output
class NewsItem(BaseModel):
    title: str = Field(description="Title of the AI news article")
    url: str = Field(description="URL of the news article")
    source: str = Field(description="Source website of the news")
    description: str = Field(description="Brief description of the news article")

class NewsResults(BaseModel):
    news_items: List[NewsItem] = Field(description="List of AI news articles found")

class Subsection(BaseModel):
    title: str = Field(description="Title of the subsection (based on the news item title)")
    source: str = Field(description="Source of the news item")
    url: str = Field(description="URL of the news item")
    content: str = Field(description="Content for this subsection")

class Section(BaseModel):
    name: str = Field(description="Name for this section of the blog")
    description: str = Field(description="Description for this section of the blog")
    information: str = Field(description="Information which should be included in this section of the blog")
    subsections: List[Subsection] = Field(default_factory=list, description="Subsections for each news item in this category")

class Sections(BaseModel):
    sections: List[Section] = Field(description="List of sections for this blog")

# State definitions
class NewsState(TypedDict):
    query: str
    date: str
    search_results: List[Dict[str, Any]]
    news_items: List[Dict[str, Any]]

class BlogState(TypedDict):
    content: str
    sections: List[Section]
    completed_sections: Annotated[List, operator.add]
    final_report: str

class WorkerState(TypedDict):
    section: Section
    completed_sections: Annotated[List, operator.add]

class ArticleScraperState(TypedDict):
    url: str
    article_content: str
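# Note: Annotated[List, operator.add] marks completed_sections as a reducer
# field: LangGraph combines updates from concurrent nodes with operator.add
# (list concatenation) instead of overwriting, which is what allows the
# section-writing workers below to run in parallel.
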
# Helper function to detect English-language text
def is_english(text):
    try:
        return detect(text) == 'en'
    except Exception:
        # If detection fails, fall back to counting common English words
        common_english_words = ['the', 'and', 'in', 'to', 'of', 'is', 'for', 'with', 'on', 'that']
        text_lower = text.lower()
        english_word_count = sum(1 for word in common_english_words if f" {word} " in f" {text_lower} ")
        return english_word_count >= 3  # Treat as English if at least 3 common words appear
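# Illustrative behavior (langdetect is probabilistic, so results may vary):
#   is_english("The latest AI models are improving fast")   -> normally True
#   is_english("Los modelos de IA avanzan rápidamente")     -> normally False
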
# News search functions
def search_ai_news(state: NewsState):
    """Search for the latest AI news using Tavily"""
    search_tool = TavilySearchResults(max_results=10)

    # Use the provided date, defaulting to today
    today = state.get("date", datetime.now().strftime("%Y-%m-%d"))

    # Create a dated search query to bias results toward recent news
    query = f"latest artificial intelligence news {today} english"

    # Execute search
    search_results = search_tool.invoke({"query": query})

    # Filter out YouTube results and non-English content
    filtered_results = []
    for result in search_results:
        if "youtube.com" not in result.get("url", "").lower():
            # Check if content is in English
            content = result.get("content", "") + " " + result.get("title", "")
            if is_english(content):
                filtered_results.append(result)

    return {"search_results": filtered_results}
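# Each Tavily result is a dict along the lines of
#   {"title": "...", "url": "https://...", "content": "...", "score": 0.97}
# (exact keys depend on the langchain_community version), hence the defensive
# .get() lookups above.
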
def parse_news_items(state: NewsState):
    """Parse search results into structured news items using a more robust approach"""
    search_results = state["search_results"]

    # Format results for the LLM
    formatted_results = "\n\n".join([
        f"Title: {result.get('title', 'No title')}\n"
        f"URL: {result.get('url', 'No URL')}\n"
        f"Content: {result.get('content', 'No content')}"
        for result in search_results
    ])

    # Use a direct prompt instead of structured output
    system_prompt = """
    Extract AI news articles from these search results. Filter out any that aren't about artificial intelligence.

    For each relevant AI news article, provide:
    - title: The title of the article
    - url: The URL of the article
    - source: The source website of the news
    - description: A brief description of the article

    Format your response as a JSON list of objects. Only include the relevant fields, nothing else.
    Example format:
    [
      {
        "title": "New AI Development",
        "url": "https://example.com/news/ai-dev",
        "source": "Example News",
        "description": "Description of the AI development"
      }
    ]
    """

    # Get the response as a string
    response = llm.invoke([
        SystemMessage(content=system_prompt),
        HumanMessage(content=f"Here are the search results:\n\n{formatted_results}")
    ])

    # Extract the JSON part from the response
    response_text = response.content

    # Find a JSON list in the response
    json_match = re.search(r'\[\s*\{.*\}\s*\]', response_text, re.DOTALL)

    # Fallback item used whenever a structured list cannot be recovered
    fallback_items = [{
        "title": "AI News Roundup",
        "url": "https://example.com/ai-news",
        "source": "Various Sources",
        "description": "Compilation of latest AI news from various sources."
    }]

    if json_match:
        try:
            # Parse the JSON text
            news_items = json.loads(json_match.group(0))
        except json.JSONDecodeError:
            # JSON parsing failed
            news_items = fallback_items
    else:
        # No JSON list found in the response
        news_items = fallback_items

    return {"news_items": news_items}
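# Example: if the model replies with chatter around the data, e.g.
#   'Here you go: [{"title": "X", "url": "u", "source": "s", "description": "d"}] Hope that helps!'
# the regex above isolates the bracketed JSON list so json.loads() can parse it.
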
# Article scraping function
def scrape_article_content(state: ArticleScraperState):
    """Scrape the content from a news article URL"""
    url = state["url"]

    try:
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'
        }
        response = requests.get(url, headers=headers, timeout=10)
        response.raise_for_status()

        soup = BeautifulSoup(response.text, 'html.parser')

        # Try to find the main article content
        article = soup.find('article')
        if article:
            paragraphs = article.find_all('p')
        else:
            # Fall back to all paragraphs on the page
            paragraphs = soup.find_all('p')

        # Extract text from paragraphs
        article_text = "\n\n".join([p.get_text().strip() for p in paragraphs])

        # Collapse whitespace
        article_text = re.sub(r'\s+', ' ', article_text).strip()

        # Trim to a reasonable length for LLM processing
        if len(article_text) > 10000:
            article_text = article_text[:10000] + "..."

        # Verify the content is in English (check the first 500 chars to save time)
        if not is_english(article_text[:500]):
            return {"article_content": "Content not in English or insufficient text to analyze."}

        return {"article_content": article_text}

    except Exception as e:
        return {"article_content": f"Error scraping article: {str(e)}"}
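# Standalone usage sketch (hypothetical URL):
#   scrape_article_content({"url": "https://example.com/some-ai-story"})
#   -> {"article_content": "..."} on success, or an explanatory error string.
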
# Blog generation functions
def orchestrator(state: BlogState):
    """Orchestrator that generates a plan for the blog based on news items"""

    try:
        # Parse the content to extract news items
        content_lines = state['content'].split('\n\n')
        news_items = []
        current_item = {}

        for content_block in content_lines:
            if content_block.startswith('TITLE:'):
                # Start of a new item
                if current_item and 'title' in current_item:
                    news_items.append(current_item)
                current_item = {}

            lines = content_block.split('\n')
            for line in lines:
                if line.startswith('TITLE:'):
                    current_item['title'] = line.replace('TITLE:', '').strip()
                elif line.startswith('SOURCE:'):
                    current_item['source'] = line.replace('SOURCE:', '').strip()
                elif line.startswith('URL:'):
                    current_item['url'] = line.replace('URL:', '').strip()
                elif line.startswith('DESCRIPTION:'):
                    current_item['description'] = line.replace('DESCRIPTION:', '').strip()
                elif line.startswith('CONTENT:'):
                    current_item['content'] = line.replace('CONTENT:', '').strip()
                elif 'content' in current_item:
                    # Continuation line: append it to the existing content
                    current_item['content'] += ' ' + line

        # Add the last item
        if current_item and 'title' in current_item:
            news_items.append(current_item)

        # Group news items by category
        ai_tech_items = []
        ai_business_items = []
        ai_research_items = []

        for item in news_items:
            title = item.get('title', '').lower()
            description = item.get('description', '').lower()

            # Simple keyword-based categorization
            if any(kw in title + description for kw in ['business', 'market', 'company', 'investment', 'startup']):
                ai_business_items.append(item)
            elif any(kw in title + description for kw in ['research', 'study', 'paper', 'university']):
                ai_research_items.append(item)
            else:
                ai_tech_items.append(item)

        def make_subsections(items):
            """Build a Subsection for each news item, reading fields defensively."""
            return [
                Subsection(
                    title=item.get('title', 'Untitled'),
                    source=item.get('source', 'Unknown source'),
                    url=item.get('url', ''),
                    content=f"{item.get('description', '')} {item.get('content', '')[:500]}..."
                ) for item in items
            ]

        # Create sections with subsections
        sections = []

        # AI Technology section
        if ai_tech_items:
            sections.append(Section(
                name="AI Technology Developments",
                description="Recent advancements in AI technology and applications",
                information="Cover the latest developments in AI technology.",
                subsections=make_subsections(ai_tech_items)
            ))

        # AI Business section
        if ai_business_items:
            sections.append(Section(
                name="AI in Business",
                description="How AI is transforming industries and markets",
                information="Focus on business applications and market trends in AI.",
                subsections=make_subsections(ai_business_items)
            ))

        # AI Research section
        if ai_research_items:
            sections.append(Section(
                name="AI Research and Studies",
                description="Latest research findings and academic work in AI",
                information="Cover recent research papers and studies in AI.",
                subsections=make_subsections(ai_research_items)
            ))

        # If no items were categorized, create a general section
        if not sections:
            sections.append(Section(
                name="Latest AI News",
                description="Roundup of the latest AI news from around the web",
                information="Cover a range of AI news topics.",
                subsections=make_subsections(news_items)
            ))

        return {"sections": sections}
    except Exception as e:
        print(f"Error in orchestrator: {str(e)}")
        # Fallback plan if parsing fails
        fallback_sections = [
            Section(
                name="Latest AI Developments",
                description="Overview of recent AI advancements and research",
                information="Summarize the latest AI developments from the provided content.",
                subsections=[]
            )
        ]
        return {"sections": fallback_sections}
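# orchestrator expects state["content"] to contain blank-line-separated blocks:
#   TITLE: <headline>
#   SOURCE: <site>
#   URL: <link>
#   DESCRIPTION: <summary>
#   CONTENT: <article text>
# (exactly the shape generate_ai_news_blog builds below), and it returns
# {"sections": [Section(...), ...]} for assign_workers to fan out.
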
def llm_call(state: WorkerState):
    """Worker writes a section of the blog with subsections for each news item"""

    section = state['section']

    # Generate the section header with an ID for anchor linking
    section_id = section.name.lower().replace(' ', '-')
    section_header = f"## {section.name} {{#{section_id}}}\n\n{section.description}\n"

    # If there are subsections, process each one
    subsections_content = ""
    if section.subsections:
        for idx, subsection in enumerate(section.subsections):
            # Generate subsection text using the LLM
            subsection_prompt = f"""
            Write a detailed subsection about this AI news item:
            Title: {subsection.title}
            Source: {subsection.source}
            URL: {subsection.url}

            Content to summarize and expand on:
            {subsection.content}

            Keep your response focused on the news item and make it engaging. Use markdown formatting.
            """

            subsection_content = llm.invoke([
                SystemMessage(content="You are writing a subsection for an AI news blog. Write in a professional but engaging style. Include key details and insights. Use markdown formatting."),
                HumanMessage(content=subsection_prompt)
            ])

            # Create a clean ID for the subsection
            subsection_id = f"{section_id}-{idx+1}-{subsection.title.lower().replace(' ', '-').replace(':', '').replace('?', '').replace('!', '')}"

            # Format the subsection with its title and source attribution
            formatted_subsection = f"### {subsection.title} {{#{subsection_id}}}\n\n"
            formatted_subsection += f"*Source: [{subsection.source}]({subsection.url})*\n\n"
            formatted_subsection += subsection_content.content

            subsections_content += formatted_subsection + "\n\n"
    else:
        # If there are no subsections, generate the full section content
        section_content = llm.invoke([
            SystemMessage(content="Write a blog section following the provided name, description, and information. Include no preamble. Use markdown formatting."),
            HumanMessage(content=f"Here is the section name: {section.name}\nDescription: {section.description}\nInformation: {section.information}")
        ])
        subsections_content = section_content.content

    # Combine the section header and subsection bodies
    complete_section = section_header + subsections_content

    # Return the completed section (merged into BlogState via the reducer)
    return {"completed_sections": [complete_section]}
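# Caveat: the "{#anchor-id}" suffix is attribute-list syntax from Pandoc and
# Python-Markdown, not standard CommonMark, so depending on the renderer
# (e.g. gr.Markdown) the anchors may be shown literally rather than becoming
# link targets.
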
def synthesizer(state: BlogState):
    """Synthesize the full blog from sections with proper formatting and a hierarchical TOC"""

    # List of completed sections
    completed_sections = state["completed_sections"]

    # Format completed sections into a full blog post
    completed_report = "\n\n".join(completed_sections)

    # Add title, date, and introduction
    today = datetime.now().strftime("%Y-%m-%d")
    blog_title = f"# AI News Roundup - {today}"

    # Generate a brief introduction
    intro = llm.invoke([
        SystemMessage(content="Write a brief introduction for an AI news roundup blog post. Keep it under 100 words. Be engaging and professional."),
        HumanMessage(content=f"Today's date is {today}. Write a brief introduction for an AI news roundup.")
    ])

    # Create a hierarchical table of contents
    table_of_contents = "## Table of Contents\n\n"

    # Find all section headings (anchored at line start so "###" lines don't match)
    section_matches = re.findall(r'^## (.*?) {#(.*?)}', completed_report, re.MULTILINE)

    for i, (section_name, section_id) in enumerate(section_matches, 1):
        # Add the section to the TOC
        table_of_contents += f"{i}. [{section_name}](#{section_id})\n"

        # Slice out this section's text: from its heading up to the next
        # top-level "## " heading (or the end of the report)
        section_start = completed_report.find(f"## {section_name}")
        next_section_match = re.search(r'\n## ', completed_report[section_start + 1:])
        if next_section_match:
            section_end = section_start + 1 + next_section_match.start()
            section_text = completed_report[section_start:section_end]
        else:
            section_text = completed_report[section_start:]

        # Extract subsection headings and IDs within this section
        subsection_matches = re.findall(r'^### (.*?) {#(.*?)}', section_text, re.MULTILINE)

        for j, (subsection_name, subsection_id) in enumerate(subsection_matches, 1):
            # Add the subsection to the TOC with indentation
            table_of_contents += f"    {i}.{j}. [{subsection_name}](#{subsection_id})\n"

    final_report = f"{blog_title}\n\n{intro.content}\n\n{table_of_contents}\n\n---\n\n{completed_report}\n\n---\n\n*This AI News Roundup was automatically generated on {today}.*"

    return {"final_report": final_report}
# Edge function to create workers for each section
def assign_workers(state: BlogState):
    """Assign a worker to each section in the plan"""

    # Kick off section writing in parallel
    return [Send("llm_call", {"section": s}) for s in state["sections"]]
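# Returning a list of Send objects from a conditional edge tells LangGraph to
# dispatch one "llm_call" invocation per section, each with its own WorkerState;
# the workers' {"completed_sections": [...]} updates are then merged by the
# operator.add reducer declared on BlogState.
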
# Main workflow functions
def create_news_search_workflow():
    """Create a workflow for searching and parsing AI news"""
    workflow = StateGraph(NewsState)

    # Add nodes
    workflow.add_node("search_ai_news", search_ai_news)
    workflow.add_node("parse_news_items", parse_news_items)

    # Add edges
    workflow.add_edge(START, "search_ai_news")
    workflow.add_edge("search_ai_news", "parse_news_items")
    workflow.add_edge("parse_news_items", END)

    return workflow.compile()

def create_article_scraper_workflow():
    """Create a workflow for scraping article content"""
    workflow = StateGraph(ArticleScraperState)

    # Add node
    workflow.add_node("scrape_article", scrape_article_content)

    # Add edges
    workflow.add_edge(START, "scrape_article")
    workflow.add_edge("scrape_article", END)

    return workflow.compile()

def create_blog_generator_workflow():
    """Create a workflow for generating the blog"""
    workflow = StateGraph(BlogState)

    # Add nodes
    workflow.add_node("orchestrator", orchestrator)
    workflow.add_node("llm_call", llm_call)
    workflow.add_node("synthesizer", synthesizer)

    # Add edges
    workflow.add_edge(START, "orchestrator")
    workflow.add_conditional_edges("orchestrator", assign_workers, ["llm_call"])
    workflow.add_edge("llm_call", "synthesizer")
    workflow.add_edge("synthesizer", END)

    return workflow.compile()
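# Usage sketch: each compiled graph is invoked with its state dict and returns
# the final state, e.g. (illustrative values):
#   news = create_news_search_workflow().invoke(
#       {"query": "latest artificial intelligence news", "date": "2025-03-01"})
#   news["news_items"]  # -> list of dicts with title/url/source/description
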
def generate_ai_news_blog(groq_api_key=None, tavily_api_key=None, date=None):
    """Main function to generate the AI news blog"""
    # Set API keys if provided
    if groq_api_key:
        os.environ["GROQ_API_KEY"] = groq_api_key
    if tavily_api_key:
        os.environ["TAVILY_API_KEY"] = tavily_api_key

    # Initialize the shared LLM now that the API key is available
    global llm
    llm = ChatGroq(model="qwen-2.5-32b")

    # Use the provided date, defaulting to today
    today = date or datetime.now().strftime("%Y-%m-%d")

    # Step 1: Search for AI news
    news_search = create_news_search_workflow()
    news_results = news_search.invoke({"query": "latest artificial intelligence news", "date": today})

    print(f"Found {len(news_results['news_items'])} AI news items")

    # Step 2: Scrape content for each news item
    article_scraper = create_article_scraper_workflow()
    news_contents = []

    for item in news_results["news_items"]:
        print(f"Scraping: {item.get('title', 'Untitled')} from {item.get('source', 'unknown source')}")
        result = article_scraper.invoke({"url": item.get('url', '')})

        # Skip if not in English
        if "not in English" in result["article_content"]:
            print(f"Skipping non-English content: {item.get('title', 'Untitled')}")
            continue

        news_contents.append({
            "title": item.get('title', 'Untitled'),
            "url": item.get('url', ''),
            "source": item.get('source', 'Unknown source'),
            "description": item.get('description', ''),
            "content": result["article_content"]
        })

    # Format the news content for the blog generator
    formatted_content = "\n\n".join([
        f"TITLE: {item['title']}\nSOURCE: {item['source']}\nURL: {item['url']}\nDESCRIPTION: {item['description']}\nCONTENT: {item['content'][:2000]}..."
        for item in news_contents
    ])

    # Step 3: Generate the blog
    blog_generator = create_blog_generator_workflow()
    blog_result = blog_generator.invoke({
        "content": formatted_content,
        "completed_sections": []
    })

    return blog_result["final_report"]
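# Programmatic usage (key strings below are placeholders, not real credentials):
#   markdown = generate_ai_news_blog(groq_api_key="gsk_...", tavily_api_key="tvly-...")
#   print(markdown)
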
# Gradio UI
def create_gradio_interface():
    """Create a Gradio interface for the AI News Blog Generator"""

    def run_generation(groq_key, tavily_key, selected_date):
        if not groq_key or not tavily_key:
            return "Please provide both API keys."

        try:
            result = generate_ai_news_blog(groq_key, tavily_key, selected_date)
            return result
        except Exception as e:
            return f"Error generating blog: {str(e)}"

    # Create the interface
    with gr.Blocks(title="AI News Blog Generator") as demo:
        gr.Markdown("# AI News Blog Generator")
        gr.Markdown("Generate a daily roundup of AI news articles, categorized by topic.")

        with gr.Row():
            with gr.Column():
                groq_key = gr.Textbox(label="Groq API Key", placeholder="Enter your Groq API key", type="password")
                tavily_key = gr.Textbox(label="Tavily API Key", placeholder="Enter your Tavily API key", type="password")
                date_picker = gr.Textbox(label="Date (YYYY-MM-DD)", placeholder="Leave empty for today's date",
                                         value=datetime.now().strftime("%Y-%m-%d"))
                generate_button = gr.Button("Generate AI News Blog")

            with gr.Column():
                output_md = gr.Markdown("Your AI News Blog will appear here.")

        generate_button.click(
            fn=run_generation,
            inputs=[groq_key, tavily_key, date_picker],
            outputs=output_md
        )

    return demo
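# demo.launch() serves the UI locally (Gradio defaults to http://127.0.0.1:7860);
# on Hugging Face Spaces the same call is detected and hosted automatically.
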
# Run the entire pipeline
if __name__ == "__main__":
    try:
        # Create and launch the Gradio interface
        demo = create_gradio_interface()
        demo.launch()

    except Exception as e:
        print(f"Error running the pipeline: {str(e)}")