Pamudu13 committed · Commit 8ef8961 · verified · 1 Parent(s): bc6ec05

Update app.py

Files changed (1): app.py (+195 −74)
app.py CHANGED
@@ -10,6 +10,9 @@ import logging
 from web_scraper import research_topic
 import queue
 import threading
+import aiohttp
+import asyncio
+from quart import Quart, request, jsonify
 
 # Create a queue for log messages
 log_queue = queue.Queue()
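
Note: `await request.get_json()` and the hypercorn server at the bottom of this diff both assume an ASGI app, which plain Flask is not. The commit imports Quart but never shows the app object changing; a minimal sketch of the change that would also be needed (an assumption, not part of this diff):

    # Hypothetical -- not shown in this commit. Quart is the ASGI
    # counterpart of Flask, so the app object would need to be created
    # from it for async views and hypercorn to work:
    from quart import Quart

    app = Quart(__name__)  # replaces `app = Flask(__name__)`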
@@ -376,14 +379,125 @@ def generate_from_csv():
         logger.error(f"Error in generate_from_csv: {e}")
         return jsonify({'error': str(e)}), 500
 
+async def generate_preliminary_plan_async(cluster_data):
+    logger.info("Generating preliminary plan...")
+    try:
+        async with aiohttp.ClientSession() as session:
+            async with session.post(
+                'https://openrouter.ai/api/v1/chat/completions',
+                headers={
+                    'Authorization': f'Bearer {cluster_data["openrouter_key"]}',
+                    'HTTP-Referer': 'http://127.0.0.1:5001',
+                    'X-Title': 'Blog Generator'
+                },
+                json={
+                    'model': 'google/gemini-2.0-flash-thinking-exp:free',
+                    'messages': [{
+                        'role': 'user',
+                        'content': f"""You are part of a team that creates world class blog posts.
+
+For each new blog post project, you are provided with a list of keywords and search intent.
+
+- Keywords: The keywords are what the blog post is meant to rank for. They are scattered throughout the blog and define the topic of the blog post.
+
+- Search intent: The search intent captures the intent of the user when searching the keyword, which defines the theme of the blog post, so they click on our blog to satisfy their search.
+
+- Primary keyword: Out of the keywords, there is one keyword known as the primary keyword. The primary keyword will go in the title and first few sentences. It is important that the topic of the blog post is related to the primary keyword so that you can place it into the title and introduction naturally.
+
+Given a list of keywords and search intent, your job is to understand the goal of the blog post, identify the thought process behind the flow of the blog post and come up with a preliminary plan for the post.
+
+Your output must:
+- Recognise the discussion points of the blog post.
+- Be in dot point format.
+
+You must ensure that the plan created satisfies the search intent and revolves directly around the given keywords.
+
+When making the plan, keep in mind that all keywords must be used in the final blog post.
+
+The final goal of the project is to create a high quality, high value, highly relevant blog post that will satisfy the user's search intent and give them everything they need to know about the topic.
+
+A new project just came across your desk with the keywords and search intent below:
+
+Keywords:
+{cluster_data['Keywords']}
+
+Search intent:
+{cluster_data['Intent']}
+
+Primary keyword:
+{cluster_data['Primary Keyword']}
+
+Create the preliminary plan."""
+                    }]
+                }
+            ) as response:
+                if response.status != 200:
+                    raise Exception(f"OpenRouter API error: {await response.text()}")
+
+                response_data = await response.json()
+                if 'choices' not in response_data:
+                    raise Exception(f"Unexpected API response format: {response_data}")
+
+                return response_data['choices'][0]['message']['content']
+    except Exception as e:
+        logger.error(f"Error in generate_preliminary_plan_async: {e}")
+        raise
+
+async def do_research_async(plan, openrouter_key):
+    logger.info("Doing research...")
+    try:
+        # Extract key points from plan to create search queries
+        plan_lines = [line.strip('* -').strip() for line in plan.split('\n') if line.strip()]
+        plan_lines = plan_lines[:3]
+        logger.info(f"Researching top 3 points: {plan_lines}")
+
+        all_research = []
+        async with aiohttp.ClientSession() as session:
+            research_tasks = []
+            for point in plan_lines:
+                if point:
+                    research_tasks.append(
+                        asyncio.create_task(
+                            research_topic_async(point, num_sites=5, openrouter_key=openrouter_key, session=session)
+                        )
+                    )
+
+            results = await asyncio.gather(*research_tasks)
+
+            for result in results:
+                if result['success']:
+                    all_research.append({
+                        'topic': result['query'],
+                        'analysis': result['analysis'],
+                        'sources': result['sources']
+                    })
+
+        formatted_research = "# Research Results\n\n"
+        for research in all_research:
+            formatted_research += f"## {research['topic']}\n\n"
+            formatted_research += f"{research['analysis']}\n\n"
+            formatted_research += "### Sources Referenced\n\n"
+
+            for source in research['sources']:
+                formatted_research += f"- [{source['title']}]({source['source']})\n"
+                if source['meta_info']['description']:
+                    formatted_research += f"  {source['meta_info']['description']}\n"
+
+            formatted_research += "\n---\n\n"
+
+        return formatted_research
+
+    except Exception as e:
+        logger.error(f"Error in do_research_async: {e}")
+        raise
 
 @app.route('/generate-from-csv-text', methods=['POST'])
-def generate_from_csv_text():
+async def generate_from_csv_text():
     try:
         logger.info("Starting blog generation process for multiple clusters...")
 
         # Get CSV content and OpenRouter API key from request JSON
-        data = request.get_json()
+        data = await request.get_json()
         if not data or 'csv_content' not in data:
             return jsonify({'error': 'No CSV content provided'}), 400
         if 'openrouter_key' not in data:
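
Note: `do_research_async` calls `research_topic_async`, which this diff does not define; only the synchronous `research_topic` is imported at the top of app.py. How it is provided is an assumption. One minimal sketch is to run the existing sync scraper in a worker thread so it does not block the event loop:

    # Hypothetical helper -- not shown in this commit. Signature inferred
    # from the call site above; `session` is accepted for compatibility but
    # unused, since the sync scraper manages its own connections.
    import asyncio
    from web_scraper import research_topic

    async def research_topic_async(query, num_sites, openrouter_key, session=None):
        return await asyncio.to_thread(
            research_topic, query, num_sites=num_sites, openrouter_key=openrouter_key
        )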
@@ -404,76 +518,77 @@ def generate_from_csv_text():
 
         generated_blogs = []
 
-        # Process each cluster
-        for cluster_data in clusters:
-            try:
-                logger.info(f"Processing cluster with primary keyword: {cluster_data['Primary Keyword']}")
-
-                # Add OpenRouter key to cluster_data for use in functions
-                cluster_data['openrouter_key'] = openrouter_key
-
-                # Generate preliminary plan
-                logger.info("Generating preliminary plan...")
-                plan = generate_preliminary_plan(cluster_data)
-
-                # Do research
-                logger.info("Doing research...")
-                research = do_research(plan, openrouter_key)
-
-                # Create detailed plan
-                logger.info("Creating detailed plan...")
-                detailed_plan = blog_gen.create_detailed_plan(cluster_data, plan, research)
-
-                # Write blog post
-                logger.info("Writing blog post...")
-                blog_content = blog_gen.write_blog_post(detailed_plan, cluster_data)
-
-                # Add internal links
-                logger.info("Adding internal links...")
-                previous_posts = csv_handler.get_previous_posts()
-                blog_content = blog_gen.add_internal_links(blog_content, previous_posts)
-
-                # Convert to HTML
-                logger.info("Converting to HTML...")
-                cover_image_url = blog_gen.get_cover_image(cluster_data['Primary Keyword'])
-                html_content = blog_gen.convert_to_html(blog_content, cover_image_url)
-
-                # Generate metadata
-                logger.info("Generating metadata...")
-                metadata = blog_gen.generate_metadata(blog_content, cluster_data['Primary Keyword'], cluster_data)
-
-                # Get cover image
-                logger.info("Getting cover image...")
-                cover_image_url = blog_gen.get_cover_image(metadata['title'])
-
-                blog_post_data = {
-                    'title': metadata['title'],
-                    'slug': metadata['slug'],
-                    'meta_description': metadata['meta_description'],
-                    'content': html_content,
-                    'cover_image': cover_image_url,
-                    'keywords': cluster_data['Keywords'],
-                    'primary_keyword': cluster_data['Primary Keyword'],
-                    'research': research,
-                    'detailed_plan': detailed_plan
-                }
-
-                generated_blogs.append({
-                    'status': 'success',
-                    'message': f"Blog post generated successfully for {cluster_data['Primary Keyword']}",
-                    'data': blog_post_data
-                })
-
-                csv_handler.mark_cluster_complete(cluster_data['row_number'])
-                csv_handler.log_completed_post({**metadata, 'keywords': cluster_data['Keywords']})
-
-            except Exception as e:
-                logger.error(f"Error processing cluster {cluster_data['Primary Keyword']}: {e}")
-                generated_blogs.append({
-                    'status': 'error',
-                    'message': f"Failed to generate blog post for {cluster_data['Primary Keyword']}",
-                    'error': str(e)
-                })
+        # Process each cluster asynchronously
+        async with aiohttp.ClientSession() as session:
+            for cluster_data in clusters:
+                try:
+                    logger.info(f"Processing cluster with primary keyword: {cluster_data['Primary Keyword']}")
+
+                    # Add OpenRouter key to cluster_data for use in functions
+                    cluster_data['openrouter_key'] = openrouter_key
+
+                    # Generate preliminary plan
+                    logger.info("Generating preliminary plan...")
+                    plan = await generate_preliminary_plan_async(cluster_data)
+
+                    # Do research
+                    logger.info("Doing research...")
+                    research = await do_research_async(plan, openrouter_key)
+
+                    # Create detailed plan
+                    logger.info("Creating detailed plan...")
+                    detailed_plan = await blog_gen.create_detailed_plan_async(cluster_data, plan, research)
+
+                    # Write blog post
+                    logger.info("Writing blog post...")
+                    blog_content = await blog_gen.write_blog_post_async(detailed_plan, cluster_data)
+
+                    # Add internal links
+                    logger.info("Adding internal links...")
+                    previous_posts = csv_handler.get_previous_posts()
+                    blog_content = await blog_gen.add_internal_links_async(blog_content, previous_posts)
+
+                    # Convert to HTML
+                    logger.info("Converting to HTML...")
+                    cover_image_url = await blog_gen.get_cover_image_async(cluster_data['Primary Keyword'])
+                    html_content = await blog_gen.convert_to_html_async(blog_content, cover_image_url)
+
+                    # Generate metadata
+                    logger.info("Generating metadata...")
+                    metadata = await blog_gen.generate_metadata_async(blog_content, cluster_data['Primary Keyword'], cluster_data)
+
+                    # Get cover image
+                    logger.info("Getting cover image...")
+                    cover_image_url = await blog_gen.get_cover_image_async(metadata['title'])
+
+                    blog_post_data = {
+                        'title': metadata['title'],
+                        'slug': metadata['slug'],
+                        'meta_description': metadata['meta_description'],
+                        'content': html_content,
+                        'cover_image': cover_image_url,
+                        'keywords': cluster_data['Keywords'],
+                        'primary_keyword': cluster_data['Primary Keyword'],
+                        'research': research,
+                        'detailed_plan': detailed_plan
+                    }
+
+                    generated_blogs.append({
+                        'status': 'success',
+                        'message': f"Blog post generated successfully for {cluster_data['Primary Keyword']}",
+                        'data': blog_post_data
+                    })
+
+                    csv_handler.mark_cluster_complete(cluster_data['row_number'])
+                    csv_handler.log_completed_post({**metadata, 'keywords': cluster_data['Keywords']})
+
+                except Exception as e:
+                    logger.error(f"Error processing cluster {cluster_data['Primary Keyword']}: {e}")
+                    generated_blogs.append({
+                        'status': 'error',
+                        'message': f"Failed to generate blog post for {cluster_data['Primary Keyword']}",
+                        'error': str(e)
+                    })
 
         logger.info("All blog generation completed!")
         return jsonify({
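
A sketch of how the updated endpoint could be exercised once the server is running. The request field names come from the handler above; the CSV column layout (`Keywords`, `Intent`, `Primary Keyword`) is inferred from how `cluster_data` is indexed and may not match the real parser exactly:

    # Hypothetical client call against a local instance.
    import requests

    resp = requests.post(
        'http://127.0.0.1:5001/generate-from-csv-text',
        json={
            'csv_content': 'Keywords,Intent,Primary Keyword\n"a,b,c",informational,a',
            'openrouter_key': 'sk-or-...',  # placeholder key
        },
    )
    print(resp.json())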
@@ -492,7 +607,7 @@ def stream_logs():
     while True:
         try:
             # Get log message from queue, timeout after 1 second
-            log_message = log_queue.get(timeout=10)
+            log_message = log_queue.get(timeout=1)
             yield f"data: {log_message}\n\n"
         except queue.Empty:
            # Send a heartbeat to keep the connection alive
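
This change brings the timeout in line with the existing "timeout after 1 second" comment, so the generator emits a heartbeat at least once per second instead of every ten. A hypothetical consumer, assuming the generator is mounted at a /stream-logs route (the route decorator is outside this diff):

    # Hypothetical SSE client -- the /stream-logs path is an assumption;
    # only the stream_logs() function name is visible in this diff.
    import requests

    with requests.get('http://127.0.0.1:5001/stream-logs', stream=True) as resp:
        for line in resp.iter_lines(decode_unicode=True):
            if line and line.startswith('data: '):
                print(line[len('data: '):])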
@@ -508,4 +623,10 @@ def stream_logs():
 
 if __name__ == '__main__':
     logger.info("Starting Flask API server...")
-    app.run(host='127.0.0.1', port=5001, debug=True)
+    import hypercorn.asyncio
+    import hypercorn.config
+
+    config = hypercorn.config.Config()
+    config.bind = ["127.0.0.1:5001"]
+
+    asyncio.run(hypercorn.asyncio.serve(app, config))
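
One design note on the cluster loop above: although every step is awaited, clusters are still processed one at a time, and the `session` the handler opens is never passed to the helpers (each opens its own). If cross-cluster throughput matters, the fan-out pattern `do_research_async` already uses could apply across clusters; a hypothetical sketch, where `process_cluster` stands in for the try/except body of the loop:

    # Hypothetical refactor -- `process_cluster` is a stand-in name for the
    # per-cluster body above, returning one entry for generated_blogs.
    async def generate_all(clusters, openrouter_key):
        tasks = [process_cluster(c, openrouter_key) for c in clusters]
        # return_exceptions=True keeps one failing cluster from cancelling the rest
        return await asyncio.gather(*tasks, return_exceptions=True)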