all files added
Browse files- src/Report/2.txt +0 -0
- src/Report/Report.py +108 -0
- src/Report/status.txt +1 -0
- src/Report/updated1.py +97 -0
- src/analysis.py +63 -0
- src/brand.py +416 -0
- src/content.py +404 -0
- src/input_analysis/Standarddeviation.py +186 -0
- src/input_analysis/competitor-analysis/1.txt +0 -0
- src/input_analysis/competitor-analysis/competitor_analysis.py +237 -0
- src/input_analysis/feedback.py +221 -0
- src/input_analysis/path.py +68 -0
- src/input_analysis/product-analysis/product_analysis.py +236 -0
- src/input_analysis/renamebranding.py +63 -0
- src/social.py +424 -0
- src/templates/1.txt +0 -0
- src/templates_images/1.txt +0 -0
src/Report/2.txt
ADDED
File without changes
|
src/Report/Report.py
ADDED
@@ -0,0 +1,108 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import os
import PyPDF2
from reportlab.lib.pagesizes import letter
from reportlab.pdfgen import canvas
from screeninfo import get_monitors
import sys
import codecs

# Re-wrap stdout as UTF-8 so the emoji status markers used throughout this
# script print safely on consoles whose default encoding is not UTF-8
# (e.g. Windows cp1252).
sys.stdout = codecs.getwriter("utf-8")(sys.stdout.buffer)
def get_screen_size():
    """Return (width, height) of the primary monitor, height scaled by 1.5.

    Falls back to (1000, 1500) when monitor detection fails (headless
    environments, missing display, etc.).
    """
    try:
        primary = get_monitors()[0]  # First entry is the primary monitor.
        return primary.width, int(primary.height * 1.5)
    except Exception as e:
        print(f"Error getting screen size: {e}")
        return 1000, 1500
def add_padding_to_pdf(input_pdf, output_pdf, padding):
    """Add uniform padding (in PDF points) around every page of a PDF.

    The original implementation never applied any padding: it wrote a blank
    reportlab canvas over ``output_pdf`` once per page and then overwrote it
    with the unmodified pages. This version shifts each page's content inward
    and enlarges its mediabox so the padding actually appears in the output.

    Args:
        input_pdf: Path of the source PDF.
        output_pdf: Path where the padded PDF is written.
        padding: Padding to add on every side, in PDF points.
    """
    try:
        reader = PyPDF2.PdfReader(input_pdf)
        writer = PyPDF2.PdfWriter()

        for page in reader.pages:
            original_width = float(page.mediabox.width)
            original_height = float(page.mediabox.height)

            # Shift the existing content so the padding surrounds it.
            page.add_transformation(
                PyPDF2.Transformation().translate(padding, padding)
            )
            # Grow the page so the shifted content plus padding fits.
            page.mediabox.upper_right = (
                original_width + 2 * padding,
                original_height + 2 * padding,
            )
            writer.add_page(page)

        with open(output_pdf, "wb") as out_file:
            writer.write(out_file)

        print(f"✅ Padding added: {input_pdf} -> {output_pdf}")

    except Exception as e:
        print(f"⚠️ Error adding padding to {input_pdf}: {e}")
def merge_pdfs(pdf_list, output_path, elongated_files=None, normalize_size=True):
    """Merge multiple PDFs into a single PDF.

    Args:
        pdf_list: Paths of the PDFs to merge, in order.
        output_path: Destination path for the merged PDF.
        elongated_files: Optional paths needing special (elongated) handling.
            Default is None (was a mutable ``[]`` default — a shared-state bug).
        normalize_size: Unused; kept for backward compatibility.
    """
    if elongated_files is None:
        elongated_files = []
    pdf_merger = PyPDF2.PdfMerger()

    # Normalize ALL paths to absolute form. The original only normalized
    # pdf_list, so the `pdf in elongated_files` membership test below could
    # never match relative elongated paths.
    pdf_list = [os.path.abspath(pdf) for pdf in pdf_list]
    elongated_files = {os.path.abspath(pdf) for pdf in elongated_files}

    for pdf in pdf_list:
        if not os.path.exists(pdf):
            print(f"⚠️ File not found: {pdf}")
            continue

        try:
            if pdf in elongated_files:
                print(f"🔍 Processing elongated file: {pdf}")
                # Add additional elongation logic here if needed
            pdf_merger.append(pdf)
            print(f"✅ Added: {pdf}")
        except Exception as e:
            print(f"⚠️ Could not add {pdf}: {e}")

    try:
        pdf_merger.write(output_path)
        print(f"📄 Merged PDF saved as: {output_path}")
    except Exception as e:
        print(f"Error saving merged PDF: {e}")
    finally:
        # Always release file handles held by the merger.
        pdf_merger.close()
if __name__ == "__main__":
    # PDFs that make up the final report, in presentation order.
    pdf_files = [
        "src/Report/1updated.pdf",
        "data/reports/report_stats/2.pdf",
        "data/reports/report_stats/3.pdf",
        "data/reports/report_stats/objective.pdf",
        "data/reports/template_PDF/brand marketing.pdf",
        "data/reports/template_PDF/content marketing.pdf",
        "data/reports/template_PDF/social media marketing.pdf",
        "data/reports/report_stats/last.pdf",
    ]

    # Template pages that get the special "elongated" treatment when merged.
    elongated_pdfs = [
        "data/reports/template_PDF/brand marketing.pdf",
        "data/reports/template_PDF/content marketing.pdf",
        "data/reports/template_PDF/social media marketing.pdf",
    ]

    # The merged report always lands at src/Report/report.pdf (strict name).
    output_dir = "src/Report"
    os.makedirs(output_dir, exist_ok=True)
    output_file = os.path.join(output_dir, "report.pdf")

    merge_pdfs(pdf_files, output_file, elongated_files=elongated_pdfs)
src/Report/status.txt
ADDED
@@ -0,0 +1 @@
|
|
|
|
|
1 |
+
done
|
src/Report/updated1.py
ADDED
@@ -0,0 +1,97 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import sys
|
2 |
+
from PyPDF2 import PdfReader, PdfWriter
|
3 |
+
from reportlab.pdfgen import canvas
|
4 |
+
from io import BytesIO
|
5 |
+
|
def create_overlay_pdf(text, x, y, output_overlay_pdf, page_width, page_height, max_width):
    """Render *text* horizontally centered at height *y* into a one-page overlay PDF.

    The *x* argument is accepted for API compatibility but the horizontal
    position is always recomputed so the text is centered on the page.
    The font starts at 44pt and drops to 18pt when the rendered text would
    exceed *max_width*.
    """
    buffer = BytesIO()
    overlay = canvas.Canvas(buffer, pagesize=(page_width, page_height))

    size = 44  # Preferred large banner size.
    overlay.setFont("Helvetica-Bold", size)
    overlay.setFillColorRGB(0, 0, 0)  # Black text.

    width_needed = overlay.stringWidth(text, "Helvetica-Bold", size)
    if width_needed > max_width:
        size = 18  # Fallback size for long company names.
        overlay.setFont("Helvetica-Bold", size)
        width_needed = overlay.stringWidth(text, "Helvetica-Bold", size)

    # mediabox dimensions may be Decimal; float() avoids a TypeError below.
    centered_x = (float(page_width) - width_needed) / 2

    overlay.drawString(centered_x, y, text)
    overlay.save()

    buffer.seek(0)
    with open(output_overlay_pdf, "wb") as overlay_file:
        overlay_file.write(buffer.getvalue())
def merge_pdfs(input_pdf, overlay_pdf, output_pdf):
    """Stamp page 1 of *input_pdf* with *overlay_pdf* and write to *output_pdf*."""
    source = PdfReader(input_pdf)
    stamp = PdfReader(overlay_pdf)
    writer = PdfWriter()

    # Only the first page receives the overlay (the cover/title page).
    first_page = source.pages[0]
    first_page.merge_page(stamp.pages[0])
    writer.add_page(first_page)

    # Remaining pages are copied through untouched.
    for page in source.pages[1:]:
        writer.add_page(page)

    with open(output_pdf, "wb") as handle:
        writer.write(handle)
if __name__ == "__main__":
    # Company name comes from the CLI; fall back to a placeholder otherwise.
    company_name = sys.argv[1] if len(sys.argv) > 1 else "Default_Company"

    input_pdf_path = "data/reports/report_stats/1.pdf"
    overlay_pdf_path = "src/Report/overlay_test.pdf"
    output_pdf_path = "src/Report/1updated.pdf"

    # Size the overlay to exactly match the first page of the source PDF.
    reader = PdfReader(input_pdf_path)
    first_page = reader.pages[0]
    page_width = first_page.mediabox.width
    page_height = first_page.mediabox.height

    # Conservative starting coordinates; x is recomputed for centering
    # inside create_overlay_pdf.
    x_coordinate = 800
    y_coordinate = 600
    max_width = 500  # Maximum text width before the font is shrunk.

    # Step 1: build the overlay page carrying the company name.
    create_overlay_pdf(company_name, x_coordinate, y_coordinate, overlay_pdf_path, page_width, page_height, max_width)

    # Step 2: stamp the overlay onto the original PDF.
    merge_pdfs(input_pdf_path, overlay_pdf_path, output_pdf_path)

    print(f"Generated PDF saved as {output_pdf_path} with company name: {company_name}")
src/analysis.py
ADDED
@@ -0,0 +1,63 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import subprocess
|
2 |
+
import sys
|
3 |
+
|
def install_requirements():
    """Install requirements.txt with pip so later scripts have their dependencies."""
    cmd = [sys.executable, "-m", "pip", "install", "-r", "requirements.txt"]
    try:
        subprocess.run(cmd, check=True)
    except subprocess.CalledProcessError as e:
        print(f"⚠️ Failed to install dependencies: {e}")
    else:
        print("✅ Dependencies installed successfully.")
def run_python_file(file_name, company_name):
    """Execute *file_name* with the current interpreter, passing *company_name*.

    Returns:
        ("success", stdout) when the script exits with code 0,
        ("error", message) when it exits non-zero or cannot be launched.
    """
    try:
        completed = subprocess.run(
            [sys.executable, file_name, company_name],
            capture_output=True,
            text=True,
        )
    except Exception as e:
        return "error", f"An error occurred while running {file_name}: {str(e)}"

    if completed.returncode == 0:
        return "success", completed.stdout
    return "error", f"Error in {file_name}: {completed.stderr}"
def main():
    """Run every pipeline script in order for one company, then record success."""
    company_name = sys.argv[1] if len(sys.argv) > 1 else "Default_Company"

    install_requirements()  # Dependencies must exist before any script runs.

    # Pipeline order matters: analysis feeds templates, templates feed the report.
    scripts = [
        'src/input_analysis/product-analysis/product_analysis.py',
        'src/input_analysis/competitor-analysis/competitor_analysis.py',
        'src/input_analysis/Standarddeviation.py',
        'src/input_analysis/renamebranding.py',
        'src/input_analysis/path.py',
        'src/input_analysis/feedback.py',
        'src/brand.py',
        'src/content.py',
        'src/social.py',
        'src/Report/updated1.py',
        'src/Report/Report.py',
    ]

    for script in scripts:
        status, message = run_python_file(script, company_name)
        if status == "error":
            # Abort on the first failure so later stages don't run on bad input.
            print(message)
            return
        print(f"{script} executed successfully.")

    # ✅ After all scripts run successfully, mark the status as "done"
    with open("src/Report/status.txt", "w") as f:
        f.write("done")


if __name__ == "__main__":
    main()
src/brand.py
ADDED
@@ -0,0 +1,416 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import base64
import os
import sys
import time
from playwright.sync_api import sync_playwright
from PIL import Image
import pandas as pd


def save_html_file(file_name, html_content):
    """Write *html_content* to *file_name* (text mode, platform default encoding)."""
    with open(file_name, 'w') as file:
        file.write(html_content)


def encode_image_to_base64(image_path):
    """Return the base64-encoded contents of *image_path*, or "" on any failure.

    The original module defined this function (and save_html_file) twice; the
    first, error-handling-free version ran at import time, so a missing asset
    crashed the whole module even though a robust version existed further
    down. The single robust definition is kept.
    """
    try:
        with open(image_path, "rb") as img_file:
            return base64.b64encode(img_file.read()).decode("utf-8")
    except FileNotFoundError:
        print(f"Image not found: {image_path}")
        return ""
    except Exception as e:
        print(f"Error encoding image {image_path}: {e}")
        return ""


# Static template assets, encoded once at import time and embedded inline in
# every generated page (no external file references at render time).
logo_image_path = r"src/templates_images/Component 3.png"
cola_image_path = r"src/templates_images/Frame 52.png"
logo_base64 = encode_image_to_base64(logo_image_path)
cola_base64 = encode_image_to_base64(cola_image_path)
# Function to generate the standalone HTML page for the Brand Marketing section.
def generate_brand_marketing_html(product_image_base64_1, competitor_image_base64_1, product_image_base64_2, competitor_image_base64_2, donts_html, suggestions_html, company_name):
    """Return a self-contained HTML page for the Brand Marketing report section.

    All images (logo, two product/competitor pairs, case-study photo) are
    embedded as base64 data URIs so the page renders without external files.
    *donts_html* and *suggestions_html* are pre-rendered '<br>'-joined
    fragments produced by parse_cleaned_file_brand_marketing.
    """
    return f"""
    <!DOCTYPE html>
    <html lang="en">
    <head>
        <meta charset="UTF-8">
        <meta name="viewport" content="width=device-width, initial-scale=1.0">
        <title>Brand Marketing Template</title>
        <link href="https://fonts.googleapis.com/css2?family=Inter:wght@600&display=swap" rel="stylesheet">
        <style>
            body {{
                font-family: 'Inter', sans-serif;
                margin: 0;
                padding: 10%; /* Increased padding by 10% */
                background-color: #fff;
                font-size: 6px;
            }}
            @page {{
                size: A4;
                margin: 15px; /* Increased margin to 15px */
            }}
            .header {{
                display: flex;
                justify-content: space-between;
                align-items: center;
                padding: 10px; /* Increased padding to 10px */
                background-color: #FFFFFF;
                margin-bottom: 15px; /* Increased margin between header and content */
            }}
            .header .logo {{
                height: 25px; /* Increased logo height */
            }}
            .container {{
                display: flex;
                flex-direction: column;
                padding: 10px; /* Increased padding to 10px */
                flex-grow: 1;
            }}
            h1 {{
                font-family: 'Times New Roman', serif;
                font-size: 22px; /* Increased font size */
                font-weight: 500;
                line-height: 1.2;
                text-align: left;
                margin-bottom: 15px; /* Increased bottom margin */
            }}
            h2, p {{
                font-size: 13px; /* Increased font size */
                font-weight: 400;
                line-height: 1.4;
                color: #000;
            }}
            .gap {{
                font-size: 10px;
                color: rgb(5, 5, 5);
                font-weight: 100;
            }}
            .examples {{
                font-size: 12px; /* Increased font size */
                color: green;
            }}
            .box-container {{
                width: 100%;
                display: flex;
                flex-direction: column;
                align-items: center;
                margin-top: 15px; /* Increased margin */
            }}
            .wraper {{
                width: 100%;
                height: 220px; /* Increased height */
                display: flex;
                margin: 10px 0; /* Increased margin */
                border-radius: 12px; /* Increased border radius */
                overflow: hidden;
                position: relative;
            }}
            .div-1 {{
                flex: 1;
                background-color: #ecbdbd; /* Pink background */
                display: flex;
                align-items: center;
                justify-content: center;
            }}
            .div-2 {{
                flex: 1;
                background-color: #e6f9e6; /* Green background */
                display: flex;
                align-items: center;
                justify-content: center;
            }}
            .wraper img {{
                max-width: 90%;
                max-height: 90%;
                object-fit: contain;
                border-radius: 8px;
            }}
            .vs-text {{
                position: absolute;
                left: 50%;
                top: 50%;
                transform: translate(-50%, -50%);
                font-size: 18px; /* Increased font size */
                font-weight: bold;
                color: black;
            }}
            .side-by-side-container {{
                display: flex;
                gap: 35px; /* Increased gap between boxes */
                margin-top: 45px; /* Increased top margin */
            }}
            .pink-box, .green-box {{
                flex: 1;
                padding: 18px; /* Increased padding */
                margin-top: 15px; /* Increased margin */
                border-radius: 15px; /* Increased border radius */
                box-sizing: border-box;
                height: auto;
            }}
            .pink-box {{
                background-color: #ecbdbd;
                color: red;
                text-align: start;
                display: flex;
                flex-direction: column;
                align-items: flex-start;
            }}
            .pink-box h6 {{
                font-size: 16px; /* Increased text size */
                font-weight: bold;
                margin: 8px 0; /* Increased margin */
                color: red;
            }}
            .green-box {{
                background-color: #e6f9e6;
                color: green;
                text-align: start;
                display: flex;
                flex-direction: column;
                align-items: flex-start;
            }}
            .green-box h6 {{
                font-size: 16px; /* Increased text size */
                font-weight: bold;
                margin: 8px 0; /* Increased margin */
                color: green;
            }}
            .case-study {{
                font-size: 18px; /* Increased font size */
                color: green;
                margin-top: 25px; /* Increased margin */
            }}
            .container1 {{
                font-size: 12px; /* Increased font size */
                color: green;
                margin-top: 15px; /* Increased margin */
            }}
            .case2 {{
                font-size: 12px; /* Increased font size */
                color: rgb(1, 1, 1);
                margin-top: 25px; /* Increased margin */
            }}
            .case-study img {{
                display: block;
                max-width: 100%;
                height: auto;
                margin-top: 25px; /* Increased margin */
                border-radius: 10px; /* Increased border radius */
            }}
        </style>
    </head>
    <body>
        <div class="header">
            <h1><span style="color:red;">Brand Marketing</span></h1>
            <img src="data:image/png;base64,{logo_base64}" alt="Logo" class="logo">
        </div>
        <div class="container">
            <p>{company_name} should use Brand Marketing effectively as the strategic promotion for identity, products, and services across all channels to create loyalty among consumers.</p>
            <p class="gap"><span style="color: red;">Issue/Gap:</span> {company_name}'s current brand marketing efforts might not be reaching their full potential. A comprehensive analysis of brand messaging, target audience engagement across channels, and content strategy could reveal opportunities to optimize {company_name}'s marketing approach for greater reach and impact.</p>
        </div>
        <h2 class="examples"> Examples:</h2>
        <div class="box-container">
            <div class="wraper">
                <div class="div-1"> <img src="data:image/png;base64,{product_image_base64_1}" alt="Product Image"></div>
                <div class="vs-text">V/S</div>
                <div class="div-2"> <img src="data:image/png;base64,{competitor_image_base64_1}" alt="Competitor Image"></div>
            </div>
            <div class="wraper">
                <div class="div-1"> <img src="data:image/png;base64,{product_image_base64_2}" alt="Product Image"></div>
                <div class="vs-text">V/S</div>
                <div class="div-2"> <img src="data:image/png;base64,{competitor_image_base64_2}" alt="Competitor Image"></div>
            </div>
        </div>
        <div class="side-by-side-container">
            <div class="pink-box">
                <h6>Drawbacks in Current Brand Marketing</h6>
                <p>{donts_html}</p>
            </div>
            <div class="green-box">
                <h6>How Banao Technologies Can Help</h6>
                <p>{suggestions_html}</p>
            </div>
        </div>
        <div class="case-study">
            <h3>Case Study:</h3>
            <div class="container1">
                <p><span style="color: green;">Coca-Cola Brand Marketing using its iconic red color?</span></p>
            </div>
            <div class="case2">
                <p>Coca-Cola uses its iconic red color, Spencerian script font, and "Open Happiness" slogan across all platforms, from its website to its social media pages to its countless physical advertisements.</p>
                <img src="data:image/png;base64,{cola_base64}" alt="Cola" class="cola">
            </div>
        </div>
    </body>
    </html>

    """
# Parses "Product_output_cleaned.txt" for the Brand Marketing Don'ts and Suggestions.
def parse_cleaned_file_brand_marketing(file_path):
    """Extract the Brand Marketing Don'ts and Suggestions from the cleaned file.

    The file consists of sections separated by a line of '=' characters; the
    first section whose opening line mentions "Brand Marketing" is parsed.

    Returns:
        (donts_html, suggestions_html) — each a '<br>'-joined string,
        or ("", "") when no Brand Marketing section is found.
    """
    with open(file_path, "r") as handle:
        raw = handle.read()

    for section in raw.split("=================================================="):
        entries = section.strip().split("\n")
        if not entries or "Brand Marketing" not in entries[0]:
            continue

        donts = []
        suggestions = []
        bucket = None  # Which list subsequent "- " entries belong to.
        for entry in entries[1:]:
            if entry.startswith("Don'ts:"):
                bucket = donts
            elif entry.startswith("Suggestions:"):
                bucket = suggestions
            elif bucket is not None and entry.startswith("-"):
                bucket.append(entry.lstrip("- "))
        return "<br>".join(donts), "<br>".join(suggestions)

    return "", ""
# Builds the Brand Marketing HTML page from the Excel comparison data.
def process_brand_marketing(data, base_image_dir, output_file, cleaned_file_path, company_name):
    """Generate and save the Brand Marketing HTML page.

    Requires at least two "Brand Marketing" rows in *data* (two
    product/competitor image pairs); otherwise a message is printed and
    nothing is written.
    """
    brand_rows = data[data["Category"] == "Brand Marketing"]

    if brand_rows.empty:
        print("No Brand Marketing data found in the provided Excel file.")
        return

    # Don'ts / Suggestions come from the cleaned analysis text file.
    donts_html, suggestions_html = parse_cleaned_file_brand_marketing(cleaned_file_path)

    if len(brand_rows) < 2:
        print("Not enough rows for two product and competitor image comparisons.")
        return

    first = brand_rows.iloc[0]
    second = brand_rows.iloc[1]

    # Encode the four comparison images in product/competitor order per row.
    encoded = [
        encode_image_to_base64(os.path.join(base_image_dir, row[column]))
        for row in (first, second)
        for column in ("Product_Image_Name", "Competitor_Image_Name")
    ]

    html_content = generate_brand_marketing_html(
        encoded[0],
        encoded[1],
        encoded[2],
        encoded[3],
        donts_html,
        suggestions_html,
        company_name,
    )

    with open(output_file, "w", encoding="utf-8") as f:
        f.write(html_content)

    print(f"HTML file for Brand Marketing has been saved as: {output_file}")
# Main script for Brand Marketing (original comment said "Content Marketing" —
# copy/paste leftover).
if __name__ == "__main__":
    if len(sys.argv) > 1:
        company_name = sys.argv[1]  # The second argument passed will be the company_name
    else:
        company_name = "Default_Company"  # Default value if no argument is passed
    # Load the Excel file with the top standard-deviation comparison results.
    file_path = "Output File/excel/top_3_sd_results.xlsx"  # Replace with the path to your Excel file
    data = pd.read_excel(file_path)

    base_image_dir = ""  # Image names in the sheet are resolved relative to CWD

    # Path to the cleaned file with Don'ts and Suggestions
    cleaned_file_path = "data/output_generated_file/Product_output_cleaned.txt"  # Replace with the path to your cleaned file

    # Output HTML file
    output_file = "src/templates/brand_marketing.html"

    # Generate HTML for Brand Marketing
    process_brand_marketing(data, base_image_dir, output_file, cleaned_file_path, company_name)

# Force UTF-8 encoding for terminal output
# NOTE(review): runs at module level AFTER the block above in source order;
# requires Python 3.7+ (sys.stdout.reconfigure).
sys.stdout.reconfigure(encoding='utf-8')
def capture_screenshot_with_playwright(html_file_path, screenshot_path):
    """Capture a full-page screenshot of the HTML file directly using Playwright.

    Any failure (missing browser, bad path, …) is reported but not raised.
    """
    try:
        with sync_playwright() as playwright:
            # Headless Chromium renders the template exactly as a browser would.
            browser = playwright.chromium.launch(headless=True)
            page = browser.new_page()

            # Load the local HTML file via a file:// URL.
            page.goto(f"file:///{os.path.abspath(html_file_path)}")

            page.screenshot(path=screenshot_path, full_page=True)
            print(f"Screenshot saved: {screenshot_path}")

            browser.close()

    except Exception as error:
        print(f"Error capturing screenshot: {error}")
def convert_png_to_pdf(png_path, company_name):
    """Convert a PNG into data/reports/template_PDF/brand marketing.pdf.

    NOTE(review): despite the parameter (and the original docstring), the
    output filename is fixed to 'brand marketing.pdf' — *company_name* is
    currently unused; confirm intent with the Report.py merge step, which
    expects exactly this fixed name.
    """
    try:
        # Set the output folder and ensure it exists
        output_folder = "data/reports/template_PDF"
        os.makedirs(output_folder, exist_ok=True)

        # Fixed PDF file name: 'brand marketing.pdf'
        pdf_path = os.path.join(output_folder, "brand marketing.pdf")

        # Convert the PNG to PDF (RGB conversion drops any alpha channel,
        # which PDF export requires)
        img = Image.open(png_path)
        img.convert('RGB').save(pdf_path, "PDF")

        print(f"PDF saved: {pdf_path}")
    except Exception as e:
        print(f"Error converting PNG to PDF: {e}")
if __name__ == "__main__":
    # Source HTML produced by process_brand_marketing above.
    html_file_path = "src/templates/brand_marketing.html"

    # Screenshot saved in the folder: data/reports/template_ss
    screenshot_folder = "data/reports/template_ss"
    os.makedirs(screenshot_folder, exist_ok=True)
    screenshot_path = os.path.join(screenshot_folder, "brand_marketing_screenshot.png")

    # Ensure Playwright browsers are installed (no-op when already present)
    os.system("playwright install")

    # Capture screenshot of the rendered template
    capture_screenshot_with_playwright(html_file_path, screenshot_path)

    # Convert screenshot to PDF; note the output name is fixed — see
    # convert_png_to_pdf (company_name is accepted but unused there).
    convert_png_to_pdf(screenshot_path, company_name)
src/content.py
ADDED
@@ -0,0 +1,404 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import pandas as pd
|
2 |
+
import base64
|
3 |
+
import os
|
4 |
+
import sys
|
5 |
+
from playwright.sync_api import sync_playwright
|
6 |
+
from PIL import Image
|
7 |
+
|
8 |
+
def save_html_file(file_name, html_content):
    """Write *html_content* to *file_name* as UTF-8 text.

    UTF-8 is forced explicitly: the generated report HTML (and the
    Don'ts/Suggestions text interpolated into it) can contain non-ASCII
    characters, which would fail under some platform-default encodings.
    """
    with open(file_name, 'w', encoding='utf-8') as file:
        file.write(html_content)
|
11 |
+
|
12 |
+
def encode_image_to_base64(image_path):
    """Read the file at *image_path* and return its base64 text encoding.

    Raises the underlying OSError (e.g. FileNotFoundError) if the file
    cannot be opened.
    """
    with open(image_path, "rb") as image_file:
        raw_bytes = image_file.read()
    return base64.b64encode(raw_bytes).decode('utf-8')
|
15 |
+
|
16 |
+
# Allow user to upload images for logo and product images
# Fixed on-disk location of the company logo embedded in the page header.
logo_image_path = r"src/templates_images/Component 3.png"
# Encode images to base64
# NOTE(review): this call binds to the *first* encode_image_to_base64 defined
# above (which raises on a missing file); a more forgiving variant defined
# later in this module shadows it for all subsequent calls.
logo_base64 = encode_image_to_base64(logo_image_path)
|
20 |
+
def save_html_file(file_name, html_content):
    """Write *html_content* to *file_name* as UTF-8 text.

    NOTE(review): this is a duplicate of the identical helper defined earlier
    in this module and shadows it; consider removing one copy.

    UTF-8 is forced explicitly so non-ASCII characters in the generated HTML
    cannot fail under platform-default encodings.
    """
    with open(file_name, 'w', encoding='utf-8') as file:
        file.write(html_content)
|
23 |
+
|
24 |
+
def encode_image_to_base64(image_path):
    """Return the base64 encoding of *image_path*, or "" on any failure.

    Errors are reported on stdout instead of raised so that one missing
    image does not abort report generation.
    """
    try:
        with open(image_path, "rb") as img_file:
            payload = img_file.read()
        return base64.b64encode(payload).decode("utf-8")
    except FileNotFoundError:
        print(f"Image not found: {image_path}")
        return ""
    except Exception as e:
        print(f"Error encoding image {image_path}: {e}")
        return ""
|
34 |
+
|
35 |
+
# Encode images to base64
|
36 |
+
def generate_content_marketing_html(product_image_base64_1, competitor_image_base64_1, product_image_base64_2, competitor_image_base64_2, product_image_base64_3, competitor_image_base64_3, donts_html, suggestions_html, company_name):
    """Render the Content Marketing comparison page as a standalone HTML string.

    Args:
        product_image_base64_1..3: base64 image data for the three product posts.
        competitor_image_base64_1..3: base64 data for the matching competitor posts.
        donts_html: "<br>"-joined drawbacks shown in the pink box.
        suggestions_html: "<br>"-joined suggestions shown in the green box.
        company_name: brand name interpolated into the page copy.

    Returns:
        A complete HTML document. The module-level ``logo_base64`` (not a
        parameter) is embedded in the header, so this function depends on
        module state. Literal CSS braces are doubled (``{{``) because the
        template is an f-string.
    """
    return f"""
    <!DOCTYPE html>
    <html lang="en">
    <head>
        <meta charset="UTF-8">
        <meta name="viewport" content="width=device-width, initial-scale=1.0">
        <title>Content Marketing Template</title>
        <link href="https://fonts.googleapis.com/css2?family=Inter:wght@600&display=swap" rel="stylesheet">
        <style>
            body {{
                font-family: 'Inter', sans-serif;
                margin: 0;
                padding: 10%; /* Increased padding */
                background-color: #fff;
                font-size: 8px; /* Increased font size */
            }}
            @page {{
                size: A4;
                margin: 15px; /* Increased margin */
            }}
            .header {{
                display: flex;
                justify-content: space-between;
                align-items: center;
                padding: 10px; /* Increased padding */
                background-color: #FFFFFF;
                margin-bottom: 15px; /* Increased margin */
            }}
            .header .logo {{
                height: 30px; /* Increased logo height */
            }}
            .container {{
                display: flex;
                flex-direction: column;
                padding: 10px; /* Increased padding */
                flex-grow: 1;
            }}
            h1 {{
                font-family: 'Times New Roman', serif;
                font-size: 24px; /* Increased font size */
                font-weight: 500;
                line-height: 1.3;
                text-align: left;
                margin-bottom: 20px; /* Increased bottom margin */
            }}
            h2, p {{
                font-size: 13px; /* Increased font size */
                font-weight: 400;
                line-height: 1.5;
                color: #000;
            }}
            .gap {{
                font-size: 11px; /* Increased font size */
                color: rgb(5, 5, 5);
                font-weight: 100;
            }}
            .examples {{
                font-size: 13px; /* Increased font size */
                color: green;
            }}
            .box-container {{
                width: 100%;
                display: flex;
                flex-direction: column;
                align-items: center;
                margin-top: 20px; /* Increased margin */
            }}
            .wraper {{
                width: 100%;
                height: 220px; /* Increased height */
                display: flex;
                margin: 10px 0; /* Increased margin */
                border-radius: 12px; /* Increased border radius */
                overflow: hidden;
                position: relative;
            }}
            .div-1 {{
                flex: 1;
                background-color: #ecbdbd; /* Pink background */
                display: flex;
                align-items: center;
                justify-content: center;
            }}
            .div-2 {{
                flex: 1;
                background-color: #e6f9e6; /* Green background */
                display: flex;
                align-items: center;
                justify-content: center;
            }}
            .wraper img {{
                max-width: 90%;
                max-height: 90%;
                object-fit: contain;
                border-radius: 8px;
            }}
            .vs-text {{
                position: absolute;
                left: 50%;
                top: 50%;
                transform: translate(-50%, -50%);
                font-size: 18px; /* Increased font size */
                font-weight: bold;
                color: black;
            }}
            .side-by-side-container {{
                display: flex;
                gap: 40px; /* Increased gap between boxes */
                margin-top: 50px; /* Increased top margin */
            }}
            .pink-box, .green-box {{
                flex: 1;
                padding: 20px; /* Increased padding */
                margin-top: 20px; /* Increased margin */
                border-radius: 15px; /* Increased border radius */
                box-sizing: border-box;
                height: auto;
            }}
            .pink-box {{
                background-color: #ecbdbd;
                color: red;
                text-align: start;
                display: flex;
                flex-direction: column;
                align-items: flex-start;
            }}
            .pink-box h6 {{
                font-size: 14px; /* Increased font size */
                font-weight: bold;
                margin: 10px 0; /* Increased margin */
                color: red;
            }}
            .green-box {{
                background-color: #e6f9e6;
                color: green;
                text-align: start;
                display: flex;
                flex-direction: column;
                align-items: flex-start;
            }}
            .green-box h6 {{
                font-size: 14px; /* Increased font size */
                font-weight: bold;
                margin: 10px 0; /* Increased margin */
                color: green;
            }}
        </style>
    </head>
    <body>
        <div class="header">
            <h1><span style="color:red;">Content Marketing</span></h1>
            <img src="data:image/png;base64,{logo_base64}" alt="Logo" class="logo">
        </div>
        <div class="container">
            <p>{company_name} should use Content Marketing effectively as the strategic promotion for identity, products, and services across all channels to create loyalty among consumers.</p>
            <p class="gap"><span style="color: red;">Issue/Gap:</span> {company_name}'s current content marketing efforts might not be reaching their full potential. A comprehensive analysis of brand messaging, target audience engagement across channels, and content strategy could reveal opportunities to optimize {company_name}'s marketing approach for greater reach and impact.</p>
        </div>
        <h2 class="examples">Examples:</h2>
        <div class="box-container">
            <div class="wraper">
                <div class="div-1">
                    <img src="data:image/png;base64,{product_image_base64_1}" alt="Product Image">
                </div>
                <div class="vs-text">V/S</div>
                <div class="div-2">
                    <img src="data:image/png;base64,{competitor_image_base64_1}" alt="Competitor Image">
                </div>
            </div>
            <div class="wraper">
                <div class="div-1">
                    <img src="data:image/png;base64,{product_image_base64_2}" alt="Product Image">
                </div>
                <div class="vs-text">V/S</div>
                <div class="div-2">
                    <img src="data:image/png;base64,{competitor_image_base64_2}" alt="Competitor Image">
                </div>
            </div>
            <div class="wraper">
                <div class="div-1">
                    <img src="data:image/png;base64,{product_image_base64_3}" alt="Product Image">
                </div>
                <div class="vs-text">V/S</div>
                <div class="div-2">
                    <img src="data:image/png;base64,{competitor_image_base64_3}" alt="Competitor Image">
                </div>
            </div>
        </div>
        <div class="side-by-side-container">
            <div class="pink-box">
                <h6>Drawbacks in Current Content Marketing</h6>
                <p>{donts_html}</p>
            </div>
            <div class="green-box">
                <h6>How Banao Technologies Can Help</h6>
                <p>{suggestions_html}</p>
            </div>
        </div>
    </body>
    </html>
    """
|
237 |
+
|
238 |
+
# Function to parse "Product_output_cleaned.txt" for Don'ts and Suggestions specific to Content Marketing
|
239 |
+
def parse_cleaned_file_content_marketing(file_path):
    """Extract the Content Marketing Don'ts and Suggestions from a cleaned report file.

    The file is split into sections separated by a line of '=' characters; the
    first section whose heading line mentions "Content Marketing" is parsed.
    Bullet lines ("- ...") under "Don'ts:" and "Suggestions:" headings are
    collected in order.

    Args:
        file_path: path to the cleaned analysis text file.

    Returns:
        Tuple ``(donts_html, suggestions_html)`` with items joined by "<br>",
        or ``("", "")`` when no Content Marketing section exists.
    """
    # utf-8 is forced: the analysis output may contain non-ASCII characters.
    with open(file_path, "r", encoding="utf-8") as file:
        content = file.read()

    sections = content.split("==================================================")
    for section in sections:
        lines = section.strip().split("\n")
        if lines and "Content Marketing" in lines[0]:
            donts = []
            suggestions = []
            mode = None
            for line in lines[1:]:
                if line.startswith("Don'ts:"):
                    mode = "donts"
                elif line.startswith("Suggestions:"):
                    mode = "suggestions"
                elif mode == "donts" and line.startswith("-"):
                    donts.append(line.lstrip("- "))
                elif mode == "suggestions" and line.startswith("-"):
                    suggestions.append(line.lstrip("- "))
            # Only the first matching section is used.
            return "<br>".join(donts), "<br>".join(suggestions)

    return "", ""
|
262 |
+
|
263 |
+
# Function to process Content Marketing and generate HTML
|
264 |
+
def process_content_marketing(data, base_image_dir, output_file, cleaned_file_path, company_name):
    """Build and save the Content Marketing comparison HTML page.

    Args:
        data: DataFrame of SD comparison rows; needs "Category",
            "Product_Image_Name" and "Competitor_Image_Name" columns.
        base_image_dir: directory prepended to each image file name.
        output_file: path of the HTML file to write.
        cleaned_file_path: text file holding the Don'ts / Suggestions.
        company_name: brand name substituted into the page copy.
    """
    # Filter for Content Marketing category
    content_data = data[data["Category"] == "Content Marketing"]

    if content_data.empty:
        print("No Content Marketing data found in the provided Excel file.")
        return

    # Parse Don'ts and Suggestions
    donts_html, suggestions_html = parse_cleaned_file_content_marketing(cleaned_file_path)

    # The template shows three product-vs-competitor pairs, so three rows are required.
    if len(content_data) < 3:
        print("Not enough rows for three product and competitor image comparisons.")
        return

    # Encode the three product/competitor image pairs to base64, in the
    # positional order the template expects: p1, c1, p2, c2, p3, c3.
    encoded_images = []
    for idx in range(3):
        row = content_data.iloc[idx]
        encoded_images.append(
            encode_image_to_base64(os.path.join(base_image_dir, row["Product_Image_Name"]))
        )
        encoded_images.append(
            encode_image_to_base64(os.path.join(base_image_dir, row["Competitor_Image_Name"]))
        )

    # Generate HTML content
    html_content = generate_content_marketing_html(
        *encoded_images, donts_html, suggestions_html, company_name
    )

    # Save the HTML file
    with open(output_file, "w", encoding="utf-8") as f:
        f.write(html_content)

    print(f"HTML file for Content Marketing has been saved as: {output_file}")
|
312 |
+
|
313 |
+
# Main script for Content Marketing
if __name__ == "__main__":
    # CLI usage: python content.py <company_name>
    if len(sys.argv) > 1:
        company_name = sys.argv[1]  # The second argument passed will be the company_name
    else:
        company_name = "Default_Company"  # Default value if no argument is passed
    # Load the Excel file
    file_path = "Output File/excel/top_3_sd_results.xlsx"  # Replace with the path to your Excel file
    data = pd.read_excel(file_path)

    # Set the base directory for images
    base_image_dir = ""  # Replace with the actual directory where your images are stored

    # Path to the cleaned file with Don'ts and Suggestions
    cleaned_file_path = "data/output_generated_file/Product_output_cleaned.txt"
    # Output HTML file
    output_file = "src/templates/content_marketing.html"

    # Generate HTML for Content Marketing
    process_content_marketing(data, base_image_dir, output_file, cleaned_file_path, company_name)

# Force UTF-8 encoding for terminal output
# NOTE(review): this appears to sit at module top level (outside the guard
# above), so it also runs when the module is imported — confirm intended.
sys.stdout.reconfigure(encoding='utf-8')
|
336 |
+
|
337 |
+
def capture_screenshot_with_playwright(html_file_path, screenshot_path):
    """
    Capture a full-page screenshot of the HTML file directly using Playwright.
    """
    try:
        with sync_playwright() as playwright:
            # Headless Chromium: no visible window is needed for a screenshot.
            chromium = playwright.chromium.launch(headless=True)
            tab = chromium.new_page()

            # Navigate to the local HTML file via a file:// URL.
            absolute_path = os.path.abspath(html_file_path)
            tab.goto(f"file:///{absolute_path}")

            # Grab the whole page, not just the visible viewport.
            tab.screenshot(path=screenshot_path, full_page=True)
            print(f"Screenshot saved: {screenshot_path}")

            chromium.close()

    except Exception as e:
        print(f"Error capturing screenshot: {e}")
|
358 |
+
|
359 |
+
def convert_png_to_pdf(png_path, company_name):
    """
    Convert a PNG image into a PDF saved as 'content marketing.pdf' inside
    'data/reports/template_PDF'.

    NOTE(review): despite the parameter, `company_name` is currently unused —
    the output file name is fixed. Confirm whether downstream report assembly
    relies on the fixed name before wiring the parameter in.
    """
    try:
        # Set the output folder and ensure it exists
        output_folder = "data/reports/template_PDF"
        os.makedirs(output_folder, exist_ok=True)

        # Fixed output name; see the docstring note about `company_name`.
        pdf_path = os.path.join(output_folder, "content marketing.pdf")

        # Convert the PNG to PDF
        img = Image.open(png_path)
        # PDF output requires an RGB image (no alpha channel).
        img.convert('RGB').save(pdf_path, "PDF")

        print(f"PDF saved: {pdf_path}")
    except Exception as e:
        print(f"Error converting PNG to PDF: {e}")
|
379 |
+
|
380 |
+
if __name__ == "__main__":
    # Paths for demonstration
    html_file_path = "src/templates/content_marketing.html"

    # Screenshot saved in the folder: data/reports/template_ss
    screenshot_folder = "data/reports/template_ss"
    os.makedirs(screenshot_folder, exist_ok=True)
    screenshot_path = os.path.join(screenshot_folder, "content_marketing_screenshot.png")

    # Ensure Playwright browsers are installed
    # NOTE(review): shells out on every run; consider installing browsers once
    # at deployment time instead.
    os.system("playwright install")

    # Capture screenshot
    capture_screenshot_with_playwright(html_file_path, screenshot_path)

    # Convert screenshot to PDF with the company name strictly as the filename
    # NOTE(review): `company_name` is assigned by the first __main__ block
    # earlier in this module; this block depends on that block having run.
    convert_png_to_pdf(screenshot_path, company_name)
|
397 |
+
|
398 |
+
|
399 |
+
|
400 |
+
|
401 |
+
|
402 |
+
|
403 |
+
|
404 |
+
|
src/input_analysis/Standarddeviation.py
ADDED
@@ -0,0 +1,186 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import pandas as pd
|
2 |
+
import numpy as np
|
3 |
+
|
4 |
+
# Load Excel files
# Six analysed posts per side are expected: the SD matrices built below are
# 6x6 and index rows 0..5 of both frames.
product_data = pd.read_excel("Output File/excel/product_analysis.xlsx")
competitor_data = pd.read_excel("Output File/excel/competitor_analysis.xlsx")
|
7 |
+
|
8 |
+
# Function to filter criteria based on available columns
|
9 |
+
def filter_existing_criteria(data, criteria):
    """
    Keep only the criteria that actually appear as columns of *data*.

    Args:
        data: DataFrame whose columns are checked.
        criteria: candidate criterion names.

    Returns:
        The subset of *criteria* present in ``data.columns``, original
        order preserved.
    """
    available = set(data.columns)
    kept = []
    for name in criteria:
        if name in available:
            kept.append(name)
    return kept
|
21 |
+
|
22 |
+
# Define criteria for each category
# Column names expected in the product/competitor analysis workbooks; any
# that are absent are dropped by filter_existing_criteria() below.
branding_criteria = [
    "Logo Placement", "Consistency", "Alignment",
    "Brand Colors", "Typography Consistency", "Brand Identity", "Template Consistency"
]
content_marketing_criteria = [
    "Content Visibility", "Engagement Cues", "Storytelling",
    "Aesthetic Coherence", "Content Relevance"
]
social_media_marketing_criteria = [
    "Font Size", "Visibility of Text", "Alignment",
    "Aesthetic Appeal", "Repetitiveness"
]

# Filter criteria based on available columns in the data
# NOTE(review): filtering checks product_data columns only — a column present
# in the product workbook but missing from the competitor workbook would
# KeyError later; confirm both files always share the same schema.
branding_criteria_filtered = filter_existing_criteria(product_data, branding_criteria)
content_marketing_criteria_filtered = filter_existing_criteria(product_data, content_marketing_criteria)
social_media_marketing_criteria_filtered = filter_existing_criteria(product_data, social_media_marketing_criteria)
|
40 |
+
|
41 |
+
# Helper function to calculate the mean value for a criterion
|
42 |
+
def calculate_mean_criterion_value(product_data, competitor_data, criterion):
    """
    Mean of *criterion* pooled over product and competitor rows, NaNs ignored.

    Args:
        product_data: DataFrame of product scores.
        competitor_data: DataFrame of competitor scores.
        criterion: column name to average.

    Returns:
        The pooled mean (NaN when no non-NaN values exist at all).
    """
    product_values = product_data[criterion].dropna().values
    competitor_values = competitor_data[criterion].dropna().values
    pooled = np.concatenate((product_values, competitor_values))
    return np.nanmean(pooled)
|
59 |
+
|
60 |
+
# Helper function to calculate the score differences
|
61 |
+
def calculate_score_differences(product_scores, competitor_scores, product_data, competitor_data, category_criteria):
    """
    Element-wise product-minus-competitor score differences with NaN backfill.

    Wherever the difference is NaN (a score missing on either side), the slot
    is replaced by the pooled mean of that criterion as computed by
    calculate_mean_criterion_value().

    Args:
        product_scores: per-criterion scores of one product post.
        competitor_scores: per-criterion scores of one competitor post.
        product_data: full product DataFrame (source of the backfill mean).
        competitor_data: full competitor DataFrame (source of the backfill mean).
        category_criteria: criterion names aligned with the score arrays.

    Returns:
        Array of differences with NaN slots backfilled.

    NOTE(review): backfilling a *difference* with a mean criterion *value*
    mixes scales — confirm this imputation is intentional.
    """
    differences = product_scores - competitor_scores
    for position, value in enumerate(differences):
        if np.isnan(value):
            differences[position] = calculate_mean_criterion_value(
                product_data, competitor_data, category_criteria[position]
            )
    return differences
|
81 |
+
|
82 |
+
# Main function to calculate the SD comparison matrix
|
83 |
+
def calculate_sd_comparison_matrix(product_data, competitor_data, category_criteria, size=6):
    """
    Build a ``size`` x ``size`` matrix of standard deviations of the
    per-criterion score differences between each product post and each
    competitor post. NaN differences are backfilled by
    calculate_score_differences() before the SD is taken.

    Args:
        product_data: DataFrame with at least *size* product rows.
        competitor_data: DataFrame with at least *size* competitor rows.
        category_criteria: criterion column names for this category.
        size: number of posts compared per side. Defaults to 6, the value
            that was previously hard-coded; parameterized so smaller/larger
            data sets no longer require editing this function.

    Returns:
        DataFrame labelled Product_1..size by Competitor_1..size; all zeros
        when *category_criteria* is empty.
    """
    row_labels = [f"Product_{i+1}" for i in range(size)]
    col_labels = [f"Competitor_{j+1}" for j in range(size)]

    # If there are no criteria, return a DataFrame of zeros
    if not category_criteria:
        return pd.DataFrame(np.zeros((size, size)), index=row_labels, columns=col_labels)

    sd_matrix = np.zeros((size, size))

    for i in range(size):  # Loop over product posts
        for j in range(size):  # Loop over competitor posts
            product_scores = product_data.iloc[i][category_criteria].values
            competitor_scores = competitor_data.iloc[j][category_criteria].values

            # Calculate score differences (NaNs backfilled with pooled means)
            score_diff = calculate_score_differences(
                product_scores, competitor_scores, product_data, competitor_data, category_criteria
            )

            # Safely calculate the standard deviation
            sd_matrix[i, j] = np.std(score_diff) if len(score_diff) > 0 else 0

    # Convert to DataFrame for better readability
    return pd.DataFrame(sd_matrix, index=row_labels, columns=col_labels)
|
117 |
+
|
118 |
+
|
119 |
+
# Calculate SD matrices for each category
# Each matrix compares every product post against every competitor post for
# the criteria that survived column filtering above.
branding_sd_matrix = calculate_sd_comparison_matrix(product_data, competitor_data, branding_criteria_filtered)
content_marketing_sd_matrix = calculate_sd_comparison_matrix(product_data, competitor_data, content_marketing_criteria_filtered)
social_media_marketing_sd_matrix = calculate_sd_comparison_matrix(product_data, competitor_data, social_media_marketing_criteria_filtered)
|
123 |
+
|
124 |
+
# Function to find the top SD values ensuring non-repetitive product and competitor image pairs
|
125 |
+
def find_top_non_repetitive_sd(sd_matrix, product_data, competitor_data, category, top_count=3):
    """
    Find the highest SD values while keeping product and competitor images
    non-repetitive within the category.

    Fix: the previous implementation scanned the matrix in row order and took
    the first non-repeating pairs it met, which are not the *top* SD values
    the function documents. Cells are now sorted by SD descending before the
    greedy non-repetitive selection.

    Args:
        sd_matrix: DataFrame representing the SD matrix.
        product_data: DataFrame containing product data (provides 'Image').
        competitor_data: DataFrame containing competitor data (provides 'Image').
        category: String representing the category name.
        top_count: Number of top results to return (default is 3).

    Returns:
        List of (category, product image, competitor image, SD value) tuples,
        highest SD first.
    """
    # Flatten the matrix into (value, i, j) cells and rank by SD descending.
    cells = [
        (sd_matrix.iloc[i, j], i, j)
        for i in range(sd_matrix.shape[0])
        for j in range(sd_matrix.shape[1])
    ]
    cells.sort(key=lambda cell: cell[0], reverse=True)

    used_product_images = set()
    used_competitor_images = set()
    top_results = []

    for sd_value, i, j in cells:
        if len(top_results) == top_count:
            break

        product_image = product_data.iloc[i]['Image']
        competitor_image = competitor_data.iloc[j]['Image']

        if product_image not in used_product_images and competitor_image not in used_competitor_images:
            top_results.append((category, product_image, competitor_image, sd_value))
            used_product_images.add(product_image)
            used_competitor_images.add(competitor_image)

    return top_results
|
158 |
+
|
159 |
+
# Find top non-repetitive SD results
branding_top_3 = find_top_non_repetitive_sd(branding_sd_matrix, product_data, competitor_data, "Brand Marketing")
content_marketing_top_3 = find_top_non_repetitive_sd(content_marketing_sd_matrix, product_data, competitor_data, "Content Marketing")
social_media_marketing_top_3 = find_top_non_repetitive_sd(social_media_marketing_sd_matrix, product_data, competitor_data, "Social Media Marketing")

# Combine results into a DataFrame
all_top_3 = branding_top_3 + content_marketing_top_3 + social_media_marketing_top_3
top_3_df = pd.DataFrame(
    all_top_3,
    columns=['Category', 'Product_Image_Name', 'Competitor_Image_Name', 'SD_Value']
)

# Save results to Excel
# NOTE(review): this first save assumes "Output File/excel" already exists —
# unlike the second save below, no makedirs() precedes it.
top_3_df.to_excel("Output File/excel/top_3_sd_results.xlsx", index=False)

# Print the results
print("\nTop 3 SD Results DataFrame:")
print(top_3_df)
import os

# NOTE(review): the same results are written a second time to a different
# folder; confirm which location downstream consumers actually read.
output_folder = "data/output_generated_file/Output File/excel"
output_file_path = os.path.join(output_folder, "top_3_sd_results.xlsx")
os.makedirs(output_folder, exist_ok=True)
# Save results to the specified folder
top_3_df.to_excel(output_file_path, index=False)

# Print confirmation
print(f"Top 3 SD Results saved in: {output_file_path}")
|
src/input_analysis/competitor-analysis/1.txt
ADDED
File without changes
|
src/input_analysis/competitor-analysis/competitor_analysis.py
ADDED
@@ -0,0 +1,237 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import base64
|
2 |
+
import requests
|
3 |
+
from PIL import Image
|
4 |
+
import io
|
5 |
+
import json
|
6 |
+
import logging
|
7 |
+
import json
|
8 |
+
import pandas as pd
|
9 |
+
import re
|
10 |
+
|
11 |
+
|
12 |
+
# Dictionary key under which per-image analysis entries are collected.
IMAGE_ANALYSES_KEY = "Image Analyses"
# Configure logging
logging.basicConfig(level=logging.INFO)

import os
# OpenAI API key comes from the environment; None when the variable is unset.
api_key = os.environ.get("OPENAI_API_KEY")
|
18 |
+
|
19 |
+
# Function to encode the image to base64
|
20 |
+
def encode_image(image_path):
    """
    Downscale an image to at most 800x800, re-encode it as JPEG (quality 85)
    and return the result as a base64 string, or None on any failure.
    """
    try:
        with Image.open(image_path) as img:
            img.thumbnail((800, 800))  # Resize image in place, keeping aspect ratio
            jpeg_buffer = io.BytesIO()
            img.save(jpeg_buffer, format="JPEG", quality=85)
        return base64.b64encode(jpeg_buffer.getvalue()).decode('utf-8')
    except Exception as e:
        logging.error(f"Failed to encode image {image_path}: {e}")
        return None
|
31 |
+
|
32 |
+
# Function to get image dimensions
|
33 |
+
def get_image_dimensions(image_path):
    """Return (width, height) of the image, or (0, 0) when it cannot be read."""
    try:
        with Image.open(image_path) as img:
            width, height = img.size
        return (width, height)
    except Exception as e:
        logging.error(f"Failed to get dimensions for image {image_path}: {e}")
        return (0, 0)
|
40 |
+
|
41 |
+
# Image paths for analysis
# Six competitor posts, addressed relative to the repository root.
image_paths = [
    "data/competitor/image1.jpeg",
    "data/competitor/image2.jpeg",
    "data/competitor/image3.jpeg",
    "data/competitor/image4.jpeg",
    "data/competitor/image5.jpeg",
    "data/competitor/image6.jpeg"
]

# Headers for the OpenAI API request
# NOTE(review): if OPENAI_API_KEY is unset, api_key is None and this header
# becomes "Bearer None" — requests would then be unauthorized.
headers = {
    "Content-Type": "application/json",
    "Authorization": f"Bearer {api_key}"
}

# Competitor information (context for analysis)
# Free-text business context injected into every analysis prompt.
competitor_information = """
The competitor is a leading brand in the digital marketing space known for its consistent visual branding and innovative designs on social media. They target a tech-savvy audience aged 20-35 with a focus on modern aesthetics, engaging storytelling, and minimalist yet powerful branding.
"""
|
61 |
+
|
62 |
+
system_message = """
|
63 |
+
Analyze the branding, content marketing, and social media marketing effectiveness of a company for the provided Instagram post image.
|
64 |
+
Evaluate it based on the following criteria and return a detailed JSON structure. Each criterion should have an individual score (0-10), and the total score for the category should be the average of its criteria.
|
65 |
+
Give 9 or above only when it's extraordinary. Industry standard 7-8
|
66 |
+
### Categories and Definitions:
|
67 |
+
|
68 |
+
#### Branding:
|
69 |
+
- **Logo Placement**: Is the logo perfectly sized, clearly visible, and well-positioned without being intrusive?
|
70 |
+
- **Brand Colors**: Are brand colors used consistently and without deviations?
|
71 |
+
- **Typography**: Are the fonts aligned with the brand identity, consistent in style, and visually appealing?
|
72 |
+
- **Brand Identity**: Does the post effectively reflect the brand's unique persona and messaging?
|
73 |
+
- **Visual Hierarchy**: Are key elements prioritized effectively to guide the viewer’s attention?
|
74 |
+
- **Template Consistency**: Are templates consistent with previous posts, reflecting a uniform design approach?
|
75 |
+
- **Messaging Alignment**: Is the brand messaging clear, consistent, and reflective of the brand's tone?
|
76 |
+
- **Subtle Branding**: Does the branding strike the right balance (not overly subtle or excessive)?
|
77 |
+
- **Overbranding**: Does the post avoid overwhelming and distracting brand elements?
|
78 |
+
- **Creative Variations**: Are there innovative and creative design variations in the post?
|
79 |
+
|
80 |
+
#### Content Marketing:
|
81 |
+
- **Content Visibility**: Is the primary content clear and highlighted effectively?
|
82 |
+
- **Engagement Cues**: Are clear and compelling calls-to-action (CTAs) present and engaging?
|
83 |
+
- **Information Overload**: Does the post avoid over-saturated visuals or excessive text?
|
84 |
+
- **Storytelling**: Does the post convey a relevant and engaging narrative that resonates with the audience?
|
85 |
+
- **Content Variety**: Are the posts diverse and free of monotonous or repetitive elements?
|
86 |
+
- **Typography Consistency**: Is the typography visually attractive and consistent throughout the design?
|
87 |
+
- **Aesthetic Coherence**: Do all elements harmonize to create a visually appealing composition?
|
88 |
+
- **Content Relevance**: Is the content relevant to the brand and its target audience?
|
89 |
+
- **Stock Elements**: Does the design avoid excessive use of generic or stock imagery?
|
90 |
+
|
91 |
+
#### Social Media Marketing:
|
92 |
+
- **Font Size**: Are fonts appropriately sized and legible on various screen sizes?
|
93 |
+
- **Visibility of Text**: Is the text easy to read, with proper contrast, placement, and spacing?
|
94 |
+
- **Logo Placement**: Does the logo placement avoid disrupting the design's aesthetic appeal?
|
95 |
+
- **Consistency**: Does the post maintain design cohesion across elements?
|
96 |
+
- **Alignment**: Are elements aligned professionally, creating a balanced and clean layout?
|
97 |
+
- **Aesthetic Appeal**: Is the overall design visually engaging and suitable for the platform?
|
98 |
+
- **Brand Elements**: Are brand assets (like logos, icons, or visuals) used effectively and sparingly?
|
99 |
+
- **Repetitiveness**: Does the design avoid repetitive themes and offer fresh creative ideas?
|
100 |
+
|
101 |
+
### Output JSON Format:
|
102 |
+
### Only return the following format strictly
|
103 |
+
|
104 |
+
{
|
105 |
+
"Branding Score": total_avg_score, explanation.
|
106 |
+
"criteria_name": score, explanation.
|
107 |
+
"criteria_name": score, explanation.
|
108 |
+
|
109 |
+
"Content Marketing Score": total_avg_score, explanation.
|
110 |
+
"criteria_name": score, explanation.
|
111 |
+
"criteria_name": score, explanation.
|
112 |
+
|
113 |
+
"Social Media Marketing Score": total_avg_score, explanation.
|
114 |
+
"criteria_name": score, explanation.
|
115 |
+
"criteria_name": score, explanation.
|
116 |
+
}
|
117 |
+
"""
|
118 |
+
|
119 |
+
# Function to request analysis from OpenAI API
|
120 |
+
def request_analysis(system_message, user_message, model="gpt-4o-mini", max_tokens=1500):
    """Send a chat-completion request to the OpenAI HTTP API.

    Args:
        system_message: Content for the "system" role message.
        user_message: Content for the "user" role message.
        model: Model identifier passed through to the API.
        max_tokens: Completion-length cap forwarded to the API.

    Returns:
        The assistant message content on success, or one of the sentinel
        strings "Error parsing the response." / "Error with the API request."
        on failure (callers compare against these strings).
    """
    payload = {
        "model": model,
        "messages": [
            {"role": "system", "content": system_message},
            {"role": "user", "content": user_message}
        ],
        "max_tokens": max_tokens
    }
    logging.debug(f"Payload Sent: {json.dumps(payload, indent=4)}")
    # `headers` is a module-level dict carrying the Authorization bearer token.
    response = requests.post("https://api.openai.com/v1/chat/completions", headers=headers, json=payload)

    if response.status_code == 200:
        try:
            # Defensive chained .get() so a missing key yields the fallback
            # string instead of a KeyError.
            return response.json().get("choices", [{}])[0].get("message", {}).get("content", "Unexpected response format")
        except Exception as e:
            logging.error(f"Error parsing response: {e}")
            logging.debug(f"Raw Response: {response.text}")
            return "Error parsing the response."
    else:
        logging.error(f"API Error: {response.status_code}, {response.text}")
        return "Error with the API request."
|
142 |
+
|
143 |
+
# Initialize structured output for the analyses
output_structure = {
    IMAGE_ANALYSES_KEY: []
}

# Loop through each image for analysis
for image_path in image_paths:
    # encode_image returns a base64 string, or None when the file could not
    # be read/encoded — record the failure and move on.
    base64_image = encode_image(image_path)
    if not base64_image:
        output_structure[IMAGE_ANALYSES_KEY].append({
            "Image": image_path,
            "Analysis": "Failed to encode image."
        })
        continue

    width, height = get_image_dimensions(image_path)
    # NOTE(review): the exact indentation inside this multi-line f-string
    # could not be recovered from the source rendering — confirm upstream.
    user_message = f"""
    Company Information: {competitor_information}
    Analyze the Instagram post with dimensions {width}x{height} pixels. Image data: {base64_image}
    """

    try:
        analysis_result = request_analysis(system_message, user_message)
        try:
            # Parse result and ensure it matches the required format
            parsed_result = json.loads(analysis_result)
        except json.JSONDecodeError:
            # Non-JSON replies are preserved verbatim under "Raw Response"
            # so downstream regex extraction can still mine scores from them.
            logging.warning(f"Response not in expected JSON format for {image_path}: {analysis_result}")
            parsed_result = {"Raw Response": analysis_result}

        output_structure[IMAGE_ANALYSES_KEY].append({
            "Image": image_path,
            "Analysis": parsed_result
        })
    except Exception as err:
        logging.error(f"Error analyzing image {image_path}: {err}")
        output_structure[IMAGE_ANALYSES_KEY].append({
            "Image": image_path,
            "Analysis": "Error analyzing the image."
        })

# Write structured output to file
with open("Output File/json/competitor_analysis.json", "w") as f:
    json.dump(output_structure, f, indent=4)

logging.info("Analysis completed and saved to competitor_analysis.json")
|
189 |
+
|
190 |
+
|
191 |
+
def json_to_excel(json_file, excel_file):
    """
    Parse the JSON file and convert it to an Excel file with structured scores and raw JSON responses.

    Args:
        json_file (str): Path to the input JSON file.
        excel_file (str): Path to the output Excel file.
    """
    # Load JSON data from the file
    with open(json_file, 'r') as file:
        data = json.load(file)

    # Prepare a list to hold structured data (one dict per analyzed image)
    structured_data = []

    # Regex to extract '"Criteria Name": Score,' pairs from the raw response
    score_pattern = r'"([a-zA-Z\s]+)":\s*(\d+),'  # Matches "Criteria Name": Score,

    # Iterate over "Image Analyses" entries
    for analysis in data.get("Image Analyses", []):
        # Extract image name
        image = analysis.get("Image", "Unknown")

        # "Analysis" is a dict ({"Raw Response": ...}) when the API reply was
        # captured, but a plain error string ("Failed to encode image." /
        # "Error analyzing the image.") when an upstream step failed.
        # Guard against calling .get() on a str, which raised AttributeError.
        analysis_field = analysis.get("Analysis", {})
        if isinstance(analysis_field, dict):
            raw_response = analysis_field.get("Raw Response", "")
        else:
            raw_response = str(analysis_field)

        # Dictionary to hold extracted data for the image
        image_data = {"Image": image, "Raw JSON Response": raw_response}

        # Extract criteria and scores using regex
        matches = re.findall(score_pattern, raw_response)
        for criterion, score in matches:
            image_data[criterion.strip()] = int(score)  # Add criteria as columns

        # Append image data to structured list
        structured_data.append(image_data)

    # Convert structured data to DataFrame and write it to Excel
    # (requires an Excel writer engine such as openpyxl to be installed).
    df = pd.DataFrame(structured_data)
    df.to_excel(excel_file, index=False)
    print(f"Data successfully written to {excel_file}")
|
234 |
+
# Example usage: convert the competitor analysis JSON produced above into
# a spreadsheet for manual review.
json_file_path = "Output File/json/competitor_analysis.json"  # Input JSON file
excel_file_path = "Output File/excel/competitor_analysis.xlsx"  # Output Excel file
json_to_excel(json_file_path, excel_file_path)
|
src/input_analysis/feedback.py
ADDED
@@ -0,0 +1,221 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import openai
|
2 |
+
import os
|
3 |
+
import requests
|
4 |
+
import logging
|
5 |
+
import json
|
6 |
+
import re
|
7 |
+
|
8 |
+
import os  # NOTE(review): duplicates the `import os` at the top of this file
api_key = os.environ.get("OPENAI_API_KEY")  # may be None if the env var is unset

# Define the paths to the input files and output file.
# These .txt files are produced by the path.py splitting step.
input_dir = r"data/output_generated_file"
branding_file = os.path.join(input_dir, 'Product_branding.txt')
content_marketing_file = os.path.join(input_dir, 'Product_content_marketing.txt')
smm_file = os.path.join(input_dir, 'Product_smm.txt')
|
16 |
+
|
17 |
+
# Function to read file content
|
18 |
+
def read_file(file_path):
    """Return the full text of *file_path*.

    UTF-8 is attempted first; on a decode error the file is re-read as
    ISO-8859-1, which accepts any byte sequence and therefore cannot fail.
    """
    for encoding in ('utf-8', 'ISO-8859-1'):
        try:
            with open(file_path, 'r', encoding=encoding) as handle:
                return handle.read()
        except UnicodeDecodeError:
            # Not valid UTF-8 — fall through to the Latin-1 pass.
            continue
|
26 |
+
|
27 |
+
# Read the content from the three files
|
28 |
+
branding_content = read_file(branding_file)
|
29 |
+
content_marketing_content = read_file(content_marketing_file)
|
30 |
+
smm_content = read_file(smm_file)
|
31 |
+
|
32 |
+
# LLM request function based on your provided syntax
def request_analysis(system_message, user_message, model="gpt-4o-mini", max_tokens=1500):
    """POST a chat-completion request and return the assistant content.

    Returns sentinel strings ("Error parsing the response." /
    "Error: <status>") on failure; callers compare against these.
    """
    headers = {
        # NOTE(review): uses `openai.api_key`, not the module-level `api_key`
        # read from the environment above — confirm the openai package has
        # its api_key attribute populated, otherwise this sends "Bearer None".
        'Authorization': f'Bearer {openai.api_key}',
        'Content-Type': 'application/json'
    }
    payload = {
        "model": model,
        "messages": [
            {"role": "system", "content": system_message},
            {"role": "user", "content": user_message}
        ],
        "max_tokens": max_tokens
    }
    response = requests.post("https://api.openai.com/v1/chat/completions", headers=headers, json=payload)

    if response.status_code == 200:
        try:
            # Chained .get() avoids KeyError on unexpected response shapes.
            return response.json().get("choices", [{}])[0].get("message", {}).get("content", "Unexpected response format")
        except Exception as e:
            logging.error(f"Error parsing response: {e}")
            logging.debug(f"Raw Response: {response.text}")
            return "Error parsing the response."
    else:
        logging.error(f"Request failed with status code {response.status_code}")
        return f"Error: {response.status_code}"
|
58 |
+
|
59 |
+
# Function to extract "Don'ts" using regex
|
60 |
+
def extract_donts(text):
    """Pull bullet-point "Don't" items out of *text*.

    Bullets of the form "- Don't X" capture "X"; bare bullets "- Y" capture
    "Y". Every kept capture is normalised to start with "Don't ". Captures
    that already begin with "Don't" (reachable via the second alternative)
    are discarded, matching the original filtering rule.
    """
    captures = re.findall(r"(?:-\s*Don't\s+|-\s*)([^\n]+)", text)
    normalised = []
    for captured in captures:
        if captured.startswith("Don't"):
            continue
        normalised.append("Don't " + captured.strip())
    return normalised
|
63 |
+
|
64 |
+
# Retry logic for getting "Don'ts" from GPT
def get_donts_with_retry(content, category, retries=10):
    """Ask the LLM for "Don'ts" derived from *content*, retrying on empty output.

    Args:
        content: Source text (branding/content/SMM analysis) to critique.
        category: Label inserted into the prompt (e.g. "Brand Marketing").
        retries: Maximum number of LLM round-trips before giving up.

    Returns:
        A list of cleaned "Don't ..." strings, or a one-element fallback
        list when every attempt yields nothing.
    """
    system_message = (
        "You are an expert in branding, content marketing, and social media marketing. "
        "Based on the provided content, generate a list of 3-6 word 'Don'ts' for the company. "
        "Ensure each point MUST start with 'Don't' and write them as concise, actionable bullet points."
    )

    user_message = f"Category: {category}\nContent:\n{content}\n\nPlease provide the 'Don'ts' in bullet points."

    for attempt in range(retries):
        response = request_analysis(system_message, user_message)
        donts = extract_donts(response)

        # If we get valid "Don'ts", return them (stripped of punctuation).
        if donts:
            return [clean_text(dont) for dont in donts]

        logging.info(f"Attempt {attempt + 1} for category {category} yielded no data. Retrying...")

    # If all attempts fail, return a sentinel list so callers still iterate safely.
    logging.warning(f"All retry attempts for {category} failed. No 'Don'ts' found.")
    return ["No relevant 'Don'ts' found after retries."]
|
87 |
+
|
88 |
+
# Function to clean text by removing unwanted characters (quotes, commas, periods)
|
89 |
+
def clean_text(text):
    """Strip double quotes, apostrophes, commas and periods, then trim whitespace."""
    return re.sub(r'[\"\'.,]+', '', text).strip()
|
92 |
+
|
93 |
+
# Get "Don'ts" with retry logic for each category (one LLM call set per file).
branding_donts_cleaned = get_donts_with_retry(branding_content, "Brand Marketing")
content_marketing_donts_cleaned = get_donts_with_retry(content_marketing_content, "Content Marketing")
smm_donts_cleaned = get_donts_with_retry(smm_content, "Social Media Marketing")

# Store results in a dictionary keyed by category label.
donts_output = {
    "Brand Marketing": branding_donts_cleaned,
    "Content Marketing": content_marketing_donts_cleaned,
    "Social Media Marketing": smm_donts_cleaned
}

# Print cleaned results
print(json.dumps(donts_output, indent=4))

# Save cleaned output to a file, one "- Don't ..." bullet per line per category.
output_file = os.path.join(input_dir, 'Product_donts_output_cleaned.txt')
with open(output_file, 'w') as file:
    for category, dont_list in donts_output.items():
        file.write(f"{category}:\n")
        for dont in dont_list:
            file.write(f"- {dont}\n")
        file.write("\n")

print(f"Cleaned output saved to {output_file}")
|
118 |
+
|
119 |
+
|
120 |
+
|
121 |
+
|
122 |
+
# Function to strip unwanted characters (quotes, commas, periods, etc.)
# NOTE(review): this redefines the identical clean_text() declared earlier in
# this file; the redefinition is harmless but redundant — consider removing one.
def clean_text(text):
    """Remove quotes, apostrophes, commas and periods; trim surrounding whitespace."""
    # Remove quotes, commas, periods, and extra whitespace
    cleaned_text = re.sub(r'[\"\'.,]+', '', text).strip()
    return cleaned_text
|
127 |
+
|
128 |
+
# Function to strip inverted commas from list items
|
129 |
+
def strip_inverted_commas(items):
    """Return *items* with double/single quote characters removed and whitespace trimmed."""
    cleaned = []
    for item in items:
        without_quotes = item.replace('"', '').replace("'", "")
        cleaned.append(without_quotes.strip())
    return cleaned
|
131 |
+
|
132 |
+
# Function to send requests to OpenAI API
# NOTE(review): this redefines request_analysis() from earlier in the file
# with a different default model ("gpt-4" vs "gpt-4o-mini"); all calls after
# this point use this version.
def request_analysis(system_message, user_message, model="gpt-4", max_tokens=1500):
    """POST a chat-completion request; return content or an error sentinel string."""
    headers = {
        # NOTE(review): relies on openai.api_key being populated — confirm,
        # since only the module-level `api_key` is read from the environment.
        'Authorization': f'Bearer {openai.api_key}',
        'Content-Type': 'application/json'
    }
    payload = {
        "model": model,
        "messages": [
            {"role": "system", "content": system_message},
            {"role": "user", "content": user_message}
        ],
        "max_tokens": max_tokens
    }
    response = requests.post("https://api.openai.com/v1/chat/completions", headers=headers, json=payload)
    if response.status_code == 200:
        try:
            return response.json().get("choices", [{}])[0].get("message", {}).get("content", "Unexpected response format")
        except Exception as e:
            logging.error(f"Error parsing response: {e}")
            return "Error parsing the response."
    else:
        logging.error(f"Request failed with status code {response.status_code}")
        return f"Error: {response.status_code}"
|
156 |
+
|
157 |
+
# Function to generate suggestions from GPT
def get_suggestions_from_gpt(product_donts, category):
    """Turn a list of "Don'ts" into matching improvement suggestions via the LLM.

    Returns a list of cleaned suggestion strings, or [] when the API call
    failed to parse.
    """
    # NOTE(review): the indentation inside these triple-quoted prompts could
    # not be recovered exactly from the source rendering — confirm upstream.
    system_message = """
    You are an expert marketing consultant. Based on the company's weaknesses, generate a list of suggestions for each category.
    The number of suggestions should match the number of "Don'ts" provided.
    Each suggestion should be 3-6 words, practical, and tailored to address the specific weakness.
    Provide output as a clean list without numbers, brackets, or extra formatting.
    """
    user_message = f"""
    Category: {category}
    Product Company's Weaknesses (Don'ts): {product_donts}
    Provide only the suggestions list as output, separated by new lines.
    """
    response = request_analysis(system_message, user_message)
    if response != "Error parsing the response.":
        # Clean up the response: drop blank lines, strip leading dashes and
        # punctuation so each entry is a bare suggestion phrase.
        suggestions = [clean_text(line.strip().replace("-", "").strip()) for line in response.strip().split("\n") if line.strip()]
        return suggestions
    return []
|
176 |
+
|
177 |
+
# Generate lists of suggestions for each category
branding_suggestions = get_suggestions_from_gpt(branding_donts_cleaned, "Brand Marketing")
content_marketing_suggestions = get_suggestions_from_gpt(content_marketing_donts_cleaned, "Content Marketing")
smm_suggestions = get_suggestions_from_gpt(smm_donts_cleaned, "Social Media Marketing")

# Prepare output dictionary
output = {
    "Brand Marketing": branding_suggestions,
    "Content Marketing": content_marketing_suggestions,
    "Social Media Marketing": smm_suggestions
}

# Print the output for verification
for category, items in output.items():
    print(f"{category}:")
    for item in items:
        print(f'- {item}')
    print()

# NOTE(review): this output_file path is immediately overwritten by the
# assignment below, so "Product_suggestions_output_cleaned.txt" is never
# written — confirm whether that file was supposed to be produced.
output_file = os.path.join("data", "output_generated_file", "Product_suggestions_output_cleaned.txt")


print("This branding don't ")
print(branding_donts_cleaned)

# Save cleaned output to a file
output_file = os.path.join(input_dir, 'Product_output_cleaned.txt')

with open(output_file, 'w') as file:
    for category in donts_output.keys():
        # Write the category heading
        file.write(f"{category}:\n")
        file.write("Don'ts:\n")
        # Write the "Don'ts"
        for dont in donts_output[category]:
            file.write(f"- {dont}\n")
        file.write("\nSuggestions:\n")
        # Write the corresponding suggestions
        suggestions = output[category]
        for suggestion in suggestions:
            file.write(f"- {suggestion}\n")
        # Section separator between categories
        file.write("\n" + "="*50 + "\n\n")

print(f"All outputs saved to {output_file}")
|
src/input_analysis/path.py
ADDED
@@ -0,0 +1,68 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import os
|
2 |
+
|
3 |
+
# Define the paths
input_dir = 'data/top_3_images'
output_dir = 'data/output_generated_file'

# Ensure output directory exists
if not os.path.exists(output_dir):
    os.makedirs(output_dir)

# Loop over all .txt files in the input folder and split each into
# Branding / Content Marketing / Social Media Marketing sections.
for filename in os.listdir(input_dir):
    if filename.endswith('.txt'):
        file_path = os.path.join(input_dir, filename)

        # Read the content of each text file
        with open(file_path, 'r') as file:
            file_content = file.read()

        # Determine if it's a product or competitor file from its name.
        is_product = 'product' in filename.lower()
        if is_product:
            branding_file = os.path.join(output_dir, 'Product_branding.txt')
            content_marketing_file = os.path.join(output_dir, 'Product_content_marketing.txt')
            smm_file = os.path.join(output_dir, 'Product_smm.txt')
        else:
            branding_file = os.path.join(output_dir, 'Competitor_branding.txt')
            content_marketing_file = os.path.join(output_dir, 'Competitor_content_marketing.txt')
            smm_file = os.path.join(output_dir, 'Competitor_smm.txt')

        # Split the content into lines
        lines = file_content.split('\n')

        # Prepare content buckets for each category
        branding_content = []
        content_marketing_content = []
        smm_content = []

        current_category = None

        # A heading line switches the active bucket; each line (including the
        # heading itself) is appended to whichever bucket is active.
        for line in lines:
            if "Branding" in line:
                current_category = 'Branding'
            elif "Content Marketing" in line:
                current_category = 'Content Marketing'
            elif "Social Media Marketing" in line:
                current_category = 'Social Media Marketing'

            # Append line to the corresponding category
            if current_category == 'Branding':
                branding_content.append(line)
            elif current_category == 'Content Marketing':
                content_marketing_content.append(line)
            elif current_category == 'Social Media Marketing':
                smm_content.append(line)

        # Write the extracted data into their respective files.
        # NOTE(review): 'w' mode means each input file overwrites the previous
        # output within the same Product/Competitor group — confirm intended.
        with open(branding_file, 'w') as bf:  # 'w' mode will overwrite the file
            bf.write('\n'.join(branding_content) + '\n\n')  # Add the new content

        with open(content_marketing_file, 'w') as cmf:  # 'w' mode will overwrite the file
            cmf.write('\n'.join(content_marketing_content) + '\n\n')

        with open(smm_file, 'w') as smf:  # 'w' mode will overwrite the file
            smf.write('\n'.join(smm_content) + '\n\n')

# NOTE(review): "(unknown)" reads like a placeholder where the processed
# filename used to be interpolated — confirm against the original repository.
print(f"Processed (unknown) and saved data into {branding_file}, {content_marketing_file}, and {smm_file}")
|
src/input_analysis/product-analysis/product_analysis.py
ADDED
@@ -0,0 +1,236 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import base64
|
2 |
+
import requests
|
3 |
+
from PIL import Image
|
4 |
+
import io
|
5 |
+
import json
|
6 |
+
import logging
|
7 |
+
import json
|
8 |
+
import pandas as pd
|
9 |
+
import re
|
10 |
+
|
11 |
+
# Define constants
API_URL = "https://api.openai.com/v1/chat/completions"  # chat-completions endpoint
IMAGE_ANALYSES_KEY = "Image Analyses"  # top-level key of the output JSON
import os  # NOTE(review): mid-file import duplicates top-of-file style; left as-is
api_key = os.environ.get("OPENAI_API_KEY")  # may be None if the env var is unset
# Configure logging
logging.basicConfig(level=logging.INFO)
|
18 |
+
|
19 |
+
# Function to encode the image to base64
|
20 |
+
def encode_image(image_path):
    """Downscale the image at *image_path* to at most 800x800, JPEG-encode it,
    and return the bytes as a base64 string.

    Returns:
        The base64-encoded JPEG as str, or None (with an error logged) when
        the file cannot be opened or encoded.
    """
    try:
        with Image.open(image_path) as img:
            img.thumbnail((800, 800))  # Resize in place, preserving aspect ratio
            # JPEG has no alpha channel: palette/alpha images (P, RGBA, LA)
            # previously made save() raise "cannot write mode ... as JPEG",
            # which was swallowed below and silently returned None.
            if img.mode not in ("RGB", "L"):
                img = img.convert("RGB")
            buffer = io.BytesIO()
            img.save(buffer, format="JPEG", quality=85)
            return base64.b64encode(buffer.getvalue()).decode('utf-8')
    except Exception as e:
        logging.error(f"Failed to encode image {image_path}: {e}")
        return None
|
31 |
+
|
32 |
+
# Function to get image dimensions
|
33 |
+
def get_image_dimensions(image_path):
    """Return (width, height) of the image at *image_path*, or (0, 0) on failure."""
    try:
        with Image.open(image_path) as img:
            dimensions = img.size  # PIL reports (width, height)
    except Exception as e:
        logging.error(f"Failed to get dimensions for image {image_path}: {e}")
        return (0, 0)
    return dimensions
|
40 |
+
|
41 |
+
# Image paths for analysis (hard-coded set of product post images)
image_paths = [
    "data/product/image1.jpeg",
    "data/product/image2.jpeg",
    "data/product/image3.jpeg",
    "data/product/image4.jpeg",
    "data/product/image5.jpeg",
    "data/product/image6.jpeg"
]

# Headers for the OpenAI API request; api_key comes from OPENAI_API_KEY above.
headers = {
    "Content-Type": "application/json",
    "Authorization": f"Bearer {api_key}"
}
|
56 |
+
|
57 |
+
# System Message for Brutal Analysis: instructs the model to score the post
# 0-10 per criterion across three categories and to return only the JSON-ish
# format sketched at the bottom (downstream code regex-extracts the scores).
system_message = """
Analyze the branding, content marketing, and social media marketing effectiveness of a company for the provided Instagram post image.
Evaluate it with extreme scrutiny, judging harshly and penalizing even minor mistakes or deviations. Scores (0-10) should reflect an unforgiving critique.
A score of 10 should be nearly unattainable, and anything below basic industry standards should score no more than 5. BUT GIVE GOOD SCORE WHEREVER THEY REALLY REALLY DESERVE IT.
The final evaluation must highlight deficiencies prominently, even if they appear minor.

### Categories and Criteria:
#### Branding:
- **Logo Usage**: Penalize heavily if the logo is not perfectly sized, clearly visible, or appropriately placed.
- **Brand Colors**: Deduct points for any deviations or inconsistencies in brand colors.
- **Typography**: Strictly penalize if fonts do not align with the brand identity or lack consistency.
- **Brand Identity**: Judge if the design fails to reflect the unique persona of the brand.
- **Visual Hierarchy**: Critique poor prioritization of key elements, even slightly.
- **Template Consistency**: Penalize mismatched templates or lack of a uniform design approach.
- **Messaging Alignment**: Deduct heavily for vague or inconsistent brand messaging.
- **Subtle Branding**: Punish overly subtle or excessive branding.
- **Overbranding**: Penalize for overwhelming, distracting brand elements.
- **Variations**: Critique lack of variety or creative innovation in the post.

#### Content Marketing:
- **Content Visibility**: Penalize cluttered designs or poorly highlighted content.
- **Engagement Cues**: Harshly judge unclear or missing calls-to-action.
- **Information Overload**: Deduct for over-saturated visuals or excessive text.
- **Storytelling**: Penalize weak, unengaging, or irrelevant narratives.
- **Content Variety**: Deduct for monotony or repetition across posts.
- **Typography Consistency**: Penalize for inconsistent or unattractive typography.
- **Aesthetic Coherence**: Heavily penalize jarring or unappealing designs.
- **Content Relevance**: Deduct for off-brand or irrelevant posts.
- **Stock Elements**: Heavily penalize excessive reliance on generic or stock imagery.

#### Social Media Marketing:
- **Font Size**: Penalize unreadable or poorly sized text, even slightly.
- **Visibility of Text**: Deduct for hard-to-read text due to placement or design choices.
- **Logo Placement**: Harshly critique logos that disrupt the aesthetic.
- **Consistency**: Deduct for designs that lack cohesion or consistency.
- **Alignment**: Penalize for poorly aligned elements or uneven layouts.
- **Aesthetic Appeal**: Heavily penalize designs that lack visual allure or professionalism.
- **Brand Elements**: Critique insufficient or overused brand assets.
- **Repetitiveness**: Harshly penalize repetitive themes or lack of creative diversity.

### Output JSON Format:
### Only return the following format strictly

{
"Branding Score": total_avg_score, explanation.
"criteria_name": score, explanation.
"criteria_name": score, explanation.

"Content Marketing Score": total_avg_score, explanation.
"criteria_name": score, explanation.
"criteria_name": score, explanation.

"Social Media Marketing Score": total_avg_score, explanation.
"criteria_name": score, explanation.
"criteria_name": score, explanation.
}
"""

# Example product information string, appended to every user message below.
product_information = "This product is an eco-friendly, high-performance water bottle designed to keep beverages cold for up to 24 hours. Made with BPA-free materials, it features a sleek design with a customizable logo space."
|
118 |
+
|
119 |
+
# Function to request analysis from OpenAI API
def request_analysis(system_message, user_message, model="gpt-4o-mini", max_tokens=1500):
    """POST a chat-completion request, appending the module-level
    product_information to the user message.

    Returns the assistant content, or a sentinel error string on failure.
    """
    payload = {
        "model": model,
        "messages": [
            {"role": "system", "content": system_message},
            # product_information is captured from module scope and appended
            # so the model always sees the product description.
            {"role": "user", "content": f"{user_message} Product Information: {product_information}"}
        ],
        "max_tokens": max_tokens
    }
    logging.debug(f"Payload Sent: {json.dumps(payload, indent=4)}")
    response = requests.post(API_URL, headers=headers, json=payload)

    if response.status_code == 200:
        try:
            return response.json().get("choices", [{}])[0].get("message", {}).get("content", "Unexpected response format")
        except Exception as e:
            logging.error(f"Error parsing response: {e}")
            logging.debug(f"Raw Response: {response.text}")
            return "Error parsing the response."
    else:
        logging.error(f"API Error: {response.status_code}, {response.text}")
        return "Error with the API request."
|
142 |
+
|
143 |
+
# Initialize structured output for the analyses
output_structure = {
    IMAGE_ANALYSES_KEY: []
}

# Loop through each image for analysis
for image_path in image_paths:
    # encode_image returns base64 or None when the file could not be encoded.
    base64_image = encode_image(image_path)
    if not base64_image:
        output_structure[IMAGE_ANALYSES_KEY].append({
            "Image": image_path,
            "Analysis": "Failed to encode image."
        })
        continue

    width, height = get_image_dimensions(image_path)
    user_message = f"Analyze the Instagram post with dimensions {width}x{height} pixels. Image data: {base64_image}"

    try:
        analysis_result = request_analysis(system_message, user_message)
        try:
            # Parse result and ensure it matches the required format
            parsed_result = json.loads(analysis_result)
        except json.JSONDecodeError:
            # Keep non-JSON replies verbatim so score extraction can still
            # regex-mine them later.
            logging.warning(f"Response not in expected JSON format for {image_path}: {analysis_result}")
            parsed_result = {"Raw Response": analysis_result}

        output_structure[IMAGE_ANALYSES_KEY].append({
            "Image": image_path,
            "Analysis": parsed_result
        })
    except Exception as err:
        logging.error(f"Error analyzing image {image_path}: {err}")
        output_structure[IMAGE_ANALYSES_KEY].append({
            "Image": image_path,
            "Analysis": "Error analyzing the image."
        })

# Write structured output to file
with open("Output File/json/product_analysis.json", "w") as f:
    json.dump(output_structure, f, indent=4)


logging.info("Analysis completed and saved to product_analysis.json")
|
187 |
+
|
188 |
+
|
189 |
+
def json_to_excel(json_file, excel_file):
    """
    Parse the JSON file and convert it to an Excel file with structured scores and raw JSON responses.

    Args:
        json_file (str): Path to the input JSON file.
        excel_file (str): Path to the output Excel file.
    """
    # Load JSON data from the file
    with open(json_file, 'r') as file:
        data = json.load(file)

    # Prepare a list to hold structured data (one dict per analyzed image)
    structured_data = []

    # Regex to extract '"Criteria Name": Score,' pairs from the raw response
    score_pattern = r'"([a-zA-Z\s]+)":\s*(\d+),'  # Matches "Criteria Name": Score,

    # Iterate over "Image Analyses" entries
    for analysis in data.get("Image Analyses", []):
        # Extract image name
        image = analysis.get("Image", "Unknown")

        # "Analysis" is a dict ({"Raw Response": ...}) when the API reply was
        # captured, but a plain error string ("Failed to encode image." /
        # "Error analyzing the image.") when an upstream step failed.
        # Guard against calling .get() on a str, which raised AttributeError.
        analysis_field = analysis.get("Analysis", {})
        if isinstance(analysis_field, dict):
            raw_response = analysis_field.get("Raw Response", "")
        else:
            raw_response = str(analysis_field)

        # Dictionary to hold extracted data for the image
        image_data = {"Image": image, "Raw JSON Response": raw_response}

        # Extract criteria and scores using regex
        matches = re.findall(score_pattern, raw_response)
        for criterion, score in matches:
            image_data[criterion.strip()] = int(score)  # Add criteria as columns

        # Append image data to structured list
        structured_data.append(image_data)

    # Convert structured data to DataFrame and write it to Excel
    # (requires an Excel writer engine such as openpyxl to be installed).
    df = pd.DataFrame(structured_data)
    df.to_excel(excel_file, index=False)
    print(f"Data successfully written to {excel_file}")
|
232 |
+
|
233 |
+
# Example usage: convert the product analysis JSON produced above into a
# spreadsheet for manual review.
json_file_path = "Output File/json/product_analysis.json"  # Input JSON file
excel_file_path = "Output File/excel/product_analysis.xlsx"  # Output Excel file
json_to_excel(json_file_path, excel_file_path)
|
src/input_analysis/renamebranding.py
ADDED
@@ -0,0 +1,63 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import pandas as pd
import os
import re  # For sanitizing the filenames

# Input spreadsheets produced by the earlier analysis stages.
product_analysis_path = "Output File/excel/product_analysis.xlsx"
competitor_analysis_path = "Output File/excel/competitor_analysis.xlsx"
top_3_df_path = "Output File/excel/top_3_sd_results.xlsx"

# Read the data from Excel files
product_data = pd.read_excel(product_analysis_path)
competitor_data = pd.read_excel(competitor_analysis_path)
top_3_df = pd.read_excel(top_3_df_path)

# Ensure the output directory exists. exist_ok=True avoids the
# check-then-create race of the original `if not os.path.exists(...)` guard
# and is a no-op when the directory is already there.
output_dir = "data/top_3_images"
os.makedirs(output_dir, exist_ok=True)

# Check columns to make sure we are accessing the correct data
print("Product Data Columns:", product_data.columns)
print("Competitor Data Columns:", competitor_data.columns)
|
23 |
+
|
24 |
+
# Helper: make an arbitrary image name safe to use as a filename.
def sanitize_filename(name):
    """Return *name* rewritten so it is safe as a filename.

    Any character that is not a word character, whitespace, or a hyphen
    becomes an underscore; surrounding whitespace is dropped; remaining
    spaces are turned into underscores.
    """
    substituted = re.sub(r'[^\w\s-]', '_', name)
    trimmed = substituted.strip()
    return trimmed.replace(' ', '_')
|
28 |
+
|
29 |
+
def export_raw_responses(source_df, image_names, label):
    """Write each image's 'Raw JSON Response' to a text file in output_dir.

    For every name in *image_names*, look up the matching row in *source_df*
    (by its 'Image' column) and dump the 'Raw JSON Response' cell to
    '<output_dir>/<sanitized name>.txt'. *label* ("Product"/"Competitor") is
    only used in the progress messages. Replaces the two copy-pasted loops
    the script previously had for product and competitor data.
    """
    for image_name in image_names:
        # Sanitize the image name to avoid invalid filename characters
        sanitized_image_name = sanitize_filename(image_name)

        # Fetch raw JSON response for the image by matching the 'Image' column
        matches = source_df.loc[source_df['Image'] == image_name, 'Raw JSON Response'].values
        if len(matches) > 0:
            raw_response = matches[0]
            # A missing cell comes back from pandas as float NaN, which is
            # truthy and would crash file.write(); only accept a real,
            # non-empty string.
            if isinstance(raw_response, str) and raw_response:
                # utf-8 explicitly: the responses may contain non-ASCII text
                with open(f"{output_dir}/{sanitized_image_name}.txt", 'w', encoding='utf-8') as file:
                    file.write(raw_response)  # Write raw response as text
                print(f"Saved Raw Text for {label} image: {sanitized_image_name}")
            else:
                print(f"Empty Raw JSON Response for {label} image: {sanitized_image_name}")
        else:
            print(f"{label} image name '{image_name}' not found.")

# Process Product Image Names
export_raw_responses(product_data, top_3_df['Product_Image_Name'], "Product")

# Process Competitor Image Names
export_raw_responses(competitor_data, top_3_df['Competitor_Image_Name'], "Competitor")
|
src/social.py
ADDED
@@ -0,0 +1,424 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import pandas as pd
|
2 |
+
import base64
|
3 |
+
import os
|
4 |
+
import sys
|
5 |
+
from playwright.sync_api import sync_playwright
|
6 |
+
from PIL import Image
|
7 |
+
|
8 |
+
def save_html_file(file_name, html_content):
    """Write *html_content* to *file_name*.

    UTF-8 is forced explicitly: the generated templates contain non-ASCII
    text, and the platform default encoding (e.g. cp1252 on Windows) would
    raise UnicodeEncodeError on write.
    """
    with open(file_name, 'w', encoding='utf-8') as file:
        file.write(html_content)
|
11 |
+
|
12 |
+
def encode_image_to_base64(image_path):
    """Return the base64 (ASCII) encoding of the file at *image_path*.

    Returns "" after logging when the file is missing or unreadable, matching
    the hardened re-definition of this function later in the module. This
    first definition is the one that runs for the module-level logo/Haldiram
    encodes below, so it needs the same error handling: without it a missing
    asset aborts the whole import.
    """
    try:
        with open(image_path, "rb") as image_file:
            return base64.b64encode(image_file.read()).decode('utf-8')
    except FileNotFoundError:
        print(f"Image not found: {image_path}")
        return ""
    except Exception as e:
        print(f"Error encoding image {image_path}: {e}")
        return ""
|
15 |
+
|
16 |
+
# Allow user to upload images for logo and product images.
# These two assets are baked into the generated HTML below.
logo_image_path = r"src/templates_images/Component 3.png"
haldiram_image_path = r"src/templates_images/Frame 57.png"
# Encode images to base64 once at import time; the results are interpolated
# into the HTML template (see generate_social_media_marketing_html).
# NOTE(review): assumes both PNGs exist relative to the working directory —
# confirm they ship with the repository.
logo_base64 = encode_image_to_base64(logo_image_path)
haldiram_base64 = encode_image_to_base64(haldiram_image_path)
|
22 |
+
# NOTE(review): duplicate definition — this shadows the identical
# save_html_file defined above; consider removing one of them.
def save_html_file(file_name, html_content):
    """Write *html_content* to *file_name*.

    UTF-8 is forced explicitly so HTML containing non-ASCII text does not
    fail on platforms whose default locale encoding is not UTF-8.
    """
    with open(file_name, 'w', encoding='utf-8') as file:
        file.write(html_content)
|
25 |
+
|
26 |
+
def encode_image_to_base64(image_path):
    """Read *image_path* and return its contents as a base64 string.

    A missing or unreadable file is logged and yields "" so template
    generation can continue with a blank image.
    """
    try:
        with open(image_path, "rb") as img_file:
            raw_bytes = img_file.read()
    except FileNotFoundError:
        print(f"Image not found: {image_path}")
        return ""
    except Exception as e:
        print(f"Error encoding image {image_path}: {e}")
        return ""
    return base64.b64encode(raw_bytes).decode("utf-8")
|
36 |
+
|
37 |
+
# Function to generate HTML for Social Media Marketing
def generate_social_media_marketing_html(product_image_base64_1, competitor_image_base64_1, product_image_base64_2, competitor_image_base64_2, product_image_base64_3, competitor_image_base64_3, donts_html, suggestions_html, company_name):
    """Render the Social Media Marketing report page as a single HTML string.

    The page shows three product-vs-competitor image comparisons, a pink
    "Drawbacks" box and a green "How Banao Technologies Can Help" box, and a
    fixed Haldiram's case study.

    Args:
        product_image_base64_*: base64-encoded PNG data for the three product
            images (embedded as data: URIs).
        competitor_image_base64_*: base64-encoded PNG data for the matching
            competitor images.
        donts_html: pre-formatted HTML (lines joined with <br>) for the
            drawbacks box.
        suggestions_html: pre-formatted HTML for the suggestions box.
        company_name: interpolated into the intro and "Issue/Gap" paragraphs.

    Also reads the module globals logo_base64 and haldiram_base64 for the
    header logo and case-study image. Literal CSS braces are doubled ({{ }})
    because the template is an f-string.
    """
    return f"""
    <!DOCTYPE html>
    <html lang="en">
    <head>
        <meta charset="UTF-8">
        <meta name="viewport" content="width=device-width, initial-scale=1.0">
        <title>Social Marketing Template</title>
        <link href="https://fonts.googleapis.com/css2?family=Inter:wght@600&display=swap" rel="stylesheet">
        <style>
            body {{
                font-family: 'Inter', sans-serif;
                margin: 0;
                padding: 10%; /* Increased padding by 10% */
                background-color: #fff;
                font-size: 6px;
            }}
            @page {{
                size: A4;
                margin: 15px; /* Increased margin to 15px */
            }}
            .header {{
                display: flex;
                justify-content: space-between;
                align-items: center;
                padding: 10px; /* Increased padding to 10px */
                background-color: #FFFFFF;
                margin-bottom: 15px; /* Increased margin between header and content */
            }}
            .header .logo {{
                height: 25px; /* Increased logo height */
            }}
            .container {{
                display: flex;
                flex-direction: column;
                padding: 10px; /* Increased padding to 10px */
                flex-grow: 1;
            }}
            h1 {{
                font-family: 'Times New Roman', serif;
                font-size: 22px; /* Increased font size */
                font-weight: 500;
                line-height: 1.2;
                text-align: left;
                margin-bottom: 15px; /* Increased bottom margin */
            }}
            h2, p {{
                font-size: 13px; /* Increased font size */
                font-weight: 400;
                line-height: 1.4;
                color: #000;
            }}
            .gap {{
                font-size: 10px;
                color: rgb(5, 5, 5);
                font-weight: 100;
            }}
            .examples {{
                font-size: 12px; /* Increased font size */
                color: green;
            }}
            .box-container {{
                width: 100%;
                display: flex;
                flex-direction: column;
                align-items: center;
                margin-top: 15px; /* Increased margin */
            }}
            .wraper {{
                width: 100%;
                height: 220px; /* Increased height */
                display: flex;
                margin: 10px 0; /* Increased margin */
                border-radius: 12px; /* Increased border radius */
                overflow: hidden;
                position: relative;
            }}
            .div-1 {{
                flex: 1;
                background-color: #ecbdbd; /* Pink background */
                display: flex;
                align-items: center;
                justify-content: center;
            }}
            .div-2 {{
                flex: 1;
                background-color: #e6f9e6; /* Green background */
                display: flex;
                align-items: center;
                justify-content: center;
            }}
            .wraper img {{
                max-width: 90%;
                max-height: 90%;
                object-fit: contain;
                border-radius: 8px;
            }}
            .vs-text {{
                position: absolute;
                left: 50%;
                top: 50%;
                transform: translate(-50%, -50%);
                font-size: 18px; /* Increased font size */
                font-weight: bold;
                color: black;
            }}
            .side-by-side-container {{
                display: flex;
                gap: 35px; /* Increased gap between boxes */
                margin-top: 45px; /* Increased top margin */
            }}
            .pink-box, .green-box {{
                flex: 1;
                padding: 18px; /* Increased padding */
                margin-top: 15px; /* Increased margin */
                border-radius: 15px; /* Increased border radius */
                box-sizing: border-box;
                height: auto;
            }}
            .pink-box {{
                background-color: #ecbdbd;
                color: red;
                text-align: start;
                display: flex;
                flex-direction: column;
                align-items: flex-start;
            }}
            .pink-box h6 {{
                font-size: 16px; /* Increased text size */
                font-weight: bold;
                margin: 8px 0; /* Increased margin */
                color: red;
            }}
            .green-box {{
                background-color: #e6f9e6;
                color: green;
                text-align: start;
                display: flex;
                flex-direction: column;
                align-items: flex-start;
            }}
            .green-box h6 {{
                font-size: 16px; /* Increased text size */
                font-weight: bold;
                margin: 8px 0; /* Increased margin */
                color: green;
            }}
            .case-study {{
                font-size: 18px; /* Increased font size */
                color: green;
                margin-top: 25px; /* Increased margin */
            }}
            .container1 {{
                font-size: 12px; /* Increased font size */
                color: green;
                margin-top: 15px; /* Increased margin */
            }}
            .case2 {{
                font-size: 12px; /* Increased font size */
                color: rgb(1, 1, 1);
                margin-top: 25px; /* Increased margin */
            }}
            .case-study img {{
                display: block;
                max-width: 100%;
                height: auto;
                margin-top: 25px; /* Increased margin */
                border-radius: 10px; /* Increased border radius */
            }}
        </style>
    </head>
    <body>
        <div class="header">
            <h1><span style="color:red;">Social Media Marketing</span></h1>
            <img src="data:image/png;base64,{logo_base64}" alt="Logo" class="logo">
        </div>
        <div class="container">
            <p>{company_name} should use Social Media Marketing effectively as the strategic promotion for identity, products, and services across all channels to create loyalty among consumers.</p>
            <p class="gap"><span style="color: red;">Issue/Gap:</span> {company_name}'s current social media marketing efforts might not be reaching their full potential. A comprehensive analysis of brand messaging, target audience engagement across channels, and content strategy could reveal opportunities to optimize {company_name}'s marketing approach for greater reach and impact.</p>

        </div>
        <h2 class="examples"> Examples:</h2>
        <div class="box-container">
            <div class="wraper">
                <div class="div-1"> <img src="data:image/png;base64,{product_image_base64_1}" alt="Product Image"></div>
                <div class="vs-text">V/S</div>
                <div class="div-2"> <img src="data:image/png;base64,{competitor_image_base64_1}" alt="Competitor Image"></div>
            </div>
            <div class="wraper">
                <div class="div-1"> <img src="data:image/png;base64,{product_image_base64_2}" alt="Product Image"></div>
                <div class="vs-text">V/S</div>
                <div class="div-2"> <img src="data:image/png;base64,{competitor_image_base64_2}" alt="Competitor Image"></div>
            </div>
            <div class="wraper">
                <div class="div-1"> <img src="data:image/png;base64,{product_image_base64_3}" alt="Product Image"></div>
                <div class="vs-text">V/S</div>
                <div class="div-2"> <img src="data:image/png;base64,{competitor_image_base64_3}" alt="Competitor Image"></div>
            </div>
        </div>
        <div class="side-by-side-container">
            <div class="pink-box">
                <h6>Drawbacks in Current Social Media Marketing</h6>
                <p>{donts_html}</p>
            </div>
            <div class="green-box">
                <h6>How Banao Technologies Can Help</h6>
                <p>{suggestions_html}</p>
            </div>
        </div>
        <div class="case-study">
            <h3><span style="color: green;">Case Study:</span></h3>
            <div class="container1">
                <p><span style="color: green;">We Helped HaldiRam's to Grow</span></p>
            </div>
            <div class="case2">
                <p> Using different techniques we Banao helped Haldiram's to reach 54k followers and generate a revenue of 4.75 Lakh in 3 months </p>
                <img src="data:image/png;base64,{haldiram_base64}" alt="Haldiram" class="haldiram">
            </div>
        </div>
        </div>
    </body>
    </html>


    """
|
263 |
+
|
264 |
+
# Function to parse "Product_output_cleaned.txt" for Don'ts and Suggestions specific to Social Media Marketing
def parse_cleaned_file_social_media(file_path):
    """Extract the Don'ts and Suggestions of the "Social Media Marketing" section.

    The cleaned file is split into sections by a 50-character '=' separator.
    The first section whose first line mentions "Social Media Marketing" is
    scanned for "Don'ts:" / "Suggestions:" headers followed by "- " bullet
    lines.

    Returns:
        (donts_html, suggestions_html): the bullets of each list joined with
        "<br>", or ("", "") when no matching section exists.
    """
    # utf-8 explicitly: the cleaned text may contain non-ASCII characters and
    # the platform default encoding would raise UnicodeDecodeError.
    with open(file_path, "r", encoding="utf-8") as file:
        content = file.read()

    sections = content.split("==================================================")
    for section in sections:
        lines = section.strip().split("\n")
        if lines and "Social Media Marketing" in lines[0]:
            donts = []
            suggestions = []
            mode = None  # which list the current bullet belongs to
            for line in lines[1:]:
                if line.startswith("Don'ts:"):
                    mode = "donts"
                elif line.startswith("Suggestions:"):
                    mode = "suggestions"
                elif mode == "donts" and line.startswith("-"):
                    # lstrip("- ") drops any run of leading '-' and spaces
                    donts.append(line.lstrip("- "))
                elif mode == "suggestions" and line.startswith("-"):
                    suggestions.append(line.lstrip("- "))
            return "<br>".join(donts), "<br>".join(suggestions)

    return "", ""
|
288 |
+
|
289 |
+
# Function to process Social Media Marketing and generate HTML
def process_social_media_marketing(data, base_image_dir, output_file, cleaned_file_path, company_name):
    """Build and save the Social Media Marketing HTML page.

    Filters *data* to the "Social Media Marketing" category, base64-encodes
    the first three product/competitor image pairs, merges in the Don'ts and
    Suggestions parsed from *cleaned_file_path*, and writes the rendered HTML
    to *output_file*. Prints a message and returns early when there is no
    matching data or fewer than three rows.

    The three near-identical path-join/encode stanzas of the original are
    collapsed into one loop.
    """
    # Filter for Social Media Marketing category
    social_media_data = data[data["Category"] == "Social Media Marketing"]

    if social_media_data.empty:
        print("No Social Media Marketing data found in the provided Excel file.")
        return
    # Parse Don'ts and Suggestions
    donts_html, suggestions_html = parse_cleaned_file_social_media(cleaned_file_path)

    # Ensure there are enough rows for three comparisons
    if len(social_media_data) < 3:
        print("Not enough rows for two product and competitor image comparisons.")
        return

    # Encode the first three product/competitor image pairs
    encoded_pairs = []
    for i in range(3):
        row = social_media_data.iloc[i]
        product_b64 = encode_image_to_base64(os.path.join(base_image_dir, row["Product_Image_Name"]))
        competitor_b64 = encode_image_to_base64(os.path.join(base_image_dir, row["Competitor_Image_Name"]))
        encoded_pairs.append((product_b64, competitor_b64))

    # Generate HTML content
    html_content = generate_social_media_marketing_html(
        encoded_pairs[0][0], encoded_pairs[0][1],
        encoded_pairs[1][0], encoded_pairs[1][1],
        encoded_pairs[2][0], encoded_pairs[2][1],
        donts_html, suggestions_html, company_name,
    )

    # Save the HTML file
    with open(output_file, "w", encoding="utf-8") as f:
        f.write(html_content)

    print(f"HTML file for Social Media Marketing has been saved as: {output_file}")
|
337 |
+
|
338 |
+
# Main script for Social Media Marketing
if __name__ == "__main__":
    # Company name comes from the CLI (the launcher passes it as argv[1]);
    # it is also used by the second __main__ block further down this file.
    if len(sys.argv) > 1:
        company_name = sys.argv[1]  # The second argument passed will be the company_name
    else:
        company_name = "Default_Company"  # Default value if no argument is passed
    # Load the Excel file
    file_path = "Output File/excel/top_3_sd_results.xlsx"  # Replace with the path to your Excel file
    data = pd.read_excel(file_path)

    # Set the base directory for images
    base_image_dir = ""  # Replace with the actual directory where your images are stored

    # Path to the cleaned file with Don'ts and Suggestions
    cleaned_file_path = "data/output_generated_file/Product_output_cleaned.txt"  # Replace with the path to your cleaned file

    # Output HTML file
    output_file = "src/templates/social_marketing.html"

    # Generate HTML for Social Media Marketing
    process_social_media_marketing(data, base_image_dir, output_file, cleaned_file_path, company_name)

# Force UTF-8 encoding for terminal output
# NOTE(review): this runs after the processing above has already printed;
# confirm whether it was meant to run before the first print.
sys.stdout.reconfigure(encoding='utf-8')
|
362 |
+
|
363 |
+
def capture_screenshot_with_playwright(html_file_path, screenshot_path):
    """
    Capture a full-page screenshot of the HTML file directly using Playwright.
    """
    try:
        # Headless Chromium renders the local HTML file off-screen.
        with sync_playwright() as playwright:
            chromium = playwright.chromium.launch(headless=True)
            tab = chromium.new_page()

            # Navigate to the template via a file:// URL
            target_url = f"file:///{os.path.abspath(html_file_path)}"
            tab.goto(target_url)

            # Save the entire page, not just the viewport
            tab.screenshot(path=screenshot_path, full_page=True)
            print(f"Screenshot saved: {screenshot_path}")

            chromium.close()

    except Exception as e:
        print(f"Error capturing screenshot: {e}")
|
384 |
+
|
385 |
+
def convert_png_to_pdf(png_path, company_name):
    """Convert a PNG into 'data/reports/template_PDF/social media marketing.pdf'.

    The previous docstring claimed the output was named
    '<company_name> brand marketing.pdf', but the code hardcodes the filename;
    the docstring now matches the actual behavior.

    Args:
        png_path: path of the screenshot PNG to convert.
        company_name: accepted for interface compatibility with the sibling
            template scripts; currently unused because the filename is fixed.
            NOTE(review): confirm whether a per-company filename was intended.
    """
    try:
        # Set the output folder and ensure it exists
        output_folder = "data/reports/template_PDF"
        os.makedirs(output_folder, exist_ok=True)

        # Fixed output filename (company_name intentionally not interpolated)
        pdf_path = os.path.join(output_folder, "social media marketing.pdf")

        # Convert the PNG to PDF; PDF has no alpha channel, so force RGB
        img = Image.open(png_path)
        img.convert('RGB').save(pdf_path, "PDF")

        print(f"PDF saved: {pdf_path}")
    except Exception as e:
        print(f"Error converting PNG to PDF: {e}")
|
407 |
+
|
408 |
+
if __name__ == "__main__":
    # Paths for demonstration
    html_file_path = "src/templates/social_marketing.html"

    # Screenshot saved in the folder: data/reports/template_ss
    screenshot_folder = "data/reports/template_ss"
    os.makedirs(screenshot_folder, exist_ok=True)
    screenshot_path = os.path.join(screenshot_folder, "social_marketing_screenshot.png")

    # Ensure Playwright browsers are installed
    # NOTE(review): shells out to `playwright install` on every run — slow;
    # confirm this is intended rather than a one-time setup step.
    os.system("playwright install")

    # Capture screenshot
    capture_screenshot_with_playwright(html_file_path, screenshot_path)

    # Convert screenshot to PDF with the company name strictly as the filename
    # (company_name is bound by the earlier __main__ block in this module)
    convert_png_to_pdf(screenshot_path, company_name)
|
src/templates/1.txt
ADDED
File without changes
|
src/templates_images/1.txt
ADDED
File without changes
|