from fastapi import FastAPI, File, UploadFile
import requests
from transformers import BlipProcessor, BlipForConditionalGeneration
from PIL import Image
import torch
import gradio as gr
from datetime import datetime
from reportlab.lib.pagesizes import letter
from reportlab.platypus import SimpleDocTemplate, Paragraph, Spacer, Image as PDFImage
from reportlab.lib.styles import getSampleStyleSheet, ParagraphStyle
from reportlab.lib import colors
from simple_salesforce import Salesforce
import os
from dotenv import load_dotenv
import base64
import io
import concurrent.futures

# Load environment variables from .env file
load_dotenv()

app = FastAPI()

# Salesforce credentials
SF_USERNAME = os.getenv('SF_USERNAME')
SF_PASSWORD = os.getenv('SF_PASSWORD')
SF_SECURITY_TOKEN = os.getenv('SF_SECURITY_TOKEN')

# Initialize Salesforce connection
try:
    sf = Salesforce(username=SF_USERNAME, password=SF_PASSWORD, security_token=SF_SECURITY_TOKEN)
except Exception as e:
    sf = None
    print(f"Failed to connect to Salesforce: {str(e)}")

# Load BLIP model and processor
processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
model = BlipForConditionalGeneration.from_pretrained("Salesforce/blip-image-captioning-base")
model.eval()
device = "cuda" if torch.cuda.is_available() else "cpu"
model.to(device)

# Hugging Face Space endpoint that the /predict route below forwards uploaded images to
HUGGING_FACE_ENDPOINT = 'https://huggingface.co/spaces/Rammohan0504/DPR-4/predict'

@app.post("/predict/")
async def predict(image: UploadFile = File(...)):
    try:
        # Read the image from the request
        image_bytes = await image.read()
        image = Image.open(io.BytesIO(image_bytes))

        # Forward the image to Hugging Face endpoint
        response = forward_image_to_huggingface(image)

        # Check the response from Hugging Face
        if response.status_code == 200:
            result = response.json()
            caption = result.get("caption", "No caption found.")
            return {"caption": caption}
        else:
            return {"error": f"Failed to get prediction from Hugging Face Space. Status code: {response.status_code}"}
    except Exception as e:
        return {"error": str(e)}

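# Example request against the /predict/ route (a usage sketch; it assumes the FastAPI
# app is being served locally on port 8000, e.g. via uvicorn):
#   curl -X POST -F "image=@site_photo.jpg" http://localhost:8000/predict/
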
# Function to forward the image to Hugging Face API
def forward_image_to_huggingface(image: Image):
    if image.mode != "RGB":
        image = image.convert("RGB")
    
    # Resize image for faster processing
    image = image.resize((640, 640))
    
    # Convert image to bytes for API request
    img_byte_arr = io.BytesIO()
    image.save(img_byte_arr, format='JPEG')
    img_byte_arr = img_byte_arr.getvalue()

    # Create the payload to send to Hugging Face (it expects a file)
    files = {'file': ('image.jpg', img_byte_arr, 'image/jpeg')}
    
    # Make the POST request to Hugging Face Space
    response = requests.post(HUGGING_FACE_ENDPOINT, files=files)
    return response

# Inference function to generate captions dynamically based on image content
def generate_captions_from_image(image):
    # The model above is loaded in its default float32 precision, so the inputs must
    # not be cast to float16 here, otherwise the dtypes would not match during generation.
    inputs = processor(image, return_tensors="pt").to(device)
    with torch.no_grad():
        output = model.generate(**inputs, max_new_tokens=50)
    caption = processor.decode(output[0], skip_special_tokens=True)
    return caption

# Function to save DPR text to a PDF file
def save_dpr_to_pdf(dpr_text, image_paths, captions, filename):
    try:
        # Create a PDF document
        doc = SimpleDocTemplate(filename, pagesize=letter)
        styles = getSampleStyleSheet()
        
        # Define custom styles
        title_style = ParagraphStyle(
            name='Title',
            fontSize=16,
            leading=20,
            alignment=1,  # Center
            spaceAfter=20,
            textColor=colors.black,
            fontName='Helvetica-Bold'
        )
        body_style = ParagraphStyle(
            name='Body',
            fontSize=12,
            leading=14,
            spaceAfter=10,
            textColor=colors.black,
            fontName='Helvetica'
        )
        
        # Build the PDF content
        flowables = []
        
        # Add title
        flowables.append(Paragraph("Daily Progress Report", title_style))
        
        # Split DPR text into lines and add as paragraphs (excluding descriptions for images)
        for line in dpr_text.split('\n'):
            # Replace problematic characters for PDF
            line = line.replace('\u2019', "'").replace('\u2018', "'")
            if line.strip():
                flowables.append(Paragraph(line, body_style))
            else:
                flowables.append(Spacer(1, 12))
        
        # Add images and captions in the correct order (no need to add description to dpr_text again)
        for img_path, caption in zip(image_paths, captions):
            try:
                # Add image first
                img = PDFImage(img_path, width=200, height=150)  # Adjust image size if needed
                flowables.append(img)
                # Add description below the image
                description = f"Description: {caption}"
                flowables.append(Paragraph(description, body_style))
                flowables.append(Spacer(1, 12))  # Add some space between images
            except Exception as e:
                flowables.append(Paragraph(f"Error loading image: {str(e)}", body_style))
        
        # Build the PDF
        doc.build(flowables)
        return f"PDF saved successfully as {filename}", filename
    except Exception as e:
        return f"Error saving PDF: {str(e)}", None

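# `generate_dpr` is referenced by the Gradio interface below but is not defined in this
# file. The implementation below is a minimal, assumed sketch: it captions each uploaded
# photo with the local BLIP model, assembles a plain-text report, and saves it to a
# timestamped PDF. The Salesforce upload mentioned in the interface description is not
# reproduced here.
def generate_dpr(files):
    if not files:
        return "No site photos uploaded.", None

    # gr.Files(type="filepath") yields plain path strings; older Gradio versions yield
    # tempfile objects with a .name attribute, so handle both.
    image_paths = [f if isinstance(f, str) else f.name for f in files]
    image_paths = image_paths[:10]  # the interface advertises up to 10 photos

    # Caption each photo with the local BLIP model
    captions = []
    for path in image_paths:
        img = Image.open(path).convert("RGB")
        captions.append(generate_captions_from_image(img))

    # Assemble the text report
    report_lines = [f"Daily Progress Report - {datetime.now().strftime('%Y-%m-%d %H:%M')}", ""]
    for path, caption in zip(image_paths, captions):
        report_lines.append(f"{os.path.basename(path)}: {caption}")
    dpr_text = "\n".join(report_lines)

    # Save the report, images, and captions to a PDF and return both interface outputs
    pdf_filename = f"DPR_{datetime.now().strftime('%Y%m%d_%H%M%S')}.pdf"
    _, pdf_path = save_dpr_to_pdf(dpr_text, image_paths, captions, pdf_filename)
    return dpr_text, pdf_path
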
# Gradio interface for uploading multiple files, displaying DPR, and downloading PDF
iface = gr.Interface(
    fn=generate_dpr,
    inputs=gr.Files(type="filepath", label="Upload Site Photos"),
    outputs=[
        gr.Textbox(label="Daily Progress Report"),
        gr.File(label="Download PDF")
    ],
    title="Daily Progress Report Generator",
    description="Upload up to 10 site photos. The AI model will generate a text-based Daily Progress Report (DPR), save it as a PDF, and upload the PDF and images to Salesforce under Daily_Progress_Reports__c in the Files related list. Download the PDF locally if needed.",
    allow_flagging="never"
)

if __name__ == "__main__":
    iface.launch()
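
# The FastAPI routes above are not started by this script; if this file is saved as
# app.py (an assumption), they could be served separately with, for example:
#   uvicorn app:app --host 0.0.0.0 --port 8000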