import gradio as gr
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from io import BytesIO
import os
import logging
import base64
import shutil
import tempfile
from simple_salesforce import Salesforce
from reportlab.lib.pagesizes import letter
from reportlab.pdfgen import canvas
from fastapi import FastAPI, Form, File, UploadFile
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import JSONResponse
from fastapi.staticfiles import StaticFiles
from dotenv import load_dotenv
# Load environment variables from .env file
load_dotenv()
# Configure logging to show detailed messages
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)
# Salesforce credentials (loaded from environment variables)
SALESFORCE_USERNAME = os.getenv("SALESFORCE_USERNAME")
SALESFORCE_PASSWORD = os.getenv("SALESFORCE_PASSWORD")
SALESFORCE_SECURITY_TOKEN = os.getenv("SALESFORCE_SECURITY_TOKEN")
SALESFORCE_DOMAIN = os.getenv("SALESFORCE_DOMAIN", "login")
# Validate that credentials are set
if not all([SALESFORCE_USERNAME, SALESFORCE_PASSWORD, SALESFORCE_SECURITY_TOKEN]):
    logger.error("Salesforce credentials not set in environment variables.")
    raise ValueError("Missing Salesforce credentials in environment variables.")
logger.debug("Using Salesforce credentials - username and security token loaded from environment variables.")
# Function to authenticate with Salesforce
def get_salesforce_connection():
    try:
        logger.debug("Attempting to connect to Salesforce...")
        sf = Salesforce(
            username=SALESFORCE_USERNAME,
            password=SALESFORCE_PASSWORD,
            security_token=SALESFORCE_SECURITY_TOKEN,
            domain=SALESFORCE_DOMAIN
        )
        logger.info("Salesforce connection successful.")
        # Run a trivial query to confirm the session is actually usable
        result = sf.query("SELECT Id FROM User LIMIT 1")
        logger.debug(f"Successfully queried Salesforce to confirm connection. Result: {result}")
        return sf
    except Exception as e:
        logger.error(f"Failed to connect to Salesforce: {str(e)}", exc_info=True)
        return None
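# simple_salesforce authenticates with username + password + security token through the SOAP
# login endpoint; SALESFORCE_DOMAIN is "login" for production orgs and "test" for sandboxes.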
# Function to upload a file to Salesforce as a ContentVersion
def upload_file_to_salesforce(file_path, file_name, record_id=None):
    try:
        sf = get_salesforce_connection()
        if not sf:
            logger.error("Salesforce connection failed. Cannot upload file.")
            return None
        with open(file_path, "rb") as f:
            file_data = f.read()
        encoded_file_data = base64.b64encode(file_data).decode('utf-8')
        logger.debug(f"Uploading file {file_name} for record ID: {record_id}")
        content_version_data = {
            "Title": file_name,
            "PathOnClient": file_name,
            "VersionData": encoded_file_data,
        }
        if record_id:
            # Linking the file to a record at creation time shares it with that record
            content_version_data["FirstPublishLocationId"] = record_id
        content_version = sf.ContentVersion.create(content_version_data)
        logger.info(f"File uploaded to Salesforce with ContentVersion ID: {content_version['id']}")
        return content_version["id"]
    except Exception as e:
        logger.error(f"Error uploading file to Salesforce: {str(e)}", exc_info=True)
        return None
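# ContentVersion fields used above (standard Salesforce Files API):
#   Title / PathOnClient     - display name and original file name
#   VersionData              - file body, base64-encoded when created via the REST API
#   FirstPublishLocationId   - optional record ID the file is shared with on creation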
# Function to generate PDF
def generate_pdf(record_data):
    try:
        logger.debug("Generating PDF...")
        pdf_file = BytesIO()
        c = canvas.Canvas(pdf_file, pagesize=letter)
        # Add project details
        c.setFont("Helvetica-Bold", 14)
        c.drawString(100, 750, "Project Summary Report")
        c.setFont("Helvetica", 12)
        y_position = 720
        for key, value in record_data.items():
            if key == "risk_tags":
                continue  # Risk tags are rendered in their own section below
            c.drawString(100, y_position, f"{key.replace('_', ' ').title()}: {value}")
            y_position -= 20
        # Add risk tags section
        if "risk_tags" in record_data:
            c.setFont("Helvetica-Bold", 12)
            c.drawString(100, y_position - 20, "Risk Analysis:")
            c.setFont("Helvetica", 10)
            y_position -= 40
            risk_tags = record_data["risk_tags"].split("\n")
            for tag in risk_tags:
                if tag.strip():
                    if y_position < 50:
                        # Start a new page when the current one runs out of vertical space
                        c.showPage()
                        c.setFont("Helvetica", 10)
                        y_position = 750
                    c.drawString(120, y_position, tag)
                    y_position -= 15
        c.save()
        pdf_file.seek(0)
        logger.debug("PDF generated successfully.")
        return pdf_file
    except Exception as e:
        logger.error(f"Error generating PDF: {str(e)}", exc_info=True)
        return None
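# record_data is a flat dict of label -> value pairs; the callers below pass keys such as
# "project_title", "estimated_duration", "ai_plan_score", "status", "location", "weather",
# "workforce_size", plus a multi-line "risk_tags" string rendered in its own section.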
# Function to upload PDF to Salesforce and get its URL
def upload_pdf_to_salesforce(pdf_file, project_title, record_id=None):
    try:
        sf = get_salesforce_connection()
        if not sf:
            logger.error("Salesforce connection failed. Cannot upload PDF.")
            return None, None
        encoded_pdf_data = base64.b64encode(pdf_file.getvalue()).decode('utf-8')
        logger.debug(f"Uploading PDF for project: {project_title}, record ID: {record_id}")
        content_version_data = {
            "Title": f"{project_title} - Project Report",
            "PathOnClient": f"{project_title}_Report.pdf",
            "VersionData": encoded_pdf_data,
        }
        if record_id:
            content_version_data["FirstPublishLocationId"] = record_id
        content_version = sf.ContentVersion.create(content_version_data)
        content_version_id = content_version["id"]
        logger.info(f"PDF uploaded to Salesforce with ContentVersion ID: {content_version_id}")
        result = sf.query(f"SELECT Id, ContentDocumentId FROM ContentVersion WHERE Id = '{content_version_id}'")
        if not result['records']:
            logger.error("No records returned for ContentVersion query")
            return content_version_id, None
        content_document_id = result['records'][0]['ContentDocumentId']
        file_url = f"https://{sf.sf_instance}/sfc/servlet.shepherd/version/download/{content_version_id}"
        logger.debug(f"Generated PDF URL: {file_url}")
        return content_version_id, file_url
    except Exception as e:
        logger.error(f"Error uploading PDF to Salesforce: {str(e)}", exc_info=True)
        return None, None
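# The /sfc/servlet.shepherd/version/download/<ContentVersionId> path is Salesforce's standard
# download URL for a file version; sf.sf_instance is the org's instance host, and the link only
# resolves for users who are authenticated to that org and have access to the file.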
# Function to create or update project timeline in Salesforce
def send_to_salesforce(project_title, gantt_chart_url, ai_plan_score, estimated_duration, status="Draft", record_id=None, location="", weather_type="", work_items=None, work_items_id=None):
    try:
        logger.debug("Starting send_to_salesforce function...")
        sf = get_salesforce_connection()
        if not sf:
            logger.error("Salesforce connection failed. Cannot proceed with record creation/update.")
            return None
        try:
            obj_description = sf.AI_Project_Timeline__c.describe()
            logger.debug("AI_Project_Timeline__c object exists and is accessible.")
            available_fields = [field['name'] for field in obj_description['fields']]
            logger.debug(f"Available fields on AI_Project_Timeline__c: {available_fields}")
        except Exception as e:
            logger.error(f"Error: AI_Project_Timeline__c object not found or inaccessible: {str(e)}")
            return None
        sf_data = {
            "Name": project_title[:80],
            "Project_Title__c": project_title,
            "Estimated_Duration__c": estimated_duration,
            "AI_Plan_Score__c": ai_plan_score,
            "Status__c": status,
            "Location__c": location,
            "Weather_Type__c": weather_type,
        }
        if gantt_chart_url:
            sf_data["Gantt_Chart_PDF__c"] = gantt_chart_url
        if work_items_id:
            sf_data["Work_Items__c"] = work_items_id
        logger.debug(f"Prepared Salesforce data: {sf_data}")
        if record_id:
            try:
                logger.info(f"Attempting to update Salesforce record with ID: {record_id}")
                sf.AI_Project_Timeline__c.update(record_id, sf_data)
                logger.info(f"Successfully updated Salesforce record with ID: {record_id}")
                return record_id
            except Exception as e:
                logger.error(f"Error updating record {record_id}: {str(e)}")
                record_id = None
        logger.info("Creating new Salesforce record...")
        project_record = sf.AI_Project_Timeline__c.create(sf_data)
        if not project_record.get('id'):
            logger.error("Failed to create record, no ID returned")
            return None
        new_record_id = project_record['id']
        logger.info(f"Created new Salesforce record with ID: {new_record_id}")
        return new_record_id
    except Exception as e:
        logger.error(f"Error sending data to Salesforce: {str(e)}", exc_info=True)
        if hasattr(e, 'content') and e.content:
            logger.error(f"Salesforce API response: {e.content}")
        return None
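# AI_Project_Timeline__c is a custom object and the __c fields written above are custom fields;
# both must already exist in the target org. The describe() call is only a pre-flight visibility
# check, and the available field names are logged to make missing-field errors easier to diagnose.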
# Function to generate Gantt chart and risk analysis
def generate_project_timeline(boq_file, weather, workforce, location, project_title):
    temp_dir = None
    try:
        logger.debug("Processing BOQ data...")
        if not boq_file:
            raise ValueError("No file uploaded")
        temp_dir = tempfile.mkdtemp()
        output_filename = f"gantt_chart_{project_title.replace(' ', '_')}.png"
        output_path = os.path.join(temp_dir, output_filename)
        logger.debug(f"Gantt chart will be saved to: {output_path}")
        # Read the BOQ file
        if isinstance(boq_file, str):
            df = pd.read_csv(boq_file)
        else:
            df = pd.read_csv(boq_file.name)
        # Validate required columns
        required_columns = ["Task Name", "Duration"]
        missing_columns = [col for col in required_columns if col not in df.columns]
        if missing_columns:
            raise ValueError(f"CSV is missing required columns: {', '.join(missing_columns)}")
        # Generate detailed risk analysis
        risk_analysis = []
        for _, row in df.iterrows():
            task = row["Task Name"]
            duration = row["Duration"]
            # Weather risk assessment
            if weather.lower() == "rainy":
                weather_impact = "High" if duration > 3 else "Medium"
                weather_reason = "Prolonged rain exposure" if duration > 3 else "Some rain impact expected"
            elif weather.lower() == "sunny":
                weather_impact = "Low"
                weather_reason = "Favorable working conditions"
            else:  # cloudy
                weather_impact = "Low"
                weather_reason = "Mild weather impact"
            # Workforce risk assessment
            if workforce < 10 and duration > 5:
                workforce_impact = "High"
                workforce_reason = "Insufficient workforce for task duration"
            elif workforce < 15 and duration > 10:
                workforce_impact = "Medium"
                workforce_reason = "Workforce may be stretched for this duration"
            else:
                workforce_impact = "Low"
                workforce_reason = "Adequate workforce available"
            # Overall risk is the worst of the two impact ratings
            if "High" in [weather_impact, workforce_impact]:
                overall_risk = "High"
            elif "Medium" in [weather_impact, workforce_impact]:
                overall_risk = "Medium"
            else:
                overall_risk = "Low"
            risk_analysis.append(
                f"Task: {task}\n"
                f"- Duration: {duration} days\n"
                f"- Weather Impact: {weather_impact} ({weather_reason})\n"
                f"- Workforce Impact: {workforce_impact} ({workforce_reason})\n"
                f"- Overall Risk: {overall_risk}\n"
            )
        risk_tags_str = "\n".join(risk_analysis)
        # Generate Gantt chart
        plt.style.use('ggplot')
        fig, ax = plt.subplots(figsize=(12, 6))
        # Color tasks based on risk level
        colors = []
        for _, row in df.iterrows():
            duration = row["Duration"]
            if weather.lower() == "rainy" and duration > 3:
                colors.append('#ff6b6b')  # red for high risk
            elif workforce < 10 and duration > 5:
                colors.append('#ff6b6b')  # red for high risk
            elif (weather.lower() == "rainy" and duration > 1) or (workforce < 15 and duration > 7):
                colors.append('#ffd166')  # yellow for medium risk
            else:
                colors.append('#06d6a0')  # green for low risk
        ax.barh(df["Task Name"], df["Duration"], color=colors, edgecolor='black')
        ax.set_xlabel("Duration (days)", fontweight='bold')
        ax.set_ylabel("Tasks", fontweight='bold')
        ax.set_title(f"Project Timeline: {project_title}\nLocation: {location} | Weather: {weather}", fontweight='bold')
        # Add risk legend
        ax.text(0.95, 0.15,
                "Risk Levels:\n"
                "Green = Low Risk\n"
                "Yellow = Medium Risk\n"
                "Red = High Risk",
                transform=ax.transAxes,
                bbox=dict(facecolor='white', alpha=0.8),
                verticalalignment='top',
                horizontalalignment='right')
        plt.tight_layout()
        fig.savefig(output_path, format="png", bbox_inches="tight", dpi=100)
        plt.close(fig)
        logger.info("Gantt chart and risk analysis generated successfully.")
        return output_path, risk_tags_str, temp_dir
    except Exception as e:
        logger.error(f"Error generating project timeline: {str(e)}", exc_info=True)
        if temp_dir and os.path.exists(temp_dir):
            shutil.rmtree(temp_dir)
        return None, str(e), None
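# The BOQ CSV only needs the two columns validated above; an illustrative layout (values are
# made-up examples, extra columns are simply ignored):
#   Task Name,Duration
#   Site clearing,3
#   Foundation excavation,7
#   Concrete pouring,5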
# Gradio interface function
def gradio_interface(boq_file, weather, workforce, location, project_title):
    temp_dir = None
    try:
        logger.info("Starting gradio_interface...")
        if not boq_file:
            return None, "Error: No BOQ file uploaded"
        # Validate workforce input
        if workforce <= 0:
            return None, "Error: Workforce size must be greater than 0"
        boq_file_path = boq_file.name if hasattr(boq_file, 'name') else boq_file
        file_path, risk_tags, temp_dir = generate_project_timeline(boq_file_path, weather, workforce, location, project_title)
        if not file_path:
            return None, f"Error: Failed to generate timeline: {risk_tags}"
        # Copy the chart out of the per-request temp dir so it is still readable by the
        # Gradio image component after the finally block removes temp_dir
        local_chart_path = os.path.join(tempfile.gettempdir(), os.path.basename(file_path))
        shutil.copy(file_path, local_chart_path)
        # Calculate project metrics
        df = pd.read_csv(boq_file_path)
        estimated_duration = sum(df["Duration"])
        ai_plan_score = min(100, max(0, 100 - (estimated_duration / 100)))
        logger.debug(f"Estimated duration: {estimated_duration}, AI plan score: {ai_plan_score}")
        # Create Salesforce record
        record_id = send_to_salesforce(
            project_title=project_title,
            gantt_chart_url="",
            ai_plan_score=ai_plan_score,
            estimated_duration=estimated_duration,
            status="Draft",
            record_id=None,
            location=location,
            weather_type=weather
        )
        if not record_id:
            return None, f"Error: Failed to create Salesforce record - check logs for details\n\n=== RISK ANALYSIS ===\n\n{risk_tags}"
        # Upload BOQ file to Salesforce
        work_items_id = upload_file_to_salesforce(boq_file_path, "Boq_data.csv", record_id)
        if not work_items_id:
            logger.warning("Failed to upload BOQ file, but proceeding with record creation")
        # Generate and upload PDF report
        record_data = {
            "project_title": project_title,
            "estimated_duration": estimated_duration,
            "ai_plan_score": ai_plan_score,
            "status": "Draft",
            "location": location,
            "weather": weather,
            "workforce_size": workforce,
            "risk_tags": risk_tags,
        }
        pdf_file = generate_pdf(record_data)
        if not pdf_file:
            logger.warning("Failed to generate PDF, but proceeding with record creation")
        pdf_content_id, pdf_url = None, None
        if pdf_file:
            pdf_content_id, pdf_url = upload_pdf_to_salesforce(pdf_file, project_title, record_id)
            if not pdf_content_id:
                logger.warning("Failed to upload PDF, but proceeding with record creation")
        # Update record with PDF URL
        update_result = send_to_salesforce(
            project_title=project_title,
            gantt_chart_url=pdf_url if pdf_url else "",
            ai_plan_score=ai_plan_score,
            estimated_duration=estimated_duration,
            status="Draft",
            record_id=record_id,
            location=location,
            weather_type=weather,
            work_items_id=work_items_id if work_items_id else ""
        )
        if not update_result:
            logger.warning("Failed to update record with PDF URL, but record was created")
        # Upload Gantt chart image
        image_content_id = upload_file_to_salesforce(file_path, f"{project_title}_Gantt_Chart.png", record_id)
        image_url = None
        if image_content_id:
            sf = get_salesforce_connection()
            if sf:
                image_url = f"https://{sf.sf_instance}/sfc/servlet.shepherd/version/download/{image_content_id}"
                logger.debug(f"Generated image URL: {image_url}")
        # Format output message
        output_message = (
            f"=== PROJECT SUMMARY ===\n\n"
            f"Project: {project_title}\n"
            f"Location: {location}\n"
            f"Weather: {weather}\n"
            f"Workforce Size: {workforce}\n"
            f"Estimated Duration: {estimated_duration} days\n"
            f"AI Plan Score: {ai_plan_score:.1f}%\n\n"
            f"Salesforce Record ID: {record_id}\n\n"
            f"=== RISK ANALYSIS ===\n\n"
            f"{risk_tags}"
        )
        logger.info("Gradio interface completed successfully.")
        # Return the local chart path rather than the Salesforce URL: the Salesforce download
        # link requires an authenticated session, which the image component does not have
        return local_chart_path, output_message
    except Exception as e:
        logger.error(f"Error in Gradio interface: {str(e)}", exc_info=True)
        return None, f"Error in Gradio interface: {str(e)}"
    finally:
        if temp_dir and os.path.exists(temp_dir):
            shutil.rmtree(temp_dir)
            logger.debug(f"Cleaned up temporary directory: {temp_dir}")
# Create Gradio interface
demo = gr.Blocks(theme="default")
with demo:
    gr.Markdown("## AI Civil Work Planner")
    gr.Markdown("Generate a project timeline (Gantt chart) and risk analysis based on BOQ data and site parameters.")
    with gr.Row():
        with gr.Column():
            boq_file = gr.File(label="Upload BOQ Data (CSV format)", file_types=[".csv"])
            weather = gr.Dropdown(label="Weather Condition",
                                  choices=["Sunny", "Rainy", "Cloudy"],
                                  value="Sunny")
            workforce = gr.Number(label="Workforce Size",
                                  value=10,
                                  precision=0,
                                  minimum=1,
                                  maximum=100,
                                  step=1)
            location = gr.Textbox(label="Location",
                                  placeholder="Enter project location")
            project_title = gr.Textbox(label="Project Title",
                                       placeholder="Enter project title")
            submit_btn = gr.Button("Generate Project Plan", variant="primary")
        with gr.Column():
            output_image = gr.Image(label="Gantt Chart",
                                    type="filepath")
            risk_tags = gr.Textbox(label="Project Summary and Risk Analysis",
                                   lines=20,
                                   max_lines=50)
    submit_btn.click(
        fn=gradio_interface,
        inputs=[boq_file, weather, workforce, location, project_title],
        outputs=[output_image, risk_tags],
    )
# Create a FastAPI app with CORS support
app = FastAPI()
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
# Mount directory for temporary files
app.mount("/static", StaticFiles(directory=tempfile.gettempdir()), name="static")
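# Files placed directly in the system temp directory are served at /static/<file name>; the API
# handler below copies the generated chart there so its fallback URL keeps working after cleanup.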
# Health check endpoint
@app.get("/health")
async def health_check():
    return {"status": "healthy"}
# FastAPI endpoint for processing BOQ files
@app.post("/api/gradio_interface")
async def api_gradio_interface(
    boq_file: UploadFile = File(...),
    weather: str = Form(...),
    workforce: int = Form(...),
    location: str = Form(...),
    project_title: str = Form(...)
):
    upload_dir = None
    chart_dir = None
    try:
        logger.info("Starting api_gradio_interface...")
        # Persist the uploaded CSV to its own temp dir, kept separate from the chart's temp dir
        # so that both can be cleaned up in the finally block
        upload_dir = tempfile.mkdtemp()
        boq_file_path = os.path.join(upload_dir, boq_file.filename)
        with open(boq_file_path, "wb") as f:
            f.write(boq_file.file.read())
        file_path, risk_tags, chart_dir = generate_project_timeline(boq_file_path, weather, workforce, location, project_title)
        if not file_path:
            return JSONResponse({"error": f"Failed to generate timeline: {risk_tags}"}, status_code=400)
        # Copy the chart into the mounted /static directory so the fallback URL below still
        # resolves after the temp dirs are removed
        static_chart_path = os.path.join(tempfile.gettempdir(), os.path.basename(file_path))
        shutil.copy(file_path, static_chart_path)
        df = pd.read_csv(boq_file_path)
        estimated_duration = sum(df["Duration"])
        ai_plan_score = min(100, max(0, 100 - (estimated_duration / 100)))
        record_id = send_to_salesforce(
            project_title=project_title,
            gantt_chart_url="",
            ai_plan_score=ai_plan_score,
            estimated_duration=estimated_duration,
            status="Draft",
            record_id=None,
            location=location,
            weather_type=weather
        )
        if not record_id:
            return JSONResponse({
                "error": "Failed to create Salesforce record",
                "text": f"Risk Analysis:\n\n{risk_tags}"
            }, status_code=500)
        work_items_id = upload_file_to_salesforce(boq_file_path, "Boq_data.csv", record_id)
        record_data = {
            "project_title": project_title,
            "estimated_duration": estimated_duration,
            "ai_plan_score": ai_plan_score,
            "status": "Draft",
            "location": location,
            "weather": weather,
            "workforce_size": workforce,
            "risk_tags": risk_tags,
        }
        pdf_file = generate_pdf(record_data)
        pdf_content_id, pdf_url = None, None
        if pdf_file:
            pdf_content_id, pdf_url = upload_pdf_to_salesforce(pdf_file, project_title, record_id)
        update_result = send_to_salesforce(
            project_title=project_title,
            gantt_chart_url=pdf_url if pdf_url else "",
            ai_plan_score=ai_plan_score,
            estimated_duration=estimated_duration,
            status="Draft",
            record_id=record_id,
            location=location,
            weather_type=weather,
            work_items_id=work_items_id if work_items_id else ""
        )
        if not update_result:
            logger.warning("Failed to update record with PDF URL, but record was created")
        image_content_id = upload_file_to_salesforce(file_path, f"{project_title}_Gantt_Chart.png", record_id)
        image_url = None
        if image_content_id:
            sf = get_salesforce_connection()
            if sf:
                image_url = f"https://{sf.sf_instance}/sfc/servlet.shepherd/version/download/{image_content_id}"
        output_message = (
            f"=== PROJECT SUMMARY ===\n\n"
            f"Project: {project_title}\n"
            f"Location: {location}\n"
            f"Weather: {weather}\n"
            f"Workforce Size: {workforce}\n"
            f"Estimated Duration: {estimated_duration} days\n"
            f"AI Plan Score: {ai_plan_score:.1f}%\n\n"
            f"Salesforce Record ID: {record_id}\n\n"
            f"=== RISK ANALYSIS ===\n\n"
            f"{risk_tags}"
        )
        return JSONResponse({
            "image": image_url if image_url else f"/static/{os.path.basename(file_path)}",
            "text": output_message
        })
    except Exception as e:
        logger.error(f"Error in API gradio interface: {str(e)}", exc_info=True)
        return JSONResponse({"error": f"Error in API gradio interface: {str(e)}"}, status_code=500)
    finally:
        for d in (upload_dir, chart_dir):
            if d and os.path.exists(d):
                shutil.rmtree(d)
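# Illustrative call to the endpoint above, against the same server as the /health example
# (the form field names match the handler's parameters; boq.csv is a hypothetical local file):
#   curl -X POST http://localhost:8000/api/gradio_interface \
#     -F "boq_file=@boq.csv" -F "weather=Rainy" -F "workforce=8" \
#     -F "location=Chennai" -F "project_title=Warehouse Extension"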
if __name__ == "__main__":
    # Run the Gradio UI; the FastAPI routes above are only served when the module is run
    # through an ASGI server such as uvicorn
    demo.launch(server_name="0.0.0.0", server_port=7860)