import gradio as gr
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from simple_salesforce import Salesforce
import os
import uuid
import tempfile

# Salesforce configuration
SF_USERNAME = os.getenv('SF_USERNAME', 'your_salesforce_username')
SF_PASSWORD = os.getenv('SF_PASSWORD', 'your_salesforce_password')
SF_SECURITY_TOKEN = os.getenv('SF_SECURITY_TOKEN', 'your_salesforce_security_token')
SF_DOMAIN = 'login'  # Use 'test' for sandbox or 'login' for production

# Initialize Salesforce connection (fail gracefully if credentials are missing
# so the app can still be tested locally without a Salesforce org)
try:
    sf = Salesforce(
        username=SF_USERNAME,
        password=SF_PASSWORD,
        security_token=SF_SECURITY_TOKEN,
        domain=SF_DOMAIN
    )
except Exception as e:
    print(f"Warning: could not connect to Salesforce: {e}")
    sf = None

# Initialize model and tokenizer (preloading them for quicker response)
model_name = "distilgpt2"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)
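
# Optional: move the model to GPU when one is available. Left commented out as
# a sketch; the input tensors built in generate_outputs would then also need
# .to(model.device), and distilgpt2 is small enough to run comfortably on CPU.
# if torch.cuda.is_available():
#     model = model.to("cuda")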

# Set pad_token_id to eos_token_id to avoid warnings
tokenizer.pad_token = tokenizer.eos_token
model.config.pad_token_id = tokenizer.eos_token_id

# Define a more contextual prompt template
PROMPT_TEMPLATE = """You are an AI coach for construction supervisors. Based on the following inputs, generate a daily checklist, focus suggestions, and a motivational quote. Format your response with clear labels as follows:

Checklist:
- {milestones_list}

Suggestions:
- {suggestions_list}

Quote:
- Your motivational quote here

Inputs:
Role: {role}
Project: {project_id}
Milestones: {milestones}
Reflection: {reflection}
"""

# Function to generate outputs based on inputs
def generate_outputs(role, project_id, milestones, reflection):
    # Validate inputs to ensure no missing fields
    if not all([role, project_id, milestones, reflection]):
        return "Error: All fields are required.", "", ""
    
    # Create prompt from template
    milestones_list = "\n- ".join([m.strip() for m in milestones.split(",")])
    
    # Simple keyword-based focus suggestions drawn from the reflection log.
    # The template supplies the leading "- ", so each string starts bare and
    # joins later items with "\n- ", matching milestones_list above.
    if "delays" in reflection.lower():
        suggestions_list = "Consider adjusting timelines to accommodate delays.\n- Communicate delays to all relevant stakeholders."
    elif "weather" in reflection.lower():
        suggestions_list = "Ensure the team has rain gear.\n- Monitor weather updates for possible further delays."
    elif "equipment" in reflection.lower():
        suggestions_list = "Inspect all equipment to ensure no malfunctions.\n- Schedule maintenance if necessary."
    else:
        suggestions_list = "Review today's milestones and confirm crew assignments."

    # Create final prompt
    prompt = PROMPT_TEMPLATE.format(
        role=role,
        project_id=project_id,
        milestones=milestones,
        reflection=reflection,
        milestones_list=milestones_list,
        suggestions_list=suggestions_list
    )

    # Tokenize inputs for model processing
    inputs = tokenizer(prompt, return_tensors="pt", max_length=512, truncation=True, padding=True)

    # Generate a response from the model. max_new_tokens (rather than
    # max_length) guarantees room for text beyond the prompt, and passing the
    # attention mask avoids ambiguity since pad and eos tokens are identical.
    with torch.no_grad():
        outputs = model.generate(
            inputs['input_ids'],
            attention_mask=inputs['attention_mask'],
            max_new_tokens=200,
            num_return_sequences=1,
            no_repeat_ngram_size=2,
            do_sample=True,
            top_p=0.9,
            temperature=0.8,
            pad_token_id=tokenizer.eos_token_id
        )
    
    # Decode the response
    generated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)

    # Parse the output into labeled sections, falling back to defaults when a
    # label is missing. The decoded text still contains the prompt, so each
    # slice is bounded by the next label, and the quote section is truncated
    # at the trailing "Inputs:" block rather than running to the end.
    checklist = "No checklist generated."
    suggestions = "No suggestions generated."
    quote = "No quote generated."

    if "Checklist:" in generated_text:
        checklist_start = generated_text.find("Checklist:") + len("Checklist:")
        suggestions_start = generated_text.find("Suggestions:")
        end = suggestions_start if suggestions_start != -1 else len(generated_text)
        checklist = generated_text[checklist_start:end].strip()

    if "Suggestions:" in generated_text:
        suggestions_start = generated_text.find("Suggestions:") + len("Suggestions:")
        quote_start = generated_text.find("Quote:")
        end = quote_start if quote_start != -1 else len(generated_text)
        suggestions = generated_text[suggestions_start:end].strip()

    if "Quote:" in generated_text:
        quote_start = generated_text.find("Quote:") + len("Quote:")
        inputs_start = generated_text.find("Inputs:", quote_start)
        end = inputs_start if inputs_start != -1 else len(generated_text)
        quote = generated_text[quote_start:end].strip()

    # Generate a file with the processed output
    output_content = f"""Checklist:
{checklist}

Suggestions:
{suggestions}

Quote:
{quote}
"""
    # Create a temporary file
    temp_file = tempfile.NamedTemporaryFile(delete=False, suffix='.txt', mode='w', encoding='utf-8')
    temp_file.write(output_content)
    temp_file.close()
    
    # Simulate a download link (in production, upload to a file hosting service
    # such as Salesforce Content or AWS S3 and store the returned URL instead)
    file_name = f"supervisor_coaching_{uuid.uuid4()}.txt"
    final_path = os.path.join(tempfile.gettempdir(), file_name)
    os.rename(temp_file.name, final_path)
    download_url = final_path  # Placeholder path, not a real hosted URL
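
    # A minimal sketch of the production upload hinted at above: store the file
    # in Salesforce as a ContentVersion record (standard object) and use its
    # record URL as the download link. Left commented out; assumes a live
    # connection and permission to create ContentVersion records.
    # import base64
    # with open(final_path, 'rb') as f:
    #     version_data = base64.b64encode(f.read()).decode('utf-8')
    # sf.ContentVersion.create({
    #     'Title': file_name,
    #     'PathOnClient': file_name,
    #     'VersionData': version_data,
    # })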

    # Save to the Salesforce Supervisor_AI_Coaching__c custom object, if connected
    if sf is not None:
        try:
            sf.Supervisor_AI_Coaching__c.create({
                'Role__c': role,
                'Project_ID__c': project_id,
                'Milestones__c': milestones,
                'Reflection__c': reflection,
                'Checklist__c': checklist,
                'Suggestions__c': suggestions,
                'Quote__c': quote,
                'Download_Link__c': download_url
            })
        except Exception as e:
            print(f"Error saving to Salesforce: {str(e)}")
    else:
        print("Skipping Salesforce save: no connection available.")

    # Return structured outputs
    return checklist, suggestions, quote

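# Example direct invocation with hypothetical values (bypasses the UI; handy
# for smoke-testing the generation and parsing logic from a REPL):
#   checklist, suggestions, quote = generate_outputs(
#       "Supervisor", "PRJ-001",
#       "Pour foundation, Frame first floor",
#       "Rain caused weather delays on the east wing",
#   )
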
# Gradio interface for fast user interaction
def create_interface():
    with gr.Blocks() as demo:
        gr.Markdown("# Construction Supervisor AI Coach")
        gr.Markdown("Enter details to generate a daily checklist, focus suggestions, and a motivational quote.")
        
        with gr.Row():
            role = gr.Dropdown(choices=["Supervisor", "Foreman", "Project Manager"], label="Role")
            project_id = gr.Textbox(label="Project ID")
        
        milestones = gr.Textbox(label="Milestones (comma-separated KPIs)")
        reflection = gr.Textbox(label="Reflection Log", lines=5)
        
        with gr.Row():
            submit = gr.Button("Generate")
            clear = gr.Button("Clear")
        
        checklist_output = gr.Textbox(label="Daily Checklist")
        suggestions_output = gr.Textbox(label="Focus Suggestions")
        quote_output = gr.Textbox(label="Motivational Quote")
        
        submit.click(
            fn=generate_outputs,
            inputs=[role, project_id, milestones, reflection],
            outputs=[checklist_output, suggestions_output, quote_output]
        )
        clear.click(
            fn=lambda: (None, "", "", "", "", "", ""),
            inputs=None,
            outputs=[role, project_id, milestones, reflection,
                     checklist_output, suggestions_output, quote_output]
        )
    
    return demo

if __name__ == "__main__":
    demo = create_interface()
    demo.launch()
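
# To run locally (assumes this file is saved as app.py and the dependencies
# gradio, torch, transformers, and simple-salesforce are installed):
#   export SF_USERNAME=... SF_PASSWORD=... SF_SECURITY_TOKEN=...
#   python app.py
# Gradio serves the interface on http://127.0.0.1:7860 by default.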