import gradio as gr
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
# Initialize model and tokenizer (preloading them for quicker response)
model_name = "distilgpt2"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)
model.eval()  # inference only; disables dropout
# GPT-2 models have no pad token; reuse the EOS token to avoid padding warnings
tokenizer.pad_token = tokenizer.eos_token
model.config.pad_token_id = tokenizer.eos_token_id
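# Optional sketch (assumes a CUDA-capable torch build): moving the model to GPU
# speeds up generation; the tokenized inputs would then need .to(device) as well.
# device = "cuda" if torch.cuda.is_available() else "cpu"
# model.to(device)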
# Prompt template that combines the structured inputs with the expected output labels
PROMPT_TEMPLATE = """You are an AI coach for construction supervisors. Based on the following inputs, generate a daily checklist, focus suggestions, and a motivational quote. Format your response with clear labels as follows:
Checklist:
- {milestones_list}
Suggestions:
- {suggestions_list}
Quote:
- Your motivational quote here
Inputs:
Role: {role}
Project: {project_id}
Milestones: {milestones}
Reflection: {reflection}
"""
# Generate the checklist, suggestions, and quote from the form inputs
def generate_outputs(role, project_id, milestones, reflection):
    # Validate inputs to ensure no missing fields
    if not all([role, project_id, milestones, reflection]):
        return "Error: All fields are required.", "", ""
    # Build a bulleted list from the comma-separated milestones
    milestones_list = "\n- ".join([m.strip() for m in milestones.split(",")])
    # Rule-based suggestions keyed on keywords in the reflection log
    suggestions_list = ""
    if "delays" in reflection.lower():
        suggestions_list = "- Consider adjusting timelines to accommodate delays.\n- Communicate delays to all relevant stakeholders."
    elif "weather" in reflection.lower():
        suggestions_list = "- Ensure the team has rain gear.\n- Monitor weather updates for possible further delays."
    elif "equipment" in reflection.lower():
        suggestions_list = "- Inspect all equipment to ensure no malfunctions.\n- Schedule maintenance if necessary."
    # Create the final prompt from the template
    prompt = PROMPT_TEMPLATE.format(
        role=role,
        project_id=project_id,
        milestones=milestones,
        reflection=reflection,
        milestones_list=milestones_list,
        suggestions_list=suggestions_list
    )
    # Tokenize the prompt for model processing
    inputs = tokenizer(prompt, return_tensors="pt", max_length=512, truncation=True, padding=True)
    # Generate a response; pass the attention mask along with the input IDs
    with torch.no_grad():
        outputs = model.generate(
            **inputs,
            max_new_tokens=200,  # cap the continuation length, not prompt + continuation
            num_return_sequences=1,
            no_repeat_ngram_size=2,
            do_sample=True,
            top_p=0.9,
            temperature=0.8,
            pad_token_id=tokenizer.eos_token_id
        )
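    # Sampling notes: do_sample with top_p=0.9 (nucleus sampling) and
    # temperature=0.8 trades repeatability for varied phrasing; lower the
    # temperature for more deterministic checklists.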
    # Decode only the newly generated tokens; decoding the full sequence would
    # re-match the labels already present in the prompt itself
    prompt_length = inputs["input_ids"].shape[-1]
    generated_text = tokenizer.decode(outputs[0][prompt_length:], skip_special_tokens=True)
    # Parse the output into labeled sections, with fallbacks when a label is missing
    checklist = "No checklist generated."
    suggestions = "No suggestions generated."
    quote = "No quote generated."
    checklist_pos = generated_text.find("Checklist:")
    suggestions_pos = generated_text.find("Suggestions:")
    quote_pos = generated_text.find("Quote:")
    if checklist_pos != -1:
        end = suggestions_pos if suggestions_pos != -1 else len(generated_text)
        checklist = generated_text[checklist_pos + len("Checklist:"):end].strip()
    if suggestions_pos != -1:
        end = quote_pos if quote_pos != -1 else len(generated_text)
        suggestions = generated_text[suggestions_pos + len("Suggestions:"):end].strip()
    if quote_pos != -1:
        quote = generated_text[quote_pos + len("Quote:"):].strip()
    # Return structured outputs
    return checklist, suggestions, quote
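# Example invocation (hypothetical values, for illustration only):
# checklist, suggestions, quote = generate_outputs(
#     "Supervisor", "PRJ-042", "pour foundation, frame walls",
#     "Heavy rain caused delays on site today."
# )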
# Gradio interface for fast user interaction
def create_interface():
    with gr.Blocks() as demo:
        gr.Markdown("# Construction Supervisor AI Coach")
        gr.Markdown("Enter details to generate a daily checklist, focus suggestions, and a motivational quote.")
        with gr.Row():
            role = gr.Dropdown(choices=["Supervisor", "Foreman", "Project Manager"], label="Role")
            project_id = gr.Textbox(label="Project ID")
            milestones = gr.Textbox(label="Milestones (comma-separated KPIs)")
            reflection = gr.Textbox(label="Reflection Log", lines=5)
        with gr.Row():
            submit = gr.Button("Generate")
            clear = gr.Button("Clear")
        checklist_output = gr.Textbox(label="Daily Checklist")
        suggestions_output = gr.Textbox(label="Focus Suggestions")
        quote_output = gr.Textbox(label="Motivational Quote")
        submit.click(
            fn=generate_outputs,
            inputs=[role, project_id, milestones, reflection],
            outputs=[checklist_output, suggestions_output, quote_output]
        )
        # Reset the input fields (None clears the dropdown selection)
        clear.click(
            fn=lambda: (None, "", "", ""),
            inputs=None,
            outputs=[role, project_id, milestones, reflection]
        )
    return demo
if __name__ == "__main__":
    demo = create_interface()
    demo.launch()
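# To run locally: `python app.py`, then open the printed URL (Gradio defaults
# to http://127.0.0.1:7860). On Hugging Face Spaces the app launches automatically.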