geethareddy committed on
Commit
736dc08
·
verified ·
1 Parent(s): c7dd8dd

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +138 -0
app.py ADDED
@@ -0,0 +1,138 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import gradio as gr
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
import datetime  # NOTE(review): not referenced anywhere in this file — confirm before removing

# Initialize model and tokenizer (preloading them for quicker response)
# distilgpt2 is a small distilled GPT-2; loading it once at import time
# means the first Gradio request does not pay the download/load cost.
model_name = "distilgpt2"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)

# Set pad_token_id to eos_token_id to avoid warnings
# (GPT-2-family checkpoints ship without a pad token, but the tokenizer
# call in generate_outputs uses padding=True, so one must be defined.)
tokenizer.pad_token = tokenizer.eos_token
model.config.pad_token_id = tokenizer.eos_token_id

# Define a more contextual prompt template
# The template pre-fills the "Checklist:" / "Suggestions:" sections and
# labels each block, because generate_outputs later parses the generated
# text by searching for these exact "Checklist:"/"Suggestions:"/"Quote:"
# markers.
PROMPT_TEMPLATE = """You are an AI coach for construction supervisors. Based on the following inputs, generate a daily checklist, focus suggestions, and a motivational quote. Format your response with clear labels as follows:

Checklist:
- {milestones_list}

Suggestions:
- {suggestions_list}

Quote:
- Your motivational quote here

Inputs:
Role: {role}
Project: {project_id}
Milestones: {milestones}
Reflection: {reflection}
"""
33
+
34
+ # Function to generate outputs based on inputs
35
def build_suggestions(reflection):
    """Return canned focus suggestions keyed off keywords in the reflection log.

    Only the first matching keyword wins (delays, then weather, then
    equipment), mirroring the original if/elif chain. Returns "" when no
    keyword matches, which leaves the Suggestions section of the prompt empty.
    """
    text = reflection.lower()
    if "delays" in text:
        return ("- Consider adjusting timelines to accommodate delays.\n"
                "- Communicate delays to all relevant stakeholders.")
    if "weather" in text:
        return ("- Ensure team has rain gear.\n"
                "- Monitor weather updates for possible further delays.")
    if "equipment" in text:
        return ("- Inspect all equipment to ensure no malfunctions.\n"
                "- Schedule maintenance if necessary.")
    return ""


def extract_section(text, label, next_labels, default):
    """Return the stripped text between `label` and the earliest of `next_labels`.

    Fixes the original parsing bug: str.find returns -1 for a missing
    follow-up label, and the old code sliced with it directly
    (text[start:-1]), silently dropping the last character or returning a
    nonsense span. Here a missing follow-up label means the section runs to
    the end of `text`. Returns `default` when `label` itself is absent or
    the section is empty after stripping.
    """
    start = text.find(label)
    if start == -1:
        return default
    start += len(label)
    end = len(text)
    for nxt in next_labels:
        pos = text.find(nxt, start)
        if pos != -1:
            end = min(end, pos)
    section = text[start:end].strip()
    return section if section else default


def generate_outputs(role, project_id, milestones, reflection):
    """Generate (checklist, suggestions, quote) strings for the Gradio UI.

    Args:
        role: supervisor role chosen in the dropdown.
        project_id: free-text project identifier.
        milestones: comma-separated milestone/KPI list.
        reflection: free-text reflection log; scanned for keywords.

    Returns:
        A 3-tuple of strings (checklist, suggestions, quote). If any input
        is empty, returns ("Error: All fields are required.", "", "").
    """
    # Validate inputs to ensure no missing fields.
    if not all([role, project_id, milestones, reflection]):
        return "Error: All fields are required.", "", ""

    # Build the bullet list and keyword-driven suggestions for the prompt.
    milestones_list = "\n- ".join(m.strip() for m in milestones.split(","))
    suggestions_list = build_suggestions(reflection)

    prompt = PROMPT_TEMPLATE.format(
        role=role,
        project_id=project_id,
        milestones=milestones,
        reflection=reflection,
        milestones_list=milestones_list,
        suggestions_list=suggestions_list,
    )

    # Tokenize for the model; truncate so very long reflections still fit.
    inputs = tokenizer(prompt, return_tensors="pt", max_length=512,
                       truncation=True, padding=True)

    with torch.no_grad():
        outputs = model.generate(
            inputs["input_ids"],
            # Fix: pass the attention mask explicitly — padding is enabled
            # above, and generate() warns (and may mis-attend) without it.
            attention_mask=inputs["attention_mask"],
            # Fix: the original max_length=512 counted the prompt tokens
            # too, so a near-512-token prompt left no room to generate.
            # max_new_tokens budgets only the continuation.
            max_new_tokens=200,
            num_return_sequences=1,
            no_repeat_ngram_size=2,
            do_sample=True,
            top_p=0.9,
            temperature=0.8,
            pad_token_id=tokenizer.eos_token_id,
        )

    generated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)

    # Parse the labeled sections out of the generated text. The prompt
    # itself contains the labels, so these are found even when the model
    # fails to repeat them.
    checklist = extract_section(generated_text, "Checklist:",
                                ("Suggestions:", "Quote:"),
                                "No checklist generated.")
    suggestions = extract_section(generated_text, "Suggestions:",
                                  ("Quote:",),
                                  "No suggestions generated.")
    quote = extract_section(generated_text, "Quote:", (),
                            "No quote generated.")

    return checklist, suggestions, quote
101
+
102
+ # Gradio interface for fast user interaction
103
def create_interface():
    """Build and return the Gradio Blocks UI for the AI coach.

    Layout: role/project inputs, milestone and reflection textboxes, a
    Generate button wired to generate_outputs, and a Clear button that
    resets every widget.

    Returns:
        The gr.Blocks demo object (caller is responsible for .launch()).
    """
    with gr.Blocks() as demo:
        gr.Markdown("# Construction Supervisor AI Coach")
        gr.Markdown("Enter details to generate a daily checklist, focus suggestions, and a motivational quote.")

        with gr.Row():
            role = gr.Dropdown(choices=["Supervisor", "Foreman", "Project Manager"], label="Role")
            project_id = gr.Textbox(label="Project ID")

        milestones = gr.Textbox(label="Milestones (comma-separated KPIs)")
        reflection = gr.Textbox(label="Reflection Log", lines=5)

        with gr.Row():
            submit = gr.Button("Generate")
            clear = gr.Button("Clear")

        checklist_output = gr.Textbox(label="Daily Checklist")
        suggestions_output = gr.Textbox(label="Focus Suggestions")
        quote_output = gr.Textbox(label="Motivational Quote")

        submit.click(
            fn=generate_outputs,
            inputs=[role, project_id, milestones, reflection],
            outputs=[checklist_output, suggestions_output, quote_output],
        )
        # Fix: the original Clear only reset the four input widgets, leaving
        # stale generated text in the three output boxes. Reset everything;
        # the Dropdown is cleared with None rather than "".
        clear.click(
            fn=lambda: (None, "", "", "", "", "", ""),
            inputs=None,
            outputs=[role, project_id, milestones, reflection,
                     checklist_output, suggestions_output, quote_output],
        )

    return demo
135
+
136
# Script entry point: build the UI and serve it (skipped on import).
if __name__ == "__main__":
    create_interface().launch()