neerajkalyank commited on
Commit
e4ced37
·
verified ·
1 Parent(s): ee1e024

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +109 -0
app.py ADDED
@@ -0,0 +1,109 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import gradio as gr
import torch  # NOTE(review): imported but not referenced in this file — confirm before removing
from transformers import AutoModelForCausalLM, AutoTokenizer
from joblib import Memory
import hashlib  # NOTE(review): imported but not referenced in this file — confirm before removing
import datetime

# Initialize cache
# joblib on-disk memo cache; generate_outputs results are persisted under ./cache.
cache_dir = "./cache"
memory = Memory(cache_dir, verbose=0)

# Load fine-tuned model and tokenizer (replace with your model path)
# Loaded once at import time; downloads from the Hugging Face Hub on first run.
model_name = "distilgpt2" # Placeholder; use your fine-tuned model from Hugging Face Hub
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)

# Define prompt template
# Filled via str.format with the four UI fields before tokenization.
PROMPT_TEMPLATE = """Role: {role}
Project: {project_id}
Milestones: {milestones}
Reflection: {reflection}
Generate a daily checklist, focus suggestions, and a motivational quote for a construction supervisor."""

# Cache reset check
# Marker date for the once-per-day cache wipe performed by reset_cache_if_new_day().
last_reset = datetime.date.today()
27
def reset_cache_if_new_day():
    """Wipe the joblib disk cache the first time it is called on a new day.

    Compares today's date against the module-level ``last_reset`` marker;
    when the calendar day has advanced, clears ``memory`` and moves the
    marker forward. No-op otherwise.
    """
    global last_reset
    current_day = datetime.date.today()
    if current_day <= last_reset:
        return
    memory.clear()
    last_reset = current_day
34
# Cached generation function
@memory.cache
def _generate_cached(prompt):
    """Run the language model on *prompt* and return the decoded text.

    Kept as a separate pure function so joblib memoizes on the prompt string
    alone. The daily cache-reset call lives in the uncached wrapper below:
    code inside a @memory.cache'd body never executes on a cache hit, so the
    original placement of reset_cache_if_new_day() here silently skipped the
    reset for any repeated input.
    """
    inputs = tokenizer(prompt, return_tensors="pt", max_length=512, truncation=True)
    outputs = model.generate(
        inputs["input_ids"],
        attention_mask=inputs["attention_mask"],  # explicit mask avoids HF warning/ambiguity
        pad_token_id=tokenizer.eos_token_id,  # GPT-2 defines no pad token; use EOS
        max_length=1000,
        num_return_sequences=1,
        no_repeat_ngram_size=2,
        do_sample=True,
        top_p=0.9,
        temperature=0.7,
    )
    return tokenizer.decode(outputs[0], skip_special_tokens=True)


def generate_outputs(role, project_id, milestones, reflection):
    """Generate (checklist, suggestions, quote) for the given form inputs.

    Always returns a 3-tuple of strings — including on validation failure —
    so it matches the three Gradio output components. The original returned a
    single error string on bad input, which broke Gradio's output unpacking.
    """
    # Runs unconditionally (not inside the cached helper) so the once-a-day
    # cache wipe actually happens even when every request is a cache hit.
    reset_cache_if_new_day()

    # Validate inputs
    if not all([role, project_id, milestones, reflection]):
        error = "Error: All fields are required."
        return error, error, error

    # Create prompt
    prompt = PROMPT_TEMPLATE.format(
        role=role,
        project_id=project_id,
        milestones=milestones,
        reflection=reflection,
    )

    generated_text = _generate_cached(prompt)

    # Best-effort parse: split on blank lines into up to three sections.
    sections = generated_text.split("\n\n")
    checklist = sections[0] if len(sections) > 0 else "No checklist generated."
    suggestions = sections[1] if len(sections) > 1 else "No suggestions generated."
    quote = sections[2] if len(sections) > 2 else "No quote generated."

    return checklist, suggestions, quote
73
# Gradio interface
def create_interface():
    """Assemble the Gradio Blocks UI and return the (unlaunched) demo."""
    with gr.Blocks() as demo:
        gr.Markdown("# Construction Supervisor AI Coach")
        gr.Markdown("Enter details to generate a daily checklist, focus suggestions, and a motivational quote.")

        # --- input widgets -------------------------------------------------
        with gr.Row():
            role_input = gr.Dropdown(choices=["Supervisor", "Foreman", "Project Manager"], label="Role")
            project_input = gr.Textbox(label="Project ID")
        milestones_input = gr.Textbox(label="Milestones (comma-separated KPIs)")
        reflection_input = gr.Textbox(label="Reflection Log", lines=5)

        # --- action buttons ------------------------------------------------
        with gr.Row():
            generate_btn = gr.Button("Generate")
            clear_btn = gr.Button("Clear")

        # --- output widgets ------------------------------------------------
        checklist_box = gr.Textbox(label="Daily Checklist")
        suggestions_box = gr.Textbox(label="Focus Suggestions")
        quote_box = gr.Textbox(label="Motivational Quote")

        # --- event wiring --------------------------------------------------
        form_inputs = [role_input, project_input, milestones_input, reflection_input]
        generate_btn.click(
            fn=generate_outputs,
            inputs=form_inputs,
            outputs=[checklist_box, suggestions_box, quote_box],
        )
        # Clear resets the four input widgets to empty values.
        clear_btn.click(
            fn=lambda: ("", "", "", ""),
            inputs=None,
            outputs=form_inputs,
        )

    return demo
107
if __name__ == "__main__":
    # Build the UI and start the local Gradio server.
    create_interface().launch()