Update app.py
app.py CHANGED
@@ -20,16 +20,18 @@ class CoachingInput(BaseModel):
 
 # Define model path (absolute path in the container)
 model_path = "/app/fine-tuned-construction-llm"
-
-# Verify the model directory exists
-if not os.path.isdir(model_path):
-    logger.error(f"Model directory not found: {model_path}")
-    raise Exception(f"Model directory not found: {model_path}")
+fallback_model = "gpt2"  # Fallback to a pre-trained model if local model is unavailable
 
 # Load model and tokenizer
 try:
-    model = AutoModelForCausalLM.from_pretrained(model_path)
-    tokenizer = AutoTokenizer.from_pretrained(model_path)
+    if os.path.isdir(model_path):
+        logger.info(f"Loading local model from {model_path}")
+        model = AutoModelForCausalLM.from_pretrained(model_path, local_files_only=True)
+        tokenizer = AutoTokenizer.from_pretrained(model_path, local_files_only=True)
+    else:
+        logger.warning(f"Model directory not found: {model_path}. Falling back to pre-trained model: {fallback_model}")
+        model = AutoModelForCausalLM.from_pretrained(fallback_model)
+        tokenizer = AutoTokenizer.from_pretrained(fallback_model)
     logger.info("Model and tokenizer loaded successfully")
 except Exception as e:
     logger.error(f"Failed to load model or tokenizer: {str(e)}")
@@ -60,18 +62,24 @@ async def generate_coaching(data: CoachingInput):
     # Decode and parse response
     response_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
 
-    # …
-    # This …
-    …
-    …
-    …
-    …
-    response_json = {
-        …
-        …
-        …
-        …
-    }
+    # Since gpt2 may not output JSON, parse the response manually or use fallback
+    # This is a simplified parsing logic; adjust based on your model's output format
+    if not response_text.startswith("{"):
+        checklist = ["Inspect safety equipment", "Review milestone progress"]
+        tips = ["Prioritize team communication", "Check weather updates"]
+        quote = "Every step forward counts!"
+        response_json = {"checklist": checklist, "tips": tips, "quote": quote}
+        logger.warning("Model output is not JSON, using default response")
+    else:
+        try:
+            response_json = json.loads(response_text)
+        except json.JSONDecodeError:
+            response_json = {
+                "checklist": ["Inspect safety equipment", "Review milestone progress"],
+                "tips": ["Prioritize team communication", "Check weather updates"],
+                "quote": "Every step forward counts!"
+            }
+            logger.warning("Failed to parse model output as JSON, using default response")
 
     return response_json
 
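For readers adapting this pattern, the load-with-fallback block in the first hunk can be wrapped in a reusable helper. This is a minimal sketch, not the code in this commit; it assumes transformers is installed and that downloading the fallback model from the Hub is acceptable at startup, and the helper name load_model_and_tokenizer is illustrative:

import os
import logging

from transformers import AutoModelForCausalLM, AutoTokenizer

logger = logging.getLogger(__name__)

def load_model_and_tokenizer(model_path: str, fallback_model: str = "gpt2"):
    """Load the fine-tuned model if its directory exists, else fall back to a Hub model."""
    if os.path.isdir(model_path):
        # Local fine-tuned weights: load without touching the network.
        logger.info(f"Loading local model from {model_path}")
        model = AutoModelForCausalLM.from_pretrained(model_path, local_files_only=True)
        tokenizer = AutoTokenizer.from_pretrained(model_path, local_files_only=True)
    else:
        # Fallback: download the base model from the Hugging Face Hub.
        logger.warning(f"Model directory not found: {model_path}; falling back to {fallback_model}")
        model = AutoModelForCausalLM.from_pretrained(fallback_model)
        tokenizer = AutoTokenizer.from_pretrained(fallback_model)
    return model, tokenizer

# Usage (hypothetical path, mirroring app.py):
# model, tokenizer = load_model_and_tokenizer("/app/fine-tuned-construction-llm")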
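The parsing branch added in the second hunk is the behavioral core of the change and can be checked in isolation. Below is a sketch that factors the same logic into a standalone helper; it assumes only the standard library, and the names parse_model_output and DEFAULT_RESPONSE are illustrative rather than part of this commit:

import json
import logging

logger = logging.getLogger(__name__)

# Default payload mirroring the fallback values hard-coded in app.py above.
DEFAULT_RESPONSE = {
    "checklist": ["Inspect safety equipment", "Review milestone progress"],
    "tips": ["Prioritize team communication", "Check weather updates"],
    "quote": "Every step forward counts!",
}

def parse_model_output(response_text: str) -> dict:
    """Return the model's JSON payload, or the default payload if parsing fails."""
    text = response_text.strip()
    if not text.startswith("{"):
        # Plain-text completions (typical for base gpt2) take the fallback path.
        logger.warning("Model output is not JSON, using default response")
        return DEFAULT_RESPONSE
    try:
        return json.loads(text)
    except json.JSONDecodeError:
        logger.warning("Failed to parse model output as JSON, using default response")
        return DEFAULT_RESPONSE

if __name__ == "__main__":
    logging.basicConfig(level=logging.WARNING)
    # Falls back: base gpt2 rarely emits JSON.
    print(parse_model_output("Sure, here are some coaching tips..."))
    # Parses: a well-formed JSON object passes through unchanged.
    print(parse_model_output('{"checklist": [], "tips": [], "quote": "Keep going"}'))

One caveat worth noting when reusing this: the endpoint code calls json.loads, so app.py must import json at module level for the new branch to work.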