geethareddy committed on
Commit
92b443e
·
verified ·
1 Parent(s): 9f073b0

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +27 -19
app.py CHANGED
@@ -20,16 +20,18 @@ class CoachingInput(BaseModel):
20
 
21
  # Define model path (absolute path in the container)
22
  model_path = "/app/fine-tuned-construction-llm"
23
-
24
- # Verify the model directory exists
25
- if not os.path.isdir(model_path):
26
- logger.error(f"Model directory not found: {model_path}")
27
- raise Exception(f"Model directory not found: {model_path}")
28
 
29
  # Load model and tokenizer
30
  try:
31
- model = AutoModelForCausalLM.from_pretrained(model_path, local_files_only=True)
32
- tokenizer = AutoTokenizer.from_pretrained(model_path, local_files_only=True)
 
 
 
 
 
 
33
  logger.info("Model and tokenizer loaded successfully")
34
  except Exception as e:
35
  logger.error(f"Failed to load model or tokenizer: {str(e)}")
@@ -60,18 +62,24 @@ async def generate_coaching(data: CoachingInput):
60
  # Decode and parse response
61
  response_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
62
 
63
- # Simulate structured output (replace with actual parsing logic based on model output)
64
- # This assumes the model outputs a JSON-like string; adjust based on fine-tuning
65
- try:
66
- response_json = json.loads(response_text)
67
- except json.JSONDecodeError:
68
- # Fallback: Construct a default response if parsing fails
69
- response_json = {
70
- "checklist": ["Inspect safety equipment", "Review milestone progress"],
71
- "tips": ["Prioritize team communication", "Check weather updates"],
72
- "quote": "Every step forward counts!"
73
- }
74
- logger.warning("Failed to parse model output as JSON, using default response")
 
 
 
 
 
 
75
 
76
  return response_json
77
 
 
20
 
21
  # Define model path (absolute path in the container)
22
  model_path = "/app/fine-tuned-construction-llm"
23
+ fallback_model = "gpt2" # Fallback to a pre-trained model if local model is unavailable
 
 
 
 
24
 
25
  # Load model and tokenizer
26
  try:
27
+ if os.path.isdir(model_path):
28
+ logger.info(f"Loading local model from {model_path}")
29
+ model = AutoModelForCausalLM.from_pretrained(model_path, local_files_only=True)
30
+ tokenizer = AutoTokenizer.from_pretrained(model_path, local_files_only=True)
31
+ else:
32
+ logger.warning(f"Model directory not found: {model_path}. Falling back to pre-trained model: {fallback_model}")
33
+ model = AutoModelForCausalLM.from_pretrained(fallback_model)
34
+ tokenizer = AutoTokenizer.from_pretrained(fallback_model)
35
  logger.info("Model and tokenizer loaded successfully")
36
  except Exception as e:
37
  logger.error(f"Failed to load model or tokenizer: {str(e)}")
 
62
  # Decode and parse response
63
  response_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
64
 
65
+ # Since gpt2 may not output JSON, parse the response manually or use fallback
66
+ # This is a simplified parsing logic; adjust based on your model's output format
67
+ if not response_text.startswith("{"):
68
+ checklist = ["Inspect safety equipment", "Review milestone progress"]
69
+ tips = ["Prioritize team communication", "Check weather updates"]
70
+ quote = "Every step forward counts!"
71
+ response_json = {"checklist": checklist, "tips": tips, "quote": quote}
72
+ logger.warning("Model output is not JSON, using default response")
73
+ else:
74
+ try:
75
+ response_json = json.loads(response_text)
76
+ except json.JSONDecodeError:
77
+ response_json = {
78
+ "checklist": ["Inspect safety equipment", "Review milestone progress"],
79
+ "tips": ["Prioritize team communication", "Check weather updates"],
80
+ "quote": "Every step forward counts!"
81
+ }
82
+ logger.warning("Failed to parse model output as JSON, using default response")
83
 
84
  return response_json
85