Commit 93aa8dc · committed by Joash
1 Parent(s): 2130931
Fix model_manager to use actual model inference instead of mock response
Files changed: src/model_manager.py  +26 -20
src/model_manager.py  CHANGED

@@ -65,36 +65,42 @@ class ModelManager:
     def generate_text(self, prompt: str, max_new_tokens: int = 1024) -> str:
         """Generate text from prompt."""
         try:
-            #
-
-
-            - The code is simple and straightforward
-
-            - Improvements:
-            - Consider adding type hints for better code readability
-            - Add input validation for the numbers parameter
-            - Consider using sum() built-in function for better performance
+            # Encode the prompt
+            inputs = self.tokenizer(prompt, return_tensors="pt", truncation=True, max_length=2048)
+            inputs = {k: v.to(self.device) for k, v in inputs.items()}

-
-
-
-
+            # Generate response
+            with torch.no_grad():
+                outputs = self.model.generate(
+                    **inputs,
+                    max_new_tokens=max_new_tokens,
+                    do_sample=True,
+                    temperature=Config.TEMPERATURE,
+                    top_p=Config.TOP_P,
+                    pad_token_id=self.tokenizer.pad_token_id,
+                    eos_token_id=self.tokenizer.eos_token_id,
+                )

-
-
-
+            # Decode and return the generated text
+            generated_text = self.tokenizer.decode(outputs[0], skip_special_tokens=True)
+            # Extract only the generated part (remove the prompt)
+            response = generated_text[len(prompt):].strip()
+
+            return response

         except Exception as e:
             logger.error(f"Error generating text: {str(e)}")
-            # Return a default response in case of error
             return """- Issues:
-            -
+            - Error generating code review
+            - Model inference failed

             - Improvements:
-            -
+            - Please try again
+            - Check model configuration

             - Best Practices:
-            -
+            - Ensure proper model setup
+            - Verify token permissions

             - Security:
             - No immediate concerns"""
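
For context, the patched generate_text assumes the surrounding ModelManager already holds a loaded tokenizer, a causal language model on self.device, and a Config class exposing TEMPERATURE and TOP_P. Below is a minimal, self-contained sketch of that setup so the method can be exercised on its own; the model name, the Config values, the __init__ body, and the pad-token fallback are illustrative assumptions rather than code from this Space, and only generate_text mirrors the patched method.

import logging

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

logger = logging.getLogger(__name__)


class Config:
    # Assumed sampling settings; the real Config lives elsewhere in the repo.
    TEMPERATURE = 0.7
    TOP_P = 0.95


class ModelManager:
    def __init__(self, model_name: str = "gpt2"):  # placeholder model id
        self.device = "cuda" if torch.cuda.is_available() else "cpu"
        self.tokenizer = AutoTokenizer.from_pretrained(model_name)
        self.model = AutoModelForCausalLM.from_pretrained(model_name).to(self.device)
        # Decoder-only checkpoints such as GPT-2 ship without a pad token; reuse EOS
        # so the pad_token_id passed to generate() is not None.
        if self.tokenizer.pad_token_id is None:
            self.tokenizer.pad_token = self.tokenizer.eos_token

    def generate_text(self, prompt: str, max_new_tokens: int = 1024) -> str:
        """Generate text from prompt."""
        try:
            # Encode the prompt and move the tensors to the model's device.
            inputs = self.tokenizer(prompt, return_tensors="pt", truncation=True, max_length=2048)
            inputs = {k: v.to(self.device) for k, v in inputs.items()}

            # Sample a completion without tracking gradients.
            with torch.no_grad():
                outputs = self.model.generate(
                    **inputs,
                    max_new_tokens=max_new_tokens,
                    do_sample=True,
                    temperature=Config.TEMPERATURE,
                    top_p=Config.TOP_P,
                    pad_token_id=self.tokenizer.pad_token_id,
                    eos_token_id=self.tokenizer.eos_token_id,
                )

            # Decode, then strip the echoed prompt so only the new text is returned.
            generated_text = self.tokenizer.decode(outputs[0], skip_special_tokens=True)
            return generated_text[len(prompt):].strip()
        except Exception as e:
            logger.error(f"Error generating text: {str(e)}")
            # Trimmed fallback; the patched method returns the full multi-section string.
            return "- Issues:\n- Error generating code review\n- Model inference failed"


if __name__ == "__main__":
    manager = ModelManager()
    print(manager.generate_text("Review this code:\ndef add(a, b):\n    return a + b\n", max_new_tokens=64))

Reusing the EOS token as the pad token is a common workaround for decoder-only models; the patched method passes pad_token_id explicitly, so ensuring it is set avoids forwarding None to generate().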