Update app.py
app.py CHANGED
@@ -314,23 +314,17 @@ registration_interface = gr.Interface(
 # outputs="text",
 # )
 
-
-
-
-
-
-
-
-
-
-
-
-
-        # Decode the response
-        response = tokenizer.decode(outputs[0], skip_special_tokens=True)
-        return response
-    except Exception as e:
-        return f"An error occurred while generating a response: {e}"
+from transformers import AutoTokenizer, AutoModelForCausalLM
+
+try:
+    tokenizer = AutoTokenizer.from_pretrained("ahmed-7124/dgptAW")
+    model = AutoModelForCausalLM.from_pretrained("ahmed-7124/dgptAW")
+    print("Model and tokenizer loaded successfully!")
+except Exception as e:
+    print(f"Error loading tokenizer or model: {e}")
+    print("Trying with GPT-2 tokenizer as a fallback...")
+    tokenizer = AutoTokenizer.from_pretrained("gpt2")
+    model = AutoModelForCausalLM.from_pretrained("ahmed-7124/dgptAW")
 
 
 
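For context, the lines this commit removes were the tail of a response-generation helper. Below is a minimal sketch of how the newly loaded model and tokenizer would typically be used to produce a reply; the function name, prompt handling, and generation parameters (max_new_tokens, pad_token_id) are illustrative assumptions, not part of the commit. Only the decode call and the error message are taken from the removed lines.

import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

tokenizer = AutoTokenizer.from_pretrained("ahmed-7124/dgptAW")
model = AutoModelForCausalLM.from_pretrained("ahmed-7124/dgptAW")

def generate_response(prompt: str) -> str:
    # Hypothetical helper, reconstructed around the decode call the commit removed.
    try:
        # Tokenize the prompt for the causal LM
        inputs = tokenizer(prompt, return_tensors="pt")
        with torch.no_grad():
            outputs = model.generate(
                **inputs,
                max_new_tokens=100,                    # illustrative limit, an assumption
                pad_token_id=tokenizer.eos_token_id,   # avoids a padding warning for GPT-2-style models
            )
        # Decode the response (this line appears verbatim in the removed code)
        return tokenizer.decode(outputs[0], skip_special_tokens=True)
    except Exception as e:
        return f"An error occurred while generating a response: {e}"

print(generate_response("Hello!"))  # hypothetical prompt

One note on the fallback in the committed code: substituting the gpt2 tokenizer only yields sensible output if ahmed-7124/dgptAW is a GPT-2-family fine-tune sharing GPT-2's vocabulary, which the fallback presumably assumes; a mismatched tokenizer would load without error but decode garbage.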