ahmed-7124 committed on
Commit
e43ac0f
·
verified ·
1 Parent(s): 6dbf2bb

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +11 -17
app.py CHANGED
@@ -314,23 +314,17 @@ registration_interface = gr.Interface(
314
  # outputs="text",
315
  # )
316
 
317
# Load the model and tokenizer once at module import (network I/O: downloads
# the weights from the Hugging Face Hub on first run, then reuses the cache).
tokenizer = AutoTokenizer.from_pretrained("ahmed-7124/dgptAW")
model = AutoModelForCausalLM.from_pretrained("ahmed-7124/dgptAW")


def answer_medical_query(query):
    """Generate a free-text answer to a medical question.

    Parameters
    ----------
    query : str
        The user's question, passed verbatim to the tokenizer.

    Returns
    -------
    str
        The decoded model output (which includes the prompt, since the
        full sequence is decoded), or a human-readable error message if
        tokenization/generation fails.
    """
    try:
        # Tokenize the input query into PyTorch tensors.
        inputs = tokenizer(query, return_tensors="pt")

        # Generate a response. do_sample=True is required for temperature
        # and top_k to take effect — under the default greedy decoding,
        # transformers silently ignores both sampling parameters.
        outputs = model.generate(
            **inputs,
            max_new_tokens=150,
            do_sample=True,
            temperature=0.7,
            top_k=50,
        )

        # Decode the generated token ids back to text.
        response = tokenizer.decode(outputs[0], skip_special_tokens=True)
        return response
    except Exception as e:
        # Surface the failure as UI text rather than crashing the app.
        return f"An error occurred while generating a response: {e}"
334
 
335
 
336
 
 
314
  # outputs="text",
315
  # )
316
 
317
from transformers import AutoTokenizer, AutoModelForCausalLM

# Single source of truth for the checkpoint id (was repeated three times).
MODEL_ID = "ahmed-7124/dgptAW"

try:
    # Network I/O: downloads from the Hugging Face Hub on first run.
    tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
    model = AutoModelForCausalLM.from_pretrained(MODEL_ID)
    print("Model and tokenizer loaded successfully!")
except Exception as e:
    print(f"Error loading tokenizer or model: {e}")
    print("Trying with GPT-2 tokenizer as a fallback...")
    # NOTE(review): this fallback only helps when the *tokenizer* load was
    # the failure — if the model load itself failed, the retry below hits
    # the same error and raises. Also, a GPT-2 tokenizer is only compatible
    # if this model uses the GPT-2 vocabulary — TODO confirm; a mismatched
    # vocabulary yields garbage generations.
    tokenizer = AutoTokenizer.from_pretrained("gpt2")
    model = AutoModelForCausalLM.from_pretrained(MODEL_ID)
 
 
 
 
 
 
328
 
329
 
330