raghavdw committed on
Commit
cb507fd
·
verified ·
1 Parent(s): 68ba197

fixed the generate response function

Browse files
Files changed (1) hide show
  1. app.py +0 -3
app.py CHANGED
@@ -12,9 +12,6 @@ model = AutoModelWithLMHead.from_pretrained("raghavdw/finedtuned_gpt2_medQA_mode
12
 
13
  def generate_query_response(prompt, max_length=200):
14
 
15
- model = loaded_model
16
- tokenizer = loaded_tokenizer
17
-
18
  input_ids = tokenizer.encode(prompt, return_tensors="pt")
19
 
20
  device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
 
12
 
13
  def generate_query_response(prompt, max_length=200):
14
 
 
 
 
15
  input_ids = tokenizer.encode(prompt, return_tensors="pt")
16
 
17
  device = torch.device("cuda" if torch.cuda.is_available() else "cpu")