Spaces:
Sleeping
Sleeping
Updated the generate_query_response function in app.py
Browse files
app.py
CHANGED
@@ -12,6 +12,9 @@ model = AutoModelWithLMHead.from_pretrained("raghavdw/finedtuned_gpt2_medQA_mode
|
|
12 |
|
13 |
def generate_query_response(prompt, max_length=200):
|
14 |
|
|
|
|
|
|
|
15 |
input_ids = tokenizer.encode(prompt, return_tensors="pt")
|
16 |
|
17 |
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
|
|
|
12 |
|
13 |
def generate_query_response(prompt, max_length=200):
|
14 |
|
15 |
+
model = model
|
16 |
+
tokenizer = tokenizer
|
17 |
+
|
18 |
input_ids = tokenizer.encode(prompt, return_tensors="pt")
|
19 |
|
20 |
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
|