Update app.py
app.py CHANGED
@@ -11,7 +11,7 @@ bnb_config = BitsAndBytesConfig(
     load_in_4bit=True,
     bnb_4bit_use_double_quant=True,
     bnb_4bit_quant_type="nf4",
-    bnb_4bit_compute_dtype=torch.
+    bnb_4bit_compute_dtype=torch.bfloat16
 )
 
 # Load models and tokenizer efficiently
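
For context, this first hunk completes the 4-bit quantization setup. A minimal sketch of how the config block typically reads in full, together with the imports the file needs but the diff does not show (assumed here):

    import torch
    from transformers import BitsAndBytesConfig

    # 4-bit NF4 quantization with double quantization; bfloat16 is the
    # compute dtype for matmuls, matching the line added in this commit.
    bnb_config = BitsAndBytesConfig(
        load_in_4bit=True,
        bnb_4bit_use_double_quant=True,
        bnb_4bit_quant_type="nf4",
        bnb_4bit_compute_dtype=torch.bfloat16,
    )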
@@ -24,9 +24,9 @@ model = PeftModel.from_pretrained(model, model_id)
 
 def greet(text):
     with torch.no_grad():  # Disable gradient calculation for inference
-        batch = tokenizer(f'### Input:\n{text}\n\n### Answer
+        batch = tokenizer(f'### Input:\n{text}\n\n### Answer:\n', return_tensors='pt') # Move tensors to device
         with torch.cuda.amp.autocast():  # Enable mixed-precision if available
-            output_tokens = model.generate(**batch, max_new_tokens=
+            output_tokens = model.generate(**batch, max_new_tokens=50)
     return tokenizer.decode(output_tokens[0], skip_special_tokens=True)
 
 iface = gr.Interface(fn=greet, inputs="text", outputs="text", title="PEFT Model for Big Brain")
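
The second hunk's header shows `model = PeftModel.from_pretrained(model, model_id)`, i.e. a PEFT adapter stacked on the quantized base model. The loading lines themselves fall between the two hunks and are not shown; a hedged sketch of the usual pattern, assuming `model_id` names a hypothetical adapter repo whose config records the base model:

    from peft import PeftConfig, PeftModel
    from transformers import AutoModelForCausalLM, AutoTokenizer

    model_id = "your-username/your-peft-adapter"  # hypothetical adapter repo id
    config = PeftConfig.from_pretrained(model_id)

    # Load the 4-bit quantized base model, then attach the PEFT adapter.
    model = AutoModelForCausalLM.from_pretrained(
        config.base_model_name_or_path,
        quantization_config=bnb_config,
        device_map="auto",
    )
    tokenizer = AutoTokenizer.from_pretrained(config.base_model_name_or_path)
    model = PeftModel.from_pretrained(model, model_id)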
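
One caveat on the added tokenizer line: its comment says "Move tensors to device", but as written nothing is moved; with a model on GPU the inputs generally need an explicit transfer (accelerate's device hooks can mask this when `device_map` is used). A sketch of the inference function with the move made explicit, plus the `iface.launch()` call an app.py on Spaces normally ends with; the launch line is an assumption, since the diff stops at the interface definition:

    def greet(text):
        with torch.no_grad():  # inference only, no gradients
            batch = tokenizer(f'### Input:\n{text}\n\n### Answer:\n', return_tensors='pt')
            batch = {k: v.to(model.device) for k, v in batch.items()}  # actually move tensors
            with torch.cuda.amp.autocast():  # mixed precision on CUDA
                output_tokens = model.generate(**batch, max_new_tokens=50)
        return tokenizer.decode(output_tokens[0], skip_special_tokens=True)

    iface = gr.Interface(fn=greet, inputs="text", outputs="text", title="PEFT Model for Big Brain")
    iface.launch()  # assumed: not shown in the diff, but required to serve the app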