Update app.py
app.py CHANGED
@@ -6,18 +6,8 @@ from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig
 from peft import LoraConfig, PeftModel, get_peft_model
 import gradio as gr
 
-tokenizer = AutoTokenizer.from_pretrained("VanguardAI/BhashiniLLaMa3-8B_16bit_LoRA_Adapters"
-
-                                          load_in_4bit=True,
-                                          bnb_4bit_use_double_quant=True,
-                                          bnb_4bit_quant_type="nf4",
-                                          bnb_4bit_compute_dtype=torch.float16)
-model = AutoModelForCausalLM.from_pretrained("VanguardAI/BhashiniLLaMa3-8B_16bit_LoRA_Adapters",
-                                             quantization_config=quantization_config,
-                                             torch_dtype =torch.bfloat16,
-                                             low_cpu_mem_usage=True,
-                                             use_safetensors=True,
-                                             trust_remote_code=True)
+tokenizer = AutoTokenizer.from_pretrained("VanguardAI/BhashiniLLaMa3-8B_16bit_LoRA_Adapters")
+model = AutoModelForCausalLM.from_pretrained("VanguardAI/BhashiniLLaMa3-8B_16bit_LoRA_Adapters")
 
 condition = '''
 ALWAYS provide output in a JSON format.
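The commit drops a broken quantized load: the removed tokenizer call had an unclosed parenthesis and was being handed BitsAndBytesConfig arguments, and the removed model call referenced a quantization_config variable that was never defined. If 4-bit loading is still wanted, a minimal sketch of what that block presumably intended follows; the keyword names and values come from the removed lines, but the overall structure (building a BitsAndBytesConfig first and passing it to the model load) is an assumption, not the committed code.

# Sketch only: reassembled from the removed diff lines, not the committed code.
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig

quantization_config = BitsAndBytesConfig(
    load_in_4bit=True,                     # quantize weights to 4-bit on load
    bnb_4bit_use_double_quant=True,        # also quantize the quantization constants
    bnb_4bit_quant_type="nf4",             # NormalFloat4 quantization data type
    bnb_4bit_compute_dtype=torch.float16,  # dtype used for compute at runtime
)

tokenizer = AutoTokenizer.from_pretrained("VanguardAI/BhashiniLLaMa3-8B_16bit_LoRA_Adapters")
model = AutoModelForCausalLM.from_pretrained(
    "VanguardAI/BhashiniLLaMa3-8B_16bit_LoRA_Adapters",
    quantization_config=quantization_config,  # belongs on the model load, not the tokenizer
    torch_dtype=torch.bfloat16,
    low_cpu_mem_usage=True,
    use_safetensors=True,
    trust_remote_code=True,
)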
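The file also imports PeftModel, and the repo name suggests it holds LoRA adapters rather than merged weights. If that is the case, the adapters would normally be attached to a base checkpoint via peft; a hypothetical sketch, where the base-model ID is an assumed placeholder not taken from the commit:

import torch
from transformers import AutoModelForCausalLM
from peft import PeftModel

# "meta-llama/Meta-Llama-3-8B" is an assumed base checkpoint, not from the commit.
base = AutoModelForCausalLM.from_pretrained(
    "meta-llama/Meta-Llama-3-8B",
    torch_dtype=torch.bfloat16,
)
# Attach the LoRA adapter weights on top of the base model.
model = PeftModel.from_pretrained(base, "VanguardAI/BhashiniLLaMa3-8B_16bit_LoRA_Adapters")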