PhantHive committed
Commit 6fbf583 · verified · 1 Parent(s): daadf37

Update app.py

Files changed (1): app.py (+2, -4)
app.py CHANGED

@@ -11,7 +11,8 @@ bnb_config = BitsAndBytesConfig(
     load_in_4bit=True,
     bnb_4bit_use_double_quant=True,
     bnb_4bit_quant_type="nf4",
-    bnb_4bit_compute_dtype=torch.bfloat16
+    bnb_4bit_compute_dtype=torch
+    .bfloat16
 )
 
 # Load models and tokenizer efficiently
@@ -19,9 +20,6 @@ config = PeftConfig.from_pretrained("phearion/bigbrain-v0.0.1")
 tokenizer = AutoTokenizer.from_pretrained(config.base_model_name_or_path)
 model = AutoModelForCausalLM.from_pretrained(config.base_model_name_or_path, quantization_config=bnb_config)
 
-# Load the Lora model
-model = PeftModel.from_pretrained(model, model_id)
-
 def greet(text):
     with torch.no_grad():  # Disable gradient calculation for inference
         batch = tokenizer(f'"{text}" ->:', return_tensors='pt')  # Move tensors to device
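For context, a minimal sketch of how the quantized-model setup in app.py reads after this commit. Two things follow directly from the diff: the added lines split torch.bfloat16 across a line break, which Python still parses as torch.bfloat16 because the expression sits inside the BitsAndBytesConfig parentheses (implicit line continuation); and the removed PeftModel.from_pretrained(model, model_id) call referenced a model_id that is not defined in the visible hunks, so after this commit the app serves the quantized base model without the LoRA adapter. The imports and the generation/return steps below are assumptions, since the diff only shows lines 11-27 of the file.

```python
import torch
from peft import PeftConfig
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

# 4-bit NF4 quantization config. The commit's odd line split of
# torch.bfloat16 is semantically identical to writing it on one line.
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_use_double_quant=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.bfloat16,
)

# Resolve the base model from the PEFT adapter config, then load it quantized.
config = PeftConfig.from_pretrained("phearion/bigbrain-v0.0.1")
tokenizer = AutoTokenizer.from_pretrained(config.base_model_name_or_path)
model = AutoModelForCausalLM.from_pretrained(
    config.base_model_name_or_path, quantization_config=bnb_config
)

def greet(text):
    with torch.no_grad():  # disable gradient tracking for inference
        batch = tokenizer(f'"{text}" ->:', return_tensors='pt')
        # Assumed generation step: the diff truncates after the tokenizer call.
        output = model.generate(**batch, max_new_tokens=50)
    return tokenizer.decode(output[0], skip_special_tokens=True)
```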