FlawedLLM committed on
Commit
4ccf4fb
·
verified ·
1 Parent(s): 4b9b455

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +2 -1
app.py CHANGED
@@ -2,7 +2,7 @@ import re
2
  import spaces
3
  import gradio as gr
4
  import torch
5
- from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig
6
  # from peft import PeftModel, PeftConfig
7
 
8
 
@@ -12,6 +12,7 @@ quantization_config = BitsAndBytesConfig(
12
  bnb_4bit_use_double_quant=True,
13
  bnb_4bit_quant_type="nf4",
14
  bnb_4bit_compute_dtype=torch.float16)
 
15
  model = AutoModelForCausalLM.from_pretrained("FlawedLLM/Bhashini_00",
16
  device_map="auto",
17
  quantization_config=quantization_config,
 
2
  import spaces
3
  import gradio as gr
4
  import torch
5
+ from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig, AutoConfig
6
  # from peft import PeftModel, PeftConfig
7
 
8
 
 
12
  bnb_4bit_use_double_quant=True,
13
  bnb_4bit_quant_type="nf4",
14
  bnb_4bit_compute_dtype=torch.float16)
15
+ config=AutoConfig.from_pretrained("FlawedLLM/Bhashini_00")
16
  model = AutoModelForCausalLM.from_pretrained("FlawedLLM/Bhashini_00",
17
  device_map="auto",
18
  quantization_config=quantization_config,