FlawedLLM committed
Commit 12a17e9 · verified · 1 Parent(s): 1cfdffa

Update app.py

Files changed (1): app.py (+1, -1)
app.py CHANGED
@@ -46,7 +46,7 @@ import torch
 from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig, AutoConfig
 
 tokenizer = AutoTokenizer.from_pretrained("FlawedLLM/Bhashini_9")
-config = AutoConfig.from_pretrained(model_id) # Load configuration
+config = AutoConfig.from_pretrained("FlawedLLM/Bhashini_9") # Load configuration
 
 # quantization_config = BitsAndBytesConfig(
 #     load_in_4bit=True,
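
For context, the removed line referenced a model_id variable that is not defined at that point in app.py; the fix inlines the repo id. Below is a minimal sketch of how the surrounding loading code could be written with the repo id held in one variable, assuming the commented-out 4-bit quantization is eventually enabled. The model_id variable, compute dtype, and device_map are illustrative assumptions, not taken from the commit.

import torch
from transformers import AutoConfig, AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

# Assumption: keep the repo id in one variable so later calls cannot hit an
# undefined model_id, which is the error this commit works around.
model_id = "FlawedLLM/Bhashini_9"

tokenizer = AutoTokenizer.from_pretrained(model_id)
config = AutoConfig.from_pretrained(model_id)  # Load configuration

# Assumed 4-bit setup, based on the commented-out BitsAndBytesConfig in the diff.
quantization_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_compute_dtype=torch.float16,  # assumption: dtype not shown in the diff
)

model = AutoModelForCausalLM.from_pretrained(
    model_id,
    config=config,
    quantization_config=quantization_config,
    device_map="auto",  # assumption: spread layers across available devices
)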