FlawedLLM committed on
Commit 91c8163 · verified · 1 Parent(s): 29e0d2d

Update app.py

Files changed (1)
  1. app.py +3 -2
app.py CHANGED
@@ -61,7 +61,7 @@ from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig
 # model = AutoModelForCausalLM.from_pretrained("FlawedLLM/Bhashini_9",config=config, ignore_mismatched_sizes=True).to('cuda')
 # Load model directly
 
-tokenizer = AutoTokenizer.from_pretrained("FlawedLLM/Bhashini_00")
+tokenizer = AutoTokenizer.from_pretrained("FlawedLLM/Bhashini_00", trust_remote_code=True)
 quantization_config = BitsAndBytesConfig(
     load_in_4bit=True,
     bnb_4bit_use_double_quant=True,
@@ -72,7 +72,8 @@ model = AutoModelForCausalLM.from_pretrained("FlawedLLM/Bhashini_00",
     quantization_config=quantization_config,
     torch_dtype =torch.float16,
     low_cpu_mem_usage=True,
-    use_safetensors=True,)
+    use_safetensors=True,
+    trust_remote_code=True)
 
 @spaces.GPU(duration=300)
 def chunk_it(input_command, item_list):
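
For context, a minimal sketch of how the loading section of app.py reads after this commit. The bnb_4bit_quant_type and bnb_4bit_compute_dtype arguments and the function body are assumptions for illustration; the diff only shows the first two BitsAndBytesConfig arguments and the chunk_it signature.

import torch
import spaces  # ZeroGPU decorator package on Hugging Face Spaces
from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig

# trust_remote_code=True (added by this commit) lets custom code
# shipped in the Hub repo run when loading the tokenizer.
tokenizer = AutoTokenizer.from_pretrained("FlawedLLM/Bhashini_00",
                                          trust_remote_code=True)

quantization_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_use_double_quant=True,
    bnb_4bit_quant_type="nf4",             # assumption: NF4 is the common choice
    bnb_4bit_compute_dtype=torch.float16,  # assumption, matches torch_dtype below
)

model = AutoModelForCausalLM.from_pretrained(
    "FlawedLLM/Bhashini_00",
    quantization_config=quantization_config,
    torch_dtype=torch.float16,
    low_cpu_mem_usage=True,
    use_safetensors=True,
    trust_remote_code=True,  # added by this commit
)

@spaces.GPU(duration=300)  # request a GPU for up to 300 s per call
def chunk_it(input_command, item_list):
    ...  # body unchanged by this commit

Without trust_remote_code=True, transformers refuses to execute modeling or tokenizer code bundled with a checkpoint, so repos that ship custom classes fail to load; passing it on both from_pretrained calls keeps the tokenizer and model loading paths consistent.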