FlawedLLM committed on
Commit
2ae9968
·
verified ·
1 Parent(s): bb033c5

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +17 -8
app.py CHANGED
@@ -19,16 +19,25 @@ from huggingface_hub import login, HfFolder
19
  # torch_dtype =torch.float16,
20
  # low_cpu_mem_usage=True,
21
  # trust_remote_code=True)
22
- from transformers import AutoModelForCausalLM, AutoTokenizer
23
- from peft import PeftModel
24
 
25
- # 1. Load Your Base Model and LoRA Adapter
26
- model_name_or_path = "FlawedLLM/Bhashini_gemma_merged4bit_clean_final" # Hugging Face model or local path
27
- lora_weights = "FlawedLLM/Bhashini_gemma_lora_clean_final" # LoRA weights
28
 
29
- tokenizer = AutoTokenizer.from_pretrained(model_name_or_path)
30
- model = AutoModelForCausalLM.from_pretrained(model_name_or_path, load_in_8bit=True, device_map='auto')
31
- model = PeftModel.from_pretrained(model, lora_weights)
 
 
 
 
 
 
 
 
 
32
  # alpaca_prompt = You MUST copy from above!
33
  @spaces.GPU(duration=300)
34
  def chunk_it(input_command, item_list):
 
19
  # torch_dtype =torch.float16,
20
  # low_cpu_mem_usage=True,
21
  # trust_remote_code=True)
22
+ # from transformers import AutoModelForCausalLM, AutoTokenizer
23
+ # from peft import PeftModel
24
 
25
+ # # 1. Load Your Base Model and LoRA Adapter
26
+ # model_name_or_path = "FlawedLLM/Bhashini_gemma_merged4bit_clean_final" # Hugging Face model or local path
27
+ # lora_weights = "FlawedLLM/Bhashini_gemma_lora_clean_final" # LoRA weights
28
 
29
+ # tokenizer = AutoTokenizer.from_pretrained(model_name_or_path)
30
+ # model = AutoModelForCausalLM.from_pretrained(model_name_or_path, load_in_8bit=True, device_map='auto')
31
+ # model = PeftModel.from_pretrained(model, lora_weights)
32
+ # Load model directly
33
+ from transformers import AutoTokenizer, AutoModelForCausalLM
34
+
35
+ tokenizer = AutoTokenizer.from_pretrained("FlawedLLM/Bhashini_gemma_merged4bit_clean_final",trust_remote_code=True)
36
+ model = AutoModelForCausalLM.from_pretrained("FlawedLLM/Bhashini_gemma_merged4bit_clean_final",
37
+ device_map="auto",
38
+ torch_dtype =torch.float16,
39
+ low_cpu_mem_usage=True,
40
+ trust_remote_code=True)
41
  # alpaca_prompt = You MUST copy from above!
42
  @spaces.GPU(duration=300)
43
  def chunk_it(input_command, item_list):