Commit 08b4513 (verified) by FlawedLLM
Parent(s): e4cd2b9

Update app.py

Files changed (1):
  app.py (+4, -4)
app.py CHANGED
@@ -4,12 +4,12 @@ from peft import AutoPeftModelForCausalLM
 from transformers import AutoTokenizer
 
 @spaces.GPU(duration=300)
-model = AutoPeftModelForCausalLM.from_pretrained(
+def chunk_it(input_command):
+    model = AutoPeftModelForCausalLM.from_pretrained(
         "FlawedLLM/BhashiniLLM", # YOUR MODEL YOU USED FOR TRAINING
         load_in_4bit = True,
-)
-tokenizer = AutoTokenizer.from_pretrained("lora_model")
-def chunk_it(input_command):
+    )
+    tokenizer = AutoTokenizer.from_pretrained("FlawedLLM/BhashiniLLM")
     inputs = tokenizer(
     [
         alpaca_prompt.format(
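
Note on the change: the model and tokenizer are now loaded inside the @spaces.GPU-decorated chunk_it function rather than at module level, so the 4-bit weights are only loaded while a ZeroGPU device is actually allocated, and the tokenizer is loaded from the "FlawedLLM/BhashiniLLM" hub repo instead of the local "lora_model" path. Below is a minimal sketch of the resulting pattern; everything after the tokenizer call (the alpaca_prompt formatting, generation settings, and decoding) is not visible in this diff, so those lines are illustrative assumptions only.

# Sketch of app.py after this commit, assuming a Gradio ZeroGPU Space.
# Only the loading code appears in the diff; the prompt handling and
# generation steps below are placeholders.
import spaces
from peft import AutoPeftModelForCausalLM
from transformers import AutoTokenizer

@spaces.GPU(duration=300)  # GPU is attached only while chunk_it runs, for up to 300 s
def chunk_it(input_command):
    # Load the 4-bit LoRA model while the GPU allocation is active,
    # instead of at import time when no GPU is available.
    model = AutoPeftModelForCausalLM.from_pretrained(
        "FlawedLLM/BhashiniLLM",
        load_in_4bit=True,
    )
    tokenizer = AutoTokenizer.from_pretrained("FlawedLLM/BhashiniLLM")

    # Placeholder prompt handling: the real app formats alpaca_prompt here.
    prompt = input_command
    inputs = tokenizer([prompt], return_tensors="pt").to(model.device)
    outputs = model.generate(**inputs, max_new_tokens=256)  # assumed settings
    return tokenizer.batch_decode(outputs, skip_special_tokens=True)[0]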