FlawedLLM committed
Commit a800923 · verified · 1 Parent(s): a87f8a7

Update app.py

Files changed (1)
  1. app.py +5 -13
app.py CHANGED
@@ -4,23 +4,15 @@ import gradio as gr
 from peft import PeftModel, PeftConfig
 from peft import AutoPeftModelForCausalLM
 from transformers import AutoTokenizer
-# model = AutoPeftModelForCausalLM.from_pretrained(
-#     "FlawedLLM/BhashiniLLM", # YOUR MODEL YOU USED FOR TRAINING
-#     load_in_4bit = True,
-# )
-# tokenizer = AutoTokenizer.from_pretrained("FlawedLLM/BhashiniLLM")
-from unsloth import FastLanguageModel
+model = AutoPeftModelForCausalLM.from_pretrained(
+    "FlawedLLM/BhashiniLLM", # YOUR MODEL YOU USED FOR TRAINING
+    load_in_4bit = True,
+)
+tokenizer = AutoTokenizer.from_pretrained("FlawedLLM/BhashiniLLM")
 
 
 @spaces.GPU(duration=300)
 def chunk_it(input_command):
-    model, tokenizer = FastLanguageModel.from_pretrained(
-        model_name = "FlawedLLM/BhashiniLLM", # YOUR MODEL YOU USED FOR TRAINING
-        max_seq_length = 2048,
-        dtype = None,
-        load_in_4bit = True,
-    )
-    FastLanguageModel.for_inference(model)
     alpaca_prompt = """Below is an instruction that describes a task, paired with an input that provides further context. Write a response that appropriately completes the request.
 
 ### Instruction:
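
In short, this commit moves model loading out of the chunk_it request handler (where Unsloth's FastLanguageModel reloaded the weights on every call) up to module scope, using PEFT's AutoPeftModelForCausalLM, which loads the base model together with the fine-tuned adapter. Below is a minimal standalone sketch of the new loading path; the generation call and prompt text are illustrative additions not present in the commit, and a CUDA-capable GPU with bitsandbytes installed is assumed for the 4-bit load.

from peft import AutoPeftModelForCausalLM
from transformers import AutoTokenizer

# Load the fine-tuned adapter plus its base model once at startup;
# load_in_4bit quantizes the base weights via bitsandbytes.
model = AutoPeftModelForCausalLM.from_pretrained(
    "FlawedLLM/BhashiniLLM",
    load_in_4bit=True,
)
tokenizer = AutoTokenizer.from_pretrained("FlawedLLM/BhashiniLLM")

# Hypothetical smoke test (not part of the commit): run one short generation
# with an Alpaca-style prompt to confirm the adapter loaded correctly.
prompt = "### Instruction:\nSay hello.\n\n### Response:\n"
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
outputs = model.generate(**inputs, max_new_tokens=32)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))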