RandomNameAnd6 committed
Commit d34d83c · verified · 1 parent: ec6c8ad

Update app.py

Files changed (1): app.py +2 -4
app.py CHANGED
@@ -1,19 +1,17 @@
 import os
 os.system("pip install -q gradio torch")
-os.system('pip install "unsloth[colab-new] @ git+https://github.com/unslothai/unsloth.git"')
 os.system('pip install --no-deps xformers "trl<0.9.0" peft accelerate bitsandbytes')
 
 import gradio as gr
 import torch
 import random
-from unsloth import FastLanguageModel
-from peft import PeftModel
+from transformers import AutoTokenizer, AutoModelForCausalLM
 
 max_seq_length = 2048 # Choose any! We auto support RoPE Scaling internally!
 dtype = None # None for auto detection. Float16 for Tesla T4, V100, Bfloat16 for Ampere+
 load_in_4bit = True # Use 4bit quantization to reduce memory usage. Can be False.
 
-model, tokenizer = FastLanguageModel.from_pretrained(
+model, tokenizer = AutoModelForCausalLM.from_pretrained(
 model_name = "Qwen/Qwen2-1.5B",
 max_seq_length = max_seq_length,
 dtype = dtype,
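Review note on the new loader: as committed, the call keeps unsloth's signature, but `AutoModelForCausalLM.from_pretrained` returns only a model (no tokenizer), takes the checkpoint id as its first positional argument, and accepts neither `model_name` nor `max_seq_length` keywords (the dtype keyword in transformers is `torch_dtype`), so the script will fail at startup. Below is a minimal sketch of how this load is typically written with plain transformers, keeping the 4-bit option via `BitsAndBytesConfig`; the prompt string and generation settings are illustrative, not from the commit.

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

model_name = "Qwen/Qwen2-1.5B"
load_in_4bit = True  # 4-bit quantization to cut memory use; needs bitsandbytes + a CUDA GPU

# Optional 4-bit quantization config, standing in for unsloth's load_in_4bit flag.
quant_config = BitsAndBytesConfig(load_in_4bit=True) if load_in_4bit else None

# transformers loads the tokenizer and model separately, unlike
# FastLanguageModel.from_pretrained, which returned both as a tuple.
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,                        # positional, not model_name=...
    torch_dtype="auto",                # auto-detect, replacing dtype=None
    quantization_config=quant_config,  # replaces load_in_4bit=True
    device_map="auto",                 # place weights on available devices
)

# Minimal generation check (illustrative).
inputs = tokenizer("Hello, my name is", return_tensors="pt").to(model.device)
outputs = model.generate(**inputs, max_new_tokens=32)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```

Dropping `max_seq_length` is deliberate in this sketch: it is an unsloth-specific argument with no counterpart in `from_pretrained`, and Qwen2's context length is read from the model config.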