Update app.py
app.py CHANGED
@@ -1,14 +1,26 @@
 import os
-os.system("pip install
+os.system("pip install -q gradio")
+os.system('pip install "unsloth[colab-new] @ git+https://github.com/unslothai/unsloth.git"')
+os.system('pip install --no-deps xformers "trl<0.9.0" peft accelerate bitsandbytes')
 
 import gradio as gr
 import torch
 import random
-from
+from unsloth import FastLanguageModel
+from peft import PeftModel
 
+max_seq_length = 2048 # Choose any! We auto support RoPE Scaling internally!
+dtype = None # None for auto detection. Float16 for Tesla T4, V100, Bfloat16 for Ampere+
+load_in_4bit = True # Use 4bit quantization to reduce memory usage. Can be False.
 
-tokenizer =
-
+model, tokenizer = FastLanguageModel.from_pretrained(
+    model_name = "Qwen/Qwen2-1.5B",
+    max_seq_length = max_seq_length,
+    dtype = dtype,
+    load_in_4bit = load_in_4bit
+)
+
+model = PeftModel.from_pretrained(model, "RandomNameAnd6/Phi-3-Mini-Dhar-Mann-Adapters-BOS")
 
 def generate_text(prompt):
     input_ids = tokenizer.encode(prompt, return_tensors="pt")
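
The diff cuts off at the first line of generate_text, so the committed file's actual generation and UI code is not shown above. As a rough sketch only, assuming the usual model.generate plus gr.Interface pattern (the function body, generation parameters, and the FastLanguageModel.for_inference call below are illustrative guesses, not the Space's real code), the remainder of app.py could look like this:

# Hypothetical continuation of app.py (not part of this commit): wire the
# Unsloth-loaded model and tokenizer into a simple Gradio text-generation demo.
FastLanguageModel.for_inference(model)  # Unsloth's inference mode; illustrative here

def generate_text(prompt):
    # Tokenize the prompt and move it to the same device as the model
    input_ids = tokenizer.encode(prompt, return_tensors="pt").to(model.device)
    output_ids = model.generate(
        input_ids,
        max_new_tokens=256,   # illustrative cap on generated tokens
        do_sample=True,
        temperature=0.8,
    )
    # Decode the full sequence back into text
    return tokenizer.decode(output_ids[0], skip_special_tokens=True)

demo = gr.Interface(fn=generate_text, inputs="text", outputs="text")
demo.launch()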