Update app.py
app.py
CHANGED
@@ -3,6 +3,9 @@ from transformers import pipeline
 from peft import AutoPeftModelForCausalLM
 from transformers import AutoTokenizer
 
+max_seq_length = 2048 # Choose any! We auto support RoPE Scaling internally!
+dtype = None # None for auto detection. Float16 for Tesla T4, V100, Bfloat16 for Ampere+
+load_in_4bit = True # Use 4bit quantization to reduce memory usage. Can be False.
 #from llama_cpp import Llama
 
 # Load the Llama model
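
For reference, the three added settings are presumably consumed by the model-loading code further down in app.py, which this hunk does not show. Below is a minimal sketch of how they could plug into the imports above, assuming a hypothetical adapter repo ID (the Space's real one isn't visible here). Note that plain transformers has no max_seq_length argument on from_pretrained, so the sketch applies it to the tokenizer instead:

    import torch
    from peft import AutoPeftModelForCausalLM
    from transformers import AutoTokenizer

    max_seq_length = 2048   # context-length cap, applied to the tokenizer below
    dtype = None            # None -> auto detection; torch.float16 for T4/V100, torch.bfloat16 for Ampere+
    load_in_4bit = True     # 4-bit quantization via bitsandbytes to reduce memory usage

    model_id = "user/peft-adapter"  # hypothetical; substitute the Space's actual repo ID

    # AutoPeftModelForCausalLM loads the base model plus the PEFT adapter in one call;
    # extra kwargs are forwarded to the underlying transformers from_pretrained.
    model = AutoPeftModelForCausalLM.from_pretrained(
        model_id,
        torch_dtype=dtype if dtype is not None else "auto",
        load_in_4bit=load_in_4bit,
    )
    tokenizer = AutoTokenizer.from_pretrained(model_id, model_max_length=max_seq_length)

The commented-out llama_cpp import suggests an earlier GGUF-based loading path that this commit keeps disabled in favor of the PEFT route.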