filipealmeida committed
Commit 551ba9b
1 Parent(s): a050d94

Try to load the model in 8 bits

Files changed (2):
  1. app.py (+1 -1)
  2. requirements.txt (+3 -1)
app.py CHANGED
@@ -7,7 +7,7 @@ import re
 logging.basicConfig(level=logging.INFO)
 logging.getLogger('transformers').setLevel(logging.INFO)
 
-llama = pipeline("text-generation", model="filipealmeida/open-llama-3b-v2-pii-transform")
+llama = pipeline("text-generation", model="filipealmeida/open-llama-3b-v2-pii-transform", model_kwargs={"load_in_8bit": True})
 
 def generate_text(prompt, example):
     logging.debug(f"Received prompt: {prompt}")
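For reference, `model_kwargs` is forwarded to the underlying `from_pretrained` call, so the one-line change above is roughly equivalent to loading the model explicitly in 8-bit and handing it to the pipeline. A minimal sketch of that explicit form (the `device_map="auto"` placement and the variable names are assumptions, not part of this commit):

from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

model_id = "filipealmeida/open-llama-3b-v2-pii-transform"

# load_in_8bit quantizes the linear-layer weights to int8 via bitsandbytes at load time,
# which is why the commit also adds bitsandbytes and accelerate to requirements.txt.
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    load_in_8bit=True,
    device_map="auto",  # accelerate decides where the quantized layers are placed
)
tokenizer = AutoTokenizer.from_pretrained(model_id)

llama = pipeline("text-generation", model=model, tokenizer=tokenizer)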
requirements.txt CHANGED
@@ -1,2 +1,4 @@
 transformers==4.31.0
-torch==2.0.1
+torch==2.0.1
+bitsandbytes
+accelerate
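bitsandbytes and accelerate are only pulled in by transformers when 8-bit loading is actually requested, so a missing dependency surfaces at model-load time rather than at pip install. A quick way to confirm the quantization took effect once the Space boots; a sketch only, and the size figures are rough expectations for a ~3B-parameter model, not measurements:

# Inside app.py, after the pipeline above has been built:
footprint_gib = llama.model.get_memory_footprint() / 1024**3  # standard PreTrainedModel helper, returns bytes
logging.info(f"Model footprint: {footprint_gib:.1f} GiB")
# Expect roughly 3-4 GiB with load_in_8bit=True versus ~6.5 GiB in fp16,
# since int8 stores about one byte per parameter for the quantized weights.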