Space status: Runtime error

Commit 037f494
Parent(s): a96ce40
Update app.py

app.py CHANGED
```diff
@@ -1,25 +1,23 @@
-
-!apt-get -y install -qq aria2
-
-
-
-
-
-
-
-
-!git clone -b v1.2 https://github.com/camenduru/GPTQ-for-LLaMa.git
-%cd GPTQ-for-LLaMa
-!python setup_cuda.py install
-
-
-
-
-
-wget -O "${output_dir}/special_tokens_map.json" "https://huggingface.co/4bit/Llama-2-7b-Chat-GPTQ/raw/main/special_tokens_map.json"
-wget -O "${output_dir}/tokenizer.model" "https://huggingface.co/4bit/Llama-2-7b-Chat-GPTQ/resolve/main/tokenizer.model"
-wget -O "${output_dir}/tokenizer_config.json" "https://huggingface.co/4bit/Llama-2-7b-Chat-GPTQ/raw/main/tokenizer_config.json"
-wget -O "${output_dir}/gptq_model-4bit-128g.safetensors" "https://huggingface.co/4bit/Llama-2-7b-Chat-GPTQ/resolve/main/gptq_model-4bit-128g.safetensors"
-
-%cd /content/text-generation-webui
-!python server.py --share --chat --wbits 4 --groupsize 128 --model_type llama
+output_dir = "/content/text-generation-webui/models/Llama-2-7b-Chat-GPTQ"
+
+# Set the paths to the GPT-Q model files
+config_json_path = os.path.join(output_dir, "config.json")
+generation_config_json_path = os.path.join(output_dir, "generation_config.json")
+special_tokens_map_json_path = os.path.join(output_dir, "special_tokens_map.json")
+tokenizer_model_path = os.path.join(output_dir, "tokenizer.model")
+tokenizer_config_json_path = os.path.join(output_dir, "tokenizer_config.json")
+gptq_model_path = os.path.join(output_dir, "gptq_model-4bit-128g.safetensors")
+
+# Change the directory to the output directory
+os.chdir(output_dir)
+
+# Download the GPT-Q model files
+wget -O "${config_json_path}" "https://huggingface.co/4bit/Llama-2-7b-Chat-GPTQ/raw/main/config.json"
+wget -O "${generation_config_json_path}" "https://huggingface.co/4bit/Llama-2-7b-Chat-GPTQ/raw/main/generation_config.json"
+wget -O "${special_tokens_map_json_path}" "https://huggingface.co/4bit/Llama-2-7b-Chat-GPTQ/raw/main/special_tokens_map.json"
+wget -O "${tokenizer_model_path}" "https://huggingface.co/4bit/Llama-2-7b-Chat-GPTQ/resolve/main/tokenizer.model"
+wget -O "${tokenizer_config_json_path}" "https://huggingface.co/4bit/Llama-2-7b-Chat-GPTQ/raw/main/tokenizer_config.json"
+wget -O "${gptq_model_path}" "https://huggingface.co/4bit/Llama-2-7b-Chat-GPTQ/resolve/main/gptq_model-4bit-128g.safetensors"
+
+# Start the text generation web UI
+python server.py --share --chat --wbits 4 --groupsize 128 --model_type llama
```
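The added code cannot run as committed, which is consistent with the Space's Runtime error status: the old file was a Colab notebook cell (the `!` and `%cd` prefixes are IPython magics), and the new file keeps shell commands (`wget -O ...` and the bare `python server.py ...` line) inside a plain `.py` file, so the module fails to load with a `SyntaxError`; `os` is also used without being imported, and the commit drops the GPTQ-for-LLaMa build steps that the 4-bit flags presumably depend on. A minimal runnable sketch of the same flow, substituting `urllib.request` for `wget` and `subprocess` for the bare launch line (same files and flags as the diff; these substitutions are mine, not the author's):

```python
import os
import subprocess
import urllib.request

# Paths and filenames taken from the diff above.
output_dir = "/content/text-generation-webui/models/Llama-2-7b-Chat-GPTQ"
os.makedirs(output_dir, exist_ok=True)

# Mirrors the URLs in the diff: small JSON files are served from raw/,
# LFS binaries from resolve/.
BASE = "https://huggingface.co/4bit/Llama-2-7b-Chat-GPTQ"
FILES = {
    "config.json": "raw",
    "generation_config.json": "raw",
    "special_tokens_map.json": "raw",
    "tokenizer_config.json": "raw",
    "tokenizer.model": "resolve",
    "gptq_model-4bit-128g.safetensors": "resolve",
}

# Replaces the shell `wget -O` calls with a stdlib download.
for name, endpoint in FILES.items():
    dest = os.path.join(output_dir, name)
    if not os.path.exists(dest):
        urllib.request.urlretrieve(f"{BASE}/{endpoint}/main/{name}", dest)

# Replaces `%cd` plus the bare `python server.py ...` line.
os.chdir("/content/text-generation-webui")
subprocess.run(
    ["python", "server.py", "--share", "--chat",
     "--wbits", "4", "--groupsize", "128", "--model_type", "llama"],
    check=True,
)
```

The committed `os.chdir(output_dir)` is dropped here: the downloads already write to absolute paths, and `server.py` has to be launched from the text-generation-webui root in any case.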
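For the downloads alone, a sketch using `huggingface_hub` (my suggestion; the library is not referenced anywhere in this commit) avoids hand-tracking which files sit behind `raw/` versus `resolve/` URLs:

```python
from huggingface_hub import hf_hub_download

REPO_ID = "4bit/Llama-2-7b-Chat-GPTQ"
output_dir = "/content/text-generation-webui/models/Llama-2-7b-Chat-GPTQ"

for filename in (
    "config.json",
    "generation_config.json",
    "special_tokens_map.json",
    "tokenizer_config.json",
    "tokenizer.model",
    "gptq_model-4bit-128g.safetensors",
):
    # Resolves the correct endpoint for plain vs. LFS files and caches results.
    hf_hub_download(repo_id=REPO_ID, filename=filename, local_dir=output_dir)
```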