AnishKumbhar committed
Commit fd0c4b7 · 1 Parent(s): 13a1b4a

Update app.py

Files changed (1): app.py (+2 -1)
app.py CHANGED
@@ -19,5 +19,6 @@ wget -O "${output_dir}/generation_config.json" "https://huggingface.co/4bit/Llam
  wget -O "${output_dir}/special_tokens_map.json" "https://huggingface.co/4bit/Llama-2-7b-Chat-GPTQ/raw/main/special_tokens_map.json"
  wget -O "${output_dir}/tokenizer.model" "https://huggingface.co/4bit/Llama-2-7b-Chat-GPTQ/resolve/main/tokenizer.model"
  wget -O "${output_dir}/tokenizer_config.json" "https://huggingface.co/4bit/Llama-2-7b-Chat-GPTQ/raw/main/tokenizer_config.json"
- wget -O "${output_dir}/gptq_model-4bit-128g.safetensors" "https://huggingface.co/4bit/Llama-2-7b-Chat-GPTQ/resolve/main/gptq_model-4bit-128g.safetensors"%cd /content/text-generation-webui
+ wget -O "${output_dir}/gptq_model-4bit-128g.safetensors" "https://huggingface.co/4bit/Llama-2-7b-Chat-GPTQ/resolve/main/gptq_model-4bit-128g.safetensors"
+ %cd /content/text-generation-webui
  !python server.py --share --chat --wbits 4 --groupsize 128 --model_type llama
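
For context, a minimal sketch of how the end of the corrected cell reads after this change: the Colab `%cd` magic now sits on its own line instead of being fused onto the final `wget`, so the weights download completes before switching into the web UI directory and launching the server. The `output_dir` assignment below is an assumption for illustration only (the variable is defined earlier in app.py and is not visible in this hunk).

  # Assumed definition; output_dir is set earlier in app.py and not shown in this hunk.
  output_dir="/content/text-generation-webui/models/Llama-2-7b-Chat-GPTQ"

  # Download the GPTQ weights, then change into the web UI checkout and start it.
  wget -O "${output_dir}/gptq_model-4bit-128g.safetensors" "https://huggingface.co/4bit/Llama-2-7b-Chat-GPTQ/resolve/main/gptq_model-4bit-128g.safetensors"
  %cd /content/text-generation-webui
  !python server.py --share --chat --wbits 4 --groupsize 128 --model_type llama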