1inkusFace committed
Commit b573b6e · verified · 1 Parent(s): 63538d5

Update app.py

Files changed (1)
  1. app.py +4 -4
app.py CHANGED
@@ -25,12 +25,12 @@ repo_id = "Sosaka/Vicuna-7B-4bit-ggml"
 filename = "vicuna-7B-1.1-ggml_q4_0-ggjt_v3.bin"
 hf_hub_download(repo_id=repo_id, filename=filename)
 llm = Llama(
-      model_path="./models/7B/llama-model.gguf",
+      model_path="./vicuna-7B-1.1-ggml_q4_0-ggjt_v3.bin",
       n_gpu_layers=-1, # Uncomment to use GPU acceleration
       # seed=1337, # Uncomment to set a specific seed
-      # n_ctx=2048, # Uncomment to increase the context window
+      n_ctx=4096, # Uncomment to increase the context window
 )
-
+'''
 llm = Llama.from_pretrained(
       repo_id="TheBloke/vicuna-7B-v1.5-GGUF",
       filename="vicuna-7b-v1.5.Q8_0.gguf",
@@ -38,7 +38,7 @@ llm = Llama.from_pretrained(
       n_ctx = 4096,
       verbose=False
 )
-
+'''
 try:
     nltk.data.find('taggers/averaged_perceptron_tagger_eng')
 except LookupError:
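
For reference, a minimal sketch (not part of this commit) of how the two calls touched by this diff are typically combined with llama-cpp-python: hf_hub_download returns the cached local path of the downloaded file, and that path can be passed straight to Llama. The repo, filename, and loader parameters mirror the diff; the prompt and max_tokens are illustrative placeholders, and the sketch assumes the installed llama-cpp-python build can load this quantized model file.

from huggingface_hub import hf_hub_download
from llama_cpp import Llama

# Download (or reuse from cache) the quantized Vicuna weights and keep the
# local path that hf_hub_download returns.
model_path = hf_hub_download(
    repo_id="Sosaka/Vicuna-7B-4bit-ggml",
    filename="vicuna-7B-1.1-ggml_q4_0-ggjt_v3.bin",
)

# Load the model with the same parameters used in the diff, but pointing at
# the cached path rather than a hard-coded "./..." location.
llm = Llama(
    model_path=model_path,  # path returned by hf_hub_download
    n_gpu_layers=-1,        # offload all layers to the GPU if one is available
    n_ctx=4096,             # context window size
)

# Illustrative completion call; prompt and max_tokens are placeholders.
output = llm("Q: Name the planets in the solar system. A: ", max_tokens=64)
print(output["choices"][0]["text"])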