vilarin committed on
Commit
7ede767
·
verified ·
1 Parent(s): 7d8bfa5

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +6 -6
app.py CHANGED
@@ -5,7 +5,7 @@ import os
5
  from huggingface_hub import hf_hub_download
6
  import base64
7
  from llama_cpp import Llama
8
- from llama_cpp.llama_chat_format import NanoLlavaChatHandler
9
 
10
 
11
  os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = "1"
@@ -27,15 +27,15 @@ CSS = """
27
  }
28
  """
29
 
30
- chat_handler = NanoLlavaChatHandler.from_pretrained(
31
- repo_id="openbmb/MiniCPM-Llama3-V-2_5-gguf",
32
- filename="*mmproj*",
33
- )
34
 
35
  llm = Llama.from_pretrained(
36
  repo_id="openbmb/MiniCPM-Llama3-V-2_5-gguf",
37
  filename="ggml-model-Q5_K_M.gguf",
38
- chat_handler=chat_handler,
39
  n_ctx=2048, # n_ctx should be increased to accommodate the image embedding
40
  )
41
 
 
5
  from huggingface_hub import hf_hub_download
6
  import base64
7
  from llama_cpp import Llama
8
+ # from llama_cpp.llama_chat_format import NanoLlavaChatHandler
9
 
10
 
11
  os.environ["HF_HUB_ENABLE_HF_TRANSFER"] = "1"
 
27
  }
28
  """
29
 
30
+ # chat_handler = NanoLlavaChatHandler.from_pretrained(
31
+ # repo_id="openbmb/MiniCPM-Llama3-V-2_5-gguf",
32
+ # filename="*mmproj*",
33
+ # )
34
 
35
  llm = Llama.from_pretrained(
36
  repo_id="openbmb/MiniCPM-Llama3-V-2_5-gguf",
37
  filename="ggml-model-Q5_K_M.gguf",
38
+ # chat_handler=chat_handler,
39
  n_ctx=2048, # n_ctx should be increased to accommodate the image embedding
40
  )
41