Spaces: Running on Zero

correct mistake
Bobber Cheng committed · Commit 4d29a3f · Parent(s): 771ca71
app.py CHANGED
@@ -11,7 +11,7 @@ import os
 
 
 CLIP_PATH = "google/siglip-so400m-patch14-384"
-VLM_PROMPT = "A descriptive caption for this image:\n"
+VLM_PROMPT = "A descriptive caption for this image"
 MODEL_PATH = "meta-llama/Meta-Llama-3.1-8B"
 CHECKPOINT_PATH = Path("wpkklhc6")
 TITLE = "<h1><center>JoyCaption Pre-Alpha (2024-07-30a)</center></h1>"
@@ -73,7 +73,7 @@ def stream_chat(input_image: Image.Image, vlm_prompt):
     # Tokenize the prompt
     if not vlm_prompt:
         vlm_prompt = VLM_PROMPT
-
+    vlm_prompt = vlm_prompt + ":\n"
     prompt = tokenizer.encode(
         vlm_prompt,
         return_tensors='pt',
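The change in plain terms: the trailing ":\n" moves out of the VLM_PROMPT constant and is now appended inside stream_chat after the default-prompt fallback, so a user-supplied vlm_prompt receives the same suffix as the default before tokenization. Below is a minimal, standalone sketch of the corrected logic; build_prompt is a hypothetical helper introduced here only to isolate the changed lines and is not part of app.py.

    # Minimal sketch of the corrected prompt handling in stream_chat.
    # VLM_PROMPT matches app.py; build_prompt is a hypothetical helper
    # used only to demonstrate the changed step in isolation.

    VLM_PROMPT = "A descriptive caption for this image"

    def build_prompt(vlm_prompt=None):
        # Fall back to the default prompt when the caller passes nothing.
        if not vlm_prompt:
            vlm_prompt = VLM_PROMPT
        # The corrected step: append ":\n" after the fallback, so both the
        # default and any user-supplied prompt carry the suffix before
        # tokenizer.encode() sees the text.
        return vlm_prompt + ":\n"

    assert build_prompt() == "A descriptive caption for this image:\n"
    assert build_prompt("A short caption") == "A short caption:\n"

Before this commit the suffix was baked into VLM_PROMPT itself, so only the default prompt ended in ":\n" while a custom prompt was tokenized without it; appending the suffix after the fallback closes that gap.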