Update joycaption.py
Added name_input and custom_prompt back
- joycaption.py +1 -1
joycaption.py CHANGED
@@ -266,7 +266,7 @@ load_text_model(MODEL_PATH, None, LOAD_IN_NF4, True)
 
 @spaces.GPU
 @torch.inference_mode()
-def stream_chat_mod(input_image: Image.Image, caption_type: str, caption_length: Union[str, int], extra_options: list[str],
+def stream_chat_mod(input_image: Image.Image, caption_type: str, caption_length: Union[str, int], extra_options: list[str], name_input: str, custom_prompt: str,
                     max_new_tokens: int=300, top_p: float=0.9, temperature: float=0.6, model_name: str=MODEL_PATH, progress=gr.Progress(track_tqdm=True)) -> tuple[str, str]:
     global tokenizer, text_model, image_adapter, pixtral_model, pixtral_processor, text_model_client, use_inference_client
     torch.cuda.empty_cache()
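Since this hunk only changes the signature of `stream_chat_mod`, the Gradio event wiring elsewhere in joycaption.py must pass the two restored inputs in the same positions. Below is a minimal sketch of what that caller side could look like; the component names and labels are illustrative assumptions, not taken from this diff:

```python
# Hypothetical Gradio wiring (not part of this diff): the click handler
# must supply name_input and custom_prompt in the same positions as the
# updated stream_chat_mod signature. All component names are illustrative.
import gradio as gr

with gr.Blocks() as demo:
    input_image = gr.Image(type="pil", label="Input Image")
    caption_type = gr.Dropdown(["Descriptive", "Training Prompt"], label="Caption Type")
    caption_length = gr.Dropdown(["any", "short", "long"], label="Caption Length")
    extra_options = gr.CheckboxGroup([], label="Extra Options")
    name_input = gr.Textbox(label="Person / Character Name")  # restored input
    custom_prompt = gr.Textbox(label="Custom Prompt")         # restored input
    output_prompt = gr.Textbox(label="Prompt")
    output_caption = gr.Textbox(label="Caption")
    run_btn = gr.Button("Caption")

    # Positional order must mirror the new def stream_chat_mod(...) signature;
    # max_new_tokens, top_p, temperature, and model_name keep their defaults,
    # and the function's tuple[str, str] return maps onto the two outputs.
    run_btn.click(
        stream_chat_mod,
        inputs=[input_image, caption_type, caption_length, extra_options,
                name_input, custom_prompt],
        outputs=[output_prompt, output_caption],
    )
```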