Update modeling_clip.py
Browse files — modeling_clip.py (+2 additions, −2 deletions)
modeling_clip.py
CHANGED
@@ -250,7 +250,7 @@ class JinaCLIPModel(JinaCLIPPreTrainedModel):
 250
 251     def get_tokenizer(self):
 252         if not self.tokenizer:
 253 -           self.tokenizer = AutoTokenizer.from_pretrained(config._name_or_path, trust_remote_code=True)
 254         return self.tokenizer
 255
 256     @torch.inference_mode()
@@ -363,7 +363,7 @@ class JinaCLIPModel(JinaCLIPPreTrainedModel):
 363
 364     def get_preprocess(self):
 365         if not self.preprocess:
 366 -           self.preprocess = AutoImageProcessor.from_pretrained(config._name_or_path, trust_remote_code=True)
 367         return self.preprocess
 368
 369
|
|
250 |
|
251 |
def get_tokenizer(self):
|
252 |
if not self.tokenizer:
|
253 |
+
self.tokenizer = AutoTokenizer.from_pretrained(self.config._name_or_path, trust_remote_code=True)
|
254 |
return self.tokenizer
|
255 |
|
256 |
@torch.inference_mode()
|
|
|
363 |
|
364 |
def get_preprocess(self):
|
365 |
if not self.preprocess:
|
366 |
+
self.preprocess = AutoImageProcessor.from_pretrained(self.config._name_or_path, trust_remote_code=True)
|
367 |
return self.preprocess
|
368 |
|
369 |
|