Update app.py
app.py
CHANGED
@@ -64,11 +64,18 @@ model = LlavaMistralForCausalLM.from_pretrained(
 )
 
 # Assign the components to the old variable names so the rest of the code keeps working
+from transformers import AutoTokenizer, AutoImageProcessor
+
 tokenizer = AutoTokenizer.from_pretrained(model_path)
 try:
     image_processor = AutoImageProcessor.from_pretrained(model_path)
+    if image_processor is None:
+        raise Exception()
 except Exception:
-
+    # Fallback: the matching vision tower!
+    image_processor = AutoImageProcessor.from_pretrained("openai/clip-vit-large-patch14-336")
+    assert image_processor is not None, "Could not load image_processor!"
+
 
 # Set the context length (in case the rest of the code needs it)
 context_len = model.config.max_position_embeddings
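For reference, here is a minimal, self-contained sketch of the fallback pattern this commit introduces. It is an illustration under assumptions, not the Space's exact code: model_path is a hypothetical example value, and the CLIP checkpoint is the one the diff itself names.

# Sketch: load the image processor from the model repo, falling back to the
# CLIP vision tower if the repo ships no preprocessor config.
from transformers import AutoTokenizer, AutoImageProcessor

model_path = "liuhaotian/llava-v1.6-mistral-7b"  # hypothetical example path

tokenizer = AutoTokenizer.from_pretrained(model_path)
try:
    # from_pretrained raises (e.g. OSError) when no image-processor config
    # is found; it does not return None on failure.
    image_processor = AutoImageProcessor.from_pretrained(model_path)
except Exception:
    # Fall back to CLIP ViT-L/14 at 336px, the vision tower named in the diff.
    image_processor = AutoImageProcessor.from_pretrained(
        "openai/clip-vit-large-patch14-336"
    )

Because from_pretrained raises rather than returning None, the added "if image_processor is None: raise Exception()" guard and the trailing assert are defensive; the try/except around the first load is what actually routes execution to the CLIP fallback.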