Update
Browse files
app.py
CHANGED
@@ -245,7 +245,7 @@ if __name__ == '__main__':
 
   gradio_helpers.set_warmup_function(warmup)
   for name, (repo, filename) in models.MODELS.items():
-    gradio_helpers.register_download(name, repo, filename + "-text-model-q4_k_m.gguf")
-    gradio_helpers.register_download(name, repo, filename + "-mmproj-f16.gguf")
+    gradio_helpers.register_download(name + "-text-model-q4_k_m.gguf", repo, filename + "-text-model-q4_k_m.gguf")
+    gradio_helpers.register_download(name + "-mmproj-f16.gguf", repo, filename + "-mmproj-f16.gguf")
 
   create_app().queue().launch()
models.py
CHANGED
@@ -78,7 +78,8 @@ def generate(
   # with gradio_helpers.timed('computation', start_message=True):
   #   tokens = model.predict(params, batch, sampler=sampler)
 
-  model_path = gradio_helpers.get_paths()[model_name]
+  model_path = gradio_helpers.get_paths()[model_name + "-text-model-q4_k_m.gguf"]
+  clip_path = gradio_helpers.get_paths()[model_name + "-mmproj-f16.gguf"]
   print(model_path)
   print(gradio_helpers.get_paths())
   model = llama_cpp.Llama(