abetlen committed on
Commit 7a12ec7 · 1 Parent(s): db10c0e
Files changed (2)
  1. app.py +2 -2
  2. models.py +1 -1
app.py CHANGED
@@ -14,7 +14,7 @@ import gradio_helpers
 import models
 import paligemma_parse
 
-INTRO_TEXT = """🤲 PaliGemma demo\n\n
+INTRO_TEXT = """🤲 PaliGemma GGUF demo\n\n
 | [Paper](https://arxiv.org/abs/2407.07726)
 | [GitHub](https://github.com/google-research/big_vision/blob/main/big_vision/configs/proj/paligemma/README.md)
 | [HF blog post](https://huggingface.co/blog/paligemma)
@@ -98,7 +98,7 @@ def compute(image, prompt, model_name, sampler):
 
 def warmup(model_name):
   image = PIL.Image.new('RGB', [1, 1])
-  _ = compute(image, '', model_name + "-text-model-q4_k_m.gguf", 'greedy')
+  _ = compute(image, '', model_name, 'greedy')
 
 
 def reset():
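
In the second hunk, the warmup() call drops the "-text-model-q4_k_m.gguf" suffix and passes the bare model name straight to compute(). A minimal, hypothetical sketch of the filename resolution this implies happens downstream; the helper name and constant below are assumptions, not code from this Space:

# Hypothetical helper (not from this repository): map a base model name to its
# quantized GGUF text-model filename so callers such as warmup() can pass the
# bare name. Only the "-text-model-q4_k_m.gguf" suffix itself appears in the
# previous version of app.py.
GGUF_TEXT_MODEL_SUFFIX = '-text-model-q4_k_m.gguf'

def resolve_gguf_filename(model_name: str) -> str:
  """Return the GGUF filename for a base model name; pass .gguf names through."""
  if model_name.endswith('.gguf'):
    return model_name  # already a concrete filename
  return model_name + GGUF_TEXT_MODEL_SUFFIX

# Example: resolve_gguf_filename('paligemma-3b-mix-224')
# -> 'paligemma-3b-mix-224-text-model-q4_k_m.gguf'
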
models.py CHANGED
@@ -29,7 +29,7 @@ MODELS = {
 
 MODELS_INFO = {
     'paligemma-3b-mix-224': (
-        'JAX/FLAX PaliGemma 3B weights, finetuned with 224x224 input images and 256 token input/output '
+        'GGUF PaliGemma 3B weights quantized in Q4_K_M Format, finetuned with 224x224 input images and 256 token input/output '
        'text sequences on a mixture of downstream academic datasets. The models are available in float32, '
        'bfloat16 and float16 format for research purposes only.'
    ),
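
The rewritten description points to Q4_K_M-quantized GGUF weights. As context only, a minimal sketch of loading such a text-model file with llama-cpp-python and running a greedy-style completion; the filename mirrors the "-text-model-q4_k_m.gguf" suffix from the old warmup() call, and none of this code comes from the Space's models.py (image handling is out of scope here):

# Hypothetical sketch, not code from this Space: load a Q4_K_M GGUF text model
# with llama-cpp-python and run a greedy-style completion. Adjust the path and
# parameters to whatever files the demo actually publishes.
from llama_cpp import Llama

llm = Llama(
    model_path='paligemma-3b-mix-224-text-model-q4_k_m.gguf',  # assumed local path
    n_ctx=512,       # room for the demo's 256-token input/output sequences
    verbose=False,
)

out = llm('caption en\n', max_tokens=64, temperature=0.0)  # temperature=0.0 ≈ greedy
print(out['choices'][0]['text'])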