Fixing model size to 1B? What will wind.surf do?
app.py CHANGED
@@ -9,7 +9,7 @@ MODELS = {
         "filename": "*Q4_K_M.gguf"
     },
     "Llama-3.2-1.5B": {
-        "repo_id": "lmstudio-community/Llama-3.2-
+        "repo_id": "lmstudio-community/Llama-3.2-1B-Instruct-GGUF",
         "filename": "*Q4_K_M.gguf"
     }
 }
@@ -85,7 +85,7 @@ demo = gr.ChatInterface(
         choices=list(MODELS.keys()),
         value=list(MODELS.keys())[0],
         label="Select Model",
-        interactive=
+        interactive=True,
         allow_custom_value=False,
         elem_id="model_selector",
         show_label=True
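For context, here is a minimal sketch of how a MODELS entry like the one patched in the first hunk is commonly consumed: llama-cpp-python's Llama.from_pretrained can resolve a GGUF file on the Hugging Face Hub from a repo_id plus a filename glob such as "*Q4_K_M.gguf". The load_model helper and the n_ctx value are illustrative assumptions; the rest of app.py is not shown in this commit.

```python
# Sketch only: assumes the app loads GGUF weights with llama-cpp-python.
from llama_cpp import Llama

MODELS = {
    "Llama-3.2-1.5B": {
        "repo_id": "lmstudio-community/Llama-3.2-1B-Instruct-GGUF",
        "filename": "*Q4_K_M.gguf",
    },
}

def load_model(name: str) -> Llama:
    cfg = MODELS[name]
    # from_pretrained downloads the first file in the repo matching the glob
    # and returns a ready-to-use Llama instance.
    return Llama.from_pretrained(
        repo_id=cfg["repo_id"],
        filename=cfg["filename"],
        n_ctx=4096,  # illustrative value, not taken from the commit
    )
```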
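The second hunk completes the interactive= argument of a gr.Dropdown. A minimal sketch of how that dropdown is presumably wired into the chat UI, assuming it is passed to gr.ChatInterface via additional_inputs; the respond() stub is illustrative and not part of the commit.

```python
# Sketch only: assumes the model selector is an additional input of the chat UI.
import gradio as gr

MODELS = {"Llama-3.2-1B": {}, "Llama-3.2-1.5B": {}}  # placeholder entries

def respond(message, history, model_name):
    # The real app would route the prompt to the selected GGUF model here.
    return f"[{model_name}] echo: {message}"

demo = gr.ChatInterface(
    fn=respond,
    additional_inputs=[
        gr.Dropdown(
            choices=list(MODELS.keys()),
            value=list(MODELS.keys())[0],
            label="Select Model",
            interactive=True,       # the flag completed in this commit
            allow_custom_value=False,
            elem_id="model_selector",
            show_label=True,
        )
    ],
)

if __name__ == "__main__":
    demo.launch()
```

With interactive=True the dropdown accepts user input, and the selected key is forwarded to the chat function alongside the message and history.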