CaioXapelaum committed
Commit 46016b8
1 Parent(s): b40c910

Added c4ai command-r 08, Gemma 2 9B IT, Gemma 2 27B IT

Files changed (1)
  1. app.py +25 -1
app.py CHANGED
@@ -111,6 +111,27 @@ hf_hub_download(
     token=huggingface_token
 )
 
+hf_hub_download(
+    repo_id="bartowski/c4ai-command-r-08-2024-GGUF",
+    filename="c4ai-command-r-08-2024-Q5_K_M.gguf",
+    local_dir="./models",
+    token=huggingface_token
+)
+
+hf_hub_download(
+    repo_id="bartowski/gemma-2-9b-it-GGUF",
+    filename="gemma-2-9b-it-Q5_K_M.gguf",
+    local_dir="./models",
+    token=huggingface_token
+)
+
+hf_hub_download(
+    repo_id="bartowski/gemma-2-27b-it-GGUF",
+    filename="gemma-2-27b-it-Q5_K_M.gguf",
+    local_dir="./models",
+    token=huggingface_token
+)
+
 
 
 llm = None
@@ -209,7 +230,10 @@ demo = gr.ChatInterface(
         'Mistral-Small-Instruct-2409-Q5_K_M.gguf',
         'Llama-3.1-SuperNova-Lite-Q5_K_M.gguf',
         'mixtralorochi8x7b.Q4_K_M.gguf',
-        'Phi-3.5-mini-instruct-Q6_K.gguf'
+        'Phi-3.5-mini-instruct-Q6_K.gguf',
+        'c4ai-command-r-08-2024-Q5_K_M.gguf',
+        'gemma-2-9b-it-Q5_K_M.gguf',
+        'gemma-2-27b-it-Q5_K_M.gguf'
     ],
     value="gemma-2-2b-it-Q6_K_L.gguf",
     label="Model"
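
For context, a minimal sketch of how these changes fit together (not the Space's actual code; the llama-cpp-python usage is an assumption based on the GGUF filenames and the llm = None placeholder): hf_hub_download saves each quantized file under ./models, and the filename selected in the Dropdown can then be passed to llama_cpp.Llama to load that model.

from llama_cpp import Llama  # assumption: the Space loads GGUF files with llama-cpp-python

def load_model(filename: str) -> Llama:
    # filename is the gr.Dropdown selection, e.g. "gemma-2-9b-it-Q5_K_M.gguf";
    # hf_hub_download(..., local_dir="./models") placed the file there.
    return Llama(
        model_path=f"./models/{filename}",
        n_ctx=4096,        # context window; tune to the model and available memory
        n_gpu_layers=-1,   # offload all layers to GPU when one is available
    )

# Example with one of the newly added models:
# llm = load_model("c4ai-command-r-08-2024-Q5_K_M.gguf")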