Remove support for Gemini 1.5 Flash as 2.0 is now in GA
Files changed:
- README.md +0 -1
- global_config.py +2 -6
README.md
CHANGED
@@ -50,7 +50,6 @@ The supported LLMs offer different styles of content generation. Use one of the
 |:---------------------------------| :------- |:-------------------------------------------------------------------------------------------------------------------------|:-------------------------|
 | Mistral 7B Instruct v0.2 | Hugging Face (`hf`) | Optional but strongly encouraged; [get here](https://huggingface.co/settings/tokens) | Faster, shorter content |
 | Mistral NeMo Instruct 2407 | Hugging Face (`hf`) | Optional but strongly encouraged; [get here](https://huggingface.co/settings/tokens) | Slower, longer content |
-| Gemini 1.5 Flash | Google Gemini API (`gg`) | Mandatory; [get here](https://aistudio.google.com/apikey) | Faster, longer content |
 | Gemini 2.0 Flash | Google Gemini API (`gg`) | Mandatory; [get here](https://aistudio.google.com/apikey) | Faster, longer content |
 | Gemini 2.0 Flash Lite | Google Gemini API (`gg`) | Mandatory; [get here](https://aistudio.google.com/apikey) | Faster, longer content |
 | GPT | Azure OpenAI (`az`) | Mandatory; [get here](https://ai.azure.com/resource/playground) NOTE: You need to have your subscription/billing set up | Faster, longer content |
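The provider codes in the table above (`hf`, `gg`, `az`) reappear as prefixes on the model keys in `global_config.py`, e.g. `[gg]gemini-2.0-flash`. As a rough illustration only, a key in that `[xx]model-name` shape could be split into its provider code and model name as sketched below; the helper name and regex are assumptions for this sketch, not code from the repo:

```python
import re

# Hypothetical helper (not from this repo): split a provider-prefixed key
# such as '[gg]gemini-2.0-flash' into ('gg', 'gemini-2.0-flash').
_MODEL_KEY_RE = re.compile(r'^\[(?P<provider>[a-z]{2})\](?P<model>.+)$')

def split_model_key(key: str) -> tuple[str, str]:
    """Return (provider_code, model_name) for a '[xx]model-name' key."""
    match = _MODEL_KEY_RE.match(key)
    if match is None:
        raise ValueError(f'Not a provider-prefixed model key: {key!r}')
    return match.group('provider'), match.group('model')

print(split_model_key('[gg]gemini-2.0-flash'))  # ('gg', 'gemini-2.0-flash')
```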
global_config.py
CHANGED
@@ -42,11 +42,6 @@ class GlobalConfig:
             'max_new_tokens': 4096,
             'paid': True,
         },
-        '[gg]gemini-1.5-flash-002': {
-            'description': 'faster, detailed',
-            'max_new_tokens': 8192,
-            'paid': True,
-        },
         '[gg]gemini-2.0-flash': {
             'description': 'fast, detailed',
             'max_new_tokens': 8192,
@@ -80,13 +75,14 @@ class GlobalConfig:
     }
     LLM_PROVIDER_HELP = (
         'LLM provider codes:\n\n'
+        '- **[az]**: Azure OpenAI\n'
         '- **[co]**: Cohere\n'
         '- **[gg]**: Google Gemini API\n'
         '- **[hf]**: Hugging Face Inference API\n'
         '- **[to]**: Together AI\n\n'
         '[Find out more](https://github.com/barun-saha/slide-deck-ai?tab=readme-ov-file#summary-of-the-llms)'
     )
-    DEFAULT_MODEL_INDEX =
+    DEFAULT_MODEL_INDEX = 4
     LLM_MODEL_TEMPERATURE = 0.2
     LLM_MODEL_MIN_OUTPUT_LENGTH = 100
     LLM_MODEL_MAX_INPUT_LENGTH = 400  # characters
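The commit also sets `DEFAULT_MODEL_INDEX = 4`, presumably to keep the default selection pointing at the intended entry after the Gemini 1.5 key is dropped. A minimal sketch follows, assuming the index is applied to the insertion order of the `VALID_MODELS` keys; the trimmed dict below is a hypothetical stand-in, not the real contents of `global_config.py`:

```python
# Hypothetical, trimmed stand-in for GlobalConfig.VALID_MODELS; keys follow
# the '[provider]model-name' convention used in the real config.
VALID_MODELS = {
    '[az]gpt': {'description': 'faster, longer'},
    '[gg]gemini-2.0-flash': {'description': 'fast, detailed'},
    '[gg]gemini-2.0-flash-lite': {'description': 'fast, detailed'},
    '[hf]mistral-7b-instruct-v0.2': {'description': 'faster, shorter'},
    '[hf]mistral-nemo-instruct-2407': {'description': 'slower, longer'},
}
DEFAULT_MODEL_INDEX = 4  # value introduced by this commit

def default_model_key(models: dict, index: int) -> str:
    """Pick the default model key by its position in the config's key order."""
    keys = list(models)  # dicts preserve insertion order (Python 3.7+)
    return keys[min(index, len(keys) - 1)]  # clamp if the list ever shrinks

print(default_model_key(VALID_MODELS, DEFAULT_MODEL_INDEX))
# -> '[hf]mistral-nemo-instruct-2407' with this stand-in dict
```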