Spaces:
Running
Running
Merge pull request #89 from barun-saha/visual
Browse files — app.py +14 -12
- global_config.py +2 -2
app.py
CHANGED
@@ -368,26 +368,28 @@ def set_up_chat_ui():
|
|
368 |
)
|
369 |
return
|
370 |
except Exception as ex:
|
371 |
-
_msg = str(ex)
|
372 |
-
if 'payment required' in _msg:
|
373 |
handle_error(
|
374 |
'The available inference quota has exhausted.'
|
375 |
' Please use your own Hugging Face access token. Paste your token in'
|
376 |
' the input field on the sidebar to the left.'
|
377 |
'\n\nDon\'t have a token? Get your free'
|
378 |
' [HF access token](https://huggingface.co/settings/tokens) now'
|
379 |
-
' and
|
|
|
380 |
should_log=True
|
381 |
)
|
382 |
-
|
383 |
-
|
384 |
-
|
385 |
-
|
386 |
-
|
387 |
-
|
388 |
-
|
389 |
-
|
390 |
-
|
|
|
391 |
return
|
392 |
|
393 |
history.add_user_message(prompt)
|
|
|
368 |
)
|
369 |
return
|
370 |
except Exception as ex:
|
371 |
+
_msg = str(ex)
|
372 |
+
if 'payment required' in _msg.lower():
|
373 |
handle_error(
|
374 |
'The available inference quota has exhausted.'
|
375 |
' Please use your own Hugging Face access token. Paste your token in'
|
376 |
' the input field on the sidebar to the left.'
|
377 |
'\n\nDon\'t have a token? Get your free'
|
378 |
' [HF access token](https://huggingface.co/settings/tokens) now'
|
379 |
+
' and start creating your slide deck! Alternatively, choose a different LLM'
|
380 |
+
' and provider from the list.',
|
381 |
should_log=True
|
382 |
)
|
383 |
+
else:
|
384 |
+
handle_error(
|
385 |
+
f'An unexpected error occurred while generating the content: {_msg}'
|
386 |
+
'\n\nPlease try again later, possibly with different inputs.'
|
387 |
+
' Alternatively, try selecting a different LLM from the dropdown list.'
|
388 |
+
' If you are using Azure OpenAI, Cohere, Gemini, or Together AI models, make'
|
389 |
+
' sure that you have provided a correct API key.'
|
390 |
+
' Read **[how to get free LLM API keys](https://github.com/barun-saha/slide-deck-ai?tab=readme-ov-file#summary-of-the-llms)**.',
|
391 |
+
True
|
392 |
+
)
|
393 |
return
|
394 |
|
395 |
history.add_user_message(prompt)
|
global_config.py
CHANGED
@@ -59,7 +59,7 @@ class GlobalConfig:
|
|
59 |
},
|
60 |
'[hf]mistralai/Mistral-Nemo-Instruct-2407': {
|
61 |
'description': 'longer response',
|
62 |
-
'max_new_tokens':
|
63 |
'paid': False,
|
64 |
},
|
65 |
'[to]meta-llama/Llama-3.3-70B-Instruct-Turbo': {
|
@@ -82,7 +82,7 @@ class GlobalConfig:
|
|
82 |
'- **[to]**: Together AI\n\n'
|
83 |
'[Find out more](https://github.com/barun-saha/slide-deck-ai?tab=readme-ov-file#summary-of-the-llms)'
|
84 |
)
|
85 |
-
DEFAULT_MODEL_INDEX = 4
|
86 |
LLM_MODEL_TEMPERATURE = 0.2
|
87 |
LLM_MODEL_MIN_OUTPUT_LENGTH = 100
|
88 |
LLM_MODEL_MAX_INPUT_LENGTH = 400 # characters
|
|
|
59 |
},
|
60 |
'[hf]mistralai/Mistral-Nemo-Instruct-2407': {
|
61 |
'description': 'longer response',
|
62 |
+
'max_new_tokens': 8192,
|
63 |
'paid': False,
|
64 |
},
|
65 |
'[to]meta-llama/Llama-3.3-70B-Instruct-Turbo': {
|
|
|
82 |
'- **[to]**: Together AI\n\n'
|
83 |
'[Find out more](https://github.com/barun-saha/slide-deck-ai?tab=readme-ov-file#summary-of-the-llms)'
|
84 |
)
|
85 |
+
DEFAULT_MODEL_INDEX = int(os.environ.get('DEFAULT_MODEL_INDEX', '4'))
|
86 |
LLM_MODEL_TEMPERATURE = 0.2
|
87 |
LLM_MODEL_MIN_OUTPUT_LENGTH = 100
|
88 |
LLM_MODEL_MAX_INPUT_LENGTH = 400 # characters
|