barunsaha committed on
Commit a18e5bc · 1 Parent(s): 258a34d

Set default LLM via env variable

Files changed (2)
  1. app.py +13 -12
  2. global_config.py +2 -2
app.py CHANGED
@@ -368,26 +368,27 @@ def set_up_chat_ui():
                 )
                 return
         except Exception as ex:
-            _msg = str(ex).lower()
-            if 'payment required' in _msg:
+            _msg = str(ex)
+            if 'payment required' in _msg.lower():
                 handle_error(
                     'The available inference quota has exhausted.'
                     ' Please use your own Hugging Face access token. Paste your token in'
                     ' the input field on the sidebar to the left.'
                     '\n\nDon\'t have a token? Get your free'
                     ' [HF access token](https://huggingface.co/settings/tokens) now'
-                    ' and create a magical slide deck!',
+                    ' and start creating your slide deck!',
                     should_log=True
                 )
-            handle_error(
-                f'An unexpected error occurred while generating the content: {ex}'
-                '\n\nPlease try again later, possibly with different inputs.'
-                ' Alternatively, try selecting a different LLM from the dropdown list.'
-                ' If you are using Azure OpenAI, Cohere, Gemini, or Together AI models, make sure'
-                ' that you have provided a correct API key.'
-                ' Read **[how to get free LLM API keys](https://github.com/barun-saha/slide-deck-ai?tab=readme-ov-file#summary-of-the-llms)**.',
-                True
-            )
+            else:
+                handle_error(
+                    f'An unexpected error occurred while generating the content: {_msg}'
+                    '\n\nPlease try again later, possibly with different inputs.'
+                    ' Alternatively, try selecting a different LLM from the dropdown list.'
+                    ' If you are using Azure OpenAI, Cohere, Gemini, or Together AI models, make'
+                    ' sure that you have provided a correct API key.'
+                    ' Read **[how to get free LLM API keys](https://github.com/barun-saha/slide-deck-ai?tab=readme-ov-file#summary-of-the-llms)**.',
+                    True
+                )
             return
 
         history.add_user_message(prompt)
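
For context, a minimal, self-contained sketch of the reworked exception branch follows. The handle_error() stub and the report_generation_failure() wrapper are illustrative only (in app.py, handle_error logs and renders the message in the Streamlit UI); the point of the change is that the generic handler now runs only when the exception is not a quota ("payment required") error, and that str(ex) is kept unmodified so the generic message preserves the original casing.

def handle_error(message: str, should_log: bool = True) -> None:
    # Illustrative stub; the real helper in app.py logs and shows the message in the UI.
    if should_log:
        print(f'[error] {message}')


def report_generation_failure(ex: Exception) -> None:
    # Hypothetical wrapper mirroring the new except-block logic.
    _msg = str(ex)
    if 'payment required' in _msg.lower():
        handle_error(
            'The available inference quota has exhausted.'
            ' Please use your own Hugging Face access token.',
            should_log=True,
        )
    else:
        handle_error(
            f'An unexpected error occurred while generating the content: {_msg}',
            True,
        )


# A 402-style error now triggers only the quota hint; anything else gets the generic message.
report_generation_failure(RuntimeError('402 Client Error: Payment Required'))
report_generation_failure(ValueError('Malformed JSON in LLM response'))
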
global_config.py CHANGED
@@ -59,7 +59,7 @@ class GlobalConfig:
         },
         '[hf]mistralai/Mistral-Nemo-Instruct-2407': {
             'description': 'longer response',
-            'max_new_tokens': 10240,
+            'max_new_tokens': 8192,
             'paid': False,
         },
         '[to]meta-llama/Llama-3.3-70B-Instruct-Turbo': {
@@ -82,7 +82,7 @@ class GlobalConfig:
         '- **[to]**: Together AI\n\n'
         '[Find out more](https://github.com/barun-saha/slide-deck-ai?tab=readme-ov-file#summary-of-the-llms)'
     )
-    DEFAULT_MODEL_INDEX = 4
+    DEFAULT_MODEL_INDEX = int(os.environ.get('DEFAULT_MODEL_INDEX', '4'))
     LLM_MODEL_TEMPERATURE = 0.2
     LLM_MODEL_MIN_OUTPUT_LENGTH = 100
     LLM_MODEL_MAX_INPUT_LENGTH = 400 # characters
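
A small usage sketch of the new environment-driven default (the helper name below is illustrative; global_config.py reads the variable inline and assumes os is already imported). Because the value is evaluated at import time of global_config.py, DEFAULT_MODEL_INDEX must be set before the app starts, for example as a Space or container environment variable.

import os

def resolve_default_model_index(fallback: int = 4) -> int:
    # Illustrative helper: read the default LLM's position in the model list from
    # the environment, falling back to index 4. A non-integer value raises ValueError;
    # an out-of-range index would only surface when the model list is indexed.
    return int(os.environ.get('DEFAULT_MODEL_INDEX', str(fallback)))


print(resolve_default_model_index())       # 4 when the variable is unset
os.environ['DEFAULT_MODEL_INDEX'] = '2'    # e.g., set via the deployment environment
print(resolve_default_model_index())       # 2
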