"""
A set of configurations used by the app.
"""
import logging
import os

from dataclasses import dataclass
from dotenv import load_dotenv


load_dotenv()


@dataclass(frozen=True)
class GlobalConfig:
    """
    A data class holding the configurations.
    """

    PROVIDER_COHERE = 'co'
    PROVIDER_GOOGLE_GEMINI = 'gg'
    PROVIDER_HUGGING_FACE = 'hf'
    PROVIDER_OLLAMA = 'ol'
    PROVIDER_TOGETHER_AI = 'to'
    PROVIDER_AZURE_OPENAI = 'az'
    VALID_PROVIDERS = {
        PROVIDER_COHERE,
        PROVIDER_GOOGLE_GEMINI,
        PROVIDER_HUGGING_FACE,
        PROVIDER_OLLAMA,
        PROVIDER_TOGETHER_AI,
        PROVIDER_AZURE_OPENAI,
    }
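    # Keys of VALID_MODELS below follow the '[provider-code]model-name' convention,
    # where the two-letter prefix is one of the provider codes defined above.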
    VALID_MODELS = {
        '[az]azure/open-ai': {
            'description': 'faster, detailed',
            'max_new_tokens': 8192,
            'paid': True,
        },
        '[co]command-r-08-2024': {
            'description': 'simpler, slower',
            'max_new_tokens': 4096,
            'paid': True,
        },
        '[gg]gemini-2.0-flash': {
            'description': 'fast, detailed',
            'max_new_tokens': 8192,
            'paid': True,
        },
        '[gg]gemini-2.0-flash-lite': {
            'description': 'fastest, detailed',
            'max_new_tokens': 8192,
            'paid': True,
        },
        '[hf]mistralai/Mistral-7B-Instruct-v0.2': {
            'description': 'faster, shorter',
            'max_new_tokens': 8192,
            'paid': False,
        },
        '[hf]mistralai/Mistral-Nemo-Instruct-2407': {
            'description': 'longer response',
            'max_new_tokens': 8192,
            'paid': False,
        },
        '[to]meta-llama/Llama-3.3-70B-Instruct-Turbo': {
            'description': 'detailed, slower',
            'max_new_tokens': 4096,
            'paid': True,
        },
        '[to]meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo-128K': {
            'description': 'shorter, faster',
            'max_new_tokens': 4096,
            'paid': True,
        },
    }
    LLM_PROVIDER_HELP = (
        'LLM provider codes:\n\n'
        '- **[az]**: Azure OpenAI\n'
        '- **[co]**: Cohere\n'
        '- **[gg]**: Google Gemini API\n'
        '- **[hf]**: Hugging Face Inference API\n'
        '- **[to]**: Together AI\n\n'
        '[Find out more](https://github.com/barun-saha/slide-deck-ai?tab=readme-ov-file#summary-of-the-llms)'
    )
    DEFAULT_MODEL_INDEX = int(os.environ.get('DEFAULT_MODEL_INDEX', '4'))
    LLM_MODEL_TEMPERATURE = 0.2
    LLM_MODEL_MIN_OUTPUT_LENGTH = 100
    LLM_MODEL_MAX_INPUT_LENGTH = 400  # characters

    HUGGINGFACEHUB_API_TOKEN = os.environ.get('HUGGINGFACEHUB_API_TOKEN', '')

    LOG_LEVEL = 'DEBUG'
    COUNT_TOKENS = False
    APP_STRINGS_FILE = 'strings.json'
    PRELOAD_DATA_FILE = 'examples/example_02.json'
    SLIDES_TEMPLATE_FILE = 'langchain_templates/template_combined.txt'
    INITIAL_PROMPT_TEMPLATE = 'langchain_templates/chat_prompts/initial_template_v4_two_cols_img.txt'
    REFINEMENT_PROMPT_TEMPLATE = 'langchain_templates/chat_prompts/refinement_template_v4_two_cols_img.txt'

    LLM_PROGRESS_MAX = 90
    ICONS_DIR = 'icons/png128/'
    TINY_BERT_MODEL = 'gaunernst/bert-mini-uncased'
    EMBEDDINGS_FILE_NAME = 'file_embeddings/embeddings.npy'
    ICONS_FILE_NAME = 'file_embeddings/icons.npy'

    PPTX_TEMPLATE_FILES = {
        'Basic': {
            'file': 'pptx_templates/Blank.pptx',
            'caption': 'A good start (Uses [photos](https://unsplash.com/photos/AFZ-qBPEceA) by [cetteup](https://unsplash.com/@cetteup?utm_content=creditCopyText&utm_medium=referral&utm_source=unsplash) on [Unsplash](https://unsplash.com/photos/a-foggy-forest-filled-with-lots-of-trees-d3ci37Gcgxg?utm_content=creditCopyText&utm_medium=referral&utm_source=unsplash)) 🟧'
        },
        'Ion Boardroom': {
            'file': 'pptx_templates/Ion_Boardroom.pptx',
            'caption': 'Make some bold decisions 🟥'
        },
        'Minimalist Sales Pitch': {
            'file': 'pptx_templates/Minimalist_sales_pitch.pptx',
            'caption': 'In high contrast ⬛'
        },
        'Urban Monochrome': {
            'file': 'pptx_templates/Urban_monochrome.pptx',
            'caption': 'Marvel in a monochrome dream ⬜'
        },
    }

    # This is a long text, so not incorporated as a string in `strings.json`
    CHAT_USAGE_INSTRUCTIONS = (
        'Briefly describe the topic of your presentation in the textbox provided below. For example:\n'
        '- Make a slide deck on AI.'
        '\n\n'
        'Subsequently, you can add follow-up instructions, e.g.:\n'
        '- Can you add a slide on GPUs?'
        '\n\n'
        'You can also ask it to refine any particular slide, e.g.:\n'
        '- Make the slide with title \'Examples of AI\' a bit more descriptive.'
        '\n\n'
        'Finally, click on the download button at the bottom to download the slide deck.'
        ' See this [demo video](https://youtu.be/QvAKzNKtk9k) for a brief walkthrough.\n\n'
        'Remember, the conversational interface is meant to (and will) update your *initial*'
        ' slide deck. If you want to create a new slide deck on a different topic,'
        ' start a new chat session by reloading this page.\n\n'
        'Currently, paid or *free-to-use* LLMs from five different providers are supported.'
        ' If one is unavailable, choose another from the dropdown list. A [summary of'
        ' the supported LLMs]('
        'https://github.com/barun-saha/slide-deck-ai/blob/main/README.md#summary-of-the-llms)'
        ' is available for reference. SlideDeck AI does **NOT** store your API keys.\n\n'
        'SlideDeck AI does not have access to the Web, apart from searching for images relevant'
        ' to the slides. Photos are added probabilistically; transparency needs to be changed'
        ' manually, if required.\n\n'
        '[SlideDeck AI](https://github.com/barun-saha/slide-deck-ai) is an Open-Source project,'
        ' released under the'
        ' [MIT license](https://github.com/barun-saha/slide-deck-ai?tab=MIT-1-ov-file#readme).'
        '\n\n---\n\n'
        '© Copyright 2023-2025 Barun Saha.\n\n'
    )


logging.basicConfig(
    level=GlobalConfig.LOG_LEVEL,
    format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
    datefmt='%Y-%m-%d %H:%M:%S'
)


def get_max_output_tokens(llm_name: str) -> int:
    """
    Get the max output tokens value configured for an LLM. Return a default value if not configured.

    :param llm_name: The name of the LLM.
    :return: Max output tokens or a default count.
    """

    try:
        return GlobalConfig.VALID_MODELS[llm_name]['max_new_tokens']
    except KeyError:
        # Fall back to a conservative default for models not listed in VALID_MODELS
        return 2048
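

# Illustrative usage sketch: how a model key from VALID_MODELS might be parsed into its
# provider code and model name, and how its output-token limit can be looked up.
if __name__ == '__main__':
    sample_key = '[gg]gemini-2.0-flash'  # one of the keys defined in GlobalConfig.VALID_MODELS
    provider_code = sample_key[1:sample_key.index(']')]
    model_name = sample_key[sample_key.index(']') + 1:]

    assert provider_code in GlobalConfig.VALID_PROVIDERS
    logging.getLogger(__name__).debug(
        'Provider: %s, model: %s, max output tokens: %d',
        provider_code, model_name, get_max_output_tokens(sample_key)
    )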