barunsaha commited on
Commit
33fa518
·
unverified ·
2 Parent(s): 156f462 a2b7695

Merge pull request #96 from rsrini7/openrouter-integration

Browse files

Added OpenRouter API support and load the API key automatically if it exists in the .env file

.env.example ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ # Example .env file for SlideDeck AI
2
+ # Add your API keys and configuration values here
3
+
4
+ # OpenRouter API key (if using OpenRouter as a provider)
5
+
6
+ OPENROUTER_API_KEY=your-openrouter-api-key
.gitignore CHANGED
@@ -144,3 +144,4 @@ dmypy.json
144
  # Cython debug symbols
145
  cython_debug/
146
 
 
 
144
  # Cython debug symbols
145
  cython_debug/
146
 
147
+ .idea
.idea/.gitignore DELETED
@@ -1,3 +0,0 @@
1
- # Default ignored files
2
- /shelf/
3
- /workspace.xml
 
 
 
 
.idea/inspectionProfiles/Project_Default.xml DELETED
@@ -1,14 +0,0 @@
1
- <component name="InspectionProjectProfileManager">
2
- <profile version="1.0">
3
- <option name="myName" value="Project Default" />
4
- <inspection_tool class="PyPackageRequirementsInspection" enabled="true" level="WARNING" enabled_by_default="true">
5
- <option name="ignoredPackages">
6
- <value>
7
- <list size="1">
8
- <item index="0" class="java.lang.String" itemvalue="numpy" />
9
- </list>
10
- </value>
11
- </option>
12
- </inspection_tool>
13
- </profile>
14
- </component>
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
.idea/inspectionProfiles/profiles_settings.xml DELETED
@@ -1,6 +0,0 @@
1
- <component name="InspectionProjectProfileManager">
2
- <settings>
3
- <option name="USE_PROJECT_PROFILE" value="false" />
4
- <version value="1.0" />
5
- </settings>
6
- </component>
 
 
 
 
 
 
 
.idea/misc.xml DELETED
@@ -1,7 +0,0 @@
1
- <?xml version="1.0" encoding="UTF-8"?>
2
- <project version="4">
3
- <component name="Black">
4
- <option name="sdkName" value="Python 3.10 (slide-deck-ai)" />
5
- </component>
6
- <component name="ProjectRootManager" version="2" project-jdk-name="Python 3.10 (slide-deck-ai)" project-jdk-type="Python SDK" />
7
- </project>
 
 
 
 
 
 
 
 
.idea/modules.xml DELETED
@@ -1,8 +0,0 @@
1
- <?xml version="1.0" encoding="UTF-8"?>
2
- <project version="4">
3
- <component name="ProjectModuleManager">
4
- <modules>
5
- <module fileurl="file://$PROJECT_DIR$/.idea/slide-deck-ai.iml" filepath="$PROJECT_DIR$/.idea/slide-deck-ai.iml" />
6
- </modules>
7
- </component>
8
- </project>
 
 
 
 
 
 
 
 
 
.idea/slide-deck-ai.iml DELETED
@@ -1,10 +0,0 @@
1
- <?xml version="1.0" encoding="UTF-8"?>
2
- <module type="PYTHON_MODULE" version="4">
3
- <component name="NewModuleRootManager">
4
- <content url="file://$MODULE_DIR$">
5
- <excludeFolder url="file://$MODULE_DIR$/venv" />
6
- </content>
7
- <orderEntry type="inheritedJdk" />
8
- <orderEntry type="sourceFolder" forTests="false" />
9
- </component>
10
- </module>
 
 
 
 
 
 
 
 
 
 
 
.idea/vcs.xml DELETED
@@ -1,6 +0,0 @@
1
- <?xml version="1.0" encoding="UTF-8"?>
2
- <project version="4">
3
- <component name="VcsDirectoryMappings">
4
- <mapping directory="$PROJECT_DIR$" vcs="Git" />
5
- </component>
6
- </project>
 
 
 
 
 
 
 
README.md CHANGED
@@ -54,6 +54,8 @@ The supported LLMs offer different styles of content generation. Use one of the
54
  | Gemini 2.0 Flash Lite | Google Gemini API (`gg`) | Mandatory; [get here](https://aistudio.google.com/apikey) | Fastest, longer content |
55
  | GPT | Azure OpenAI (`az`) | Mandatory; [get here](https://ai.azure.com/resource/playground) NOTE: You need to have your subscription/billing set up | Faster, longer content |
56
  | Command R+ | Cohere (`co`) | Mandatory; [get here](https://dashboard.cohere.com/api-keys) | Shorter, simpler content |
 
 
57
  | Llama 3.3 70B Instruct Turbo | Together AI (`to`) | Mandatory; [get here](https://api.together.ai/settings/api-keys) | Detailed, slower |
58
  | Llama 3.1 8B Instruct Turbo 128K | Together AI (`to`) | Mandatory; [get here](https://api.together.ai/settings/api-keys) | Shorter |
59
 
 
54
  | Gemini 2.0 Flash Lite | Google Gemini API (`gg`) | Mandatory; [get here](https://aistudio.google.com/apikey) | Fastest, longer content |
55
  | GPT | Azure OpenAI (`az`) | Mandatory; [get here](https://ai.azure.com/resource/playground) NOTE: You need to have your subscription/billing set up | Faster, longer content |
56
  | Command R+ | Cohere (`co`) | Mandatory; [get here](https://dashboard.cohere.com/api-keys) | Shorter, simpler content |
57
+ | Gemini-2.0-flash-001 | OpenRouter (`or`) | Mandatory; [get here](https://openrouter.ai/settings/keys) | Faster, longer content |
58
+ | GPT-3.5 Turbo | OpenRouter (`or`) | Mandatory; [get here](https://openrouter.ai/settings/keys) | Faster, longer content |
59
  | Llama 3.3 70B Instruct Turbo | Together AI (`to`) | Mandatory; [get here](https://api.together.ai/settings/api-keys) | Detailed, slower |
60
  | Llama 3.1 8B Instruct Turbo 128K | Together AI (`to`) | Mandatory; [get here](https://api.together.ai/settings/api-keys) | Shorter |
61
 
app.py CHANGED
@@ -25,10 +25,8 @@ import helpers.file_manager as filem
25
  from global_config import GlobalConfig
26
  from helpers import llm_helper, pptx_helper, text_helper
27
 
28
-
29
  load_dotenv()
30
 
31
-
32
  RUN_IN_OFFLINE_MODE = os.getenv('RUN_IN_OFFLINE_MODE', 'False').lower() == 'true'
33
 
34
 
@@ -182,14 +180,24 @@ with st.sidebar:
182
  on_change=reset_api_key
183
  ).split(' ')[0]
184
 
185
- # The API key/access token
 
 
 
 
 
 
 
 
 
186
  api_key_token = st.text_input(
187
  label=(
188
  '3: Paste your API key/access token:\n\n'
189
  '*Mandatory* for all providers.'
190
  ),
 
191
  type='password',
192
- key='api_key_input'
193
  )
194
 
195
  # Additional configs for Azure OpenAI
@@ -349,7 +357,11 @@ def set_up_chat_ui():
349
  if isinstance(chunk, str):
350
  response += chunk
351
  else:
352
- response += chunk.content # AIMessageChunk
 
 
 
 
353
 
354
  # Update the progress bar with an approx progress percentage
355
  progress_bar.progress(
@@ -581,3 +593,4 @@ def main():
581
 
582
  if __name__ == '__main__':
583
  main()
 
 
25
  from global_config import GlobalConfig
26
  from helpers import llm_helper, pptx_helper, text_helper
27
 
 
28
  load_dotenv()
29
 
 
30
  RUN_IN_OFFLINE_MODE = os.getenv('RUN_IN_OFFLINE_MODE', 'False').lower() == 'true'
31
 
32
 
 
180
  on_change=reset_api_key
181
  ).split(' ')[0]
182
 
183
+ # --- Automatically fetch API key from .env if available ---
184
+ provider_match = GlobalConfig.PROVIDER_REGEX.match(llm_provider_to_use)
185
+ selected_provider = provider_match.group(1) if provider_match else llm_provider_to_use
186
+ env_key_name = GlobalConfig.PROVIDER_ENV_KEYS.get(selected_provider)
187
+ default_api_key = os.getenv(env_key_name, "") if env_key_name else ""
188
+
189
+ # Always sync session state to env value if needed (auto-fill on provider change)
190
+ if default_api_key and st.session_state.get('api_key_input', None) != default_api_key:
191
+ st.session_state['api_key_input'] = default_api_key
192
+
193
  api_key_token = st.text_input(
194
  label=(
195
  '3: Paste your API key/access token:\n\n'
196
  '*Mandatory* for all providers.'
197
  ),
198
+ key='api_key_input',
199
  type='password',
200
+ disabled=bool(default_api_key),
201
  )
202
 
203
  # Additional configs for Azure OpenAI
 
357
  if isinstance(chunk, str):
358
  response += chunk
359
  else:
360
+ content = getattr(chunk, 'content', None)
361
+ if content is not None:
362
+ response += content
363
+ else:
364
+ response += str(chunk)
365
 
366
  # Update the progress bar with an approx progress percentage
367
  progress_bar.progress(
 
593
 
594
  if __name__ == '__main__':
595
  main()
596
+
global_config.py CHANGED
@@ -3,6 +3,7 @@ A set of configurations used by the app.
3
  """
4
  import logging
5
  import os
 
6
 
7
  from dataclasses import dataclass
8
  from dotenv import load_dotenv
@@ -20,9 +21,10 @@ class GlobalConfig:
20
  PROVIDER_COHERE = 'co'
21
  PROVIDER_GOOGLE_GEMINI = 'gg'
22
  PROVIDER_HUGGING_FACE = 'hf'
 
23
  PROVIDER_OLLAMA = 'ol'
 
24
  PROVIDER_TOGETHER_AI = 'to'
25
- PROVIDER_AZURE_OPENAI = 'az'
26
  VALID_PROVIDERS = {
27
  PROVIDER_COHERE,
28
  PROVIDER_GOOGLE_GEMINI,
@@ -30,7 +32,17 @@ class GlobalConfig:
30
  PROVIDER_OLLAMA,
31
  PROVIDER_TOGETHER_AI,
32
  PROVIDER_AZURE_OPENAI,
 
 
 
 
 
 
 
 
 
33
  }
 
34
  VALID_MODELS = {
35
  '[az]azure/open-ai': {
36
  'description': 'faster, detailed',
@@ -62,6 +74,16 @@ class GlobalConfig:
62
  'max_new_tokens': 8192,
63
  'paid': False,
64
  },
 
 
 
 
 
 
 
 
 
 
65
  '[to]meta-llama/Llama-3.3-70B-Instruct-Turbo': {
66
  'description': 'detailed, slower',
67
  'max_new_tokens': 4096,
@@ -71,7 +93,7 @@ class GlobalConfig:
71
  'description': 'shorter, faster',
72
  'max_new_tokens': 4096,
73
  'paid': True,
74
- },
75
  }
76
  LLM_PROVIDER_HELP = (
77
  'LLM provider codes:\n\n'
@@ -79,14 +101,14 @@ class GlobalConfig:
79
  '- **[co]**: Cohere\n'
80
  '- **[gg]**: Google Gemini API\n'
81
  '- **[hf]**: Hugging Face Inference API\n'
82
- '- **[to]**: Together AI\n\n'
 
83
  '[Find out more](https://github.com/barun-saha/slide-deck-ai?tab=readme-ov-file#summary-of-the-llms)'
84
  )
85
  DEFAULT_MODEL_INDEX = int(os.environ.get('DEFAULT_MODEL_INDEX', '4'))
86
  LLM_MODEL_TEMPERATURE = 0.2
87
- LLM_MODEL_MIN_OUTPUT_LENGTH = 100
88
- LLM_MODEL_MAX_INPUT_LENGTH = 400 # characters
89
  MAX_PAGE_COUNT = 50
 
90
 
91
  LOG_LEVEL = 'DEBUG'
92
  COUNT_TOKENS = False
 
3
  """
4
  import logging
5
  import os
6
+ import re
7
 
8
  from dataclasses import dataclass
9
  from dotenv import load_dotenv
 
21
  PROVIDER_COHERE = 'co'
22
  PROVIDER_GOOGLE_GEMINI = 'gg'
23
  PROVIDER_HUGGING_FACE = 'hf'
24
+ PROVIDER_AZURE_OPENAI = 'az'
25
  PROVIDER_OLLAMA = 'ol'
26
+ PROVIDER_OPENROUTER = 'or'
27
  PROVIDER_TOGETHER_AI = 'to'
 
28
  VALID_PROVIDERS = {
29
  PROVIDER_COHERE,
30
  PROVIDER_GOOGLE_GEMINI,
 
32
  PROVIDER_OLLAMA,
33
  PROVIDER_TOGETHER_AI,
34
  PROVIDER_AZURE_OPENAI,
35
+ PROVIDER_OPENROUTER,
36
+ }
37
+ PROVIDER_ENV_KEYS = {
38
+ PROVIDER_COHERE: "COHERE_API_KEY",
39
+ PROVIDER_GOOGLE_GEMINI: "GOOGLE_API_KEY",
40
+ PROVIDER_HUGGING_FACE: "HUGGINGFACEHUB_API_TOKEN",
41
+ PROVIDER_AZURE_OPENAI: "AZURE_OPENAI_API_KEY",
42
+ PROVIDER_OPENROUTER: "OPENROUTER_API_KEY",
43
+ PROVIDER_TOGETHER_AI: "TOGETHER_API_KEY",
44
  }
45
+ PROVIDER_REGEX = re.compile(r'\[(.*?)\]')
46
  VALID_MODELS = {
47
  '[az]azure/open-ai': {
48
  'description': 'faster, detailed',
 
74
  'max_new_tokens': 8192,
75
  'paid': False,
76
  },
77
+ '[or]google/gemini-2.0-flash-001': {
78
+ 'description': 'Google Gemini-2.0-flash-001 (via OpenRouter)',
79
+ 'max_new_tokens': 8192,
80
+ 'paid': True,
81
+ },
82
+ '[or]openai/gpt-3.5-turbo': {
83
+ 'description': 'OpenAI GPT-3.5 Turbo (via OpenRouter)',
84
+ 'max_new_tokens': 4096,
85
+ 'paid': True,
86
+ },
87
  '[to]meta-llama/Llama-3.3-70B-Instruct-Turbo': {
88
  'description': 'detailed, slower',
89
  'max_new_tokens': 4096,
 
93
  'description': 'shorter, faster',
94
  'max_new_tokens': 4096,
95
  'paid': True,
96
+ }
97
  }
98
  LLM_PROVIDER_HELP = (
99
  'LLM provider codes:\n\n'
 
101
  '- **[co]**: Cohere\n'
102
  '- **[gg]**: Google Gemini API\n'
103
  '- **[hf]**: Hugging Face Inference API\n'
104
+ '- **[or]**: OpenRouter\n\n'
105
+ '- **[to]**: Together AI\n'
106
  '[Find out more](https://github.com/barun-saha/slide-deck-ai?tab=readme-ov-file#summary-of-the-llms)'
107
  )
108
  DEFAULT_MODEL_INDEX = int(os.environ.get('DEFAULT_MODEL_INDEX', '4'))
109
  LLM_MODEL_TEMPERATURE = 0.2
 
 
110
  MAX_PAGE_COUNT = 50
111
+ LLM_MODEL_MAX_INPUT_LENGTH = 1000 # characters
112
 
113
  LOG_LEVEL = 'DEBUG'
114
  COUNT_TOKENS = False
helpers/llm_helper.py CHANGED
@@ -11,7 +11,7 @@ import requests
11
  from requests.adapters import HTTPAdapter
12
  from urllib3.util import Retry
13
  from langchain_core.language_models import BaseLLM, BaseChatModel
14
-
15
 
16
  sys.path.append('..')
17
 
@@ -23,6 +23,7 @@ OLLAMA_MODEL_REGEX = re.compile(r'[a-zA-Z0-9._:-]+$')
23
  # 94 characters long, only containing alphanumeric characters, hyphens, and underscores
24
  API_KEY_REGEX = re.compile(r'^[a-zA-Z0-9_-]{6,94}$')
25
  REQUEST_TIMEOUT = 35
 
26
 
27
 
28
  logger = logging.getLogger(__name__)
@@ -188,6 +189,22 @@ def get_langchain_llm(
188
  api_key=api_key,
189
  )
190
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
191
  if provider == GlobalConfig.PROVIDER_COHERE:
192
  from langchain_cohere.llms import Cohere
193
 
 
11
  from requests.adapters import HTTPAdapter
12
  from urllib3.util import Retry
13
  from langchain_core.language_models import BaseLLM, BaseChatModel
14
+ import os
15
 
16
  sys.path.append('..')
17
 
 
23
  # 94 characters long, only containing alphanumeric characters, hyphens, and underscores
24
  API_KEY_REGEX = re.compile(r'^[a-zA-Z0-9_-]{6,94}$')
25
  REQUEST_TIMEOUT = 35
26
+ OPENROUTER_BASE_URL = 'https://openrouter.ai/api/v1'
27
 
28
 
29
  logger = logging.getLogger(__name__)
 
189
  api_key=api_key,
190
  )
191
 
192
+ if provider == GlobalConfig.PROVIDER_OPENROUTER:
193
+ # Use langchain-openai's ChatOpenAI for OpenRouter
194
+ from langchain_openai import ChatOpenAI
195
+
196
+ logger.debug('Getting LLM via OpenRouter: %s', model)
197
+ openrouter_api_key = api_key
198
+
199
+ return ChatOpenAI(
200
+ base_url=OPENROUTER_BASE_URL,
201
+ openai_api_key=openrouter_api_key,
202
+ model_name=model,
203
+ temperature=GlobalConfig.LLM_MODEL_TEMPERATURE,
204
+ max_tokens=max_new_tokens,
205
+ streaming=True,
206
+ )
207
+
208
  if provider == GlobalConfig.PROVIDER_COHERE:
209
  from langchain_cohere.llms import Cohere
210