mgbam committed
Commit 4494ca7 (verified) · Parent: ec83649

Update config.py

Files changed (1)
config.py +17 -13
config.py CHANGED
@@ -1,13 +1,17 @@
- import os
-
- # In a production Hugging Face Space, set these as Secrets
- OPENAI_API_KEY = os.getenv("OPENAI_API_KEY", "")
- GEMINI_API_KEY = os.getenv("GEMINI_API_KEY", "")
- MY_PUBMED_EMAIL = os.getenv("MY_PUBMED_EMAIL", "[email protected]")
-
- # Default LLM models
- OPENAI_DEFAULT_MODEL = "gpt-3.5-turbo"
- GEMINI_DEFAULT_MODEL = "models/chat-bison-001"
-
- # Summarization chunk size
- DEFAULT_CHUNK_SIZE = 512
+ import os
+
+ # Environment-based secrets (for Hugging Face Spaces, set these in "Secrets")
+ OPENAI_API_KEY = os.getenv("OPENAI_API_KEY", "")
+ GEMINI_API_KEY = os.getenv("GEMINI_API_KEY", "")
+ MY_PUBMED_EMAIL = os.getenv("MY_PUBMED_EMAIL", "[email protected]")
+
+ # Default LLM models for text
+ OPENAI_DEFAULT_MODEL = "gpt-4o-mini"  # or "gpt-4" if you have access
+ GEMINI_DEFAULT_MODEL = "models/chat-bison-001"
+
+ # Default Summarization chunk size
+ DEFAULT_CHUNK_SIZE = 512
+
+ # A stable image captioning model recognized by the HF "image-to-text" pipeline
+ # Example: "nlpconnect/vit-gpt2-image-captioning" or "Salesforce/blip-image-captioning-base"
+ IMAGE_MODEL_NAME = "nlpconnect/vit-gpt2-image-captioning"
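For context, a minimal sketch of how the new IMAGE_MODEL_NAME setting plugs into the Hugging Face "image-to-text" pipeline named in the comment above (assumes transformers is installed; "photo.jpg" is a hypothetical local image path, not part of this repo):

    from transformers import pipeline

    from config import IMAGE_MODEL_NAME

    # Build a captioning pipeline from the configured checkpoint.
    captioner = pipeline("image-to-text", model=IMAGE_MODEL_NAME)

    # "photo.jpg" is a placeholder path for illustration only.
    result = captioner("photo.jpg")
    print(result[0]["generated_text"])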
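Likewise, a hedged sketch of how the OPENAI_API_KEY / OPENAI_DEFAULT_MODEL pair might be consumed elsewhere in the Space (assumes the openai v1 Python SDK; the prompt text is illustrative, not from this repo):

    from openai import OpenAI

    from config import OPENAI_API_KEY, OPENAI_DEFAULT_MODEL

    # The key comes from config.py, which reads the Space secret at startup.
    client = OpenAI(api_key=OPENAI_API_KEY)

    response = client.chat.completions.create(
        model=OPENAI_DEFAULT_MODEL,
        messages=[{"role": "user", "content": "Summarize this abstract ..."}],
    )
    print(response.choices[0].message.content)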