import os
from typing import List, Literal, Optional, Union

from pydantic import Field, Json, SecretStr, field_validator
from pydantic_settings import BaseSettings, SettingsConfigDict

# Optionally pre-load a .env file so os.environ is populated as well.
# pydantic-settings reads env_file itself, so this is a convenience only.
# Requires python-dotenv: pip install python-dotenv
try:
    from dotenv import load_dotenv

    print("Attempting to load .env file...")
    if load_dotenv():
        print(".env file loaded successfully.")
    else:
        print(".env file not found or empty.")
except ImportError:
    # python-dotenv is optional; skip silently apart from the notice.
    print("python-dotenv not installed, skipping .env file loading.")


class Settings(BaseSettings):
    # Load from .env file; ignore unrelated variables.
    model_config = SettingsConfigDict(
        env_file='.env',
        env_file_encoding='utf-8',
        extra='ignore',
    )

    # Neo4j credentials
    neo4j_uri: str = Field(..., validation_alias='NEO4J_URI')
    neo4j_username: str = Field("neo4j", validation_alias='NEO4J_USERNAME')
    neo4j_password: SecretStr = Field(..., validation_alias='NEO4J_PASSWORD')

    # API keys
    openai_api_key: Optional[SecretStr] = Field(None, validation_alias='OPENAI_API_KEY')
    gemini_api_key: Optional[SecretStr] = Field(None, validation_alias='GEMINI_API_KEY')
    langsmith_api_key: Optional[SecretStr] = Field(None, validation_alias='LANGSMITH_API_KEY')
    langchain_project: Optional[str] = Field("KIG_Refactored", validation_alias='LANGCHAIN_PROJECT')

    # LLM configuration
    main_llm_model: str = Field("gemini-1.5-flash", validation_alias='MAIN_LLM_MODEL')
    eval_llm_model: str = Field("gemini-1.5-flash", validation_alias='EVAL_LLM_MODEL')
    summarize_llm_model: str = Field("gemini-1.5-flash", validation_alias='SUMMARIZE_LLM_MODEL')
    # Add other models if needed (e.g., Cypher generation, concept selection).

    # Planner configuration
    plan_method: Literal["generation", "modification"] = Field("generation", validation_alias='PLAN_METHOD')
    use_detailed_query: bool = Field(False, validation_alias='USE_DETAILED_QUERY')

    # Graph operations configuration
    cypher_gen_method: Literal["guided", "auto"] = Field("guided", validation_alias='CYPHER_GEN_METHOD')
    validate_cypher: bool = Field(False, validation_alias='VALIDATE_CYPHER')
    eval_method: Literal["binary", "score"] = Field("binary", validation_alias='EVAL_METHOD')
    eval_threshold: float = Field(0.7, validation_alias='EVAL_THRESHOLD')
    max_docs: int = Field(10, validation_alias='MAX_DOCS')

    # Processing configuration
    # Processing steps are loaded from a JSON string in .env and parsed into a list.
    process_steps: Json[List[Union[str, dict]]] = Field(
        '["summarize"]', validation_alias='PROCESS_STEPS', validate_default=True
    )
    compression_method: Optional[str] = Field(None, validation_alias='COMPRESSION_METHOD')
    compress_rate: Optional[float] = Field(0.5, validation_alias='COMPRESS_RATE')

    # LangSmith tracing flag (derived automatically from the API key)
    langsmith_tracing_v2: str = Field("false", validate_default=True)

    @field_validator('langsmith_tracing_v2', mode='before')
    @classmethod
    def set_langsmith_tracing(cls, v, info):
        # Enable tracing only when a LangSmith API key was provided.
        return "true" if info.data.get('langsmith_api_key') else "false"

    def configure_langsmith(self):
        """Set LangSmith environment variables if an API key is provided."""
        if self.langsmith_api_key:
            os.environ["LANGCHAIN_TRACING_V2"] = self.langsmith_tracing_v2
            os.environ["LANGCHAIN_API_KEY"] = self.langsmith_api_key.get_secret_value()
            if self.langchain_project:
                os.environ["LANGCHAIN_PROJECT"] = self.langchain_project
            print("LangSmith configured.")
        else:
            # Ensure tracing is disabled if no key is present.
            os.environ["LANGCHAIN_TRACING_V2"] = "false"
            print("LangSmith key not found, tracing disabled.")


# Create a single instance to be imported elsewhere.
settings = Settings()

# Automatically configure LangSmith on import.
settings.configure_langsmith()

# Optionally export keys to the environment for libraries that read them implicitly.
if settings.gemini_api_key:
    os.environ["GOOGLE_API_KEY"] = settings.gemini_api_key.get_secret_value()
    print("Set GOOGLE_API_KEY environment variable.")
if settings.openai_api_key:
    os.environ["OPENAI_API_KEY"] = settings.openai_api_key.get_secret_value()
    print("Set OPENAI_API_KEY environment variable.")
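

# A minimal sketch of the .env file this module expects. All values below are
# hypothetical placeholders for illustration, not real defaults or required keys:
#
#   NEO4J_URI=bolt://localhost:7687
#   NEO4J_USERNAME=neo4j
#   NEO4J_PASSWORD=changeme
#   GEMINI_API_KEY=your-key-here
#   PROCESS_STEPS='["summarize"]'
#
# Quick sanity check when run directly; SecretStr fields stay masked because
# their values are only exposed via get_secret_value().
if __name__ == "__main__":
    for name, value in settings.model_dump().items():
        print(f"{name} = {value!r}")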