"""
LLM Provider Factory for Flare
"""
import os
from typing import Optional
from dotenv import load_dotenv
from llm_interface import LLMInterface, SparkLLM, GPT4oLLM
from config_provider import ConfigProvider
from utils import log
class LLMFactory:
    """Factory that builds the configured LLM provider (Spark or the GPT-4o family).

    All methods are stateless; the factory is never instantiated.
    """

    # NOTE(review): the "π…" prefixes in the log strings below look like
    # mojibake from lost emoji in a previous encoding — confirm the intended
    # log prefixes before "fixing" them; they are preserved byte-for-byte here.

    @staticmethod
    def create_provider() -> LLMInterface:
        """Create an LLM provider based on the global configuration.

        Returns:
            A concrete ``LLMInterface`` implementation (``SparkLLM`` or
            ``GPT4oLLM``).

        Raises:
            ValueError: If no provider is configured, the provider name is
                unknown/unsupported, or required credentials are missing.
        """
        cfg = ConfigProvider.get()
        llm_config = cfg.global_config.llm_provider
        if not llm_config:
            raise ValueError("No LLM provider configured")

        provider_name = llm_config.name
        log(f"π Creating LLM provider: {provider_name}")

        # Get provider definition
        provider_def = cfg.global_config.get_provider_config("llm", provider_name)
        if not provider_def:
            raise ValueError(f"Unknown LLM provider: {provider_name}")

        # Resolve the API key (config first, then environment).
        api_key = LLMFactory._get_api_key(provider_name, llm_config.api_key)

        # Dispatch on provider name.
        if provider_name == "spark":
            return LLMFactory._create_spark_provider(llm_config, api_key, provider_def)
        elif provider_name in ("gpt4o", "gpt4o-mini"):
            return LLMFactory._create_gpt_provider(llm_config, api_key, provider_def)
        else:
            raise ValueError(f"Unsupported LLM provider: {provider_name}")

    @staticmethod
    def _create_spark_provider(llm_config, api_key: str, provider_def) -> SparkLLM:
        """Create a Spark LLM provider.

        Args:
            llm_config: Provider settings object; ``endpoint`` and ``settings``
                are read here.
            api_key: Resolved Spark API token (must be non-empty).
            provider_def: Provider definition (unused for Spark, kept for a
                uniform helper signature).

        Raises:
            ValueError: If the endpoint or API token is missing.
        """
        if not llm_config.endpoint:
            raise ValueError("Spark endpoint is required")
        if not api_key:
            raise ValueError("Spark API token is required")

        # Extract work mode variant (for backward compatibility)
        provider_variant = "cloud"  # Default
        if os.getenv("SPACE_ID"):  # HuggingFace Space
            provider_variant = "hfcloud"

        log(f"π Initializing SparkLLM: {llm_config.endpoint}")
        log(f"π§ Provider variant: {provider_variant}")

        return SparkLLM(
            spark_endpoint=llm_config.endpoint,
            spark_token=api_key,
            provider_variant=provider_variant,
            settings=llm_config.settings
        )

    @staticmethod
    def _create_gpt_provider(llm_config, api_key: str, provider_def) -> GPT4oLLM:
        """Create a GPT-4 provider.

        Args:
            llm_config: Provider settings object; ``settings`` is read here.
            api_key: Resolved OpenAI API key (must be non-empty).
            provider_def: Provider definition; its ``name`` selects the model.

        Raises:
            ValueError: If the API key is missing.
        """
        if not api_key:
            raise ValueError("OpenAI API key is required")

        # Get model-specific settings
        settings = llm_config.settings or {}
        model = provider_def.name  # gpt4o or gpt4o-mini

        log(f"π€ Initializing GPT4oLLM with model: {model}")

        return GPT4oLLM(
            api_key=api_key,
            model=model,
            settings=settings
        )

    @staticmethod
    def _get_api_key(provider_name: str, config_key: Optional[str]) -> Optional[str]:
        """Get an API key from config or the environment.

        Resolution order: explicit config value (decrypted when it carries the
        ``enc:`` prefix), then the provider's environment variable — read
        directly under HuggingFace Spaces, or via ``.env`` locally.

        Returns:
            The resolved key, or ``None`` when nothing is configured.
        """
        # First check config
        if config_key:
            if config_key.startswith("enc:"):
                # Decrypt if encrypted; imported lazily to avoid a hard
                # dependency when plain keys are used.
                from encryption_utils import decrypt
                decrypted = decrypt(config_key)
                log(f"π Using encrypted API key from config: ***{decrypted[-4:]}")
                return decrypted
            else:
                log(f"π Using plain API key from config: ***{config_key[-4:]}")
                return config_key

        # Then check environment
        env_mappings = {
            "spark": "SPARK_TOKEN",
            "gpt4o": "OPENAI_API_KEY",
            "gpt4o-mini": "OPENAI_API_KEY"
        }

        env_var = env_mappings.get(provider_name)
        if env_var:
            # Check if we're in HuggingFace Space
            if os.getenv("SPACE_ID"):
                # HuggingFace mode - direct environment
                api_key = os.environ.get(env_var)
                if api_key:
                    log(f"π Using API key from HuggingFace secrets: {env_var}")
                    return api_key
            else:
                # Local mode - use dotenv
                load_dotenv()
                api_key = os.getenv(env_var)
                if api_key:
                    log(f"π Using API key from .env: {env_var}")
                    return api_key

        return None