from dotenv import load_dotenv
from llama_index.llms import OpenAI, AzureOpenAI
from llama_index.embeddings import OpenAIEmbedding, AzureOpenAIEmbedding

from schemas import ServiceProvider, ChatbotVersion

load_dotenv()

def get_service_provider_config(service_provider: ServiceProvider):
    """Return an (llm, embed_model) pair for the requested service provider."""
    if service_provider == ServiceProvider.AZURE:
        return get_azure_openai_config()
    if service_provider == ServiceProvider.OPENAI:
        # The OpenAI path reads OPENAI_API_KEY from the environment (loaded via .env above).
        llm = OpenAI(model=ChatbotVersion.CHATGPT_35)
        embed_model = OpenAIEmbedding()
        return llm, embed_model
    raise ValueError(f"Unsupported service provider: {service_provider}")


def get_azure_openai_config():
    """Build an Azure OpenAI LLM and embedding model from your Azure deployment details."""
    # Replace these placeholders with your own Azure OpenAI credentials
    # (ideally loaded from environment variables rather than hardcoded).
    api_key = "<api-key>"
    azure_endpoint = "https://<your-resource-name>.openai.azure.com/"
    api_version = "2023-07-01-preview"

    llm = AzureOpenAI(
        model="gpt-35-turbo-16k",
        deployment_name="my-custom-llm",
        api_key=api_key,
        azure_endpoint=azure_endpoint,
        api_version=api_version,
    )

    # You must deploy your own embedding model in Azure in addition to your chat completion model
    embed_model = AzureOpenAIEmbedding(
        model="text-embedding-ada-002",
        deployment_name="my-custom-embedding",
        api_key=api_key,
        azure_endpoint=azure_endpoint,
        api_version=api_version,
    )
    return llm, embed_model
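

# Example usage (a minimal sketch, assuming the legacy ServiceContext API that matches
# the llama_index.llms / llama_index.embeddings imports above; the "data" directory and
# the query string are illustrative placeholders, not part of this module):
#
#   from llama_index import ServiceContext, SimpleDirectoryReader, VectorStoreIndex
#
#   llm, embed_model = get_service_provider_config(ServiceProvider.OPENAI)
#   service_context = ServiceContext.from_defaults(llm=llm, embed_model=embed_model)
#   documents = SimpleDirectoryReader("data").load_data()
#   index = VectorStoreIndex.from_documents(documents, service_context=service_context)
#   query_engine = index.as_query_engine()
#   print(query_engine.query("What do these documents cover?"))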