import dotenv from 'dotenv';
import { ProxyAgent, setGlobalDispatcher } from 'undici';
import { createGoogleGenerativeAI } from '@ai-sdk/google';
import { createOpenAI, OpenAIProviderSettings } from '@ai-sdk/openai';
import configJson from '../config.json';
// Load environment variables
dotenv.config();
// Types
export type LLMProvider = 'openai' | 'gemini' | 'vertex';
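// Tool names are derived from the gemini tools section of config.json; the other providers are assumed to define the same tool keys.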
export type ToolName = keyof typeof configJson.models.gemini.tools;
// Type definitions for our config structure
type EnvConfig = typeof configJson.env;
interface ProviderConfig {
  createClient: string;
  clientConfig?: Record<string, any>;
}
// Environment setup
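// Start from the env defaults in config.json and let any matching process environment variable override them.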
const env: EnvConfig = { ...configJson.env };
(Object.keys(env) as (keyof EnvConfig)[]).forEach(key => {
  if (process.env[key]) {
    env[key] = process.env[key] || env[key];
  }
});
// Setup proxy if present
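// Route all undici-based requests through the proxy when https_proxy is set; a malformed proxy URL is logged and otherwise ignored.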
if (env.https_proxy) {
  try {
    const proxyUrl = new URL(env.https_proxy).toString();
    const dispatcher = new ProxyAgent({ uri: proxyUrl });
    setGlobalDispatcher(dispatcher);
  } catch (error) {
    console.error('Failed to set proxy:', error);
  }
}
// Export environment variables
export const OPENAI_BASE_URL = env.OPENAI_BASE_URL;
export const GEMINI_API_KEY = env.GEMINI_API_KEY;
export const OPENAI_API_KEY = env.OPENAI_API_KEY;
export const JINA_API_KEY = env.JINA_API_KEY;
export const BRAVE_API_KEY = env.BRAVE_API_KEY;
export const SEARCH_PROVIDER = configJson.defaults.search_provider;
export const STEP_SLEEP = configJson.defaults.step_sleep;
// Determine LLM provider
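// The LLM_PROVIDER environment variable takes precedence over the config default; unknown values fail fast at startup.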
export const LLM_PROVIDER: LLMProvider = (() => {
  const provider = process.env.LLM_PROVIDER || configJson.defaults.llm_provider;
  if (!isValidProvider(provider)) {
    throw new Error(`Invalid LLM provider: ${provider}`);
  }
  return provider;
})();
function isValidProvider(provider: string): provider is LLMProvider {
  return provider === 'openai' || provider === 'gemini' || provider === 'vertex';
}
interface ToolConfig {
  model: string;
  temperature: number;
  maxTokens: number;
}
interface ToolOverrides {
  temperature?: number;
  maxTokens?: number;
}
// Get tool configuration
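// Merge per-tool overrides on top of the provider defaults. Vertex reuses the gemini model settings,
// and DEFAULT_MODEL_NAME (when set) overrides the configured model for every tool.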
export function getToolConfig(toolName: ToolName): ToolConfig {
  const providerConfig = configJson.models[LLM_PROVIDER === 'vertex' ? 'gemini' : LLM_PROVIDER];
  const defaultConfig = providerConfig.default;
  const toolOverrides = providerConfig.tools[toolName] as ToolOverrides;
  return {
    model: process.env.DEFAULT_MODEL_NAME || defaultConfig.model,
    temperature: toolOverrides.temperature ?? defaultConfig.temperature,
    maxTokens: toolOverrides.maxTokens ?? defaultConfig.maxTokens
  };
}
export function getMaxTokens(toolName: ToolName): number {
  return getToolConfig(toolName).maxTokens;
}
// Get model instance
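// Build an AI SDK model instance for the active provider; the 'searchGrounding' tool turns on
// Google search grounding for Gemini and Vertex models.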
export function getModel(toolName: ToolName) {
  const config = getToolConfig(toolName);
  const providerConfig = (configJson.providers as Record<string, ProviderConfig | undefined>)[LLM_PROVIDER];

  if (LLM_PROVIDER === 'openai') {
    if (!OPENAI_API_KEY) {
      throw new Error('OPENAI_API_KEY not found');
    }
    const opt: OpenAIProviderSettings = {
      apiKey: OPENAI_API_KEY,
      compatibility: providerConfig?.clientConfig?.compatibility
    };
    if (OPENAI_BASE_URL) {
      opt.baseURL = OPENAI_BASE_URL;
    }
    return createOpenAI(opt)(config.model);
  }

  if (LLM_PROVIDER === 'vertex') {
    // Load the Vertex SDK lazily so it is only required when the vertex provider is selected.
    const createVertex = require('@ai-sdk/google-vertex').createVertex;
    if (toolName === 'searchGrounding') {
      return createVertex({ project: process.env.GCLOUD_PROJECT, ...providerConfig?.clientConfig })(config.model, { useSearchGrounding: true });
    }
    return createVertex({ project: process.env.GCLOUD_PROJECT, ...providerConfig?.clientConfig })(config.model);
  }

  // Fall through to Gemini via the Google Generative AI API.
  if (!GEMINI_API_KEY) {
    throw new Error('GEMINI_API_KEY not found');
  }
  if (toolName === 'searchGrounding') {
    return createGoogleGenerativeAI({ apiKey: GEMINI_API_KEY })(config.model, { useSearchGrounding: true });
  }
  return createGoogleGenerativeAI({ apiKey: GEMINI_API_KEY })(config.model);
}
// Validate required environment variables
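// Note: the vertex provider authenticates through Google Cloud credentials, so it has no API key check here.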
if (LLM_PROVIDER === 'gemini' && !GEMINI_API_KEY) throw new Error("GEMINI_API_KEY not found");
if (LLM_PROVIDER === 'openai' && !OPENAI_API_KEY) throw new Error("OPENAI_API_KEY not found");
if (!JINA_API_KEY) throw new Error("JINA_API_KEY not found");
// Log all configurations
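// The summary below reports the effective provider, model, and per-tool settings; API keys are not included.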
const configSummary = {
  provider: {
    name: LLM_PROVIDER,
    model: LLM_PROVIDER === 'openai'
      ? configJson.models.openai.default.model
      : configJson.models.gemini.default.model,
    ...(LLM_PROVIDER === 'openai' && { baseUrl: OPENAI_BASE_URL })
  },
  search: {
    provider: SEARCH_PROVIDER
  },
  tools: Object.fromEntries(
    Object.keys(configJson.models[LLM_PROVIDER === 'vertex' ? 'gemini' : LLM_PROVIDER].tools).map(name => [
      name,
      getToolConfig(name as ToolName)
    ])
  ),
  defaults: {
    stepSleep: STEP_SLEEP
  }
};
console.log('Configuration Summary:', JSON.stringify(configSummary, null, 2));
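// Example usage elsewhere in the codebase (sketch; the tool name 'agent' is illustrative):
//   const model = getModel('agent');
//   const { maxTokens } = getToolConfig('agent');
//   const { text } = await generateText({ model, maxTokens, prompt });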