"""Search Agent with specific large language models.

You can choose the appropriate language models according to your region
and linguistic preference.
"""
import os

from lagent.llms import (GPTAPI, INTERNLM2_META, HFTransformerCasualLM,
                         LMDeployClient, LMDeployServer)

# Local LMDeploy server backend serving InternLM2.5-7B-chat in-process.
internlm_server = dict(type=LMDeployServer,
                       path='internlm/internlm2_5-7b-chat',
                       model_name='internlm2',
                       meta_template=INTERNLM2_META,
                       top_p=0.8,
                       top_k=1,
                       temperature=0,
                       max_new_tokens=8192,
                       repetition_penalty=1.02,
                       stop_words=['<|im_end|>'])

# Client for an already-running LMDeploy server on localhost:23333.
internlm_client = dict(type=LMDeployClient,
                       model_name='internlm2_5-7b-chat',
                       url='http://127.0.0.1:23333',
                       meta_template=INTERNLM2_META,
                       top_p=0.8,
                       top_k=1,
                       temperature=0,
                       max_new_tokens=8192,
                       repetition_penalty=1.02,
                       stop_words=['<|im_end|>'])

# HuggingFace Transformers backend (runs the model in this process).
# NOTE: `HFTransformerCasualLM` is the class name as exported by lagent.
internlm_hf = dict(type=HFTransformerCasualLM,
                   path='internlm/internlm2_5-7b-chat',
                   meta_template=INTERNLM2_META,
                   top_p=0.8,
                   top_k=None,
                   temperature=1e-6,
                   max_new_tokens=8192,
                   repetition_penalty=1.02,
                   stop_words=['<|im_end|>'])

# OpenAI GPT-4 Turbo; key and base URL are read from the environment,
# with placeholder fallbacks so missing credentials fail loudly at call time.
gpt4 = dict(
    type=GPTAPI,
    model_type='gpt-4-turbo',
    key=os.environ.get('OPENAI_API_KEY', 'YOUR OPENAI API KEY'),
    openai_api_base=os.environ.get('OPENAI_API_BASE',
                                   'https://api.openai.com/v1'),
)

# Alibaba DashScope text-generation endpoint, accessed through the
# OpenAI-style GPTAPI wrapper with an explicit role meta_template.
url = 'https://dashscope.aliyuncs.com/api/v1/services/aigc/text-generation/generation'
qwen = dict(type=GPTAPI,
            model_type='qwen-max-longcontext',
            key=os.environ.get('QWEN_API_KEY', 'YOUR QWEN API KEY'),
            openai_api_base=url,
            meta_template=[
                dict(role='system', api_role='system'),
                dict(role='user', api_role='user'),
                dict(role='assistant', api_role='assistant'),
                dict(role='environment', api_role='system')
            ],
            top_p=0.8,
            top_k=1,
            temperature=0,
            max_new_tokens=4096,
            repetition_penalty=1.02,
            stop_words=['<|im_end|>'])