Streamlit-LlamaIndex-API / llamaindex_internlm.py
yanyoyo
fix
4e34758
raw
history blame
401 Bytes
"""Minimal demo: one-shot chat with InternLM2.5-1.8B via LlamaIndex's HuggingFaceLLM."""
from llama_index.llms.huggingface import HuggingFaceLLM
from llama_index.core.llms import ChatMessage


def main() -> None:
    """Load the InternLM chat model and print a single chat completion.

    Downloads the model/tokenizer from the Hugging Face Hub on first run,
    then sends one user message and prints the response object.
    """
    # SECURITY NOTE(review): trust_remote_code=True executes Python code shipped
    # inside the model repository — required for InternLM's custom modeling code,
    # but only safe for repositories you trust.
    llm = HuggingFaceLLM(
        model_name="internlm/internlm2_5-1_8b-chat",
        tokenizer_name="internlm/internlm2_5-1_8b-chat",
        model_kwargs={"trust_remote_code": True},
        tokenizer_kwargs={"trust_remote_code": True},
    )
    # role="user" made explicit (the original relied on ChatMessage's default role).
    rsp = llm.chat(messages=[ChatMessage(role="user", content="xtuner是什么?")])
    print(rsp)


if __name__ == "__main__":
    # Guarded entry point: importing this module no longer downloads the model
    # and runs inference as an import-time side effect.
    main()