"""ArunGPT: an educational Gradio chatbot backed by Mixtral-8x7B-Instruct.

The Gradio handler talks to the model through smolagents' HfApiModel
(Hugging Face Inference API); the HF token is read from the HF_TOKEN
environment variable.
"""

import os

import gradio as gr
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_nvidia_ai_endpoints import ChatNVIDIA
from smolagents import HfApiModel

# NOTE(review): this LangChain pipeline is constructed but never used by the
# Gradio handler below (and its "Arun" persona conflicts with the doctor
# persona actually served). Kept because constructing it is a module-level
# side effect existing importers may rely on — confirm and remove if unneeded.
prompt = ChatPromptTemplate.from_messages(
    [
        ("system", "You are a helpful AI assistant named Arun."),
        ("user", "{input}"),
    ]
)
llm = ChatNVIDIA(model="mistralai/mixtral-8x7b-instruct-v0.1")
chain = prompt | llm | StrOutputParser()

# The model that actually serves the chat UI.
model = HfApiModel(
    model_id="mistralai/Mixtral-8x7B-Instruct-v0.1",
    token=os.environ.get("HF_TOKEN"),
)

# Persona sent as the system message on every request.
_SYSTEM_TEXT = (
    "You are a doctor who specializes on helping patients with addiction issues"
)


def _as_message(role, text):
    """Build one chat message in the content-parts format HfApiModel expects."""
    return {"role": role, "content": [{"type": "text", "text": text}]}


def chat(prompt, history):
    """Gradio ChatInterface handler.

    Args:
        prompt: the user's latest message (str).
        history: prior turns supplied by gr.ChatInterface — either a list of
            ``{"role": ..., "content": ...}`` dicts (messages format) or a
            list of ``[user, assistant]`` pairs (legacy tuple format).

    Returns:
        The assistant's reply text.
    """
    messages = [_as_message("system", _SYSTEM_TEXT)]

    # Fix: the original ignored `history`, so the bot had no conversation
    # memory. Replay prior turns so the model sees the full dialogue.
    for turn in history or []:
        if isinstance(turn, dict):
            # gr.ChatInterface "messages" format.
            messages.append(_as_message(turn["role"], turn["content"]))
        else:
            # Legacy [user_message, assistant_message] pair format.
            user_msg, bot_msg = turn
            if user_msg:
                messages.append(_as_message("user", user_msg))
            if bot_msg:
                messages.append(_as_message("assistant", bot_msg))

    messages.append(_as_message("user", prompt))
    return model(messages).content


demo = gr.ChatInterface(
    chat,
    title="ArunGPT",
    theme=gr.themes.Soft(),
    description=(
        "Hello this is chatbot is created for only educational purpose and is "
        "powered by mistral 8x 7b model"
    ),
).queue()
demo.launch()