import gradio as gr
from langchain_openai import ChatOpenAI
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnablePassthrough, chain


def create_dynamic_chain(api_key):
    """Build a chain that rewrites a follow-up question into a standalone one."""
    llm = ChatOpenAI(model="gpt-4o-mini", api_key=api_key)

    contextualize_prompt = ChatPromptTemplate.from_messages([
        ("system", "Convert the question into a standalone question given the chat history."),
        ("placeholder", "{chat_history}"),
        ("human", "{question}"),
    ])
    contextualize_question = contextualize_prompt | llm | StrOutputParser()

    @chain
    def contextualize_if_needed(input_dict):
        # Dynamic routing: only call the LLM to rewrite the question when there
        # is prior chat history; otherwise pass the raw question through unchanged.
        if input_dict.get("chat_history"):
            return contextualize_question
        return RunnablePassthrough() | (lambda x: x["question"])

    return contextualize_if_needed


def process_message(message, history, api_key):
    if not api_key:
        return "", [{"role": "assistant", "content": "Please enter your OpenAI API key."}]
    try:
        dynamic_chain = create_dynamic_chain(api_key)
        # Convert Gradio's messages-format history into (role, content) tuples,
        # which the prompt's chat_history placeholder accepts.
        chat_history = [(msg["role"], msg["content"]) for msg in history]
        # The response is the standalone (contextualized) question, or the
        # original question verbatim when there is no history yet.
        response = dynamic_chain.invoke({
            "question": message,
            "chat_history": chat_history,
        })
        history.append({"role": "user", "content": message})
        history.append({"role": "assistant", "content": response})
        return "", history
    except Exception as e:
        return "", history + [{"role": "assistant", "content": f"Error: {e}"}]


with gr.Blocks() as demo:
    gr.Markdown("# Dynamic Chain Demo")
    api_key = gr.Textbox(
        label="OpenAI API Key",
        placeholder="Enter your OpenAI API key",
        type="password",
    )
    chatbot = gr.Chatbot(type="messages")
    msg = gr.Textbox(label="Message")
    clear = gr.ClearButton([msg, chatbot])

    msg.submit(
        process_message,
        inputs=[msg, chatbot, api_key],
        outputs=[msg, chatbot],
    )

if __name__ == "__main__":
    demo.launch()
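
# A minimal sketch (not part of the demo) of how the dynamic chain behaves when
# invoked directly, without the Gradio UI. The key value and example strings
# below are hypothetical placeholders, not values from this project.
#
#   dynamic_chain = create_dynamic_chain("YOUR_OPENAI_API_KEY")
#
#   # No history: the question passes straight through unchanged.
#   dynamic_chain.invoke({"question": "What is LangChain?", "chat_history": []})
#
#   # With history: the LLM rewrites the follow-up into a standalone question.
#   dynamic_chain.invoke({
#       "question": "How do I install it?",
#       "chat_history": [
#           ("user", "What is LangChain?"),
#           ("assistant", "LangChain is a framework for building LLM apps."),
#       ],
#   })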