|
|
|
import gradio as gr |
|
from langchain_openai import ChatOpenAI |
|
from langchain_core.prompts import PromptTemplate |
|
from langchain_core.runnables import ConfigurableField |
|
|
|
def process_with_config(topic, temperature, mode, api_key):
    """Generate a joke or a poem about *topic* with a configurable LLM chain.

    Args:
        topic: Subject to generate content about.
        temperature: Sampling temperature (0-1), applied per-invocation via
            the ``llm_temperature`` configurable field.
        mode: ``"joke"`` (default prompt) or ``"poem"`` (prompt alternative).
        api_key: OpenAI API key entered by the user in the UI.

    Returns:
        The generated text, or an ``"Error: ..."`` string on failure.
    """
    try:
        # Fail fast with a readable message instead of an opaque auth error.
        if not api_key:
            return "Error: Please provide an OpenAI API key."

        # BUG FIX: the user-supplied api_key was previously ignored, so the
        # request only succeeded when OPENAI_API_KEY was set in the process
        # environment. Pass it through explicitly.
        model = ChatOpenAI(temperature=0, api_key=api_key).configurable_fields(
            temperature=ConfigurableField(
                id="llm_temperature",
                name="LLM Temperature",
                description="Temperature for response generation",
            )
        )

        # BUG FIX: the old default template "Tell me a {mode} about {topic}"
        # declared a {mode} input variable, but invoke() only supplies
        # {"topic": ...}, so the default "joke" path raised a missing-variable
        # error. The mode is selected via configurable_alternatives, not via
        # template substitution, so hardcode "joke" here.
        prompt = PromptTemplate.from_template(
            "Tell me a joke about {topic}"
        ).configurable_alternatives(
            ConfigurableField(id="prompt"),
            default_key="joke",
            poem=PromptTemplate.from_template("Write a poem about {topic}"),
        )

        chain = prompt | model

        # Apply the per-request configuration (temperature + prompt choice),
        # then run the chain.
        response = chain.with_config(
            configurable={
                "llm_temperature": float(temperature),
                "prompt": mode,
            }
        ).invoke({"topic": topic})

        return response.content

    except Exception as e:
        # UI boundary: surface the error text to the user instead of crashing
        # the Gradio app.
        return f"Error: {str(e)}"
|
|
|
|
|
# Gradio UI definition: collects the topic, temperature, mode, and API key,
# and displays the generated text returned by process_with_config.
_input_widgets = [
    gr.Textbox(label="Topic", placeholder="Enter a topic..."),
    gr.Slider(0, 1, value=0.5, label="Temperature"),
    gr.Radio(["joke", "poem"], label="Mode", value="joke"),
    gr.Textbox(label="OpenAI API Key", type="password"),
]

demo = gr.Interface(
    fn=process_with_config,
    inputs=_input_widgets,
    outputs=gr.Textbox(label="Generated Response"),
    title="LangChain Configuration Demo",
    description="Generate content with configurable temperature and mode",
)
|
|
|
# Launch the Gradio app only when this file is run as a script (not imported).
if __name__ == "__main__":
    demo.launch()
|
|