"""Gradio demo of LangChain runtime configuration (configurable fields/alternatives)."""
#https://python.langchain.com/docs/how_to/configure/
import gradio as gr
from langchain_openai import ChatOpenAI
from langchain_core.prompts import PromptTemplate
from langchain_core.runnables import ConfigurableField
def process_with_config(topic, temperature, mode, api_key):
    """Generate a joke or a poem about *topic* via a runtime-configurable chain.

    Args:
        topic: Subject to generate content about.
        temperature: Sampling temperature (string or number; coerced to float).
        mode: Prompt alternative to use — "joke" (default) or "poem".
        api_key: OpenAI API key supplied through the UI.

    Returns:
        The generated text, or an "Error: ..." string if anything fails.
    """
    try:
        # Forward the user-supplied key. The original ignored `api_key`
        # entirely, silently relying on the OPENAI_API_KEY env var instead.
        model = ChatOpenAI(temperature=0, api_key=api_key).configurable_fields(
            temperature=ConfigurableField(
                id="llm_temperature",
                name="LLM Temperature",
                description="Temperature for response generation"
            )
        )
        # The default ("joke") template must reference only {topic}:
        # .invoke() below supplies no {mode} value, so the original
        # "Tell me a {mode} about {topic}" template failed on the joke path.
        prompt = PromptTemplate.from_template(
            "Tell me a joke about {topic}"
        ).configurable_alternatives(
            ConfigurableField(id="prompt"),
            default_key="joke",
            poem=PromptTemplate.from_template("Write a poem about {topic}")
        )
        # Compose prompt -> model, then bind the per-request configuration.
        chain = prompt | model
        response = chain.with_config(
            configurable={
                "llm_temperature": float(temperature),
                "prompt": mode
            }
        ).invoke({"topic": topic})
        return response.content
    except Exception as e:
        # Surface any failure (bad key, network, bad temperature) to the UI.
        return f"Error: {str(e)}"
# Assemble the Gradio UI: four inputs feeding process_with_config,
# one text output for the generated content.
_demo_inputs = [
    gr.Textbox(label="Topic", placeholder="Enter a topic..."),
    gr.Slider(0, 1, value=0.5, label="Temperature"),
    gr.Radio(["joke", "poem"], label="Mode", value="joke"),
    gr.Textbox(label="OpenAI API Key", type="password"),
]

demo = gr.Interface(
    fn=process_with_config,
    inputs=_demo_inputs,
    outputs=gr.Textbox(label="Generated Response"),
    title="LangChain Configuration Demo",
    description="Generate content with configurable temperature and mode",
)

# Launch the app only when run as a script, not on import.
if __name__ == "__main__":
    demo.launch()
|