# app.py — Hugging Face Space file (author: DexterSptizu, commit a9d0465 verified, 2.11 kB)
# NOTE: the lines above were viewer-UI residue ("raw / history blame") captured in the paste;
# they are preserved here as a comment so the file remains valid Python.
#https://python.langchain.com/docs/how_to/configure/
import gradio as gr
import os
from langchain_openai import ChatOpenAI
from langchain_core.prompts import ChatPromptTemplate
def generate_response(prompt, temperature, api_key):
    """Generate a creative description of *prompt* via OpenAI.

    Args:
        prompt: Subject to describe (interpolated into the user message).
        temperature: Sampling temperature, 0–2; coerced to float since the
            Gradio slider may deliver it as a string or int.
        api_key: The caller's OpenAI API key.

    Returns:
        The model's text response, or a human-readable "Error: ..." string —
        this function never raises, because its return value is rendered
        directly in the Gradio output box.
    """
    # Fail fast with a clear message instead of an opaque SDK auth error.
    if not api_key:
        return "Error: Please provide an OpenAI API key."
    try:
        # Build the model with the user-selected temperature.
        llm = ChatOpenAI(
            model="gpt-4o-mini",
            temperature=float(temperature),
            openai_api_key=api_key,
        )
        # Simple two-message prompt: fixed system role + templated user turn.
        template = ChatPromptTemplate.from_messages([
            ("system", "You are a creative assistant."),
            ("user", "Generate a creative description of {prompt}"),
        ])
        # LCEL pipe: render the prompt, then invoke the model.
        chain = template | llm
        response = chain.invoke({"prompt": prompt})
        return response.content
    except Exception as e:
        # Boundary handler for the UI: surface network/auth/model errors as text.
        return f"Error: {str(e)}"
# Create Gradio interface
# --- Gradio UI ------------------------------------------------------------
# Three inputs (prompt, temperature, API key) feeding generate_response;
# the demo illustrates how temperature changes output creativity.

prompt_input = gr.Textbox(
    label="Prompt",
    placeholder="Example: 'a blue elephant'",
    value="a blue elephant",
)

temperature_input = gr.Slider(
    minimum=0,
    maximum=2,
    value=0.7,
    step=0.1,
    label="Temperature (0: Focused, 2: More Creative)",
)

api_key_input = gr.Textbox(label="OpenAI API Key", type="password")

output_box = gr.Textbox(label="Generated Response", lines=5)

demo = gr.Interface(
    fn=generate_response,
    inputs=[prompt_input, temperature_input, api_key_input],
    outputs=output_box,
    title="🎨 Temperature Effect Demo",
    description="""
Try the same prompt with different temperatures:
- Temperature 0: More focused, consistent responses
- Temperature 0.7: Balanced creativity
- Temperature 2: More random, creative responses
Example prompt: 'a blue elephant'
- T=0: "A blue elephant is a fictional creature with blue-colored skin..."
- T=0.7: "Imagine a majestic pachyderm with sapphire-tinted hide..."
- T=2: "Dancing through cotton candy clouds, this azure giant..."
""",
)

if __name__ == "__main__":
    demo.launch()