import gradio as gr
from langchain_openai import ChatOpenAI
from langchain_core.prompts import ChatPromptTemplate


def generate_response(prompt, temperature, api_key):
    """Generate a creative description of the prompt at the requested temperature."""
    try:
        # Build the chat model with the user-supplied temperature and API key.
        llm = ChatOpenAI(
            model="gpt-4o-mini",
            temperature=float(temperature),
            openai_api_key=api_key,
        )

        # A fixed system message plus the user's subject.
        template = ChatPromptTemplate.from_messages([
            ("system", "You are a creative assistant."),
            ("user", "Generate a creative description of {prompt}"),
        ])

        # Pipe the prompt template into the model and run it.
        chain = template | llm
        response = chain.invoke({"prompt": prompt})

        return response.content

    except Exception as e:
        # Surface errors (invalid key, network issues) in the output textbox.
        return f"Error: {str(e)}"
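

# Not part of the Gradio demo itself: a small illustrative helper for comparing
# temperatures from a script by calling generate_response() directly. The name
# compare_temperatures and its defaults are assumptions, not from the original demo.
def compare_temperatures(api_key, subject="a blue elephant"):
    """Print the same prompt rendered at a few temperatures, for comparison."""
    for t in (0.0, 0.7, 2.0):
        print(f"--- temperature={t} ---")
        print(generate_response(subject, t, api_key))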


# Gradio UI: a prompt box, a temperature slider, and a password field for the API key.
demo = gr.Interface(
    fn=generate_response,
    inputs=[
        gr.Textbox(
            label="Prompt",
            placeholder="Example: 'a blue elephant'",
            value="a blue elephant",
        ),
        gr.Slider(
            minimum=0,
            maximum=2,
            value=0.7,
            step=0.1,
            label="Temperature (0: Focused, 2: More Creative)",
        ),
        gr.Textbox(
            label="OpenAI API Key",
            type="password",
        ),
    ],
    outputs=gr.Textbox(label="Generated Response", lines=5),
    title="🎨 Temperature Effect Demo",
    description="""
Try the same prompt with different temperatures:
- Temperature 0: More focused, consistent responses
- Temperature 0.7: Balanced creativity
- Temperature 2: More random, creative responses

Example prompt: 'a blue elephant'
- T=0: "A blue elephant is a fictional creature with blue-colored skin..."
- T=0.7: "Imagine a majestic pachyderm with sapphire-tinted hide..."
- T=2: "Dancing through cotton candy clouds, this azure giant..."
""",
)

if __name__ == "__main__":
    demo.launch()