File size: 2,855 Bytes
3b232a5
 
141e5c4
3b232a5
 
 
 
 
 
141e5c4
 
 
 
 
 
 
 
 
3b232a5
 
 
141e5c4
3b232a5
 
 
 
 
 
 
 
 
141e5c4
 
3b232a5
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
141e5c4
 
 
 
3b232a5
 
 
 
 
141e5c4
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
3b232a5
141e5c4
 
 
 
 
 
3b232a5
 
141e5c4
3b232a5
141e5c4
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
#https://python.langchain.com/docs/how_to/configure/
import gradio as gr
import os
from langchain_openai import ChatOpenAI
from langchain_core.prompts import PromptTemplate
from langchain_core.runnables import ConfigurableField

def process_with_config(topic, temperature, mode, api_key):
    """Generate a joke or poem about *topic* using GPT-4o-mini.

    Args:
        topic: Subject for the generated text.
        temperature: Sampling temperature (0-1), applied at invocation
            time through a LangChain configurable field.
        mode: Either "joke" or "poem"; selects the prompt template.
        api_key: OpenAI API key, placed in the environment only for the
            duration of this call.

    Returns:
        The model's response text, or an ``"Error: ..."`` string if any
        step fails (API, template, or configuration errors).
    """
    # Remember any pre-existing key so we can restore it in `finally`
    # instead of clobbering the caller's environment (the original code
    # unconditionally deleted OPENAI_API_KEY, even if it was set before).
    previous_key = os.environ.get("OPENAI_API_KEY")
    try:
        os.environ["OPENAI_API_KEY"] = api_key

        # GPT-4o-mini with a runtime-configurable temperature. The
        # constructor default (0) is overridden per-call via with_config.
        model = ChatOpenAI(
            model="gpt-4o-mini",
            openai_api_key=api_key,
            temperature=0,
        ).configurable_fields(
            temperature=ConfigurableField(
                id="llm_temperature",
                name="LLM Temperature",
                description="Temperature for GPT-4o-mini response generation",
            )
        )

        # BUG FIX: configurable_alternatives() uses the *base* runnable
        # whenever the configured value equals default_key ("joke").
        # The original base template was "Tell me a {mode} about {topic}",
        # which requires a {mode} variable that .invoke({"topic": ...})
        # never supplies, so mode="joke" raised a missing-variable error.
        # The base template must itself be the complete "joke" prompt.
        prompt = PromptTemplate.from_template(
            "Tell me a joke about {topic}"
        ).configurable_alternatives(
            ConfigurableField(id="prompt"),
            default_key="joke",
            poem=PromptTemplate.from_template("Write a poem about {topic}"),
        )

        # Compose prompt -> model, then bind this call's configuration.
        chain = prompt | model
        response = chain.with_config(
            configurable={
                "llm_temperature": float(temperature),
                "prompt": mode,
            }
        ).invoke({"topic": topic})

        return response.content

    except Exception as e:
        # Surface the failure to the UI rather than crashing the app.
        return f"Error: {str(e)}"
    finally:
        # Restore the caller's environment: put back a pre-existing key,
        # or remove the one we set if there was none before.
        if previous_key is not None:
            os.environ["OPENAI_API_KEY"] = previous_key
        elif "OPENAI_API_KEY" in os.environ:
            del os.environ["OPENAI_API_KEY"]

# --- Gradio UI -------------------------------------------------------
# Widgets are declared up front as named components so the Interface
# call itself stays compact and each control is easy to find and tweak.
topic_input = gr.Textbox(
    label="Topic",
    placeholder="Enter a topic...",
    lines=1,
)
temperature_input = gr.Slider(
    minimum=0,
    maximum=1,
    value=0.5,
    step=0.1,
    label="Temperature (GPT-4o-mini)",
)
mode_input = gr.Radio(
    choices=["joke", "poem"],
    value="joke",
    label="Mode",
)
api_key_input = gr.Textbox(
    label="OpenAI API Key",
    placeholder="Enter your OpenAI API key",
    type="password",
)
response_output = gr.Textbox(
    label="Generated Response",
    lines=5,
)

# Wire the four inputs (in the order process_with_config expects them)
# to the single text output.
demo = gr.Interface(
    fn=process_with_config,
    inputs=[topic_input, temperature_input, mode_input, api_key_input],
    outputs=response_output,
    title="🤖 GPT-4o-mini Configuration Demo",
    description="Generate content using GPT-4o-mini with configurable temperature and mode",
)

# Start the Gradio server only when executed as a script (not on import).
# Binds to all interfaces on port 7860 without creating a public share link.
if __name__ == "__main__":
    demo.launch(server_name="0.0.0.0", server_port=7860, share=False)