import gradio as gr
from transformers import pipeline

# βœ… Fast model: FLAN-T5-small is small enough to generate replies quickly, even on CPU
generator = pipeline("text2text-generation", model="google/flan-t5-small")

# 🧠 Prompt Template
TEMPLATE = (
    "You are a polite, humble, and professional customer support agent. "
    "Respond to the following customer message:\n\n{input}\n\nReply:"
)

# πŸ” Generate Reply
def generate_reply(user_input):
    prompt = TEMPLATE.format(input=user_input)
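    # Greedy decoding (do_sample=False) keeps replies deterministic; max_length caps the generated output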
    response = generator(prompt, max_length=60, do_sample=False)[0]["generated_text"]
    return response.strip()

# πŸŽ›οΈ Gradio Interface
iface = gr.Interface(
    fn=generate_reply,
    inputs=gr.Textbox(lines=6, label="Customer Message", placeholder="Enter complaint or question..."),
    outputs=gr.Textbox(label="Polite Support Reply"),
    title="⚑ Ultra-Fast Auto-Reply Generator for Customer Support",
    description="Get polite, helpful replies in seconds using FLAN-T5-small. Built for speed + tone.",
    examples=[
        ["I still haven't received my order and it's been 10 days."],
        ["Why was I charged twice for my subscription?"],
        ["Thanks for the quick response yesterday!"],
        ["My login isn't working since the update."]
    ]
)

iface.launch()
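
# Note: running this script requires the gradio and transformers packages, plus a PyTorch or TensorFlow backend.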