import gradio as gr
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Load the Qwen2.5-72B-Instruct model
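# Note: the 72B checkpoint needs a very large amount of GPU memory; device_map="auto"
# (which requires the accelerate package) shards the weights across available devices.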
model_name = "Qwen/Qwen2.5-72B-Instruct"
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype="auto",
    device_map="auto"
)
tokenizer = AutoTokenizer.from_pretrained(model_name)

# System prompt that frames the assistant's persona
SYSTEM_PROMPT = {
    "role": "system",
    "content": (
        "You are an experienced fashion designer who opens the conversation with a proper greeting, "
        "gives valuable and catchy fashion advice and suggestions, and stays precise and to the point."
    ),
}

# Chat history, seeded with the system prompt
# (module-level state, so it is shared by all connected sessions)
messages = [SYSTEM_PROMPT]

# Function to reset the chat
def reset_chat():
    global messages
    messages = [SYSTEM_PROMPT]  # Re-seed the history so the persona survives a reset
    return [], "New Chat"

# Function to handle questionnaire submission
def submit_questionnaire(name, age, location, gender, ethnicity, height, weight,
                         style_preference, color_palette, everyday_style):
    # Placeholder: persist the questionnaire responses here (e.g. to a file or database) if needed
    return "Thank you for completing the questionnaire!"

# Function to handle chat
def chat(user_input):
    global messages
    if user_input:
        # Append user message to the conversation history
        messages.append({"role": "user", "content": user_input})

        # Prepare input for the model
        text = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            add_generation_prompt=True
        )
        model_inputs = tokenizer([text], return_tensors="pt").to(model.device)

        # Generate a response using the model
        try:
            generated_ids = model.generate(
                **model_inputs,
                max_new_tokens=512
            )
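            # Drop the prompt tokens from each sequence so only the newly generated text is decoded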
            generated_ids = [
                output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
            ]

            # Decode the response
            response_content = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0].strip()

        except Exception as e:
            response_content = f"Error: {str(e)}"

        # Store assistant response in the chat history
        messages.append({"role": "assistant", "content": response_content})

    # Show only user/assistant turns in the Chatbot (the messages format does not
    # render the system role) and clear the input box after sending
    visible_history = [m for m in messages if m["role"] != "system"]
    return visible_history, ""

# Gradio Interface
with gr.Blocks() as demo:
    gr.Markdown("## Fashion Assistant Chatbot")

    # Sidebar for user inputs
    with gr.Row():
        with gr.Column():
            name = gr.Textbox(label="Name")
            age = gr.Number(label="Age", value=25, minimum=1, maximum=100)
            location = gr.Textbox(label="Location")
            gender = gr.Radio(label="Gender", choices=["Male", "Female", "Other"])
            ethnicity = gr.Radio(label="Ethnicity", choices=["Asian", "Black", "Hispanic", "White", "Other"])
            height = gr.Number(label="Height (cm)", value=170, minimum=50, maximum=250)
            weight = gr.Number(label="Weight (kg)", value=70, minimum=20, maximum=200)

        with gr.Column():
            submit_btn = gr.Button("Submit Questionnaire")
            reset_btn = gr.Button("Reset Chat")

    # Questionnaire with fashion-related questions
    style_preference = gr.Radio(label="Which style do you prefer the most?", choices=["Casual", "Formal", "Streetwear", "Athleisure", "Baggy"])
    color_palette = gr.Radio(label="What color palette do you wear often?", choices=["Neutrals", "Bright Colors", "Pastels", "Dark Shades"])
    everyday_style = gr.Radio(label="How would you describe your everyday style?", choices=["Relaxed", "Trendy", "Elegant", "Bold"])

    # Chat functionality
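    # type='messages' expects a list of {"role": ..., "content": ...} dicts, which is what chat() returns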
    chatbox = gr.Chatbot(type='messages')
    user_input = gr.Textbox(label="Your Message", placeholder="Type your message here...")

    # Connect the buttons to their respective functions
    output_message = gr.Textbox(label="Output Message", interactive=False)
    submit_btn.click(submit_questionnaire, inputs=[name, age, location, gender, ethnicity, height, weight,
                                                    style_preference, color_palette, everyday_style], outputs=output_message)

    reset_btn.click(reset_chat, outputs=[chatbox, output_message])
    # Pressing Enter in the textbox sends the message, updates the chat display, and clears the box
    user_input.submit(chat, inputs=user_input, outputs=[chatbox, user_input])

# Run the app
demo.launch()