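"""Fashion Assistant Chatbot.

A Gradio app that collects a short style questionnaire, saves the responses to
a CSV file, and chats with the user via a Meta-Llama-3.1-8B-Instruct
text-generation pipeline prompted to act as a fashion designer.
"""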
import os

import gradio as gr
import pandas as pd
import torch
import transformers

# Load the model pipeline
model_id = "meta-llama/Meta-Llama-3.1-8B-Instruct"
pipeline = transformers.pipeline(
    "text-generation",
    model=model_id,
    model_kwargs={"torch_dtype": torch.bfloat16},
    device_map="auto",
)
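# Note: Meta-Llama-3.1-8B-Instruct is a gated model on the Hugging Face Hub, so
# access must be granted and a valid HF token configured. In bfloat16 the 8B
# weights need roughly 16 GB of accelerator memory; device_map="auto" spreads
# them across the available devices.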

# Define the initial system message
system_message = {
    "role": "system",
    "content": "You are an experienced fashion designer. Open the conversation with a proper greeting, "
               "give valuable and catchy fashion advice and suggestions, stay precise and to the point, "
               "and ask questions only if the user has concerns about the suggestions you provide."
}

# Function to reset the chat
def reset_chat():
    return [], "New Chat"  # Clear the chat history and reset the status message

# Function to handle the questionnaire submission
def submit_questionnaire(name, age, location, gender, ethnicity, height, weight,
                         style_preference, color_palette, everyday_style):
    # Store questionnaire responses in a DataFrame
    questionnaire_data = {
        "Name": name,
        "Age": age,
        "Location": location,
        "Gender": gender,
        "Ethnicity": ethnicity,
        "Height": height,
        "Weight": weight,
        "Style Preference": style_preference,
        "Color Palette": color_palette,
        "Everyday Style": everyday_style
    }

    df = pd.DataFrame([questionnaire_data])  # Create DataFrame from dictionary

    # Append to the CSV file, writing the header only if the file does not exist yet
    df.to_csv(
        "questionnaire_responses.csv",
        mode="a",
        header=not os.path.exists("questionnaire_responses.csv"),
        index=False,
    )

    return "Thank you for completing the questionnaire!"

# Function to handle chat
def chat(user_input, messages):
    if user_input:
        # Append the user message to the conversation history
        messages.append({"role": "user", "content": user_input})

        # Prepend the system message so the model keeps its persona
        conversation = [system_message] + messages

        # Generate a response using the pipeline (recent transformers versions
        # accept a list of chat messages directly and apply the chat template)
        try:
            response = pipeline(conversation, max_new_tokens=256)

            # The pipeline returns the full conversation; the last entry is the
            # newly generated assistant turn
            response_content = response[0]["generated_text"][-1]["content"].strip()

        except Exception as e:
            response_content = f"Error: {e}"

        # Store the assistant response in the chat history
        messages.append({"role": "assistant", "content": response_content})

        # Return the updated history and clear the input textbox
        return messages, ""
    return messages, ""

# Gradio Interface
with gr.Blocks() as demo:
    gr.Markdown("## Fashion Assistant Chatbot")

    # Sidebar for user inputs
    with gr.Row():
        with gr.Column():
            name = gr.Textbox(label="Name")
            age = gr.Number(label="Age", value=25, minimum=1, maximum=100)
            location = gr.Textbox(label="Location")
            gender = gr.Radio(label="Gender", choices=["Male", "Female", "Other"])
            ethnicity = gr.Radio(label="Ethnicity", choices=["Asian", "Black", "Hispanic", "White", "Other"])
            height = gr.Number(label="Height (cm)", value=170, minimum=50, maximum=250)
            weight = gr.Number(label="Weight (kg)", value=70, minimum=20, maximum=200)

        with gr.Column():
            submit_btn = gr.Button("Submit Questionnaire")
            reset_btn = gr.Button("Reset Chat")

    # Questionnaire with fashion-related questions
    style_preference = gr.Radio(label="Which style do you prefer the most?", choices=["Casual", "Formal", "Streetwear", "Athleisure", "Baggy"])
    color_palette = gr.Radio(label="What color palette do you wear often?", choices=["Neutrals", "Bright Colors", "Pastels", "Dark Shades"])
    everyday_style = gr.Radio(label="How would you describe your everyday style?", choices=["Relaxed", "Trendy", "Elegant", "Bold"])

    # Chat functionality
    chatbox = gr.Chatbot(type='messages')
    user_input = gr.Textbox(label="Your Message", placeholder="Type your message here...")

    # Status message shown after questionnaire submission or chat reset
    output_message = gr.Textbox(label="Output Message")

    # Connect the buttons and the input box to their respective functions
    submit_btn.click(
        submit_questionnaire,
        inputs=[name, age, location, gender, ethnicity, height, weight,
                style_preference, color_palette, everyday_style],
        outputs=output_message,
    )

    reset_btn.click(reset_chat, outputs=[chatbox, output_message])  # Clear the chat and update the status message
    user_input.submit(chat, inputs=[user_input, chatbox], outputs=[chatbox, user_input])

# Run the app
demo.launch()
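
# Optional: demo.launch(share=True) creates a temporary public link, and
# demo.queue() can be chained before launch() if generation latency causes timeouts.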