import os

import gradio as gr
import pandas as pd
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Load the model and tokenizer
model_name = "nvidia/Llama-3.1-Nemotron-70B-Instruct-HF"
model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=torch.bfloat16, device_map="auto")
tokenizer = AutoTokenizer.from_pretrained(model_name)
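# Note: a 70B-parameter model needs substantial GPU memory even in bfloat16 (on the order
# of 140 GB of weights), so device_map="auto" is relied on here to shard the model across
# whatever accelerators are available.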

# Define the system message for the model
system_message = (
    "You are an experienced fashion designer. Start the conversation with a proper greeting, "
    "give valuable and catchy fashion advice and suggestions, stay precise and to the point, "
    "and ask questions only if the user has concerns about the suggestions you provide."
)

# Function to reset the chat
def reset_chat():
    return [], "New Chat"  # Returns an empty chat history and a new title

# Function to handle the questionnaire submission
def submit_questionnaire(name, age, location, gender, ethnicity, height, weight,
                         style_preference, color_palette, everyday_style):
    # Store questionnaire responses in a DataFrame
    questionnaire_data = {
        "Name": name,
        "Age": age,
        "Location": location,
        "Gender": gender,
        "Ethnicity": ethnicity,
        "Height": height,
        "Weight": weight,
        "Style Preference": style_preference,
        "Color Palette": color_palette,
        "Everyday Style": everyday_style
    }

    df = pd.DataFrame([questionnaire_data])  # Single-row DataFrame from the dictionary

    # Append to the CSV file, writing the header only if the file does not exist yet
    csv_path = "questionnaire_responses.csv"
    df.to_csv(csv_path, mode="a", header=not os.path.exists(csv_path), index=False)

    return "Thank you for completing the questionnaire!"

# Function to handle chat
def chat(user_input, messages):
    if user_input:
        # Append user message to the conversation history
        messages.append({"role": "user", "content": user_input})

        # Prepare the input for the model
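        # (For an instruct-tuned model like this one, tokenizer.apply_chat_template(messages, ...)
        # would likely be a more idiomatic way to build the prompt; the simple role-prefixed
        # transcript below keeps the example minimal.)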
        input_text = system_message + "\n" + "\n".join([f"{msg['role']}: {msg['content']}" for msg in messages])
        
        # Tokenize and encode the input text
        inputs = tokenizer(input_text, return_tensors="pt").to(model.device)

        try:
            # Generate a bounded reply; max_new_tokens counts only generated tokens (max_length
            # would also count the prompt), and do_sample must be set for temperature to take effect
            outputs = model.generate(**inputs, max_new_tokens=150, num_return_sequences=1, do_sample=True, temperature=0.7)
            # Decode only the newly generated tokens so the echoed prompt is not shown as the reply
            response_content = tokenizer.decode(outputs[0][inputs["input_ids"].shape[-1]:], skip_special_tokens=True).strip()

        except Exception as e:
            response_content = f"Error: {str(e)}"

        # Store the assistant response in the chat history
        messages.append({"role": "assistant", "content": response_content})

        # Return the updated history for the Chatbot and clear the input textbox
        return messages, ""
    return messages, ""

# Gradio Interface
with gr.Blocks() as demo:
    gr.Markdown("## Fashion Assistant Chatbot")

    # Sidebar for user inputs
    with gr.Row():
        with gr.Column():
            name = gr.Textbox(label="Name")
            age = gr.Number(label="Age", value=25, minimum=1, maximum=100)
            location = gr.Textbox(label="Location")
            gender = gr.Radio(label="Gender", choices=["Male", "Female", "Other"])
            ethnicity = gr.Radio(label="Ethnicity", choices=["Asian", "Black", "Hispanic", "White", "Other"])
            height = gr.Number(label="Height (cm)", value=170, minimum=50, maximum=250)
            weight = gr.Number(label="Weight (kg)", value=70, minimum=20, maximum=200)

        with gr.Column():
            submit_btn = gr.Button("Submit Questionnaire")
            reset_btn = gr.Button("Reset Chat")

    # Questionnaire with fashion-related questions
    style_preference = gr.Radio(label="Which style do you prefer the most?", choices=["Casual", "Formal", "Streetwear", "Athleisure", "Baggy"])
    color_palette = gr.Radio(label="What color palette do you wear often?", choices=["Neutrals", "Bright Colors", "Pastels", "Dark Shades"])
    everyday_style = gr.Radio(label="How would you describe your everyday style?", choices=["Relaxed", "Trendy", "Elegant", "Bold"])

    # Chat functionality
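    # type="messages" makes the Chatbot expect a list of {"role": ..., "content": ...} dicts,
    # which matches the history format maintained by chat() above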
    chatbox = gr.Chatbot(type='messages')
    user_input = gr.Textbox(label="Your Message", placeholder="Type your message here...")

    # Connect the buttons to their respective functions
    output_message = gr.Textbox(label="Output Message")  # Define an output component
    submit_btn.click(submit_questionnaire, inputs=[name, age, location, gender, ethnicity, height, weight,
                                                    style_preference, color_palette, everyday_style], outputs=output_message)

    reset_btn.click(reset_chat, outputs=[chatbox, output_message])  # Clear the chat and the status message
    user_input.submit(chat, inputs=[user_input, chatbox], outputs=[chatbox, user_input])

# Run the app
demo.launch()
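# For a hosted deployment, calling demo.queue() before launch() helps manage concurrent
# generation requests; demo.launch(share=True) would additionally expose a temporary public link.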