# Friday / app.py
# Zephyr-7B-Beta fashion-advice chatbot built with Gradio.
import os

import gradio as gr
import pandas as pd
import torch
from transformers import pipeline
# Load the Zephyr-7B-Beta model pipeline.
# bfloat16 halves memory versus float32; device_map="auto" lets the
# accelerate backend place model layers on whatever GPU(s)/CPU are available.
# NOTE(review): this downloads/loads a 7B model at import time — confirm the
# deployment host has the VRAM/RAM for it.
pipe = pipeline(
    "text-generation",
    model="HuggingFaceH4/zephyr-7b-beta",
    torch_dtype=torch.bfloat16,
    device_map="auto"
)
# Define the initial system message: the persona prompt for the model, kept
# in chat-message dict form so it can be prepended to the running history.
# NOTE(review): verify that chat() actually folds this system message into
# the prompt it sends to the model.
system_message = {
    "role": "system",
    "content": "You are an experienced Fashion designer who starts conversation with proper greeting, "
               "giving valuable and catchy fashion advice and suggestions, stays to the point and precise, "
               "asks questions only if the user has any concerns over your provided suggestions."
}
def reset_chat():
    """Start a fresh conversation.

    Returns:
        A pair of (empty chat history, placeholder title string) used to
        reset the Chatbot component and the output textbox.
    """
    fresh_history = []
    placeholder_title = "New Chat"
    return fresh_history, placeholder_title
# Function to handle the questionnaire submission
def submit_questionnaire(name, age, location, gender, ethnicity, height, weight,
                         style_preference, color_palette, everyday_style):
    """Append one questionnaire submission as a row of a local CSV file.

    Parameters mirror the Gradio form fields one-to-one.

    Returns:
        A confirmation string displayed in the output textbox.
    """
    questionnaire_data = {
        "Name": name,
        "Age": age,
        "Location": location,
        "Gender": gender,
        "Ethnicity": ethnicity,
        "Height": height,
        "Weight": weight,
        "Style Preference": style_preference,
        "Color Palette": color_palette,
        "Everyday Style": everyday_style
    }
    df = pd.DataFrame([questionnaire_data])  # one-row DataFrame from the dict
    csv_path = "questionnaire_responses.csv"
    # Append to the CSV, writing the header only when the file does not yet
    # exist so repeated submissions stay well-formed.
    # Fix: os.path.exists replaces pd.io.common.file_exists, a private pandas
    # API that has been removed in recent pandas releases.
    df.to_csv(csv_path, mode='a', header=not os.path.exists(csv_path), index=False)
    return "Thank you for completing the questionnaire!"
# Function to handle chat
def chat(user_input, messages):
    """Handle one chat turn against the Zephyr pipeline.

    Appends the user message to ``messages`` (mutated in place), queries the
    model, appends the assistant reply, and returns the updated history.

    Args:
        user_input: Text typed into the message box (may be empty).
        messages: Running history as a list of {"role", "content"} dicts.

    Returns:
        (updated_history, textbox_value) — textbox_value is always "" so the
        input box is cleared. The original returned the model reply here,
        which overwrote the user's input box (outputs=[chatbox, user_input]).
    """
    if not user_input:
        # Nothing typed: leave the history untouched and keep the box empty.
        return messages, ""
    messages.append({"role": "user", "content": user_input})
    # Flatten the conversation into a plain-text prompt, with the persona
    # system prompt first — the original built the prompt from the history
    # alone, so system_message was never seen by the model.
    input_text = "\n".join(
        f"{msg['role']}: {msg['content']}" for msg in [system_message] + messages
    )
    try:
        response = pipe(input_text, max_new_tokens=256, return_full_text=False)
        response_content = response[0]['generated_text'].strip()
    except Exception as e:
        # Surface model/runtime errors inside the chat instead of crashing.
        response_content = f"Error: {str(e)}"
    # Record the reply (or the error) so it renders in the Chatbot component;
    # the original dropped error text from the history entirely.
    messages.append({"role": "assistant", "content": response_content})
    return messages, ""
# Gradio Interface: questionnaire form on the left, action buttons and
# fashion questions on the right, chat area underneath.
with gr.Blocks() as demo:
    gr.Markdown("## FRIDAY")
    # Sidebar for user inputs
    with gr.Row():
        with gr.Column():
            # Personal details collected by the questionnaire.
            name = gr.Textbox(label="Name")
            age = gr.Number(label="Age", value=25, minimum=1, maximum=100)
            location = gr.Textbox(label="Location")
            gender = gr.Radio(label="Gender", choices=["Male", "Female", "Other"])
            ethnicity = gr.Radio(label="Ethnicity", choices=["Asian", "Black", "Hispanic", "White", "Other"])
            height = gr.Number(label="Height (cm)", value=170, minimum=50, maximum=250)
            weight = gr.Number(label="Weight (kg)", value=70, minimum=20, maximum=200)
        with gr.Column():
            submit_btn = gr.Button("Submit Questionnaire")
            reset_btn = gr.Button("Reset Chat")
            # Questionnaire with fashion-related questions
            style_preference = gr.Radio(label="Which style do you prefer the most?", choices=["Casual", "Formal", "Streetwear", "Athleisure", "Baggy"])
            color_palette = gr.Radio(label="What color palette do you wear often?", choices=["Neutrals", "Bright Colors", "Pastels", "Dark Shades"])
            everyday_style = gr.Radio(label="How would you describe your everyday style?", choices=["Relaxed", "Trendy", "Elegant", "Bold"])
    # Chat functionality: type='messages' means the Chatbot consumes/produces
    # lists of {"role", "content"} dicts, matching what chat() maintains.
    chatbox = gr.Chatbot(type='messages')
    user_input = gr.Textbox(label="Your Message", placeholder="Type your message here...")
    # Connect the buttons to their respective functions.
    output_message = gr.Textbox(label="Output Message")  # Define an output component
    submit_btn.click(submit_questionnaire, inputs=[name, age, location, gender, ethnicity, height, weight,
                                                   style_preference, color_palette, everyday_style], outputs=output_message)
    # reset_chat() returns ([], "New Chat") -> clears the chat, retitles the box.
    reset_btn.click(reset_chat, outputs=[chatbox, output_message])  # Corrected outputs
    # chat() returns (history, textbox_value); the second value replaces the
    # contents of the user_input textbox after each submit.
    user_input.submit(chat, inputs=[user_input, chatbox], outputs=[chatbox, user_input])
# Run the app
demo.launch()