ans123 committed (verified)
Commit 9002a71 · 1 Parent(s): a36d7f8

Update app.py

Files changed (1)
  1. app.py +104 -1
app.py CHANGED
@@ -1,4 +1,107 @@
  import gradio as gr
+ import torch
+ from transformers import AutoModelForCausalLM, AutoTokenizer
 
  # Load the Qwen2.5-72B-Instruct model
- model = gr.load("models/Qwen/Qwen2.5-72B-Instruct")
+ model_name = "Qwen/Qwen2.5-72B-Instruct"
+ model = AutoModelForCausalLM.from_pretrained(
+     model_name,
+     torch_dtype="auto",
+     device_map="auto"
+ )
+ tokenizer = AutoTokenizer.from_pretrained(model_name)
+
+ # Initial messages list for chat history
+ messages = [
+     {"role": "system", "content": "You are an experienced Fashion designer who starts conversation with proper greeting, "
+                                    "giving valuable and catchy fashion advice and suggestions, stays to the point and precise."}
+ ]
+
+ # Function to reset the chat
+ def reset_chat():
+     global messages
+     messages = []  # Reset the message history
+     return [], "New Chat"
+
+ # Function to handle questionnaire submission
+ def submit_questionnaire(name, age, location, gender, ethnicity, height, weight,
+                          style_preference, color_palette, everyday_style):
+     # Store questionnaire responses as needed
+     # Placeholder logic for storing responses
+     return "Thank you for completing the questionnaire!"
+
+ # Function to handle chat
+ def chat(user_input):
+     global messages
+     if user_input:
+         # Append user message to the conversation history
+         messages.append({"role": "user", "content": user_input})
+
+         # Prepare input for the model
+         text = tokenizer.apply_chat_template(
+             messages,
+             tokenize=False,
+             add_generation_prompt=True
+         )
+         model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
+
+         # Generate a response using the model
+         try:
+             generated_ids = model.generate(
+                 **model_inputs,
+                 max_new_tokens=512
+             )
+             generated_ids = [
+                 output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
+             ]
+
+             # Decode the response
+             response_content = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0].strip()
+
+         except Exception as e:
+             response_content = f"Error: {str(e)}"
+
+         # Store assistant response in the chat history
+         messages.append({"role": "assistant", "content": response_content})
+
+         return messages, response_content
+     return messages, ""
+
+ # Gradio Interface
+ with gr.Blocks() as demo:
+     gr.Markdown("## Fashion Assistant Chatbot")
+
+     # Sidebar for user inputs
+     with gr.Row():
+         with gr.Column():
+             name = gr.Textbox(label="Name")
+             age = gr.Number(label="Age", value=25, minimum=1, maximum=100)
+             location = gr.Textbox(label="Location")
+             gender = gr.Radio(label="Gender", choices=["Male", "Female", "Other"])
+             ethnicity = gr.Radio(label="Ethnicity", choices=["Asian", "Black", "Hispanic", "White", "Other"])
+             height = gr.Number(label="Height (cm)", value=170, minimum=50, maximum=250)
+             weight = gr.Number(label="Weight (kg)", value=70, minimum=20, maximum=200)
+
+         with gr.Column():
+             submit_btn = gr.Button("Submit Questionnaire")
+             reset_btn = gr.Button("Reset Chat")
+
+     # Questionnaire with fashion-related questions
+     style_preference = gr.Radio(label="Which style do you prefer the most?", choices=["Casual", "Formal", "Streetwear", "Athleisure", "Baggy"])
+     color_palette = gr.Radio(label="What color palette do you wear often?", choices=["Neutrals", "Bright Colors", "Pastels", "Dark Shades"])
+     everyday_style = gr.Radio(label="How would you describe your everyday style?", choices=["Relaxed", "Trendy", "Elegant", "Bold"])
+
+     # Chat functionality
+     chatbox = gr.Chatbot(type='messages')
+     user_input = gr.Textbox(label="Your Message", placeholder="Type your message here...")
+
+     # Connect the buttons to their respective functions
+     output_message = gr.Textbox(label="Output Message", interactive=False)
+     submit_btn.click(submit_questionnaire, inputs=[name, age, location, gender, ethnicity, height, weight,
+                                                    style_preference, color_palette, everyday_style], outputs=output_message)
+
+     reset_btn.click(reset_chat, outputs=[chatbox, output_message])  # Corrected outputs
+     user_input.submit(chat, inputs=user_input, outputs=[chatbox, user_input])  # Fixed to include chatbox output
+
+ # Run the app
+ demo.launch()