ans123 committed on
Commit
96cb6bb
·
verified ·
1 Parent(s): 50238a6

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +32 -34
app.py CHANGED
@@ -1,38 +1,32 @@
1
  import gradio as gr
 
 
2
 
3
  # Load the Qwen2.5-72B-Instruct model
4
- model = gr.load("models/Qwen/Qwen2.5-72B-Instruct")
5
-
6
- # Define the initial system message
7
- system_message = {
8
- "role": "system",
9
- "content": "You are an experienced Fashion designer who starts conversation with proper greeting, "
10
- "giving valuable and catchy fashion advice and suggestions, stays to the point and precise, "
11
- "asks questions only if the user has any concerns over your provided suggestions."
12
- }
 
 
 
13
 
14
  # Function to reset the chat
15
  def reset_chat():
16
- return [], "New Chat" # Returns an empty chat history and a new title
 
 
17
 
18
- # Function to handle the questionnaire submission
19
  def submit_questionnaire(name, age, location, gender, ethnicity, height, weight,
20
  style_preference, color_palette, everyday_style):
21
- # Store questionnaire responses in a DataFrame
22
- questionnaire_data = {
23
- "Name": name,
24
- "Age": age,
25
- "Location": location,
26
- "Gender": gender,
27
- "Ethnicity": ethnicity,
28
- "Height": height,
29
- "Weight": weight,
30
- "Style Preference": style_preference,
31
- "Color Palette": color_palette,
32
- "Everyday Style": everyday_style
33
- }
34
-
35
- # Here you can add logic to save the data as required, e.g., in a CSV
36
  return "Thank you for completing the questionnaire!"
37
 
38
  # Function to handle chat
@@ -44,18 +38,22 @@ def chat(user_input, messages):
44
  # Prepare input for the model
45
  input_text = "\n".join([f"{msg['role']}: {msg['content']}" for msg in messages])
46
 
47
- # Generate a response using the Qwen model
48
  try:
49
- response = model(input_text) # Call the model directly
50
- # Assuming the response is a list with one dictionary containing 'generated_text'
51
- response_content = response[0]['generated_text'].strip() # Access the generated text
52
-
53
- # Store assistant response in the chat history
54
- messages.append({"role": "assistant", "content": response_content})
 
55
 
56
  except Exception as e:
57
  response_content = f"Error: {str(e)}"
58
 
 
 
 
59
  return messages, response_content
60
  return messages, ""
61
 
@@ -88,7 +86,7 @@ with gr.Blocks() as demo:
88
  user_input = gr.Textbox(label="Your Message", placeholder="Type your message here...")
89
 
90
  # Connect the buttons to their respective functions
91
- output_message = gr.Textbox(label="Output Message") # Define an output component
92
  submit_btn.click(submit_questionnaire, inputs=[name, age, location, gender, ethnicity, height, weight,
93
  style_preference, color_palette, everyday_style], outputs=output_message)
94
 
 
1
  import gradio as gr
2
+ import torch
3
+ from transformers import pipeline
4
 
5
# Load the Qwen2.5-72B-Instruct model.
# Fixed model id: the prior revision (and the comment above) use the official
# "Qwen/Qwen2.5-72B-Instruct" repo; "HuggingFaceH4/Qwen2.5-72B-Instruct" does
# not host this model and would fail at load time.
pipe = pipeline(
    "text-generation",
    model="Qwen/Qwen2.5-72B-Instruct",
    model_kwargs={"torch_dtype": torch.bfloat16},
    device_map="auto",
)

# Initial messages list for chat history; the system entry sets the
# fashion-designer persona used by every subsequent chat turn.
messages = [
    {"role": "system", "content": "You are an experienced Fashion designer who starts conversation with proper greeting, "
                                  "giving valuable and catchy fashion advice and suggestions, stays to the point and precise."}
]
18
 
19
# Function to reset the chat
def reset_chat():
    """Start a fresh conversation.

    Returns:
        tuple: an empty chat history for the UI and the new chat title.
    """
    global messages
    # Re-seed the history with the system prompt instead of emptying it:
    # a bare [] would drop the fashion-designer persona defined at module
    # load, so every post-reset conversation would run without it.
    messages = [
        {"role": "system", "content": "You are an experienced Fashion designer who starts conversation with proper greeting, "
                                      "giving valuable and catchy fashion advice and suggestions, stays to the point and precise."}
    ]
    return [], "New Chat"
24
 
25
# Function to handle questionnaire submission
def submit_questionnaire(name, age, location, gender, ethnicity, height, weight,
                         style_preference, color_palette, everyday_style):
    """Acknowledge a completed style questionnaire.

    All fields are currently accepted but not persisted anywhere —
    storage (e.g. a DataFrame or CSV) is still a placeholder.

    Returns:
        str: a fixed thank-you message shown in the UI.
    """
    return "Thank you for completing the questionnaire!"
31
 
32
  # Function to handle chat
 
38
  # Prepare input for the model
39
  input_text = "\n".join([f"{msg['role']}: {msg['content']}" for msg in messages])
40
 
41
+ # Generate a response using the model
42
  try:
43
+ response = pipe(input_text, max_new_tokens=256) # Using the pipeline
44
+
45
+ # Check if response is valid and structured correctly
46
+ if isinstance(response, list) and len(response) > 0:
47
+ response_content = response[0]['generated_text'].strip() # Accessing generated text
48
+ else:
49
+ response_content = "Sorry, I couldn't generate a response."
50
 
51
  except Exception as e:
52
  response_content = f"Error: {str(e)}"
53
 
54
+ # Store assistant response in the chat history
55
+ messages.append({"role": "assistant", "content": response_content})
56
+
57
  return messages, response_content
58
  return messages, ""
59
 
 
86
  user_input = gr.Textbox(label="Your Message", placeholder="Type your message here...")
87
 
88
  # Connect the buttons to their respective functions
89
+ output_message = gr.Textbox(label="Output Message")
90
  submit_btn.click(submit_questionnaire, inputs=[name, age, location, gender, ethnicity, height, weight,
91
  style_preference, color_palette, everyday_style], outputs=output_message)
92