ans123 committed on
Commit
f205342
·
verified ·
1 Parent(s): 03620de

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +26 -21
app.py CHANGED
@@ -1,23 +1,24 @@
1
  import gradio as gr
2
  import pandas as pd
3
  import torch
4
- from transformers import pipeline
5
 
6
  # Load the model pipeline
7
- model_id = "meta-llama/Llama-3.2-1B"
8
- pipe = pipeline(
9
- "text-generation",
10
- model=model_id,
11
- torch_dtype=torch.bfloat16,
12
- device_map="auto"
13
  )
14
 
15
- # Define the system message for the model
16
- system_message = (
17
- "You are an experienced Fashion designer who starts conversation with proper greeting, "
18
- "giving valuable and catchy fashion advice and suggestions, stays to the point and precise, "
19
- "asks questions only if the user has any concerns over your provided suggestions."
20
- )
 
21
 
22
  # Function to reset the chat
23
  def reset_chat():
@@ -54,19 +55,23 @@ def chat(user_input, messages):
54
  messages.append({"role": "user", "content": user_input})
55
 
56
  # Prepare the input for the model
57
- input_text = system_message + "\n" + "\n".join([f"{msg['role']}: {msg['content']}" for msg in messages])
58
-
 
59
  try:
60
- # Generate a response using the pipeline
61
- response = pipe(input_text, max_length=150, num_return_sequences=1, temperature=0.7)
62
- response_content = response[0]['generated_text'].split('\n')[-1].strip() # Extract the last line of the generated text
 
 
 
 
 
 
63
 
64
  except Exception as e:
65
  response_content = f"Error: {str(e)}"
66
 
67
- # Store assistant response in the chat history
68
- messages.append({"role": "assistant", "content": response_content})
69
-
70
  return messages, response_content
71
  return messages, ""
72
 
 
1
import gradio as gr
import pandas as pd
import torch
import transformers

# Load the text-generation pipeline once at module import time.
# NOTE(review): this pulls a gated 8B-parameter Llama checkpoint — it needs a
# Hugging Face token with access to the repo, and device_map="auto" expects an
# accelerator to be available; confirm the Space hardware matches.
model_id = "meta-llama/Meta-Llama-3.1-8B-Instruct"
pipeline = transformers.pipeline(
    "text-generation",
    model=model_id,
    model_kwargs={"torch_dtype": torch.bfloat16},  # halve memory vs fp32
    device_map="auto",
)

# Initial system message that frames the assistant's persona; downstream chat
# code flattens this together with the running message history before
# calling the pipeline.
system_message = {
    "role": "system",
    "content": "You are an experienced Fashion designer who starts conversation with proper greeting, "
    "giving valuable and catchy fashion advice and suggestions, stays to the point and precise, "
    "asks questions only if the user has any concerns over your provided suggestions."
}
22
 
23
  # Function to reset the chat
24
  def reset_chat():
 
55
  messages.append({"role": "user", "content": user_input})
56
 
57
  # Prepare the input for the model
58
+ input_text = messages.copy() # Make a copy of messages
59
+
60
+ # Generate a response using the pipeline
61
  try:
62
+ # Convert the messages to a format the model can understand
63
+ formatted_input = "\n".join([f"{msg['role']}: {msg['content']}" for msg in input_text])
64
+ response = pipeline(formatted_input, max_new_tokens=256)
65
+
66
+ # Extract the assistant's response
67
+ response_content = response[0]["generated_text"].strip()
68
+
69
+ # Store assistant response in the chat history
70
+ messages.append({"role": "assistant", "content": response_content})
71
 
72
  except Exception as e:
73
  response_content = f"Error: {str(e)}"
74
 
 
 
 
75
  return messages, response_content
76
  return messages, ""
77