CamiloVega committed
Commit
485dd71
verified
1 Parent(s): e6dc8f0

Update app.py

Files changed (1)
  1. app.py +20 -30
app.py CHANGED
@@ -36,7 +36,7 @@ try:
     )
     logger.info("Tokenizer loaded successfully")
 
-    # Load model - modified model loading
+    # Load model
     logger.info("Loading model...")
     model = AutoModelForCausalLM.from_pretrained(
         model_name,
@@ -47,7 +47,7 @@ try:
     model = model.to(device)
     logger.info("Model loaded successfully")
 
-    # Create pipeline - modified pipeline creation
+    # Create pipeline
     logger.info("Creating generation pipeline...")
     model_gen = pipeline(
         "text-generation",
@@ -67,13 +67,10 @@ except Exception as e:
     raise
 
 # Configure system message
-system_message = {
-    "role": "system",
-    "content": """You are AQuaBot, an AI assistant aware of environmental impact.
-    You help users with any topic while raising awareness about water consumption
-    in AI. Did you know that training GPT-3 consumed 5.4 million liters of water,
-    equivalent to the daily consumption of a city of 10,000 people?"""
-}
+system_message = """You are AQuaBot, an AI assistant aware of environmental impact.
+You help users with any topic while raising awareness about water consumption
+in AI. Did you know that training GPT-3 consumed 5.4 million liters of water,
+equivalent to the daily consumption of a city of 10,000 people?"""
 
 # Constants for water consumption calculation
 WATER_PER_TOKEN = {
@@ -84,7 +81,6 @@ WATER_PER_TOKEN = {
 }
 
 # Initialize variables
-messages = [system_message]
 total_water_consumption = 0
 
 def calculate_tokens(text):
@@ -100,30 +96,27 @@ def calculate_water_consumption(text, is_input=True):
         return tokens * (WATER_PER_TOKEN["input_training"] + WATER_PER_TOKEN["input_inference"])
     return tokens * (WATER_PER_TOKEN["output_training"] + WATER_PER_TOKEN["output_inference"])
 
+def format_message(role, content):
+    return {"role": role, "content": content}
+
 @spaces.GPU(duration=60)
 @torch.inference_mode()
 def generate_response(user_input, chat_history):
     try:
         logger.info("Generating response for user input...")
-        global total_water_consumption, messages
+        global total_water_consumption
 
         # Calculate water consumption for input
         input_water_consumption = calculate_water_consumption(user_input, True)
         total_water_consumption += input_water_consumption
 
-        # Add user input to messages
-        messages.append({"role": "user", "content": user_input})
-
         # Create prompt
-        prompt = ""
-        for m in messages:
-            if m["role"] == "system":
-                prompt += f"<START SYSTEM MESSAGE>\n{m['content']}\n<END SYSTEM MESSAGE>\n\n"
-            elif m["role"] == "user":
-                prompt += f"User: {m['content']}\n"
-            else:
-                prompt += f"Assistant: {m['content']}\n"
-        prompt += "Assistant:"
+        conversation_history = ""
+        if chat_history:
+            for message in chat_history:
+                conversation_history += f"User: {message[0]}\nAssistant: {message[1]}\n"
+
+        prompt = f"{system_message}\n\n{conversation_history}User: {user_input}\nAssistant:"
 
         logger.info("Generating model response...")
         outputs = model_gen(
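The rewrite above replaces the global messages list with a prompt built from Gradio's pair-style chat history plus the plain-string system_message. A self-contained sketch of what the new construction produces (the history contents are invented for illustration):

```python
system_message = "You are AQuaBot, an AI assistant aware of environmental impact."

# Hypothetical one-turn history in the [user, assistant] pair format used by the handler
chat_history = [["Hi", "Hello! How can I help?"]]
user_input = "How much water does AI use?"

conversation_history = ""
if chat_history:
    for message in chat_history:
        conversation_history += f"User: {message[0]}\nAssistant: {message[1]}\n"

prompt = f"{system_message}\n\n{conversation_history}User: {user_input}\nAssistant:"
print(prompt)
# You are AQuaBot, an AI assistant aware of environmental impact.
#
# User: Hi
# Assistant: Hello! How can I help?
# User: How much water does AI use?
# Assistant:
```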
@@ -140,11 +133,8 @@ def generate_response(user_input, chat_history):
         output_water_consumption = calculate_water_consumption(assistant_response, False)
         total_water_consumption += output_water_consumption
 
-        # Add assistant's response to messages
-        messages.append({"role": "assistant", "content": assistant_response})
-
-        # Update chat history
-        chat_history.append((user_input, assistant_response))
+        # Update chat history with the new formatted messages
+        chat_history.append([user_input, assistant_response])
 
         # Prepare water consumption message
         water_message = f"""
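For context on the accounting in this hunk: every input and output string is charged a combined training-plus-inference water cost per token. The per-token constants are elided from this diff, so the figures below are placeholders; a minimal sketch of the bookkeeping, with a whitespace tokenizer standing in for the real one:

```python
# Placeholder per-token figures (liters) - the real WATER_PER_TOKEN values are not shown in this diff
WATER_PER_TOKEN = {
    "input_training": 0.0001,
    "input_inference": 0.00005,
    "output_training": 0.0002,
    "output_inference": 0.0001,
}

def calculate_tokens(text):
    # Crude whitespace count standing in for the tokenizer-based count in app.py
    return len(text.split())

def calculate_water_consumption(text, is_input=True):
    tokens = calculate_tokens(text)
    if is_input:
        return tokens * (WATER_PER_TOKEN["input_training"] + WATER_PER_TOKEN["input_inference"])
    return tokens * (WATER_PER_TOKEN["output_training"] + WATER_PER_TOKEN["output_inference"])

total_water_consumption = 0
total_water_consumption += calculate_water_consumption("How much water does AI use?", True)
total_water_consumption += calculate_water_consumption("Quite a lot, as it turns out.", False)
print(f"{total_water_consumption:.5f} liters")  # 0.00300 liters with these placeholder figures
```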
@@ -166,7 +156,7 @@ def generate_response(user_input, chat_history):
     except Exception as e:
         logger.error(f"Error in generate_response: {str(e)}")
         error_message = f"An error occurred: {str(e)}"
-        chat_history.append((user_input, error_message))
+        chat_history.append([user_input, error_message])
         return chat_history, show_water
 
 # Create Gradio interface
@@ -183,7 +173,7 @@ try:
         </div>
         """)
 
-        chatbot = gr.Chatbot(type="messages")
+        chatbot = gr.Chatbot()
         message = gr.Textbox(
             placeholder="Type your message here...",
             show_label=False
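Dropping type="messages" reverts the Chatbot to Gradio's default pair format, which matches the [user_input, assistant_response] lists the handler now appends. A minimal sketch of that wiring, with an echo function standing in for generate_response (the component layout is an assumption based on the snippets above):

```python
import gradio as gr

def respond(user_input, chat_history):
    # Stand-in for generate_response: append a [user, assistant] pair and clear the textbox
    chat_history.append([user_input, f"You said: {user_input}"])
    return chat_history, ""

with gr.Blocks() as demo:
    chatbot = gr.Chatbot()  # default pair format, matching the change above
    message = gr.Textbox(placeholder="Type your message here...", show_label=False)
    message.submit(respond, [message, chatbot], [chatbot, message])

demo.launch()
```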
 