hylee committed
Commit 1f758f7 · 1 Parent(s): d0d0944

try to log

Files changed (1): handler.py (+10, -5)
handler.py CHANGED
@@ -10,8 +10,7 @@ from utils import MultiHeadModel, BertInputBuilder, get_num_words
 import transformers
 from transformers import BertTokenizer, BertForSequenceClassification
 import psutil
-import time
-
+from transformers.utils import logging
 
 transformers.logging.set_verbosity_debug()
 
@@ -244,33 +243,39 @@ class EndpointHandler():
 
         print("Running inference on %d examples..." % transcript.length())
         cpu_percent = psutil.cpu_percent()
-        print(f"CPU Usage before models loaded: {cpu_percent}%")
+        logging.set_verbosity_info()
+        logger = logging.get_logger("transformers")
+        logger.info(f"CPU Usage before models loaded: {cpu_percent}%")
         # Uptake
         uptake_model = UptakeModel(
             self.device, self.tokenizer, self.input_builder)
         uptake_model.run_inference(transcript, min_prev_words=params['uptake_min_num_words'],
                                    uptake_speaker=params.pop("uptake_speaker", None))
         cpu_percent = psutil.cpu_percent()
-        print(f"CPU Usage after model 1 loaded: {cpu_percent}%")
+        logger.info(f"CPU Usage after model 1 loaded: {cpu_percent}%")
         del uptake_model
         cpu_percent = psutil.cpu_percent()
-        print(f"CPU Usage after model 1 deleted: {cpu_percent}%")
+        logger.info(f"CPU Usage after model 1 deleted: {cpu_percent}%")
         # Reasoning
         reasoning_model = ReasoningModel(
             self.device, self.tokenizer, self.input_builder)
         reasoning_model.run_inference(transcript)
         cpu_percent = psutil.cpu_percent()
+        logger.info(f"CPU Usage after model 2 loaded: {cpu_percent}%")
         print(f"CPU Usage after model 2 loaded: {cpu_percent}%")
         del reasoning_model
         cpu_percent = psutil.cpu_percent()
+        logger.info(f"CPU Usage after model 2 deleted: {cpu_percent}%")
         print(f"CPU Usage after model 2 deleted: {cpu_percent}%")
         # Question
         question_model = QuestionModel(
             self.device, self.tokenizer, self.input_builder)
         question_model.run_inference(transcript)
         cpu_percent = psutil.cpu_percent()
+        logger.info(f"CPU Usage after model 3 loaded: {cpu_percent}%")
         print(f"CPU Usage after model 3 loaded: {cpu_percent}%")
         del question_model
         cpu_percent = psutil.cpu_percent()
+        logger.info(f"CPU Usage after model 3 deleted: {cpu_percent}%")
         print(f"CPU Usage after model 3 deleted: {cpu_percent}%")
         return transcript.to_dict()
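
For context on the change above: the commit swaps bare print() calls for the transformers logging utilities, so the CPU-usage readings go through the library's logger rather than stdout only. Below is a minimal, self-contained sketch of that pattern; the report_cpu helper and the __main__ usage are illustrative assumptions, not part of handler.py.

import psutil
from transformers.utils import logging

# Route messages through the transformers logger instead of print(),
# as the commit does inside EndpointHandler.
logging.set_verbosity_info()
logger = logging.get_logger("transformers")

def report_cpu(stage: str) -> None:
    # Hypothetical helper: psutil.cpu_percent() with no interval reports
    # utilization since the previous call (the very first call returns 0.0).
    cpu_percent = psutil.cpu_percent()
    logger.info(f"CPU Usage {stage}: {cpu_percent}%")

if __name__ == "__main__":
    report_cpu("before models loaded")
    # ... load a model and run inference here ...
    report_cpu("after model 1 loaded")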