hylee committed
Commit f776d9e · 1 Parent(s): c9e7917
Files changed (1): handler.py +1 -71
handler.py CHANGED
@@ -31,7 +31,6 @@ class Utterance:
         self.role = None
         self.word_count = self.get_num_words()
         self.timestamp = [starttime, endtime]
-        # self.unit_measure = endtime - starttime
         self.unit_measure = None
         self.aggregate_unit_measure = endtime
 
@@ -310,94 +309,25 @@ class EndpointHandler():
             transcript.add_utterance(Utterance(**utt))
 
         print("Running inference on %d examples..." % transcript.length())
-        # cpu_percent = psutil.cpu_percent()
         logging.set_verbosity_info()
-        # logger = logging.get_logger("transformers")
-        # logger.info(f"CPU Usage before models loaded: {cpu_percent}%")
-        # mem_info = psutil.virtual_memory()
-        # used_mem = mem_info.used / (1024 ** 3)  # Convert to gigabytes
-        # total_mem = mem_info.total / (1024 ** 3)  # Convert to gigabytes
-        # logger.info(
-        #     f"Used Memory before models loaded: {used_mem:.2f} GB, Total RAM: {total_mem:.2f} GB")
-
         # Uptake
         uptake_model = UptakeModel(
             self.device, self.tokenizer, self.input_builder)
         uptake_speaker = params.pop("uptake_speaker", None)
         uptake_model.run_inference(transcript, min_prev_words=params['uptake_min_num_words'],
                                    uptake_speaker=uptake_speaker)
-
-        # cpu_percent = psutil.cpu_percent()
-        # mem_info = psutil.virtual_memory()
-        # used_mem = mem_info.used / (1024 ** 3)  # Convert to gigabytes
-        # total_mem = mem_info.total / (1024 ** 3)  # Convert to gigabytes
-        # logger.info(
-        #     f"Used Memory after model 1 loaded: {used_mem:.2f} GB, Total Mem: {total_mem:.2f} GB")
-        # logger.info(f"CPU Usage after model 1 loaded: {cpu_percent}%")
-        # del uptake_model
-        # cpu_percent = psutil.cpu_percent()
-        # mem_info = psutil.virtual_memory()
-        # used_mem = mem_info.used / (1024 ** 3)  # Convert to gigabytes
-        # total_mem = mem_info.total / (1024 ** 3)  # Convert to gigabytes
-        # logger.info(f"Used Memory after model 1 deleted: {used_mem:.2f} GB, Total Mem: {total_mem:.2f} GB")
-        # logger.info(f"CPU Usage after model 1 deleted: {cpu_percent}%")
         # Reasoning
         reasoning_model = ReasoningModel(
             self.device, self.tokenizer, self.input_builder)
         reasoning_model.run_inference(transcript)
-        # cpu_percent = psutil.cpu_percent()
-        # mem_info = psutil.virtual_memory()
-        # used_mem = mem_info.used / (1024 ** 3)  # Convert to gigabytes
-        # total_mem = mem_info.total / (1024 ** 3)  # Convert to gigabytes
-        # logger.info(
-        #     f"Used Memory after model 2 loaded: {used_mem:.2f} GB, Total Mem: {total_mem:.2f} GB")
-        # logger.info(f"CPU Usage after model 2 loaded: {cpu_percent}%")
-        # # print(f"CPU Usage after model 2 loaded: {cpu_percent}%")
-        # # del reasoning_model
-        # cpu_percent = psutil.cpu_percent()
-        # mem_info = psutil.virtual_memory()
-        # used_mem = mem_info.used / (1024 ** 3)  # Convert to gigabytes
-        # total_mem = mem_info.total / (1024 ** 3)  # Convert to gigabytes
-        # logger.info(f"Used Memory after model 2 deleted: {used_mem:.2f} GB, Total Mem: {total_mem:.2f} GB")
-        # logger.info(f"CPU Usage after model 2 deleted: {cpu_percent}%")
-        # print(f"CPU Usage after model 2 deleted: {cpu_percent}%")
+
         # Question
        question_model = QuestionModel(
            self.device, self.tokenizer, self.input_builder)
        question_model.run_inference(transcript)
-        # cpu_percent = psutil.cpu_percent()
-        # logger.info(f"CPU Usage after model 3 loaded: {cpu_percent}%")
-        # mem_info = psutil.virtual_memory()
-        # used_mem = mem_info.used / (1024 ** 3)  # Convert to gigabytes
-        # total_mem = mem_info.total / (1024 ** 3)  # Convert to gigabytes
-        # logger.info(
-        #     f"Used Memory after model 3 loaded: {used_mem:.2f} GB, Total Mem: {total_mem:.2f} GB")
-        # print(f"CPU Usage after model 3 loaded: {cpu_percent}%")
-        # del question_model
-        # cpu_percent = psutil.cpu_percent()
-        # logger.info(f"CPU Usage after model 3 deleted: {cpu_percent}%")
-        # mem_info = psutil.virtual_memory()
-        # used_mem = mem_info.used / (1024 ** 3)  # Convert to gigabytes
-        # total_mem = mem_info.total / (1024 ** 3)  # Convert to gigabytes
-        # logger.info(f"Used Memory after model 3 deleted: {used_mem:.2f} GB, Total Mem: {total_mem:.2f} GB")
-        # print(f"CPU Usage after model 3 deleted: {cpu_percent}%")
         transcript.update_utterance_roles
         talk_dist, talk_len = transcript.get_talk_distribution_and_length(uptake_speaker)
         talk_timeline = transcript.get_talk_timeline()
         word_cloud = transcript.get_word_cloud_dicts()
 
-        # return transcript.to_dict(), talk_dist, talk_len, talk_timeline, word_cloud
         return talk_dist, talk_len, talk_timeline, word_cloud
-
-    # {
-    #     "inputs": [
-    #         {"uid": "1", "speaker": "Alice", "text": "How much is the fish?"},
-    #         {"uid": "2", "speaker": "Bob", "text": "I do not know about the fish. Because you put a long side and it’s a long side. What do you think."},
-    #         {"uid": "3", "speaker": "Alice", "text": "OK, thank you Bob."}
-    #     ],
-    #     "parameters": {
-    #         "uptake_min_num_words": 5,
-    #         "uptake_speaker": "Bob",
-    #         "filename": "sample.csv"
-    #     }
-    # }
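Most of the 71 deleted lines are the same commented-out psutil checkpoint repeated after each model stage. If that bookkeeping is ever needed again, it collapses into a single helper. A minimal sketch, assuming psutil is installed and that the file imports logging from transformers (as the logging.set_verbosity_info() call suggests); log_resource_usage and the stage label are illustrative names, not part of this repo:

import psutil
from transformers import logging

logger = logging.get_logger("transformers")

def log_resource_usage(stage: str) -> None:
    # Mirrors the deleted debug comments: CPU percent plus used/total RAM in GB.
    cpu_percent = psutil.cpu_percent()
    mem_info = psutil.virtual_memory()
    used_mem = mem_info.used / (1024 ** 3)    # bytes -> gigabytes
    total_mem = mem_info.total / (1024 ** 3)  # bytes -> gigabytes
    logger.info(f"CPU Usage {stage}: {cpu_percent}%")
    logger.info(f"Used Memory {stage}: {used_mem:.2f} GB, Total Mem: {total_mem:.2f} GB")

Calling log_resource_usage("after model 1 loaded") after each run_inference call would reproduce the removed checkpoints without the copy-paste.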
 
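The trailing comment block deleted by this commit preserved a sample request payload. For reference, a sketch of exercising the handler with it, assuming EndpointHandler follows the usual Hugging Face Inference Endpoints custom-handler convention (constructed with a model directory path, then invoked as a callable with the request dict); the module name handler and the path "." are assumptions:

from handler import EndpointHandler

# Path to the directory holding the model artifacts; "." is a placeholder.
handler = EndpointHandler(path=".")

# Sample payload from the comment block removed at the end of handler.py.
payload = {
    "inputs": [
        {"uid": "1", "speaker": "Alice", "text": "How much is the fish?"},
        {"uid": "2", "speaker": "Bob", "text": "I do not know about the fish. Because you put a long side and it’s a long side. What do you think."},
        {"uid": "3", "speaker": "Alice", "text": "OK, thank you Bob."},
    ],
    "parameters": {
        "uptake_min_num_words": 5,
        "uptake_speaker": "Bob",
        "filename": "sample.csv",
    },
}

# As of this commit the handler returns four values; transcript.to_dict()
# was dropped from the return statement.
talk_dist, talk_len, talk_timeline, word_cloud = handler(payload)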