Sangjun2 committed
Commit b1ad3e2 · verified · 1 Parent(s): 3e334b3

add gpu decorator

Files changed (1):
  app.py +7 -0
app.py CHANGED
@@ -47,20 +47,26 @@ t5_model_path = './ke_t5.pt'
 # Load first model ko-deplot
 processor1 = Pix2StructProcessor.from_pretrained('nuua/ko-deplot')
 model1 = Pix2StructForConditionalGeneration.from_pretrained('nuua/ko-deplot')
+@spaces.GPU(enable_queue=True)
 model1.load_state_dict(torch.load(ko_deplot_model_path, map_location=device))
+@spaces.GPU(enable_queue=True)
 model1.to(device)
 
 # Load second model aihub-deplot
 processor2 = AutoProcessor.from_pretrained("ybelkada/pix2struct-base")
 model2 = Pix2StructForConditionalGeneration.from_pretrained("ybelkada/pix2struct-base")
+@spaces.GPU(enable_queue=True)
 model2.load_state_dict(torch.load(aihub_deplot_model_path, map_location=device))
 
 
 tokenizer = T5Tokenizer.from_pretrained("KETI-AIR/ke-t5-base")
 t5_model = T5ForConditionalGeneration.from_pretrained("KETI-AIR/ke-t5-base")
+@spaces.GPU(enable_queue=True)
 t5_model.load_state_dict(torch.load(t5_model_path, map_location=device))
 
+@spaces.GPU(enable_queue=True)
 model2.to(device)
+@spaces.GPU(enable_queue=True)
 t5_model.to(device)
 
 #Load third model unichart
@@ -68,6 +74,7 @@ unichart_model_path = "./unichart"
 model3 = VisionEncoderDecoderModel.from_pretrained(unichart_model_path)
 processor3 = DonutProcessor.from_pretrained(unichart_model_path)
 device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+@spaces.GPU(enable_queue=True)
 model3.to(device)
 
 #ko-deplot inference function
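
For context, @spaces.GPU on a ZeroGPU Space is normally applied to a callable that performs the GPU-bound work, so a GPU is allocated for the duration of that call. Below is a minimal sketch of that pattern for the ko-deplot model loaded above; the function name ko_deplot_infer, the prompt string, and the generation settings are illustrative assumptions and not part of this commit, while the decorator, its enable_queue argument, and the model checkpoint come from the diff.

# Sketch only (not part of this commit): @spaces.GPU wraps a callable that
# needs the GPU; function name, prompt, and max_new_tokens are hypothetical.
import spaces
import torch
from PIL import Image
from transformers import Pix2StructForConditionalGeneration, Pix2StructProcessor

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
processor1 = Pix2StructProcessor.from_pretrained("nuua/ko-deplot")
model1 = Pix2StructForConditionalGeneration.from_pretrained("nuua/ko-deplot").to(device)

@spaces.GPU(enable_queue=True)  # same decorator and argument as in the diff above
def ko_deplot_infer(image: Image.Image) -> str:
    # Run chart-to-table generation while a GPU is allocated for this call.
    inputs = processor1(
        images=image,
        text="Generate underlying data table of the figure below:",
        return_tensors="pt",
    ).to(device)
    output_ids = model1.generate(**inputs, max_new_tokens=512)
    return processor1.decode(output_ids[0], skip_special_tokens=True)

A function decorated this way would then presumably be wired to the Space's existing Gradio interface in app.py, so each request queues for a GPU only while inference runs.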