feiyang-cai committed on
Commit 4294df8 · 1 Parent(s): 0e7c02e
Files changed (1)
  1. utils.py +3 -2
utils.py CHANGED
@@ -209,7 +209,7 @@ class ReactionPredictionModel():
         self.forward_model.to("cuda")
         self.forward_model.eval()
 
-    @spaces.GPU(duration=30)
+    @spaces.GPU(duration=60)
     def predict(self, test_loader, task_type):
         predictions = []
         for i, batch in tqdm(enumerate(test_loader), total=len(test_loader), desc="Evaluating"):
@@ -248,9 +248,10 @@ class ReactionPredictionModel():
             )
 
             print(outputs)
-            original_smiles_list = self.tokenizer.batch_decode(outputs[:, len(inputs['input_ids'][0]):],
+            original_smiles_list = self.tokenizer.batch_decode(outputs.detach().cpu().numpy()[:, len(inputs['input_ids'][0]):],
                                                                skip_special_tokens=True)
             original_smiles_list = map(lambda x: x.replace(" ", ""), original_smiles_list)
+            print(original_smiles_list)
             # canonize the SMILES
             canonized_smiles_list = []
             temp = []
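
For context, a minimal sketch of the decode path this commit touches, using a placeholder model and prompt rather than the Space's actual reaction model: generate() returns the prompt followed by the continuation, so the prompt length is sliced off before batch_decode(), and the commit now moves the output tensor to CPU/NumPy first. The duration bump on @spaces.GPU simply requests a longer ZeroGPU allocation for each predict() call. One thing worth noting: map() is lazy, so the added print(original_smiles_list) will show a map object; materializing it with list(), as in the sketch below, prints the decoded strings themselves.

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Placeholder model/tokenizer; the Space loads its own forward/retro models.
tokenizer = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2").eval()

# Toy prompt standing in for a tokenized reaction input.
inputs = tokenizer(["C C O >>"], return_tensors="pt")
with torch.no_grad():
    outputs = model.generate(**inputs, max_new_tokens=16)

# Drop the prompt tokens, then detach and move to CPU/NumPy before decoding (as in the commit).
generated = outputs.detach().cpu().numpy()[:, len(inputs["input_ids"][0]):]
decoded = tokenizer.batch_decode(generated, skip_special_tokens=True)

# map() is lazy; wrap it in list() so print() shows the strings, not a map object.
smiles = list(map(lambda x: x.replace(" ", ""), decoded))
print(smiles)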