liujch1998 committed on
Commit
25ede7d
·
1 Parent(s): b6a4c0b
Files changed (1) hide show
  1. app.py +1 -2
app.py CHANGED
@@ -56,11 +56,10 @@ class Processor:
56
 
57
  prompts = [question + (f' \\n Knowledge: {knowledge} \\n Answer: ' if knowledge != '' else ' \\n Answer:') for knowledge in knowledges]
58
  prompts_tok = self.tokenizer(prompts, return_tensors='pt', padding='max_length', truncation='longest_first', max_length=max_question_len + max_knowledge_len).to(device) # (1+K, QL+KL)
59
- print(prompts_tok.input_ids.size(), choices_ids[0].unsqueeze(0).expand(len(knowledges), -1).size())
60
  output = self.model(
61
  input_ids=prompts_tok.input_ids,
62
  attention_mask=prompts_tok.attention_mask,
63
- labels=choices_ids[0].unsqueeze(0).expand(len(knowledges), -1),
64
  )
65
  logitsss = output.logits # (1+K, AL, V)
66
  logitss = logitsss[:, 0, :] # (1+K, V)
 
56
 
57
  prompts = [question + (f' \\n Knowledge: {knowledge} \\n Answer: ' if knowledge != '' else ' \\n Answer:') for knowledge in knowledges]
58
  prompts_tok = self.tokenizer(prompts, return_tensors='pt', padding='max_length', truncation='longest_first', max_length=max_question_len + max_knowledge_len).to(device) # (1+K, QL+KL)
 
59
  output = self.model(
60
  input_ids=prompts_tok.input_ids,
61
  attention_mask=prompts_tok.attention_mask,
62
+ labels=choices_ids[0].unsqueeze(0).repeat(len(knowledges), 1),
63
  )
64
  logitsss = output.logits # (1+K, AL, V)
65
  logitss = logitsss[:, 0, :] # (1+K, V)