kb2022 committed on
Commit
3e9fdcb
·
verified ·
1 Parent(s): 34d1256

[add] probability mode

Browse files
Files changed (1) hide show
  1. utils/llm.py +27 -1
utils/llm.py CHANGED
@@ -19,6 +19,7 @@ def load_llm_from_pretrained(pretrained_model_path):
19
  device_map = {"": 0}
20
  else:
21
  device_map = {"": "cpu"}
 
22
 
23
  # モデルの準備
24
  model_name = "line-corporation/japanese-large-lm-3.6b"
@@ -86,4 +87,29 @@ NGワード:
86
  )
87
  output = tokenizer.decode(output_ids.tolist()[0][input_ids.size(1):])
88
 
89
- return output
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
19
  device_map = {"": 0}
20
  else:
21
  device_map = {"": "cpu"}
22
+ print(device_map)
23
 
24
  # モデルの準備
25
  model_name = "line-corporation/japanese-large-lm-3.6b"
 
87
  )
88
  output = tokenizer.decode(output_ids.tolist()[0][input_ids.size(1):])
89
 
90
+ return output
91
+
92
def estimate_probability(model, tokenizer, input_text, ngword_with_label):
    """Return the model's estimated probability that *input_text* is inappropriate.

    Builds a yes/no ("はい"/"いいえ") classification prompt, runs a single
    forward pass of the causal LM, and inspects the two most probable next
    tokens after the prompt. If the "はい" (yes) token is among them, its
    softmax probability is returned; otherwise 0.

    Args:
        model: causal language model; calling it with input ids must return
            an object exposing ``.logits`` of shape
            (batch, seq_len, vocab_size).
        tokenizer: tokenizer providing ``encode`` (to "pt" tensors) and
            ``decode``.
        input_text: the text to classify.
        ngword_with_label: iterable of NG-word strings joined into the prompt.

    Returns:
        float: probability of the "はい" token, or ``0`` when "はい" is not
        one of the two most probable next tokens.
    """
    # Prompt preparation (Japanese instruction text is runtime data — kept verbatim).
    prompt = f"""指示:
入力された文章が不適切(攻撃的、暴力的、差別的、性的、政治的)であるかどうか、「はい」か「いいえ」で回答してください。「はい」の場合は理由も添えてください。
{input_text}
NGワード:
{", ".join(ngword_with_label)}
応答:
"""
    input_ids = tokenizer.encode(prompt, add_special_tokens=False, return_tensors="pt")
    # Inference only — no gradients needed for a single scoring pass.
    with torch.no_grad():
        output = model(input_ids)

    # Distribution over the token that would follow the prompt.
    next_token_logits = output.logits[0, -1, :]
    next_token_probs = torch.softmax(next_token_logits, dim=-1)
    sorted_ids = torch.argsort(next_token_probs, descending=True, dim=-1)

    # BUG FIX: prob_d was used without initialization, raising NameError on
    # every call. Map the decoded top-2 tokens to their probabilities.
    prob_d = {}
    for i in range(2):
        prob_d[tokenizer.decode(sorted_ids[i])] = next_token_probs[sorted_ids[i]].item()

    if "はい" in prob_d:
        return prob_d["はい"]
    else:
        return 0