eljanmahammadli committed
Commit 9df8406
Parent: dee0f90

added human vs ai highlighter

.gitignore CHANGED
@@ -1,6 +1 @@
- __pycache__/analysis.cpython-311.pyc
- __pycache__/app.cpython-311.pyc
- __pycache__/explainability.cpython-311.pyc
- __pycache__/plagiarism.cpython-311.pyc
- __pycache__/predictors.cpython-311.pyc
- __pycache__/utils.cpython-311.pyc
+ __pycache__/
 
__pycache__/analysis.cpython-311.pyc DELETED
Binary file (4.75 kB)
 
__pycache__/app.cpython-311.pyc DELETED
Binary file (10.9 kB)
 
__pycache__/explainability.cpython-311.pyc DELETED
Binary file (7.89 kB)
 
__pycache__/plagiarism.cpython-311.pyc DELETED
Binary file (14.1 kB)
 
__pycache__/predictors.cpython-311.pyc DELETED
Binary file (12 kB)
 
__pycache__/utils.cpython-310.pyc DELETED
Binary file (7.17 kB)
 
__pycache__/utils.cpython-311.pyc DELETED
Binary file (3.76 kB)
 
__pycache__/utils.cpython-39.pyc DELETED
Binary file (7.19 kB)
 
__pycache__/writing_analysis.cpython-310.pyc DELETED
Binary file (4.57 kB)
 
__pycache__/writing_analysis.cpython-39.pyc DELETED
Binary file (4.64 kB)
 
app.py CHANGED
@@ -8,6 +8,8 @@ from plagiarism import plagiarism_check, build_date
  from highlighter import analyze_and_highlight
  from utils import extract_text_from_pdf, len_validator
  import yaml
+ from functools import partial
+
  
  np.set_printoptions(suppress=True)
  
@@ -17,6 +19,10 @@ with open("config.yaml", "r") as file:
  model_list = params["MC_OUTPUT_LABELS"]
  
  
+ analyze_and_highlight_bc = partial(analyze_and_highlight, model_type="bc")
+ analyze_and_highlight_quillbot = partial(analyze_and_highlight, model_type="quillbot")
+
+
  def ai_generated_test(option, input, models):
      if option == "Human vs AI":
          return predict_bc_scores(input), None
@@ -131,15 +137,17 @@ with gr.Blocks() as demo:
  with gr.Row():
      with gr.Column():
          only_ai_btn = gr.Button("AI Check")
-
      with gr.Column():
          only_plagiarism_btn = gr.Button("Source Check")
  
- with gr.Row():
-     quillbot_check = gr.Button("Humanized Text Check")
+     with gr.Column():
+         quillbot_check = gr.Button("Humanized Text Check")
  
  with gr.Row():
-     quillbot_highlighter = gr.Button("Humanized Highlighter")
+     with gr.Column():
+         bc_highlighter_button = gr.Button("Human vs. AI Highlighter")
+     with gr.Column():
+         quillbot_highlighter_button = gr.Button("Humanized Highlighter")
  
  with gr.Row():
      depth_analysis_btn = gr.Button("Detailed Writing Analysis")
@@ -157,16 +165,21 @@ with gr.Blocks() as demo:
      with gr.Column():
          bcLabel = gr.Label(label="Source")
      with gr.Column():
-         mcLabel = gr.Label(label="Creator")
+         bc_highlighter_output = gr.HTML(label="Human vs. AI Highlighter")
+
      # with gr.Column():
      #     mc1on1Label = gr.Label(label="Creator(1 on 1 Approach)")
  
+ with gr.Row():
+     with gr.Column():
+         mcLabel = gr.Label(label="Creator")
+
  with gr.Row():
      with gr.Column():
          QLabel = gr.Label(label="Humanized")
      with gr.Column():
-         highlighter_html = gr.HTML(label='Humanized Highlighter')
-
+         quillbot_highlighter_output = gr.HTML(label="Humanized Highlighter")
+
  with gr.Group():
      with gr.Row():
          month_from = gr.Dropdown(
@@ -280,13 +293,26 @@ with gr.Blocks() as demo:
      api_name="depth_analysis",
  )
  
- quillbot_highlighter.click(
-     fn=analyze_and_highlight,
+ quillbot_highlighter_button.click(
+     fn=analyze_and_highlight_quillbot,
      inputs=[input_text],
-     outputs=[highlighter_html],
-     api_name="quillbot_highlighter",
+     outputs=[quillbot_highlighter_output],
+     api_name="humanized_highlighter",
  )
-
+
+ bc_highlighter_button.click(
+     fn=analyze_and_highlight_bc,
+     inputs=[input_text],
+     outputs=[bc_highlighter_output],
+     api_name="bc_highlighter",
+ )
+
+ # quillbot_highlighter.click(
+ #     fn=analyze_and_highlight,
+ #     inputs=[input_text],
+ #     outputs=[highlighter_html],
+ #     api_name="quillbot_highlighter",
+ # )
  
  date_from = ""
  date_to = ""
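Note: the two new click handlers expose the highlighters as named API endpoints. A minimal sketch of calling them from outside the UI with gradio_client, assuming the app is running locally on Gradio's default port (URL and sample text are illustrative, not part of this commit):

# hypothetical client-side call against the new endpoints
from gradio_client import Client

client = Client("http://localhost:7860")  # assumed local URL of the running app
html = client.predict(
    "Paste any text you want inspected here.",  # maps to input_text
    api_name="/bc_highlighter",                 # or "/humanized_highlighter"
)
print(html[:200])
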
highlighter.py CHANGED
@@ -1,43 +1,52 @@
  from lime.lime_text import LimeTextExplainer
  from nltk.tokenize import sent_tokenize
- from predictors import predict_proba_quillbot
+ from predictors import predict_for_explainanility
  
  
- def explainer(text):
-     class_names = ['negative', 'positive']
-     explainer = LimeTextExplainer(class_names=class_names, split_expression=sent_tokenize)
-     exp = explainer.explain_instance(text, predict_proba_quillbot, num_features=20, num_samples=300)
+ def explainer(text, model_type):
+     def predictor_wrapper(text):
+         return predict_for_explainanility(text=text, model_type=model_type)
+
+     class_names = ["negative", "positive"]
+     explainer_ = LimeTextExplainer(
+         class_names=class_names, split_expression=sent_tokenize
+     )
      sentences = [sent for sent in sent_tokenize(text)]
+     num_sentences = len(sentences)
+     exp = explainer_.explain_instance(
+         text, predictor_wrapper, num_features=num_sentences, num_samples=500
+     )
      weights_mapping = exp.as_map()[1]
      sentences_weights = {sentence: 0 for sentence in sentences}
      for idx, weight in weights_mapping:
          if 0 <= idx < len(sentences):
              sentences_weights[sentences[idx]] = weight
-     print(sentences_weights)
-     return sentences_weights
+     print(sentences_weights, model_type)
+     return sentences_weights, exp
  
  
- def analyze_and_highlight(text):
+ def analyze_and_highlight(text, model_type):
      highlighted_text = ""
-     sentences_weights = explainer(text)
+     sentences_weights, _ = explainer(text, model_type)
      min_weight = min(sentences_weights.values())
      max_weight = max(sentences_weights.values())
  
      for sentence, weight in sentences_weights.items():
          normalized_weight = (weight - min_weight) / (max_weight - min_weight)
          if weight >= 0:
-             color = f'rgba(255, {255 * (1 - normalized_weight)}, {255 * (1 - normalized_weight)}, 1)'
+             color = f"rgba(255, {255 * (1 - normalized_weight)}, {255 * (1 - normalized_weight)}, 1)"
          else:
-             color = f'rgba({255 * normalized_weight}, 255, {255 * normalized_weight}, 1)'
+             color = (
+                 f"rgba({255 * normalized_weight}, 255, {255 * normalized_weight}, 1)"
+             )
  
          sentence = sentence.strip()
          if not sentence:
              continue
-
-         highlighted_sentence = f'<span style="background-color: {color}; color: black;">{sentence}</span> '
-         highlighted_text += highlighted_sentence
-
-     return highlighted_text
-
  
+         highlighted_sentence = (
+             f'<span style="background-color: {color}; color: black;">{sentence}</span> '
+         )
+         highlighted_text += highlighted_sentence
  
+     return highlighted_text
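For reference, a minimal standalone sketch of the refactored highlighter, mirroring the functools.partial binding used in app.py; it assumes the repo's predictors module is importable, its models load on the configured device, and NLTK's punkt data is installed (sample text and output path are illustrative, not part of this commit):

# hypothetical direct use of analyze_and_highlight outside Gradio
from functools import partial

from highlighter import analyze_and_highlight

analyze_and_highlight_bc = partial(analyze_and_highlight, model_type="bc")
analyze_and_highlight_quillbot = partial(analyze_and_highlight, model_type="quillbot")

sample = (
    "This paragraph was drafted by a person. "
    "This sentence, however, could have come from a language model."
)

# each call returns an HTML string of per-sentence <span> tags colored by LIME weight
html_bc = analyze_and_highlight_bc(sample)
html_quillbot = analyze_and_highlight_quillbot(sample)

with open("highlighted.html", "w") as f:  # illustrative output path
    f.write(html_bc)
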
predictors.py CHANGED
@@ -50,9 +50,16 @@ tokenizers_1on1 = {}
  models_1on1 = {}
  for model_name, model in zip(mc_label_map, text_1on1_models):
      tokenizers_1on1[model_name] = AutoTokenizer.from_pretrained(model)
-     models_1on1[model_name] = (
-         AutoModelForSequenceClassification.from_pretrained(model).to(device)
-     )
+     models_1on1[model_name] = AutoModelForSequenceClassification.from_pretrained(
+         model
+     ).to(device)
+
+ # proxy models for explainability
+ mini_model_name = "polygraf-ai/bc-model-bert-mini"
+ bc_tokenizer_mini = AutoTokenizer.from_pretrained(mini_model_name)
+ bc_model_mini = AutoModelForSequenceClassification.from_pretrained(mini_model_name).to(
+     device
+ )
  
  
  def split_text_allow_complete_sentences_nltk(
@@ -153,10 +160,30 @@ def predict_quillbot(text):
      return q_score
  
  
- def predict_proba_quillbot(text):
+ def predict_for_explainanility(text, model_type=None):
+     if model_type == "quillbot":
+         cleaning = False
+         max_length = 256
+         model = quillbot_model
+         tokenizer = quillbot_tokenizer
+     elif model_type == "bc":
+         cleaning = True
+         max_length = 512
+         model = bc_model_mini
+         tokenizer = bc_tokenizer_mini
+     else:
+         raise ValueError("Invalid model type")
      with torch.no_grad():
-         tokenized_text = quillbot_tokenizer(text, return_tensors="pt", padding=True).to(device)
-         outputs = quillbot_model(**tokenized_text)
+         if cleaning:
+             text = [remove_special_characters(t) for t in text]
+         tokenized_text = tokenizer(
+             text,
+             return_tensors="pt",
+             padding="max_length",
+             truncation=True,
+             max_length=max_length,
+         ).to(device)
+         outputs = model(**tokenized_text)
      tensor_logits = outputs[0]
      probas = F.softmax(tensor_logits).detach().cpu().numpy()
      return probas
@@ -196,9 +223,7 @@ def predict_mc_scores(input):
      bc_scores = []
      mc_scores = []
  
-     samples_len_bc = len(
-         split_text_allow_complete_sentences_nltk(input, type_det="bc")
-     )
+     samples_len_bc = len(split_text_allow_complete_sentences_nltk(input, type_det="bc"))
      segments_bc = split_text_allow_complete_sentences_nltk(input, type_det="bc")
      for i in range(samples_len_bc):
          cleaned_text_bc = remove_special_characters(segments_bc[i])
@@ -209,9 +234,7 @@ def predict_mc_scores(input):
      bc_score_list = average_bc_scores.tolist()
      bc_score = {"AI": bc_score_list[1], "HUMAN": bc_score_list[0]}
      segments_mc = split_text_allow_complete_sentences_nltk(input, type_det="mc")
-     samples_len_mc = len(
-         split_text_allow_complete_sentences_nltk(input, type_det="mc")
-     )
+     samples_len_mc = len(split_text_allow_complete_sentences_nltk(input, type_det="mc"))
      for i in range(samples_len_mc):
          cleaned_text_mc = remove_special_characters(segments_mc[i])
          mc_score = predict_mc(text_mc_model, text_mc_tokenizer, cleaned_text_mc)
@@ -234,9 +257,7 @@ def predict_mc_scores(input):
  
  def predict_bc_scores(input):
      bc_scores = []
-     samples_len_bc = len(
-         split_text_allow_complete_sentences_nltk(input, type_det="bc")
-     )
+     samples_len_bc = len(split_text_allow_complete_sentences_nltk(input, type_det="bc"))
      segments_bc = split_text_allow_complete_sentences_nltk(input, type_det="bc")
      for i in range(samples_len_bc):
          cleaned_text_bc = remove_special_characters(segments_bc[i])
@@ -274,9 +295,7 @@ def predict_1on1_combined(input):
  
  
  def predict_1on1_single(input, model):
-     predictions = predict_1on1(
-         models_1on1[model], tokenizers_1on1[model], input
-     )[1]
+     predictions = predict_1on1(models_1on1[model], tokenizers_1on1[model], input)[1]
      return predictions
  
  
@@ -288,9 +307,7 @@ def predict_1on1_scores(input, models):
      print(f"Models to Test: {models}")
      # BC SCORE
      bc_scores = []
-     samples_len_bc = len(
-         split_text_allow_complete_sentences_nltk(input, type_det="bc")
-     )
+     samples_len_bc = len(split_text_allow_complete_sentences_nltk(input, type_det="bc"))
      segments_bc = split_text_allow_complete_sentences_nltk(input, type_det="bc")
      for i in range(samples_len_bc):
          cleaned_text_bc = remove_special_characters(segments_bc[i])
@@ -305,17 +322,13 @@ def predict_1on1_scores(input, models):
      if len(models) > 1:
          print("Starting MC")
          mc_scores = []
-         segments_mc = split_text_allow_complete_sentences_nltk(
-             input, type_det="mc"
-         )
+         segments_mc = split_text_allow_complete_sentences_nltk(input, type_det="mc")
          samples_len_mc = len(
             split_text_allow_complete_sentences_nltk(input, type_det="mc")
          )
          for i in range(samples_len_mc):
              cleaned_text_mc = remove_special_characters(segments_mc[i])
-             mc_score = predict_mc(
-                 text_mc_model, text_mc_tokenizer, cleaned_text_mc
-             )
+             mc_score = predict_mc(text_mc_model, text_mc_tokenizer, cleaned_text_mc)
              mc_scores.append(mc_score)
          mc_scores_array = np.array(mc_scores)
          average_mc_scores = np.mean(mc_scores_array, axis=0)
@@ -325,9 +338,7 @@ def predict_1on1_scores(input, models):
              mc_score[label.upper()] = score
  
          mc_score = {
-             key: mc_score[key.upper()]
-             for key in models
-             if key.upper() in mc_score
+             key: mc_score[key.upper()] for key in models if key.upper() in mc_score
          }
          total = sum(mc_score.values())
          # Normalize each value by dividing it by the total
@@ -342,9 +353,7 @@ def predict_1on1_scores(input, models):
      elif len(models) == 1:
          print("Starting 1on1")
          mc_scores = []
-         segments_mc = split_text_allow_complete_sentences_nltk(
-             input, type_det="mc"
-         )
+         segments_mc = split_text_allow_complete_sentences_nltk(input, type_det="mc")
          samples_len_mc = len(
              split_text_allow_complete_sentences_nltk(input, type_det="mc")
          )
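One behavioral detail worth noting: LIME's explain_instance calls its prediction function with a list of perturbed strings, which is why predict_for_explainanility tokenizes a batch and, on the "bc" path, cleans each element before inference. A rough sanity-check sketch, assuming the models above have loaded and that the bc head is binary (human vs. AI); the batch contents are illustrative, not part of this commit:

# hypothetical smoke test for the explainability predictor
import numpy as np

from predictors import predict_for_explainanility

batch = [
    "First perturbed variant of the input text.",
    "Second perturbed variant of the input text.",
]

probas = predict_for_explainanility(text=batch, model_type="bc")
print(probas.shape)                          # expected (2, 2) if the bc head has two labels
print(np.allclose(probas.sum(axis=1), 1.0))  # softmax rows should sum to 1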