from utils import cosineSim, googleSearch, getSentences, parallel_scrap, matchingScore
import gradio as gr
from urllib.request import urlopen, Request
from googleapiclient.discovery import build
import requests
import httpx
import re
from bs4 import BeautifulSoup
import numpy as np
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import asyncio
from scipy.special import softmax
from evaluate import load
from datetime import date
import nltk
import os

np.set_printoptions(suppress=True)
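
# cosineSim, googleSearch, getSentences, parallel_scrap, and matchingScore
# are project-local helpers imported from utils.py above.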

def plagiarism_check(
    input,
    year_from,
    month_from,
    day_from,
    year_to,
    month_to,
    day_to,
    domains_to_skip,
):
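    """Search each sentence of `input` via Google Custom Search (restricted
    to the given date range), scrape the candidate URLs, score every
    sentence/page pair with matchingScore, and return (token, label) pairs
    for a gr.HighlightedText component."""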
api_key = "AIzaSyCLyCCpOPLZWuptuPAPSg8cUIZhdEMVf6g" | |
api_key = "AIzaSyCS1WQDMl1IMjaXtwSd_2rA195-Yc4psQE" | |
api_key = "AIzaSyCB61O70B8AC3l5Kk3KMoLb6DN37B7nqIk" | |
api_key = "AIzaSyCg1IbevcTAXAPYeYreps6wYWDbU0Kz8tg" | |
# api_key = "AIzaSyBrx_pgb6A64wPFQXSGQRgGtukoxVV_0Fk" | |
cse_id = "851813e81162b4ed4" | |
    sentences = getSentences(input)
    urlCount = {}
    ScoreArray = []
    urlList = []
    date_from = build_date(year_from, month_from, day_from)
    date_to = build_date(year_to, month_to, day_to)
    sort_date = f"date:r:{date_from}:{date_to}"
    # get list of URLs to check
    urlCount, ScoreArray = googleSearch(
        sentences,
        urlCount,
        ScoreArray,
        urlList,
        sort_date,
        domains_to_skip,
        api_key,
        cse_id,
    )
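    # ScoreArray is indexed as ScoreArray[url][sentence].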
print("Number of URLs: ", len(urlCount)) | |
# print("Old Score Array:\n") | |
# print2D(ScoreArray) | |
# Scrape URLs in list | |
formatted_tokens = [] | |
soups = asyncio.run(parallel_scrap(urlList)) | |
print(len(soups)) | |
    print(
        "Successful scraping: "
        + str(len([x for x in soups if x is not None]))
        + " out of "
        + str(len(urlList))
    )
    # Populate matching scores for scraped pages
    for i, soup in enumerate(soups):
        print(f"Analyzing {i+1} of {len(soups)} soups........................")
        if soup:
            page_content = soup.text
            for j, sent in enumerate(sentences):
                score = matchingScore(sent, page_content)
                ScoreArray[i][j] = score
    # ScoreArray = asyncio.run(parallel_analyze_2(soups, sentences, ScoreArray))
    # print("New Score Array:\n")
    # print2D(ScoreArray)
    # Gradio formatting section
    sentencePlag = [False] * len(sentences)
    sentenceToMaxURL = [-1] * len(sentences)
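    # Attribute each sentence to the URL with the highest matching score.
    # A sentence inherits the previous sentence's URL unless another URL
    # beats it by more than a 0.1 margin, which keeps attributions contiguous.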
    for j in range(len(sentences)):
        if j > 0:
            maxScore = ScoreArray[sentenceToMaxURL[j - 1]][j]
            sentenceToMaxURL[j] = sentenceToMaxURL[j - 1]
        else:
            maxScore = -1
        for i in range(len(ScoreArray)):
            margin = (
                0.1
                if (j > 0 and sentenceToMaxURL[j] == sentenceToMaxURL[j - 1])
                else 0
            )
            if ScoreArray[i][j] - maxScore > margin:
                maxScore = ScoreArray[i][j]
                sentenceToMaxURL[j] = i
        if maxScore > 0.5:
            sentencePlag[j] = True
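    # The first sentence never benefits from the stickiness above, so fold
    # it into the second sentence's URL when the score difference is small.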
    if (
        (len(sentences) > 1)
        and (sentenceToMaxURL[1] != sentenceToMaxURL[0])
        and (
            ScoreArray[sentenceToMaxURL[0]][0]
            - ScoreArray[sentenceToMaxURL[1]][0]
            < 0.1
        )
    ):
        sentenceToMaxURL[0] = sentenceToMaxURL[1]
    index = np.unique(sentenceToMaxURL)
    urlMap = {}
    for count, i in enumerate(index):
        urlMap[i] = count + 1
    for i, sent in enumerate(sentences):
        formatted_tokens.append(
            (sent, "[" + str(urlMap[sentenceToMaxURL[i]]) + "]")
        )
    formatted_tokens.append(("\n", None))
    formatted_tokens.append(("\n", None))
    formatted_tokens.append(("\n", None))
    urlScore = {}
    for url in index:
        s = [
            ScoreArray[url][sen]
            for sen in range(len(sentences))
            if sentenceToMaxURL[sen] == url
        ]
        urlScore[url] = sum(s) / len(s)
    for ind in index:
        formatted_tokens.append(
            (
                urlList[ind] + " --- Matching Score: " + str(urlScore[ind]),
                "[" + str(urlMap[ind]) + "]",
            )
        )
        formatted_tokens.append(("\n", None))
    print(f"Formatted Tokens: {formatted_tokens}")
    return formatted_tokens
""" | |
AI DETECTION SECTION | |
""" | |
text_bc_model_path = "polygraf-ai/ai-text-bc-bert-1-4m"
text_bc_tokenizer = AutoTokenizer.from_pretrained(text_bc_model_path)
text_bc_model = AutoModelForSequenceClassification.from_pretrained(text_bc_model_path)

text_mc_model_path = "polygraf-ai/ai-text-mc-v5-lighter-spec"
text_mc_tokenizer = AutoTokenizer.from_pretrained(text_mc_model_path)
text_mc_model = AutoModelForSequenceClassification.from_pretrained(text_mc_model_path)

def remove_special_characters(text):
    cleaned_text = re.sub(r'[^a-zA-Z0-9\s]', '', text)
    return cleaned_text
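
# The binary model's logit order is assumed to be [HUMAN, AI], matching the
# index mapping in predict_bc below.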
def predict_bc(model, tokenizer, text):
    tokens = tokenizer(
        text, padding=True, truncation=True, return_tensors="pt"
    )["input_ids"]
    output = model(tokens)
    output_norm = softmax(output.logits.detach().numpy(), 1)[0]
    print("BC Score: ", output_norm)
    bc_score = {"AI": output_norm[1].item(), "HUMAN": output_norm[0].item()}
    return bc_score

def predict_mc(model, tokenizer, text):
    tokens = tokenizer(
        text, padding=True, truncation=True, return_tensors="pt"
    )["input_ids"]
    output = model(tokens)
    output_norm = softmax(output.logits.detach().numpy(), 1)[0]
    print("MC Score: ", output_norm)
    mc_score = {}
    label_map = ["GPT 3.5", "GPT 4", "CLAUDE", "BARD", "LLAMA 2"]
    for score, label in zip(output_norm, label_map):
        mc_score[label.upper()] = score.item()
    return mc_score
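
# Each multi-class probability is scaled by the overall AI probability
# (1 - P(HUMAN)) so that the per-model scores sum to the AI likelihood.
# Note that `models` (the dropdown selection) is accepted but not yet used
# to filter the results.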
def ai_generated_test(input, models):
    cleaned_text = remove_special_characters(input)
    bc_score = predict_bc(text_bc_model, text_bc_tokenizer, cleaned_text)
    mc_score = predict_mc(text_mc_model, text_mc_tokenizer, cleaned_text)
    sum_prob = 1 - bc_score["HUMAN"]
    for key, value in mc_score.items():
        mc_score[key] = value * sum_prob
    return bc_score, mc_score

# COMBINED
def main(
    input,
    models,
    year_from,
    month_from,
    day_from,
    year_to,
    month_to,
    day_to,
    domains_to_skip,
):
    bc_score, mc_score = ai_generated_test(input, models)
    formatted_tokens = plagiarism_check(
        input,
        year_from,
        month_from,
        day_from,
        year_to,
        month_to,
        day_to,
        domains_to_skip,
    )
    return (
        bc_score,
        mc_score,
        formatted_tokens,
    )

def build_date(year, month, day):
    return f"{year}{months[month]}{day}"

# START OF GRADIO
title = "Copyright Checker"
months = {
    "January": "01",
    "February": "02",
    "March": "03",
    "April": "04",
    "May": "05",
    "June": "06",
    "July": "07",
    "August": "08",
    "September": "09",
    "October": "10",
    "November": "11",
    "December": "12",
}

with gr.Blocks() as demo:
    today = date.today()
    # day/full month name/year, e.g. 15/January/2024
    d1 = today.strftime("%d/%B/%Y")
    d1 = d1.split("/")
    model_list = ["GPT 3.5", "GPT 4", "CLAUDE", "BARD", "LLAMA 2"]
    domain_list = ["com", "org", "net", "int", "edu", "gov", "mil"]
    gr.Markdown(
        """
        # Copyright Checker
        """
    )
    input_text = gr.Textbox(label="Input text", lines=5, placeholder="")
    with gr.Row():
        with gr.Column():
            only_ai_btn = gr.Button("AI Check")
        with gr.Column():
            only_plagiarism_btn = gr.Button("Plagiarism Check")
        with gr.Column():
            submit_btn = gr.Button("Full Check")
    gr.Markdown(
        """
        ## Output
        """
    )
    with gr.Row():
        models = gr.Dropdown(
            model_list,
            value=model_list,
            multiselect=True,
            label="Models to test against",
        )
    with gr.Row():
        with gr.Column():
            bcLabel = gr.Label(label="Source")
        with gr.Column():
            mcLabel = gr.Label(label="Creator")
    with gr.Group():
        with gr.Row():
            month_from = gr.Dropdown(
                choices=list(months.keys()),
                label="From Month",
                value="January",
                interactive=True,
            )
            day_from = gr.Textbox(label="From Day", value="01")
            year_from = gr.Textbox(label="From Year", value="2000")
            # from_date_button = gr.Button("Submit")
        with gr.Row():
            month_to = gr.Dropdown(
                choices=list(months.keys()),
                label="To Month",
                value=d1[1],
                interactive=True,
            )
            day_to = gr.Textbox(label="To Day", value=d1[0])
            year_to = gr.Textbox(label="To Year", value=d1[2])
            # to_date_button = gr.Button("Submit")
    with gr.Row():
        domains_to_skip = gr.Dropdown(
            domain_list,
            multiselect=True,
            label="Domains to Skip",
        )
    with gr.Row():
        with gr.Column():
            sentenceBreakdown = gr.HighlightedText(
                label="Plagiarism Sentence Breakdown",
                combine_adjacent=True,
                color_map={
                    "[1]": "red",
                    "[2]": "orange",
                    "[3]": "yellow",
                    "[4]": "green",
                },
            )
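    # The "[n]" keys match the source tags emitted by plagiarism_check;
    # only the first four sources get a distinct highlight color.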
    submit_btn.click(
        fn=main,
        inputs=[
            input_text,
            models,
            year_from,
            month_from,
            day_from,
            year_to,
            month_to,
            day_to,
            domains_to_skip,
        ],
        outputs=[
            bcLabel,
            mcLabel,
            sentenceBreakdown,
        ],
        api_name="main",
    )
    only_ai_btn.click(
        fn=ai_generated_test,
        inputs=[input_text, models],
        outputs=[
            bcLabel,
            mcLabel,
        ],
        api_name="ai_check",
    )
    only_plagiarism_btn.click(
        fn=plagiarism_check,
        inputs=[
            input_text,
            year_from,
            month_from,
            day_from,
            year_to,
            month_to,
            day_to,
            domains_to_skip,
        ],
        outputs=[
            sentenceBreakdown,
        ],
        api_name="plagiarism_check",
    )
date_from = "" | |
date_to = "" | |
demo.launch() |