import streamlit as st
import zipfile
import os
import requests
from keras.models import load_model
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from sklearn.preprocessing import LabelEncoder
import pickle
import numpy as np
from PIL import Image
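
# Browser-like User-Agent for the download requests; some hosts reject Python's default agent.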
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.3',
}
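
# Download and unzip the trained authorship model on first run.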
zip_file_path = "my_authorship_model_zip.zip"
if not os.path.exists('my_authorship_model'):
    try:
        model_url = 'https://jaifar.net/ADS/my_authorship_model_zip.zip'
        r = requests.get(model_url, headers=headers)
        r.raise_for_status()

        st.write(f"Downloaded model size: {len(r.content)} bytes")

        with open(zip_file_path, "wb") as f:
            f.write(r.content)

        if os.path.exists(zip_file_path):
            st.write("Zip file exists")

            with zipfile.ZipFile(zip_file_path, 'r') as zip_ref:
                zip_ref.extractall('my_authorship_model')

            if os.path.exists('my_authorship_model'):
                st.write("Model folder successfully extracted using zipfile")
                st.write("Listing directory contents:")
                st.write(os.listdir('.'))
            else:
                st.write("Model folder was not extracted successfully using zipfile")
                exit(1)
        else:
            st.write("Zip file does not exist")
            exit(1)
    except Exception as e:
        st.write(f"Failed to download or extract the model: {e}")
        exit(1)
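
# Fetch the fitted tokenizer and label encoder that accompany the model.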
file_urls = {
    'tokenizer.pkl': 'https://jaifar.net/ADS/tokenizer.pkl',
    'label_encoder.pkl': 'https://jaifar.net/ADS/label_encoder.pkl'
}

for filename, url in file_urls.items():
    try:
        r = requests.get(url, headers=headers)
        r.raise_for_status()
        with open(filename, 'wb') as f:
            f.write(r.content)
    except Exception as e:
        st.write(f"Failed to download {filename}: {e}")
        exit(1)
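
# Load the trained model and the pickled preprocessing objects.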
loaded_model = load_model("my_authorship_model")

with open('tokenizer.pkl', 'rb') as handle:
    tokenizer = pickle.load(handle)

with open('label_encoder.pkl', 'rb') as handle:
    label_encoder = pickle.load(handle)

# Sequence length the model expects; inputs are padded or truncated to this many tokens.
max_length = 300
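
# Classify a text and return the top label plus the probability for every author class.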
def predict_author(new_text, model, tokenizer, label_encoder):
    # Tokenize and pad the text to the fixed length used during training.
    sequence = tokenizer.texts_to_sequences([new_text])
    padded_sequence = pad_sequences(sequence, maxlen=max_length, padding='post', truncating='post')
    prediction = model.predict(padded_sequence)

    # Map the highest-scoring class index back to an author label and
    # collect the probability assigned to each author.
    predicted_label = label_encoder.inverse_transform([prediction.argmax()])[0]
    probabilities = prediction[0]
    author_probabilities = {}
    for idx, prob in enumerate(probabilities):
        author = label_encoder.inverse_transform([idx])[0]
        author_probabilities[author] = prob

    return predicted_label, author_probabilities
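
# Streamlit UI: collect text, classify it on demand, and show the ranked probabilities.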
new_text = st.text_area("Input your text here")

press_me_button = st.button("Writer or Robot?")

if press_me_button:
    predicted_author, author_probabilities = predict_author(new_text, loaded_model, tokenizer, label_encoder)
    sorted_probabilities = sorted(author_probabilities.items(), key=lambda x: x[1], reverse=True)

    st.write(f"The text is most likely written by: {predicted_author}")
    st.write("Probabilities for each author are (sorted):")
    for author, prob in sorted_probabilities:
        st.write(f"{author}: {prob * 100:.2f}%")
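
# FAQ section rendered as collapsible expanders.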
st.subheader("Frequently Asked Questions (FAQ)") |
|
|
|
|
|
with st.expander("What is this project about?"): |
|
st.write(""" |
|
This project is part of an MSc in Data Analytics at the University of Portsmouth. |
|
Developed by Jaifar Al Shizawi, it aims to identify whether a text is written by a human or a specific Large Language Model (LLM) like ChatGPT-3, ChatGPT-4, Google Bard, or HuggingChat. |
|
For inquiries, contact [[email protected]](mailto:[email protected]). |
|
Supervised by Dr. Mohamed Bader. |
|
""") |
|
|
|
|
|
with st.expander("Aim and Objectives"): |
|
st.write(""" |
|
The project aims to help staff at the University of Portsmouth distinguish between student-written artifacts and those generated by LLMs. It focuses on text feature extraction, model testing, and implementing a user-friendly dashboard among other objectives. |
|
""") |
|
|
|
|
|
with st.expander("How does the system work?"): |
|
st.write(""" |
|
The system is trained using a CNN model on a dataset of 140,546 paragraphs, varying in length from 10 to 500 words. |
|
It achieves an accuracy of 0.9964 with a validation loss of 0.094. |
|
""") |
|
|
|
|
|
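
# Download and display the model's best-accuracy chart.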
accuracy_image_request = requests.get("https://jaifar.net/ADS/best_accuracy.png", headers=headers)

image_path = "best_accuracy.png"
with open(image_path, "wb") as f:
    f.write(accuracy_image_request.content)

accuracy_image = Image.open(image_path)
st.image(accuracy_image, caption='Best Accuracy', use_column_width=True)
with st.expander("Does the system store my data?"): |
|
st.write("No, the system does not collect or store any user input data.") |
|
|
|
|
|
with st.expander("Can I use this as evidence?"): |
|
st.write(""" |
|
No, this system is a Proof of Concept (POC) and should not be used as evidence against students or similar entities. |
|
""") |
|
|
|
|
|
|
|
|
|
|