# NOTE: scraped Hugging Face Spaces page header removed — the Space was
# displaying "Runtime error" at the time this file was captured.
# # import dependencies
# import gradio as gr
# from openai import OpenAI
# import os
# import re
# # define the openai key
# api_key = "sk-proj-<REDACTED - leaked key removed; rotate it>"
# # make an instance of the openai client
# client = OpenAI(api_key = api_key)
# # finetuned model instance
# finetuned_model = "ft:gpt-3.5-turbo-0125:personal::9qGC8cwZ"
# # function to humanize the text
# def humanize_text(AI_text):
#     """Humanizes the provided AI text using the fine-tuned model."""
#     response = completion = client.chat.completions.create(
#         model=finetuned_model,
#         temperature = 0.85,
#         messages=[
#             {"role": "system", "content": """
#             You are a text humanizer.
#             You humanize AI generated text.
#             The text must appear like humanly written.
#             THE INPUT AND THE OUTPUT TEXT SHOULD HAVE THE SAME FORMAT.
#             THE HEADINGS AND THE BULLETS IN THE INPUT SHOULD REMAIN IN PLACE"""},
#             {"role": "user", "content": f"THE LANGUAGE OF THE INPUT AND THE OUTPUT MUST BE SAME. THE SENTENCES SHOULD NOT BE SHORT LENGTH - THEY SHOULD BE SAME AS IN THE INPUT. ALSO THE PARAGRAPHS SHOULD NOT BE SHORT EITHER - PARAGRAPHS MUST HAVE THE SAME LENGTH"},
#             {"role": "user", "content": f"Humanize the text. Keep the output format i.e. the bullets and the headings as it is and dont use the list of words that are not permissible. \nTEXT: {AI_text}"}
#         ]
#     )
#     humanized_text = response.choices[0].message.content.strip()
#     return humanized_text
# # Gradio interface definition
# interface = gr.Interface(
#     fn=humanize_text,
#     inputs="textbox",
#     outputs="textbox",
#     title="AI Text Humanizer: NoaiGPT.com Demo",
#     description="Enter AI-generated text and get a human-written version.",
# )
# # Launch the Gradio app
# interface.launch(debug = True)
# import dependencies
import os

import gradio as gr
import torch
from openai import OpenAI
from transformers import (
    AutoModelForSequenceClassification,
    AutoTokenizer,
    TFAutoModelForSequenceClassification,
)
# define the openai key
# SECURITY: an OpenAI API key was previously hard-coded here and committed to
# this public file — that key is compromised and MUST be rotated. Read the key
# from the environment instead of embedding it in source.
api_key = os.getenv("OPENAI_API_KEY", "")
# make an instance of the openai client
client = OpenAI(api_key=api_key)
# finetuned model instance (fine-tuned gpt-3.5-turbo used for humanizing)
finetuned_model = "ft:gpt-3.5-turbo-0125:personal::9qGC8cwZ"
# Load the AI-text detector (DistilBERT classifier) directly from the Hub
model_name = "tommyliphys/ai-detector-distilbert"
tokenizer = AutoTokenizer.from_pretrained(model_name)
# The checkpoint ships TensorFlow weights; convert them into a PyTorch model
# once here. (Previously the TF model was ALSO loaded into `tf_model`, which
# was never used anywhere in this file — that doubled the download/init cost.)
model = AutoModelForSequenceClassification.from_pretrained(model_name, from_tf=True)
# Inference only: disable dropout / training-mode layers.
model.eval()
# Define the function to get predictions
def get_prediction(text):
    """Classify `text` as AI- or human-written with the DistilBERT detector.

    Returns a dict with `label` ("AI" or "Human") and `score` (the model's
    probability that the text is AI-generated).
    """
    encoded = tokenizer(
        text,
        return_tensors="pt",
        truncation=True,
        padding=True,
        max_length=512,
    )
    with torch.no_grad():
        logits = model(**encoded).logits
    probs = torch.nn.functional.softmax(logits, dim=-1)
    # NOTE(review): index 1 is assumed to be the "AI" class — confirm against
    # the model's id2label config.
    p_ai = probs[0][1].item()
    label = "AI" if p_ai > 0.5 else "Human"
    return {"label": label, "score": p_ai}
# function to humanize the text
def humanize_text(AI_text):
    """Humanizes the provided AI text using the fine-tuned model.

    Re-runs the rewrite (up to 3 passes) until the detector no longer flags
    the output as confidently AI-generated.
    """
    system_prompt = """
            You are a text humanizer.
            You humanize AI generated text.
            The text must appear like humanly written.
            THE INPUT AND THE OUTPUT TEXT SHOULD HAVE THE SAME FORMAT.
            THE HEADINGS AND THE BULLETS IN THE INPUT SHOULD REMAIN IN PLACE"""
    format_rules = "THE LANGUAGE OF THE INPUT AND THE OUTPUT MUST BE SAME. THE SENTENCES SHOULD NOT BE SHORT LENGTH - THEY SHOULD BE SAME AS IN THE INPUT. ALSO THE PARAGRAPHS SHOULD NOT BE SHORT EITHER - PARAGRAPHS MUST HAVE THE SAME LENGTH"
    humanized_text = AI_text
    for _attempt in range(3):
        completion = client.chat.completions.create(
            model=finetuned_model,
            temperature=0.85,
            messages=[
                {"role": "system", "content": system_prompt},
                {"role": "user", "content": format_rules},
                {"role": "user", "content": f"Humanize the text. Keep the output format i.e. the bullets and the headings as it is and dont use the list of words that are not permissible. \nTEXT: {humanized_text}"},
            ],
        )
        humanized_text = completion.choices[0].message.content.strip()
        # Stop as soon as the detector no longer confidently labels it as AI.
        verdict = get_prediction(humanized_text)
        if verdict["label"] != "AI" or verdict["score"] < 0.9:
            break
    return humanized_text
# Gradio interface definition: a single textbox in, a single textbox out.
demo_title = "AI Text Humanizer: NoaiGPT.com Demo"
demo_description = "Enter AI-generated text and get a human-written version."
interface = gr.Interface(
    fn=humanize_text,
    inputs="textbox",
    outputs="textbox",
    title=demo_title,
    description=demo_description,
)
# Launch the Gradio app (debug=True surfaces tracebacks in the console)
interface.launch(debug=True)