Spaces: asd (Runtime error)
app.py CHANGED
Removed side (old app.py, 129 lines: a commented-out first draft at lines 1-50, then the active implementation at lines 52-129):

@@ -1,8 +1,59 @@
-# # import dependencies
(old lines 1-50, the commented-out first draft, each gain one extra leading "#" in the new file)

@@ -10,44 +61,62 @@
(continuation of the first draft; the removed lines here, old 13, 19-37, 42-46 and 50, are a blank line plus the draft's humanize_text body, its gr.Interface arguments and "# interface.launch(debug = True)", all truncated in this view; the same content reappears, re-commented, on the added side)

@@ -59,7 +128,7 @@ from transformers import pipeline
-client = OpenAI(api_key = api_key)

@@ -71,6 +140,7 @@ pipe = pipeline("text-classification", model="tommyliphys/ai-detector-distilbert")
(no removals; the new file adds a comment above clean_text)

@@ -78,7 +148,6 @@ def clean_text(text):
(a stray blank line after clean_text is dropped)

@@ -111,10 +180,10 @@ def humanize_text(AI_text):
-    humanize_text=clean_text(clean_text)
(this line called clean_text on the clean_text function object itself and bound the result over the humanize_text name without returning it; the new file replaces it)

@@ -126,4 +195,4 @@ interface = gr.Interface(
-interface.launch(debug=True)
(the launch call is re-added, unchanged in content, at the end of the new file)
Added side (new app.py, 198 lines). New lines 1-50 keep the earliest draft, re-commented with one extra leading "#". That draft imported gradio/OpenAI/os/re, hardcoded the same api_key, built the client with OpenAI(api_key = api_key), and defined a single-shot humanize_text(): one call to the fine-tuned model at temperature = 0.86 ("response = completion = client.chat.completions.create(...)", same system and user prompts, returning response.choices[0].message.content.strip()). It wired the same gr.Interface and interface.launch(debug = True), with no AI-detector check and no clean_text(). New line 51 is blank.
New lines 52-119 hold a second commented-out draft, this one with the AI-detector retry loop; new line 120 is blank:

# import gradio as gr
# from openai import OpenAI
# import os
# import re
# from transformers import pipeline

# # define the openai key
# api_key = "sk-proj-UCoZZMs4MyfyHwXdHjT8T3BlbkFJjYkSZyPfIPNqXfXwoekm"

# # make an instance of the openai client
# client = OpenAI(api_key = api_key)

# # finetuned model instance
# finetuned_model = "ft:gpt-3.5-turbo-0125:personal::9qGC8cwZ"

# # Load the AI detection model
# pipe = pipeline("text-classification", model="tommyliphys/ai-detector-distilbert")

# # Define the function to get predictions
# def get_prediction(text):
#     return pipe(text)[0]

# # function to humanize the text
# def humanize_text(AI_text):
#     """Humanizes the provided AI text using the fine-tuned model."""
#     humanized_text = AI_text
#     attempts = 0
#     max_attempts = 5

#     while attempts < max_attempts:
#         response = client.chat.completions.create(
#             model=finetuned_model,
#             temperature=0.85,
#             messages=[
#                 {"role": "system", "content": """
#                 You are a text humanizer.
#                 You humanize AI generated text.
#                 The text must appear like humanly written.
#                 THE INPUT AND THE OUTPUT TEXT SHOULD HAVE THE SAME FORMAT.
#                 THE HEADINGS AND THE BULLETS IN THE INPUT SHOULD REMAIN IN PLACE"""},
#                 {"role": "user", "content": "THE LANGUAGE OF THE INPUT AND THE OUTPUT MUST BE SAME. THE SENTENCES SHOULD NOT BE SHORT LENGTH - THEY SHOULD BE SAME AS IN THE INPUT. ALSO THE PARAGRAPHS SHOULD NOT BE SHORT EITHER - PARAGRAPHS MUST HAVE THE SAME LENGTH"},
#                 {"role": "user", "content": f"Humanize the text. Keep the output format i.e. the bullets and the headings as it is and dont use the list of words that are not permissible. \nTEXT: {humanized_text}"}
#             ]
#         )

#         humanized_text = response.choices[0].message.content.strip()

#         # Check if the humanized text is still detected as AI
#         prediction = get_prediction(humanized_text)

#         if prediction['label'] != 'AI':
#             break

#         attempts += 1

#     return humanized_text

# # Gradio interface definition
# interface = gr.Interface(
#     fn=humanize_text,
#     inputs="textbox",
#     outputs="textbox",
#     title="AI Text Humanizer: NoaiGPT.com Demo",
#     description="Enter AI-generated text and get a human-written version.",
# )

# # Launch the Gradio app
# interface.launch(debug=True)
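Both this commented-out draft and the active implementation that follows gate their retry loop on the detector's first prediction. As a reference for how that gate behaves, here is a minimal, self-contained sketch reusing the exact pipeline call from app.py; the transformers text-classification pipeline returns a list of {"label": ..., "score": ...} dicts, but the precise label strings emitted by tommyliphys/ai-detector-distilbert are an assumption inferred from the != 'AI' check, not something this diff confirms.

# Sketch only: how the retry gate reads the detector's output.
# The "AI" label string is assumed from the check in app.py.
from transformers import pipeline

pipe = pipeline("text-classification", model="tommyliphys/ai-detector-distilbert")

def get_prediction(text):
    # pipe(text) returns one dict per input: [{"label": ..., "score": ...}]
    return pipe(text)[0]

prediction = get_prediction("A short sample paragraph to classify.")
print(prediction["label"], prediction["score"])
# The loop in humanize_text keeps retrying while the label is still "AI":
should_retry = prediction["label"] == "AI"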
New lines 121-198 are the active implementation; lines outside the hunk windows are unchanged context and are not shown in this view.

New 121-134 (the client call is the only change here):

import gradio as gr
from openai import OpenAI
(123-127 unchanged, not shown)
api_key = "sk-proj-UCoZZMs4MyfyHwXdHjT8T3BlbkFJjYkSZyPfIPNqXfXwoekm"

# make an instance of the openai client
client = OpenAI(api_key=api_key)

# finetuned model instance
finetuned_model = "ft:gpt-3.5-turbo-0125:personal::9qGC8cwZ"

New 140-153 (a comment is added above clean_text):

def get_prediction(text):
    return pipe(text)[0]

# Function to clean the text
def clean_text(text):
    # Remove double asterisks
    text = re.sub(r'\*\*', '', text)
    (147 unchanged, not shown)
    text = re.sub(r'##', '', text)
    return text

# function to humanize the text
def humanize_text(AI_text):
    """Humanizes the provided AI text using the fine-tuned model."""

New 180-189 (the buggy cleanup is replaced; humanize_text now cleans its result and returns it):

            break

        attempts += 1

    # Clean the humanized text
    cleaned_text = clean_text(humanized_text)
    return cleaned_text

# Gradio interface definition
interface = gr.Interface(

New 195-198 (the launch call closes the file):

)

# Launch the Gradio app
interface.launch(debug=True)
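A final note on configuration: the active code hardcodes the OpenAI key as a string literal even though it imports os and never uses it. Below is a minimal sketch of the environment-variable pattern that import presumably anticipated; the OPENAI_API_KEY name is an assumption, not a secret this commit defines.

# Sketch only: read the key from the environment instead of hardcoding it.
# "OPENAI_API_KEY" is an assumed secret name, not defined anywhere in this commit.
import os
from openai import OpenAI

api_key = os.environ.get("OPENAI_API_KEY")
if not api_key:
    raise RuntimeError("OPENAI_API_KEY is not set for this Space")

client = OpenAI(api_key=api_key)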