NoaiGPT commited on
Commit
359a1a4
·
1 Parent(s): 0626f5a
Files changed (1) hide show
  1. app.py +81 -2
app.py CHANGED
@@ -118,6 +118,85 @@
118
  # # Launch the Gradio app
119
  # interface.launch(debug=True)
120
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
121
  import gradio as gr
122
  from openai import OpenAI
123
  import os
@@ -134,7 +213,7 @@ client = OpenAI(api_key=api_key)
134
  finetuned_model = "ft:gpt-3.5-turbo-0125:personal::9qGC8cwZ"
135
 
136
  # Load the AI detection model
137
- pipe = pipeline("text-classification", model="tommyliphys/ai-detector-distilbert")
138
 
139
  # Define the function to get predictions
140
  def get_prediction(text):
@@ -195,4 +274,4 @@ interface = gr.Interface(
195
  )
196
 
197
  # Launch the Gradio app
198
- interface.launch(debug=True)
 
118
  # # Launch the Gradio app
119
  # interface.launch(debug=True)
120
 
121
+ # import gradio as gr
122
+ # from openai import OpenAI
123
+ # import os
124
+ # import re
125
+ # from transformers import pipeline
126
+
127
+ # # define the openai key
128
+ # api_key = "REDACTED"  # NOTE(review): a live-looking OpenAI secret key (sk-proj-…) was committed here — revoke it immediately and load the key from an environment variable (e.g. os.environ["OPENAI_API_KEY"]) instead of hardcoding it
129
+
130
+ # # make an instance of the openai client
131
+ # client = OpenAI(api_key=api_key)
132
+
133
+ # # finetuned model instance
134
+ # finetuned_model = "ft:gpt-3.5-turbo-0125:personal::9qGC8cwZ"
135
+
136
+ # # Load the AI detection model
137
+ # pipe = pipeline("text-classification", model="tommyliphys/ai-detector-distilbert")
138
+
139
+ # # Define the function to get predictions
140
+ # def get_prediction(text):
141
+ # return pipe(text)[0]
142
+
143
+ # # Function to clean the text
144
+ # def clean_text(text):
145
+ # # Remove double asterisks
146
+ # text = re.sub(r'\*\*', '', text)
147
+ # # Remove double hash symbols
148
+ # text = re.sub(r'##', '', text)
149
+ # return text
150
+
151
+ # # function to humanize the text
152
+ # def humanize_text(AI_text):
153
+ # """Humanizes the provided AI text using the fine-tuned model."""
154
+ # humanized_text = AI_text
155
+ # attempts = 0
156
+ # max_attempts = 10
157
+
158
+ # while attempts < max_attempts:
159
+ # response = client.chat.completions.create(
160
+ # model=finetuned_model,
161
+ # temperature=0.90,
162
+ # messages=[
163
+ # {"role": "system", "content": """
164
+ # You are a text humanizer.
165
+ # You humanize AI generated text.
166
+ # The text must appear like humanly written.
167
+ # THE INPUT AND THE OUTPUT TEXT SHOULD HAVE THE SAME FORMAT.
168
+ # THE HEADINGS AND THE BULLETS IN THE INPUT SHOULD REMAIN IN PLACE"""},
169
+ # {"role": "user", "content": "THE LANGUAGE OF THE INPUT AND THE OUTPUT MUST BE SAME. THE SENTENCES SHOULD NOT BE SHORT LENGTH - THEY SHOULD BE SAME AS IN THE INPUT. ALSO THE PARAGRAPHS SHOULD NOT BE SHORT EITHER - PARAGRAPHS MUST HAVE THE SAME LENGTH"},
170
+ # {"role": "user", "content": f"Humanize the text. Keep the output format i.e. the bullets and the headings as it is and dont use the list of words that are not permissible. \nTEXT: {humanized_text}"}
171
+ # ]
172
+ # )
173
+
174
+ # humanized_text = response.choices[0].message.content.strip()
175
+
176
+ # # Check if the humanized text is still detected as AI
177
+ # prediction = get_prediction(humanized_text)
178
+
179
+ # if prediction['label'] == 'human' and prediction['score'] > 0.9:
180
+ # break
181
+
182
+ # attempts += 1
183
+
184
+ # # Clean the humanized text
185
+ # cleaned_text = clean_text(humanized_text)
186
+ # return cleaned_text
187
+
188
+ # # Gradio interface definition
189
+ # interface = gr.Interface(
190
+ # fn=humanize_text,
191
+ # inputs="textbox",
192
+ # outputs="textbox",
193
+ # title="AI Text Humanizer: NoaiGPT.com Demo",
194
+ # description="Enter AI-generated text and get a human-written version.",
195
+ # )
196
+
197
+ # # Launch the Gradio app
198
+ # interface.launch(debug=True)
199
+
200
  import gradio as gr
201
  from openai import OpenAI
202
  import os
 
213
  finetuned_model = "ft:gpt-3.5-turbo-0125:personal::9qGC8cwZ"
214
 
215
  # Load the AI detection model
216
+ pipe = pipeline("text-classification", model="andreas122001/roberta-mixed-detector")
217
 
218
  # Define the function to get predictions
219
  def get_prediction(text):
 
274
  )
275
 
276
  # Launch the Gradio app
277
+ interface.launch(debug=True)