NoaiGPT committed on
Commit
f0b63fa
·
1 Parent(s): 13ee64a
Files changed (1) hide show
  1. app.py +113 -48
app.py CHANGED
@@ -1,8 +1,59 @@
1
- # # # import dependencies
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2
  # # import gradio as gr
3
  # # from openai import OpenAI
4
  # # import os
5
  # # import re
 
6
 
7
  # # # define the openai key
8
  # # api_key = "sk-proj-UCoZZMs4MyfyHwXdHjT8T3BlbkFJjYkSZyPfIPNqXfXwoekm"
@@ -10,44 +61,62 @@
10
  # # # make an instance of the openai client
11
  # # client = OpenAI(api_key = api_key)
12
 
13
-
14
  # # # finetuned model instance
15
  # # finetuned_model = "ft:gpt-3.5-turbo-0125:personal::9qGC8cwZ"
16
 
 
 
 
 
 
 
 
17
  # # # function to humanize the text
18
  # # def humanize_text(AI_text):
19
- # # """Humanizes the provided AI text using the fine-tuned model."""
20
- # # response = completion = client.chat.completions.create(
21
- # # model=finetuned_model,
22
- # # temperature = 0.86,
23
- # # messages=[
24
- # # {"role": "system", "content": """
25
- # # You are a text humanizer.
26
- # # You humanize AI generated text.
27
- # # The text must appear like humanly written.
28
- # # THE INPUT AND THE OUTPUT TEXT SHOULD HAVE THE SAME FORMAT.
29
- # # THE HEADINGS AND THE BULLETS IN THE INPUT SHOULD REMAIN IN PLACE"""},
30
- # # {"role": "user", "content": f"THE LANGUAGE OF THE INPUT AND THE OUTPUT MUST BE SAME. THE SENTENCES SHOULD NOT BE SHORT LENGTH - THEY SHOULD BE SAME AS IN THE INPUT. ALSO THE PARAGRAPHS SHOULD NOT BE SHORT EITHER - PARAGRAPHS MUST HAVE THE SAME LENGTH"},
31
- # # {"role": "user", "content": f"Humanize the text. Keep the output format i.e. the bullets and the headings as it is and dont use the list of words that are not permissible. \nTEXT: {AI_text}"}
32
- # # ]
33
- # # )
34
-
35
- # # humanized_text = response.choices[0].message.content.strip()
36
-
37
- # # return humanized_text
 
 
 
 
 
 
 
 
 
 
 
38
 
 
39
 
40
  # # # Gradio interface definition
41
  # # interface = gr.Interface(
42
- # # fn=humanize_text,
43
- # # inputs="textbox",
44
- # # outputs="textbox",
45
- # # title="AI Text Humanizer: NoaiGPT.com Demo",
46
- # # description="Enter AI-generated text and get a human-written version.",
47
  # # )
48
 
49
  # # # Launch the Gradio app
50
- # # interface.launch(debug = True)
51
 
52
  # import gradio as gr
53
  # from openai import OpenAI
@@ -56,10 +125,10 @@
56
  # from transformers import pipeline
57
 
58
  # # define the openai key
59
- # api_key = "sk-proj-UCoZZMs4MyfyHwXdHjT8T3BlbkFJjYkSZyPfIPNqXfXwoekm"
60
 
61
  # # make an instance of the openai client
62
- # client = OpenAI(api_key = api_key)
63
 
64
  # # finetuned model instance
65
  # finetuned_model = "ft:gpt-3.5-turbo-0125:personal::9qGC8cwZ"
@@ -71,17 +140,25 @@
71
  # def get_prediction(text):
72
  # return pipe(text)[0]
73
 
 
 
 
 
 
 
 
 
74
  # # function to humanize the text
75
  # def humanize_text(AI_text):
76
  # """Humanizes the provided AI text using the fine-tuned model."""
77
  # humanized_text = AI_text
78
  # attempts = 0
79
- # max_attempts = 5
80
 
81
  # while attempts < max_attempts:
82
  # response = client.chat.completions.create(
83
  # model=finetuned_model,
84
- # temperature=0.85,
85
  # messages=[
86
  # {"role": "system", "content": """
87
  # You are a text humanizer.
@@ -99,12 +176,14 @@
99
  # # Check if the humanized text is still detected as AI
100
  # prediction = get_prediction(humanized_text)
101
 
102
- # if prediction['label'] != 'AI':
103
  # break
104
 
105
  # attempts += 1
106
 
107
- # return humanized_text
 
 
108
 
109
  # # Gradio interface definition
110
  # interface = gr.Interface(
@@ -122,7 +201,6 @@ import gradio as gr
122
  from openai import OpenAI
123
  import os
124
  import re
125
- from transformers import pipeline
126
 
127
  # define the openai key
128
  api_key = "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"
@@ -133,13 +211,6 @@ client = OpenAI(api_key=api_key)
133
  # finetuned model instance
134
  finetuned_model = "ft:gpt-3.5-turbo-0125:personal::9qGC8cwZ"
135
 
136
- # Load the AI detection model
137
- pipe = pipeline("text-classification", model="tommyliphys/ai-detector-distilbert")
138
-
139
- # Define the function to get predictions
140
- def get_prediction(text):
141
- return pipe(text)[0]
142
-
143
  # Function to clean the text
144
  def clean_text(text):
145
  # Remove double asterisks
@@ -173,12 +244,6 @@ def humanize_text(AI_text):
173
 
174
  humanized_text = response.choices[0].message.content.strip()
175
 
176
- # Check if the humanized text is still detected as AI
177
- prediction = get_prediction(humanized_text)
178
-
179
- if prediction['label'] == 'human' and prediction['score'] > 0.9:
180
- break
181
-
182
  attempts += 1
183
 
184
  # Clean the humanized text
@@ -195,4 +260,4 @@ interface = gr.Interface(
195
  )
196
 
197
  # Launch the Gradio app
198
- interface.launch(debug=True)
 
1
+ # # # # import dependencies
2
+ # # # import gradio as gr
3
+ # # # from openai import OpenAI
4
+ # # # import os
5
+ # # # import re
6
+
7
+ # # # # define the openai key
8
+ # # # api_key = "sk-proj-UCoZZMs4MyfyHwXdHjT8T3BlbkFJjYkSZyPfIPNqXfXwoekm"
9
+
10
+ # # # # make an instance of the openai client
11
+ # # # client = OpenAI(api_key = api_key)
12
+
13
+
14
+ # # # # finetuned model instance
15
+ # # # finetuned_model = "ft:gpt-3.5-turbo-0125:personal::9qGC8cwZ"
16
+
17
+ # # # # function to humanize the text
18
+ # # # def humanize_text(AI_text):
19
+ # # # """Humanizes the provided AI text using the fine-tuned model."""
20
+ # # # response = completion = client.chat.completions.create(
21
+ # # # model=finetuned_model,
22
+ # # # temperature = 0.86,
23
+ # # # messages=[
24
+ # # # {"role": "system", "content": """
25
+ # # # You are a text humanizer.
26
+ # # # You humanize AI generated text.
27
+ # # # The text must appear like humanly written.
28
+ # # # THE INPUT AND THE OUTPUT TEXT SHOULD HAVE THE SAME FORMAT.
29
+ # # # THE HEADINGS AND THE BULLETS IN THE INPUT SHOULD REMAIN IN PLACE"""},
30
+ # # # {"role": "user", "content": f"THE LANGUAGE OF THE INPUT AND THE OUTPUT MUST BE SAME. THE SENTENCES SHOULD NOT BE SHORT LENGTH - THEY SHOULD BE SAME AS IN THE INPUT. ALSO THE PARAGRAPHS SHOULD NOT BE SHORT EITHER - PARAGRAPHS MUST HAVE THE SAME LENGTH"},
31
+ # # # {"role": "user", "content": f"Humanize the text. Keep the output format i.e. the bullets and the headings as it is and dont use the list of words that are not permissible. \nTEXT: {AI_text}"}
32
+ # # # ]
33
+ # # # )
34
+
35
+ # # # humanized_text = response.choices[0].message.content.strip()
36
+
37
+ # # # return humanized_text
38
+
39
+
40
+ # # # # Gradio interface definition
41
+ # # # interface = gr.Interface(
42
+ # # # fn=humanize_text,
43
+ # # # inputs="textbox",
44
+ # # # outputs="textbox",
45
+ # # # title="AI Text Humanizer: NoaiGPT.com Demo",
46
+ # # # description="Enter AI-generated text and get a human-written version.",
47
+ # # # )
48
+
49
+ # # # # Launch the Gradio app
50
+ # # # interface.launch(debug = True)
51
+
52
  # # import gradio as gr
53
  # # from openai import OpenAI
54
  # # import os
55
  # # import re
56
+ # # from transformers import pipeline
57
 
58
  # # # define the openai key
59
  # # api_key = "sk-proj-UCoZZMs4MyfyHwXdHjT8T3BlbkFJjYkSZyPfIPNqXfXwoekm"
 
61
  # # # make an instance of the openai client
62
  # # client = OpenAI(api_key = api_key)
63
 
 
64
  # # # finetuned model instance
65
  # # finetuned_model = "ft:gpt-3.5-turbo-0125:personal::9qGC8cwZ"
66
 
67
+ # # # Load the AI detection model
68
+ # # pipe = pipeline("text-classification", model="tommyliphys/ai-detector-distilbert")
69
+
70
+ # # # Define the function to get predictions
71
+ # # def get_prediction(text):
72
+ # # return pipe(text)[0]
73
+
74
  # # # function to humanize the text
75
  # # def humanize_text(AI_text):
76
+ # # """Humanizes the provided AI text using the fine-tuned model."""
77
+ # # humanized_text = AI_text
78
+ # # attempts = 0
79
+ # # max_attempts = 5
80
+
81
+ # # while attempts < max_attempts:
82
+ # # response = client.chat.completions.create(
83
+ # # model=finetuned_model,
84
+ # # temperature=0.85,
85
+ # # messages=[
86
+ # # {"role": "system", "content": """
87
+ # # You are a text humanizer.
88
+ # # You humanize AI generated text.
89
+ # # The text must appear like humanly written.
90
+ # # THE INPUT AND THE OUTPUT TEXT SHOULD HAVE THE SAME FORMAT.
91
+ # # THE HEADINGS AND THE BULLETS IN THE INPUT SHOULD REMAIN IN PLACE"""},
92
+ # # {"role": "user", "content": "THE LANGUAGE OF THE INPUT AND THE OUTPUT MUST BE SAME. THE SENTENCES SHOULD NOT BE SHORT LENGTH - THEY SHOULD BE SAME AS IN THE INPUT. ALSO THE PARAGRAPHS SHOULD NOT BE SHORT EITHER - PARAGRAPHS MUST HAVE THE SAME LENGTH"},
93
+ # # {"role": "user", "content": f"Humanize the text. Keep the output format i.e. the bullets and the headings as it is and dont use the list of words that are not permissible. \nTEXT: {humanized_text}"}
94
+ # # ]
95
+ # # )
96
+
97
+ # # humanized_text = response.choices[0].message.content.strip()
98
+
99
+ # # # Check if the humanized text is still detected as AI
100
+ # # prediction = get_prediction(humanized_text)
101
+
102
+ # # if prediction['label'] != 'AI':
103
+ # # break
104
+
105
+ # # attempts += 1
106
 
107
+ # # return humanized_text
108
 
109
  # # # Gradio interface definition
110
  # # interface = gr.Interface(
111
+ # # fn=humanize_text,
112
+ # # inputs="textbox",
113
+ # # outputs="textbox",
114
+ # # title="AI Text Humanizer: NoaiGPT.com Demo",
115
+ # # description="Enter AI-generated text and get a human-written version.",
116
  # # )
117
 
118
  # # # Launch the Gradio app
119
+ # # interface.launch(debug=True)
120
 
121
  # import gradio as gr
122
  # from openai import OpenAI
 
125
  # from transformers import pipeline
126
 
127
  # # define the openai key
128
+ # api_key = "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"
129
 
130
  # # make an instance of the openai client
131
+ # client = OpenAI(api_key=api_key)
132
 
133
  # # finetuned model instance
134
  # finetuned_model = "ft:gpt-3.5-turbo-0125:personal::9qGC8cwZ"
 
140
  # def get_prediction(text):
141
  # return pipe(text)[0]
142
 
143
+ # # Function to clean the text
144
+ # def clean_text(text):
145
+ # # Remove double asterisks
146
+ # text = re.sub(r'\*\*', '', text)
147
+ # # Remove double hash symbols
148
+ # text = re.sub(r'##', '', text)
149
+ # return text
150
+
151
  # # function to humanize the text
152
  # def humanize_text(AI_text):
153
  # """Humanizes the provided AI text using the fine-tuned model."""
154
  # humanized_text = AI_text
155
  # attempts = 0
156
+ # max_attempts = 10
157
 
158
  # while attempts < max_attempts:
159
  # response = client.chat.completions.create(
160
  # model=finetuned_model,
161
+ # temperature=0.90,
162
  # messages=[
163
  # {"role": "system", "content": """
164
  # You are a text humanizer.
 
176
  # # Check if the humanized text is still detected as AI
177
  # prediction = get_prediction(humanized_text)
178
 
179
+ # if prediction['label'] == 'human' and prediction['score'] > 0.9:
180
  # break
181
 
182
  # attempts += 1
183
 
184
+ # # Clean the humanized text
185
+ # cleaned_text = clean_text(humanized_text)
186
+ # return cleaned_text
187
 
188
  # # Gradio interface definition
189
  # interface = gr.Interface(
 
201
  from openai import OpenAI
202
  import os
203
  import re
 
204
 
205
  # define the openai key
206
  api_key = "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"
 
211
  # finetuned model instance
212
  finetuned_model = "ft:gpt-3.5-turbo-0125:personal::9qGC8cwZ"
213
 
 
 
 
 
 
 
 
214
  # Function to clean the text
215
  def clean_text(text):
216
  # Remove double asterisks
 
244
 
245
  humanized_text = response.choices[0].message.content.strip()
246
 
 
 
 
 
 
 
247
  attempts += 1
248
 
249
  # Clean the humanized text
 
260
  )
261
 
262
  # Launch the Gradio app
263
+ interface.launch(debug=True)