Spaces:
Runtime error
Update app.py
app.py CHANGED
@@ -60,19 +60,38 @@ def predict(query):
     return real
 
 def findRealProb(text):
-
+    data = request.json.get('text')
+    if data is None or len(data) == 0:
+        return jsonify({'error': 'No query provided'}), 400
+    if len(data) > 9400:
+        return jsonify({'error': 'Cannot analyze more than 9400 characters!'}), 400
+    if count_words(data) > 1500:
+        return jsonify({'error': 'Cannot analyze more than 1500 words'}), 400
+
+    # return {"Real": predict(data)}
+    chunksOfText = (chunks_of_900(data))
     results = []
     for chunk in chunksOfText:
-
-
-
+        outputv1 = predict(chunk, model, tokenizer)
+        # outputv2 = predict(chunk, modelv2, tokenizerv2)
+        label = "CG"
+        if(outputv1>=0.5):
+            label = "OR"
+        results.append({"Text":chunk, "Label": label, "Confidence":(outputv1)})
     ans = 0
     cnt = 0
-    for
+    for result in results:
+        length = len(result["Text"])
+        confidence = result["Confidence"]
         cnt += length
-    ans = ans +
+        ans = ans + (confidence)*(length)
     realProb = ans/cnt
-
+    label = "AI"
+    if realProb > 0.7:
+        label = "Human"
+    elif realProb > 0.3 and realProb < 0.7:
+        label = "Might be AI"
+    return jsonify({"Real": realProb, "Fake": 1-realProb, "Label": label, "Chunks": results})
 
 demo = gr.Interface(
     fn=findRealProb,
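
For context, below is a minimal, self-contained sketch of the same chunk-score-and-average logic written as a plain Gradio callback. It is not the committed code: the committed findRealProb mixes Flask's request/jsonify into a Gradio fn, and the helpers count_words, chunks_of_900 and predict(chunk, model, tokenizer), as well as the rest of the gr.Interface call, live elsewhere in app.py and are not shown in this hunk, so the stubs and the inputs/outputs arguments below are assumptions for illustration only. The thresholds (9400 characters, 1500 words, 900-character chunks, the 0.3/0.7 label cutoffs) are taken from the diff.

# Sketch only: stand-ins for helpers defined elsewhere in app.py (not shown in this hunk).
import gradio as gr

def count_words(text):                           # assumed helper: naive word count
    return len(text.split())

def chunks_of_900(text, size=900):               # assumed helper: fixed-size character chunks
    return [text[i:i + size] for i in range(0, len(text), size)]

def predict(chunk, model=None, tokenizer=None):  # assumed stub; the real model call is in app.py
    return 0.5

def findRealProb(text):
    # Same validation as the committed handler, returned as plain dicts instead of Flask responses.
    if not text:
        return {"error": "No query provided"}
    if len(text) > 9400:
        return {"error": "Cannot analyze more than 9400 characters!"}
    if count_words(text) > 1500:
        return {"error": "Cannot analyze more than 1500 words"}

    # Score each 900-character chunk and label it as in the diff: "OR" if score >= 0.5, else "CG".
    results = []
    for chunk in chunks_of_900(text):
        score = predict(chunk)
        results.append({"Text": chunk,
                        "Label": "OR" if score >= 0.5 else "CG",
                        "Confidence": score})

    # Length-weighted average of the per-chunk confidences (the ans/cnt accumulation in the diff).
    total_len = sum(len(r["Text"]) for r in results)
    realProb = sum(r["Confidence"] * len(r["Text"]) for r in results) / total_len

    label = "AI"
    if realProb > 0.7:
        label = "Human"
    elif 0.3 < realProb < 0.7:
        label = "Might be AI"
    return {"Real": realProb, "Fake": 1 - realProb, "Label": label, "Chunks": results}

demo = gr.Interface(fn=findRealProb, inputs="text", outputs="json")  # inputs/outputs are assumed; the hunk truncates this call

if __name__ == "__main__":
    demo.launch()

The length-weighted average gives longer chunks proportionally more influence on the final score, matching the cnt/ans accumulation in the committed version.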