Spaces:
Sleeping
Tirath5504
committed on
Commit • 2169a6e
1 Parent(s): e267973
Update app.py
app.py
CHANGED
@@ -1,8 +1,13 @@
 import google.generativeai as genai
 import gradio as gr
+from deep_translator import (GoogleTranslator)
+from transformers import pipeline
+
 
 api_key = "AIzaSyCmmus8HFPLXskU170_FR4j2CQeWZBKGMY"
 
+spam_detector = pipeline("text-classification", model="madhurjindal/autonlp-Gibberish-Detector-492513457")
+
 model = genai.GenerativeModel('gemini-pro')
 genai.configure(api_key = api_key)
 
@@ -11,14 +16,33 @@ def get_response(feedback):
     try:
         #response = model.generate_content(f"State whether given response is positive, negative or neutral in one word: {feedback}")
         score = model.generate_content(f"Give me the polarity score between -1 to 1 for: {feedback}")
-        issue = model.generate_content(f'Issues should be from ["Misconduct" , "Negligence" , "Discrimination" , "Corruption" , "Violation of Rights" , "Inefficiency" , "Unprofessional Conduct", "Response Time" , "Use of Firearms" , "Property Damage"]. Give me the issue faced by the feedback giver in less than four words: {feedback}')
+        issue = model.generate_content(f'Issues should be from ["Tech-Savvy Staff" , "Co-operative Staff" , "Well-Maintained Premises" , "Responsive Staff", "Misconduct" , "Negligence" , "Discrimination" , "Corruption" , "Violation of Rights" , "Inefficiency" , "Unprofessional Conduct", "Response Time" , "Use of Firearms" , "Property Damage"]. Give me the issue faced by the feedback giver in less than four words: {feedback}')
         return [score.text, issue.text]
     except Exception as e:
         return [-2, "Offensive"]
 
+def translate(input_text):
+    source_lang = detect(input_text)
+    translated = GoogleTranslator(source=source_lang, target='en').translate(text=input_text)
+    return translated
+
+def spam_detection(input_text):
+    return spam_detector(input_text)[0]['label'] == 'clean'
+
+def pipeline(input_text):
+
+    input_text = translate(input_text)
+
+    if spam_detection(input_text):
+        return get_response(input_text)
+
+    else:
+        return "Spam" , ""
+
 iface = gr.Interface(
-    fn =
+    fn = pipeline,
     inputs = ["text"],
     outputs = ["text", "text"]
 )
+
 iface.launch(share=True)
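Note on the added code: as committed, translate() calls detect() without importing it anywhere, and def pipeline(...) rebinds the pipeline name imported from transformers. The sketch below shows the same flow with those two gaps closed; the langdetect import, the moderate_and_score name, and reading the Gemini key from a GOOGLE_API_KEY environment variable are illustrative assumptions, not part of this commit.

import os

import google.generativeai as genai
import gradio as gr
from deep_translator import GoogleTranslator
from langdetect import detect  # assumption: `detect` is taken from langdetect
from transformers import pipeline

# Assumption: key comes from the environment instead of the literal checked into the commit.
genai.configure(api_key=os.environ["GOOGLE_API_KEY"])
model = genai.GenerativeModel("gemini-pro")

# Gibberish/spam filter used by the commit; label "clean" marks usable feedback.
spam_detector = pipeline(
    "text-classification",
    model="madhurjindal/autonlp-Gibberish-Detector-492513457",
)

ISSUES = (
    '["Tech-Savvy Staff", "Co-operative Staff", "Well-Maintained Premises", "Responsive Staff", '
    '"Misconduct", "Negligence", "Discrimination", "Corruption", "Violation of Rights", '
    '"Inefficiency", "Unprofessional Conduct", "Response Time", "Use of Firearms", "Property Damage"]'
)

def get_response(feedback):
    # Ask Gemini for a polarity score and an issue category; fall back on any error.
    try:
        score = model.generate_content(
            f"Give me the polarity score between -1 to 1 for: {feedback}"
        )
        issue = model.generate_content(
            f"Issues should be from {ISSUES}. "
            f"Give me the issue faced by the feedback giver in less than four words: {feedback}"
        )
        return score.text, issue.text
    except Exception:
        return -2, "Offensive"

def translate(input_text):
    # Detect the source language and translate the feedback to English.
    source_lang = detect(input_text)
    return GoogleTranslator(source=source_lang, target="en").translate(text=input_text)

def spam_detection(input_text):
    # The classifier returns e.g. [{"label": "clean", "score": 0.97}].
    return spam_detector(input_text)[0]["label"] == "clean"

def moderate_and_score(input_text):
    # Hypothetical rename of the committed pipeline() wrapper, so it no longer
    # shadows transformers.pipeline.
    input_text = translate(input_text)
    if spam_detection(input_text):
        return get_response(input_text)
    return "Spam", ""

iface = gr.Interface(fn=moderate_and_score, inputs=["text"], outputs=["text", "text"])
iface.launch(share=True)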
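On the committed version itself: the module-level spam_detector = pipeline(...) call still succeeds because it executes before the later def pipeline(...) rebinds the name, but the first request that reaches translate() fails with a NameError for detect. Note also that the Gemini API key is committed in plain text and therefore visible in this diff; the sketch above assumes it is supplied via an environment variable instead.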