runaksh committed on
Commit
9b3729e
·
1 Parent(s): 646817e

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +55 -13
app.py CHANGED
@@ -1,33 +1,75 @@
 
1
  import gradio
2
- from transformers import pipeline
 
 
 
 
 
3
 
4
  username = "runaksh"
5
  repo_name = "finetuned-sentiment-model"
6
  repo_path = username+ '/' + repo_name
7
- sentiment_model = pipeline(model= repo_path)
 
 
 
8
 
9
  # Function for response generation
10
  def predict_sentiment(text):
11
- result = sentiment_model(text)
12
  if result[0]['label'].endswith('0'):
13
  return 'Negative'
14
  else:
15
  return 'Positive'
16
 
 
 
 
 
 
17
  # Input from user
18
- in_prompt = gradio.components.Textbox(lines=10, placeholder=None, label='Enter review text')
19
 
20
  # Output response
21
- out_response = gradio.components.Textbox(type="text", label='Sentiment')
22
 
23
  # Gradio interface to generate UI link
24
- title = "Sentiment Classification"
25
- description = "Analyse sentiment of the given review"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
26
 
27
- iface = gradio.Interface(fn = predict_sentiment,
28
- inputs = [in_prompt],
29
- outputs = [out_response],
30
- title = title,
31
- description = description)
32
 
33
- iface.launch(debug = True)#, server_name = "0.0.0.0", server_port = 8001) # Ref. for parameters: https://www.gradio.app/docs/interface
 
1
+ import os
2
  import gradio
3
+ from PIL import Image
4
+ from timeit import default_timer as timer
5
+ from tensorflow import keras
6
+ import torch
7
+ from transformers import AutoTokenizer, AutoModelForSequenceClassification, pipeline
8
+ import numpy as np
9
 
10
# HuggingFace Hub coordinates for the fine-tuned sentiment model.
username = "runaksh"
repo_name = "finetuned-sentiment-model"
repo_path = username+ '/' + repo_name
# Model 1: sentiment-analysis pipeline loaded directly from the Hub repo.
# NOTE(review): downloads weights at import time — app startup blocks on this.
model_1 = pipeline(model= repo_path)

# Model 2: symptom -> disease classifier. Model and tokenizer are loaded
# separately so `predict` can assemble a text-classification pipeline from them.
model_2 = AutoModelForSequenceClassification.from_pretrained("runaksh/Symptom-2-disease_distilBERT")
tokenizer_2 = AutoTokenizer.from_pretrained("runaksh/Symptom-2-disease_distilBERT")
17
 
18
# Function for response generation
def predict_sentiment(text):
    """Classify a review as 'Positive' or 'Negative'.

    Only the first prediction from the pipeline is inspected; the model's
    label names end in '0' for negative and (presumably) '1' for positive.
    """
    label = model_1(text)[0]['label']
    return 'Negative' if label.endswith('0') else 'Positive'
25
 
26
def predict(sample, validate=True):
    """Predict the disease for a symptom description.

    Parameters
    ----------
    sample : str
        Symptom text entered by the user.
    validate : bool, optional
        Unused; retained for backward compatibility with existing callers.

    Returns
    -------
    str
        Predicted disease label from the fine-tuned distilBERT classifier.
    """
    # Build the classifier once and cache it on the function object:
    # constructing a transformers pipeline per call re-assembles the
    # tokenizer/model plumbing and makes every request needlessly slow.
    classifier = getattr(predict, "_classifier", None)
    if classifier is None:
        classifier = pipeline("text-classification", model=model_2, tokenizer=tokenizer_2)
        predict._classifier = classifier
    pred = classifier(sample)[0]['label']
    return pred
30
+
31
# ---- Sentiment classification UI ----

# Input from user
in_prompt_1 = gradio.components.Textbox(lines=10, placeholder=None, label='Enter review text')

# Output response
out_response_1 = gradio.components.Textbox(type="text", label='Sentiment')

# Gradio interface to generate UI link
title_1 = "Sentiment Classification"
description_1 = "Analyse sentiment of the given review"

iface_1 = gradio.Interface(fn=predict_sentiment,
                           inputs=[in_prompt_1],
                           outputs=[out_response_1],
                           title=title_1,
                           description=description_1)

# ---- Symptom -> disease UI ----
title_2 = "Symptoms and Disease"
description_2 = "Enter the Symptoms to know the disease"

# Input from user
in_prompt_2 = gradio.components.Textbox(lines=2, label='Enter the Symptoms')

# Output response
out_response_2 = gradio.components.Textbox(label='Disease')

# Gradio interface to generate UI link
iface_2 = gradio.Interface(fn=predict,
                           inputs=in_prompt_2,
                           outputs=out_response_2,
                           title=title_2,
                           description=description_2)

# Combine both apps in one page. The previous code called
# `gr.Interface([iface_1, iface_2], ...)`, which failed twice:
#   1. the module is imported as `gradio`, so `gr` raised NameError;
#   2. `Interface`'s first argument is a function, not a list of interfaces.
# `TabbedInterface` is the supported way to host multiple interfaces in a
# single app (it takes no `description` argument, so the per-tab
# descriptions above carry that information instead).
combined_interface = gradio.TabbedInterface(
    [iface_1, iface_2],
    tab_names=[title_1, title_2],
    title="Multiple Models Interface",
)

combined_interface.launch(debug=True)
 
 
 
 
74
 
75
+ #iface.launch(debug = True)#, server_name = "0.0.0.0", server_port = 8001) # Ref. for parameters: https://www.gradio.app/docs/interface