EC2 Default User committed
Commit: d19676a
Parent: bb9fa24

Files changed:
- Dockerfile +23 -0
- app.py +78 -0
- flagged/samplefile.txt +1 -0
- requirements.txt +11 -0
- test_reviews.csv +0 -0
Dockerfile
ADDED
@@ -0,0 +1,23 @@
+# pull python base image
+FROM python:3.10
+
+COPY requirements.txt requirements.txt
+
+# update pip
+RUN pip install --upgrade pip
+
+# install dependencies
+RUN pip install -r requirements.txt
+
+RUN useradd -m -u 1000 myuser
+
+USER myuser
+
+# copy application files
+COPY --chown=myuser . .
+
+# expose port for application
+EXPOSE 8001
+
+# start fastapi application
+CMD ["python", "app.py"]
app.py
ADDED
@@ -0,0 +1,78 @@
+from fastapi import FastAPI, Request, Response
+
+import gradio
+from transformers import pipeline
+
+from sklearn.metrics import f1_score, precision_score, recall_score
+import prometheus_client as prom
+
+
+app = FastAPI()
+
+username = "yrajm1997"
+repo_name = "finetuned-sentiment-model"
+repo_path = username + '/' + repo_name
+sentiment_model = pipeline(model=repo_path)
+
+import pandas as pd
+
+test_data = pd.read_csv("test_reviews.csv")
+
+f1_metric = prom.Gauge('sentiment_f1_score', 'F1 score for random 100 test samples')
+precision_metric = prom.Gauge('sentiment_precision_score', 'Precision score for random 100 test samples')
+recall_metric = prom.Gauge('sentiment_recall_score', 'Recall score for random 100 test samples')
+
+
+# Function for response generation
+def predict_sentiment(text):
+    result = sentiment_model(text)
+    if result[0]['label'].endswith('0'):
+        return 'Negative'
+    else:
+        return 'Positive'
+
+# Function for updating metrics
+def update_metrics():
+    test = test_data.sample(100)
+    test_text = test['Text'].values
+    test_pred = sentiment_model(list(test_text))
+    pred_labels = [int(pred['label'].split("_")[1]) for pred in test_pred]
+    f1 = f1_score(test['labels'], pred_labels).round(3)
+    precision = precision_score(test['labels'], pred_labels).round(3)
+    recall = recall_score(test['labels'], pred_labels).round(3)
+
+    f1_metric.set(f1)
+    precision_metric.set(precision)
+    recall_metric.set(recall)
+
+
+@app.get("/metrics")
+async def get_metrics():
+    update_metrics()
+    return Response(media_type="text/plain", content=prom.generate_latest())
+
+
+# Input from user
+in_prompt = gradio.components.Textbox(lines=10, placeholder=None, label='Enter review text')
+
+# Output response
+out_response = gradio.components.Textbox(type="text", label='Sentiment')
+
+# Gradio interface to generate UI link
+title = "Sentiment Classification"
+description = "Analyse sentiment of the given review"
+
+iface = gradio.Interface(fn=predict_sentiment,
+                         inputs=[in_prompt],
+                         outputs=[out_response],
+                         title=title,
+                         description=description)
+
+app = gradio.mount_gradio_app(app, iface, path="/")
+
+#iface.launch(server_name = "0.0.0.0", server_port = 8001) # Ref. for parameters: https://www.gradio.app/docs/interface
+
+if __name__ == "__main__":
+    # Use this for debugging purposes only
+    import uvicorn
+    uvicorn.run(app, host="0.0.0.0", port=8001)
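Once the app is running (locally via `python app.py`, or in the container, which exposes port 8001), the /metrics endpoint defined above can be checked from a separate Python session. A minimal sketch, assuming the service is reachable at localhost:8001 and that the `requests` package is available (it is not listed in requirements.txt):

import requests

# Scrape the custom /metrics endpoint defined in app.py.
# Assumption: the app is running and port 8001 is reachable,
# e.g. after `docker run -p 8001:8001 <image>`.
resp = requests.get("http://localhost:8001/metrics")
resp.raise_for_status()

# The body is Prometheus exposition text; the three gauges set in
# update_metrics() should appear, e.g. sentiment_f1_score.
print(resp.text)

A Prometheus server pointed at this URL would recompute the scores on a fresh 100-sample draw at every scrape, since get_metrics() calls update_metrics() before returning.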
flagged/samplefile.txt
ADDED
@@ -0,0 +1 @@
+Sample file
requirements.txt
ADDED
@@ -0,0 +1,11 @@
+torch
+gradio
+transformers
+
+uvicorn
+fastapi
+python-multipart
+pydantic
+
+scikit-learn
+prometheus-client
test_reviews.csv
ADDED
The diff for this file is too large to render.
See raw diff
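The test set itself is not rendered here, but update_metrics() in app.py reads it with pandas and relies on a 'Text' column and a 'labels' column, where the integer labels are compared against the digit parsed from the model's LABEL_0 / LABEL_1 outputs (so 0 corresponds to negative and 1 to positive). A minimal sketch of that expected layout, using made-up rows purely for illustration and a hypothetical file name:

import pandas as pd

# Hypothetical rows for illustration only; the real data is the
# test_reviews.csv added in this commit. Column names follow their
# use in update_metrics().
sample = pd.DataFrame({
    "Text": [
        "Terrible quality, stopped working after a day.",
        "Works exactly as described, very happy with it.",
    ],
    "labels": [0, 1],  # 0 = negative, 1 = positive (matches LABEL_0 / LABEL_1)
})
sample.to_csv("test_reviews_example.csv", index=False)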