Spaces:
Sleeping
Sleeping
Rename random_charactor_generator.py to sentiment_analysis.py
Browse files- random_charactor_generator.py +0 -75
- sentiment_analysis.py +44 -0
random_charactor_generator.py
DELETED
@@ -1,75 +0,0 @@
|
|
1 |
-
import requests
|
2 |
-
|
3 |
-
class RandomCharatorGeneratorTool:
    """Tool that fetches one random fake person from the randomuser.me open API.

    NOTE: the class name preserves the original (misspelled) identifier so any
    existing registrations/callers keep working.
    """

    name = "random_character"
    description = "This tool fetches a random character from the 'https://randomuser.me/api/' open API."

    inputs = ["text"]  # input is accepted but unused; kept for tool-interface compatibility

    outputs = ["json"]

    def __call__(self, inputs: str):
        """Fetch one random user and return its fields as {"character": {...}}.

        Raises:
            requests.RequestException: on network failure, timeout, or HTTP
                error status.
        """
        API_URL = "https://randomuser.me/api/"

        # Bug fix: the original request had no timeout, so a stalled server
        # would hang the tool forever; also fail fast on non-2xx responses
        # instead of raising an opaque JSON/KeyError later.
        response = requests.get(API_URL, timeout=10)
        response.raise_for_status()
        data = response.json()['results'][0]

        # Extract the relevant character information
        character = {
            "gender": data['gender'],
            "name": {
                "title": data['name']['title'],
                "first": data['name']['first'],
                "last": data['name']['last']
            },
            "location": {
                "street": {
                    "number": data['location']['street']['number'],
                    "name": data['location']['street']['name']
                },
                "city": data['location']['city'],
                "state": data['location']['state'],
                "country": data['location']['country'],
                "postcode": data['location']['postcode'],
                "coordinates": {
                    "latitude": data['location']['coordinates']['latitude'],
                    "longitude": data['location']['coordinates']['longitude']
                },
                "timezone": {
                    "offset": data['location']['timezone']['offset'],
                    "description": data['location']['timezone']['description']
                }
            },
            "email": data['email'],
            "login": {
                "uuid": data['login']['uuid'],
                "username": data['login']['username'],
                "password": data['login']['password'],
                "salt": data['login']['salt'],
                "md5": data['login']['md5'],
                "sha1": data['login']['sha1'],
                "sha256": data['login']['sha256']
            },
            "dob": {
                "date": data['dob']['date'],
                "age": data['dob']['age']
            },
            "registered": {
                "date": data['registered']['date'],
                "age": data['registered']['age']
            },
            "phone": data['phone'],
            "cell": data['cell'],
            "id": {
                "name": data['id']['name'],
                "value": data['id']['value']
            },
            "picture": {
                "large": data['picture']['large'],
                "medium": data['picture']['medium'],
                "thumbnail": data['picture']['thumbnail']
            },
            "nat": data['nat']
        }

        return {"character": character}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
sentiment_analysis.py
ADDED
@@ -0,0 +1,44 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import requests
|
2 |
+
|
3 |
+
class SentimentAnalysisTool:
    """Tool that runs a Hugging Face text-classification pipeline on its input.

    Inference is delegated to the module-level ``predict()`` helper; the
    result is a list of (label, score) pairs.
    """

    name = "sentiment_analysis"
    description = "This tool analyses the sentiment of a given text input."

    inputs = ["text"]  # single free-text input (the original comment saying "empty list" was wrong)

    outputs = ["json"]

    # Candidate sentiment models; only model_id_7 is actually used by predict().
    model_id_1 = "nlptown/bert-base-multilingual-uncased-sentiment"
    model_id_2 = "microsoft/deberta-xlarge-mnli"
    model_id_3 = "distilbert-base-uncased-finetuned-sst-2-english"
    model_id_4 = "lordtt13/emo-mobilebert"
    model_id_5 = "juliensimon/reviews-sentiment-analysis"
    model_id_6 = "sbcBI/sentiment_analysis_model"
    # NOTE(review): the "models/" prefix looks wrong for a Hub model id — the
    # model appears to be published as "oliverguhr/german-sentiment-bert";
    # confirm before changing, since this string is loaded at runtime.
    model_id_7 = "models/oliverguhr/german-sentiment-bert"

    def __call__(self, inputs: str):
        """Classify *inputs* and return the parsed (label, score) predictions.

        Bug fix: the original did ``return predict(str)``, passing the builtin
        ``str`` *type object* instead of the actual input text, so the user's
        text was never analysed.
        """
        return predict(inputs)
21 |
+
|
22 |
+
|
23 |
+
|
24 |
+
def parse_output(output_json):
    """Flatten the pipeline result into a list of (label, score) tuples.

    ``output_json`` is the raw classifier output: a list whose first element
    is a list of ``{'label': ..., 'score': ...}`` dicts, one per class.
    """
    return [(entry['label'], entry['score']) for entry in output_json[0]]
|
31 |
+
|
32 |
+
def get_prediction(model_id):
    """Build and return a text-classification pipeline for *model_id*.

    Bug fix: the original constructed the classifier but never returned it,
    so every caller received ``None`` and crashed on invocation.
    """
    # return_all_scores=True keeps the original behavior (a score for every
    # label); note newer transformers deprecates it in favor of top_k=None.
    classifier = pipeline("text-classification", model=model_id, return_all_scores=True)
    return classifier
34 |
+
|
35 |
+
def predict(review):
    """Classify *review* text and return [(label, score), ...] for all labels."""
    # Bug fix: the original referenced the bare name `model_id_7`, which is a
    # class attribute of SentimentAnalysisTool, not a module-level variable,
    # so this line raised NameError on every call.
    classifier = get_prediction(SentimentAnalysisTool.model_id_7)
    prediction = classifier(review)
    print(prediction)
    return parse_output(prediction)
|
40 |
+
|
41 |
+
|
42 |
+
|
43 |
+
|
44 |
+
|