Commit 05a80ad · committed by samiNCL
Parent(s): 3173d64

Update code
Files changed:
- .DS_Store +0 -0
- Full_Codes.py +53 -0
- Gradio.pdf +0 -0
- app.py +1 -1
.DS_Store
CHANGED
Binary files a/.DS_Store and b/.DS_Store differ
Full_Codes.py
ADDED
@@ -0,0 +1,53 @@
import pandas as pd
import spacy
import gradio as gr
import csv
from nrclex import NRCLex
from transformers import pipeline
from rake_nltk import Rake

# Initialize objects
emotion_pipeline = pipeline('sentiment-analysis', model='nlptown/bert-base-multilingual-uncased-sentiment')
nlp = spacy.load('en_core_web_sm')
rake = Rake()

def process_csv(file):
    reader = csv.DictReader(file)
    emotions = []
    sentiments = []
    entities = []
    keywords = []
    for row in reader:
        text = row['Content']  # Replace 'Content' with the correct column name
        nrc_obj = NRCLex(text)
        emotion_scores = nrc_obj.affect_frequencies
        emotions.append(emotion_scores)
        sentiment = analyze_emotion(text)
        sentiments.append(sentiment)
        entities.append(analyze_entities(text))
        keywords.append(extract_keywords(text))  # Extract keywords for each text

    fieldnames = reader.fieldnames + list(emotions[0].keys()) + ['sentiment', 'entities', 'keywords']
    output = []
    for row, emotion_scores, sentiment, entity, keyword in zip(reader, emotions, sentiments, entities, keywords):
        row.update(emotion_scores)  # Update the row dictionary with emotion scores
        row.update({'sentiment': sentiment, 'entities': entity, 'keywords': keyword})  # Update the row dictionary with sentiment, entities and keywords
        output.append({field: row.get(field, '') for field in fieldnames})  # Write row with matching fields or empty values
    return pd.DataFrame(output).to_csv(index=False)

def analyze_emotion(text):
    result = emotion_pipeline(text)[0]
    sentiment = result['label']
    return sentiment

def analyze_entities(text):
    doc = nlp(text)
    entities = [(ent.text, ent.label_) for ent in doc.ents]
    return entities

def extract_keywords(text):
    rake.extract_keywords_from_text(text)
    return rake.get_ranked_phrases()  # Extract keywords from text

iface = gr.Interface(fn=process_csv, inputs=gr.inputs.File(type='csv'), outputs=gr.outputs.File())
iface.launch()
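Note on the added file (not part of the commit): csv.DictReader is a one-shot iterator, so after the first `for row in reader` loop the second pass (`zip(reader, emotions, ...)`) sees no rows and the returned CSV contains only a header line. A minimal sketch of one way process_csv could be restructured, assuming the imports, helper functions, and 'Content' column shown above:

def process_csv(file):
    # Materialise the rows once so they can be traversed and updated in place;
    # a csv.DictReader cannot be iterated a second time.
    rows = list(csv.DictReader(file))
    for row in rows:
        text = row['Content']                        # assumed column name, as in the commit
        row.update(NRCLex(text).affect_frequencies)  # per-emotion frequencies
        row['sentiment'] = analyze_emotion(text)     # star-rating label from the BERT pipeline
        row['entities'] = analyze_entities(text)     # spaCy named entities as (text, label) pairs
        row['keywords'] = extract_keywords(text)     # RAKE ranked key phrases
    return pd.DataFrame(rows).to_csv(index=False)

Separately, the gr.inputs.File / gr.outputs.File namespaces used at the bottom of the file belong to the pre-3.x Gradio API; on recent Gradio releases the equivalent component is gr.File, so the interface line may need adjusting depending on the installed version.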
Gradio.pdf
ADDED
Binary file (206 kB).
app.py
CHANGED
@@ -7,7 +7,7 @@ import io
 sentiment_pipeline = pipeline('sentiment-analysis')
 
 def process_csv(file):
-    df = pd.read_csv(io.StringIO(file
+    df = pd.read_csv(io.StringIO(file))
     sentiments = []
     for _, row in df.iterrows():
         text = row['Content']
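The one-line change closes the unbalanced parentheses, but it still assumes `file` reaches the function as a plain string of CSV text. Depending on how the Gradio file input is configured, the callback may instead receive raw bytes or a temporary-file object, so a more defensive loader (a hypothetical helper, not part of the commit) might look like:

import io
import pandas as pd

def load_uploaded_csv(file):
    # Normalise the common shapes a Gradio file upload can arrive in.
    if isinstance(file, (bytes, bytearray)):
        return pd.read_csv(io.BytesIO(file))    # raw bytes payload
    if isinstance(file, str):
        return pd.read_csv(io.StringIO(file))   # CSV contents passed as text
    return pd.read_csv(file.name)               # tempfile-like object exposing a path

Inside process_csv the call would then become `df = load_uploaded_csv(file)`.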