Commit f61c84d
1 Parent(s): 24f8e16
Upload 11 files
Files changed:
- Emotion_classify_Data.csv +0 -0
- abcd.py +49 -0
- app.py +48 -0
- count_vectorizer.joblib +3 -0
- emotion_model.joblib +3 -0
- main.py +25 -0
- model1.py +49 -0
- model2.py +46 -0
- model3.py +45 -0
- requirements.txt +0 -0
- tweet_emotions.csv +0 -0
Emotion_classify_Data.csv
ADDED
The diff for this file is too large to render.
abcd.py
ADDED
@@ -0,0 +1,49 @@
+import streamlit as st
+import torch
+from transformers import RobertaForSequenceClassification, RobertaTokenizer
+import pandas as pd
+
+def model1():
+    # Your Model 1 code here
+    st.subheader("Emotion Analysis")
+    model_path = "mode.pth"  # Replace with the directory of your fine-tuned model (saved via save_pretrained)
+    tokenizer = RobertaTokenizer.from_pretrained(model_path)
+    model = RobertaForSequenceClassification.from_pretrained(model_path)
+
+    # Set the model to evaluation mode
+    model.eval()
+
+    # Labels for your specific task
+    labels = ["anger", "fear", "joy"]  # Replace with your actual label names
+
+    # Streamlit app
+    user_input = st.text_area("Enter text for analysis:")
+    if st.button("Analyze"):
+        if user_input:
+            # Tokenize and preprocess the input
+            input_ids = tokenizer.encode(user_input, return_tensors="pt")
+            # Make prediction
+            with torch.no_grad():
+                output = model(input_ids)
+            # Get predicted probabilities
+            probabilities = torch.sigmoid(output.logits)
+
+            # Check if the lengths match before creating the DataFrame
+            if len(labels) == len(probabilities[0]):
+                # Display the probabilities as individual bars
+                df = pd.DataFrame({
+                    "Label": labels,
+                    "Probability": probabilities[0].tolist()
+                })
+
+                st.bar_chart(df.set_index("Label"))
+
+                # Display the emotion labels and scores
+                st.subheader("Emotion Analysis Output:")
+                for i, result in enumerate(sorted(zip(labels, probabilities[0]), key=lambda x: x[1], reverse=True)):
+                    label, score = result
+                    st.write(f"{i + 1}. {label.capitalize()}: {score:.4f}")
+            else:
+                st.error("Error: The length of labels and probabilities does not match.")
+        else:
+            st.warning("Please enter text for analysis.")
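Note on the checkpoint path used above: RobertaTokenizer.from_pretrained and RobertaForSequenceClassification.from_pretrained expect a directory (or Hub model id) containing the config, weights, and tokenizer files, not a single .pth file, so "mode.pth" would need to point at such a directory. A minimal sketch of how that directory could be produced after fine-tuning; the roberta-base starting checkpoint and the ./emotion_roberta output path are illustrative assumptions, not part of this commit:

from transformers import RobertaForSequenceClassification, RobertaTokenizer

# Assumed starting checkpoint and output directory -- adjust to the actual training setup.
base_checkpoint = "roberta-base"
output_dir = "./emotion_roberta"

tokenizer = RobertaTokenizer.from_pretrained(base_checkpoint)
model = RobertaForSequenceClassification.from_pretrained(base_checkpoint, num_labels=3)

# ... fine-tune `model` on the emotion dataset here ...

# Saving both pieces lets abcd.py / model1.py load them back with from_pretrained(output_dir).
model.save_pretrained(output_dir)
tokenizer.save_pretrained(output_dir)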
app.py
ADDED
@@ -0,0 +1,48 @@
+import streamlit as st
+import random
+
+def celsius_to_fahrenheit(celsius):
+    return (celsius * 9/5) + 32
+
+def main():
+    st.title("Smart Home Control")
+
+    # General on/off button
+    general_status = st.button("General On")
+    general_status_off = st.button("General Off")
+
+    # Temperature control
+    st.header("Temperature Control")
+    temperature_status = st.button("Temperature On")
+    temperature_status_off = st.button("Temperature Off")
+
+    temperature_unit = st.radio("Select Temperature Unit", ["Celsius", "Fahrenheit"])
+    min_temp = 28 if temperature_unit == "Celsius" else celsius_to_fahrenheit(28)
+    max_temp = 40 if temperature_unit == "Celsius" else celsius_to_fahrenheit(40)
+
+    temperature_value = st.number_input(f"Set Temperature ({temperature_unit})", min_value=float(min_temp), max_value=float(max_temp), step=1.0)
+
+    # Camera control
+    st.header("Camera Control")
+    camera_status = st.button("Camera On")
+    camera_status_off = st.button("Camera Off")
+
+    # Light control
+    st.header("Light Control")
+    light_status = st.button("Light On")
+    light_status_off = st.button("Light Off")
+
+    # Display weight randomly
+    st.header("Weight Display")
+    weight_value = random.uniform(100, 300)
+    st.write(f"Weight: {weight_value:.2f} pounds")
+
+    # Show status based on button clicks
+    st.write("\n**Status:**")
+    st.write(f"- General: {'On' if general_status else 'Off'}")
+    st.write(f"- Temperature: {'On' if temperature_status else 'Off'}")
+    st.write(f"- Camera: {'On' if camera_status else 'Off'}")
+    st.write(f"- Light: {'On' if light_status else 'Off'}")
+
+if __name__ == "__main__":
+    main()
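A caveat about the controls above: st.button returns True only during the rerun triggered by its click, so the status lines fall back to "Off" on the next interaction and the "... Off" buttons are never read. If persistent on/off state is wanted, tracking it in st.session_state is one option; a minimal sketch under that assumption (the device_state key and the device list are illustrative):

import streamlit as st

# Keep per-device on/off flags across reruns; the key and device names are examples only.
if "device_state" not in st.session_state:
    st.session_state.device_state = {"General": False, "Camera": False, "Light": False}

for device in st.session_state.device_state:
    col_on, col_off = st.columns(2)
    if col_on.button(f"{device} On"):
        st.session_state.device_state[device] = True
    if col_off.button(f"{device} Off"):
        st.session_state.device_state[device] = False
    st.write(f"- {device}: {'On' if st.session_state.device_state[device] else 'Off'}")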
count_vectorizer.joblib
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:28846f62e43097bf18bd2a51ff2dc544bc7cad7814a3c0a7aaa8ec1c2521328a
+size 101316
emotion_model.joblib
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:38e8be9c8fdcbbdcfe5507ecc82102f12d7dce91f837737d514652ef409b7e87
+size 380047
main.py
ADDED
@@ -0,0 +1,25 @@
+import streamlit as st
+from model1 import model1
+from model2 import model2
+from model3 import model3
+
+
+def main():
+    st.title("Text Classification App")
+
+    # Model selection
+    model_selection = st.selectbox("Select Model", ["Model 1", "Model 2", "Model 3"])
+
+    if model_selection == "Model 1":
+        model3()
+    elif model_selection == "Model 2":
+        model1()
+    elif model_selection == "Model 3":
+        model2()
+
+
+
+
+
+if __name__ == "__main__":
+    main()
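As written, the UI label "Model 1" runs model3(), "Model 2" runs model1(), and "Model 3" runs model2(); the "Model 2 Analysis" subheader inside model1() suggests this mapping is deliberate rather than a typo. If more models are added, a dictionary keeps that label-to-function mapping explicit in one place; a sketch of this alternative dispatch, preserving the same mapping:

import streamlit as st
from model1 import model1
from model2 import model2
from model3 import model3

# Explicit label-to-function mapping; mirrors the if/elif chain in main.py.
MODEL_DISPATCH = {
    "Model 1": model3,
    "Model 2": model1,
    "Model 3": model2,
}

def main():
    st.title("Text Classification App")
    model_selection = st.selectbox("Select Model", list(MODEL_DISPATCH))
    MODEL_DISPATCH[model_selection]()

if __name__ == "__main__":
    main()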
model1.py
ADDED
@@ -0,0 +1,49 @@
+import streamlit as st
+import torch
+from transformers import RobertaForSequenceClassification, RobertaTokenizer
+import pandas as pd
+
+def model1():
+    # Your Model 1 code here
+    st.subheader("Model 2 Analysis")
+    model_path = "mymodel.pth"  # Replace with the directory of your fine-tuned model (saved via save_pretrained)
+    tokenizer = RobertaTokenizer.from_pretrained(model_path)
+    model = RobertaForSequenceClassification.from_pretrained(model_path)
+
+    # Set the model to evaluation mode
+    model.eval()
+
+    # Labels for your specific task
+    labels = ["anger", "fear", "joy"]  # Replace with your actual label names
+
+    # Streamlit app
+    user_input = st.text_area("Enter text for analysis:")
+    if st.button("Analyze"):
+        if user_input:
+            # Tokenize and preprocess the input
+            input_ids = tokenizer.encode(user_input, return_tensors="pt")
+            # Make prediction
+            with torch.no_grad():
+                output = model(input_ids)
+            # Get predicted probabilities
+            probabilities = torch.sigmoid(output.logits)
+
+            # Check if the lengths match before creating the DataFrame
+            if len(labels) == len(probabilities[0]):
+                # Display the probabilities as individual bars
+                df = pd.DataFrame({
+                    "Label": labels,
+                    "Probability": probabilities[0].tolist()
+                })
+
+                st.bar_chart(df.set_index("Label"))
+
+                # Display the emotion labels and scores
+                st.subheader("Emotion Analysis Output:")
+                for i, result in enumerate(sorted(zip(labels, probabilities[0]), key=lambda x: x[1], reverse=True)):
+                    label, score = result
+                    st.write(f"{i + 1}. {label.capitalize()}: {score:.4f}")
+            else:
+                st.error("Error: The length of labels and probabilities does not match.")
+        else:
+            st.warning("Please enter text for analysis.")
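A small modeling note on probabilities = torch.sigmoid(output.logits): sigmoid scores the three emotions independently (multi-label style), so the displayed bars need not sum to 1. If the checkpoint was fine-tuned for single-label classification over anger/fear/joy, softmax is the usual choice; a drop-in sketch for that line, under that assumption:

# Single-label alternative: mutually exclusive class probabilities that sum to 1.
probabilities = torch.softmax(output.logits, dim=-1)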
model2.py
ADDED
@@ -0,0 +1,46 @@
+import streamlit as st
+import joblib
+from sklearn.feature_extraction.text import TfidfVectorizer
+
+
+def model2():
+    # Load the saved model
+    model_filename = "emotion_model.joblib"
+    loaded_model = joblib.load(model_filename)
+
+    # Load the fitted vectorizer used during training (count_vectorizer.joblib suggests a CountVectorizer)
+    vectorizer_filename = "count_vectorizer.joblib"  # Update this to the correct filename
+    vectorizer = joblib.load(vectorizer_filename)
+
+    # Streamlit App
+    st.title("Emotion Prediction App")
+
+    # Input text from the user
+    user_input = st.text_area("Enter your text:")
+
+    # Analyze button
+    if st.button("Analyze"):
+        # Make predictions with new data
+        if user_input:
+            new_data = [user_input]
+            new_features = vectorizer.transform(new_data)
+            new_predictions = loaded_model.predict_proba(new_features)
+
+            # Display predictions using a progress bar
+            st.subheader("Emotion Scores:")
+
+            # Assuming three classes displayed in this order (Fear, Anger, Joy); predict_proba columns follow loaded_model.classes_
+            progress_bar_fear = st.progress(new_predictions[0][0])
+            st.write("Fear:", round(new_predictions[0][0], 2))
+            progress_bar_anger = st.progress(new_predictions[0][1])
+            st.write("Anger:", round(new_predictions[0][1], 2))
+            progress_bar_joy = st.progress(new_predictions[0][2])
+            st.write("Joy:", round(new_predictions[0][2], 2))
+
+
+
+
+
+# Call the function to run the app
+if __name__ == "__main__":
+    model2()
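One caution about the hard-coded Fear/Anger/Joy order: predict_proba returns its columns in the order of loaded_model.classes_ (sorted label order for most scikit-learn classifiers), which may not match that ordering. Reading the labels from the fitted model avoids the mismatch; a minimal drop-in sketch for the three hard-coded blocks above:

# Iterate labels in the exact order scikit-learn stores them on the fitted model.
for label, prob in zip(loaded_model.classes_, new_predictions[0]):
    st.progress(float(prob))
    st.write(f"{label}: {prob:.2f}")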
model3.py
ADDED
@@ -0,0 +1,45 @@
+import streamlit as st
+import pandas as pd
+from sklearn.model_selection import train_test_split
+from sklearn.feature_extraction.text import CountVectorizer
+from sklearn.naive_bayes import MultinomialNB
+from sklearn.pipeline import make_pipeline
+from sklearn.metrics import accuracy_score, classification_report
+
+
+def model3():
+    # Load the CSV file
+    df = pd.read_csv('Emotion_classify_Data.csv')
+
+    # The CSV has two columns: 'Comment' (the text) and 'Emotion' (the label)
+    X = df['Comment']
+    y = df['Emotion']
+
+    # Split the dataset into training and testing sets
+    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
+
+    # Create a text classification pipeline using a bag-of-words model and a Naive Bayes classifier
+    model = make_pipeline(CountVectorizer(), MultinomialNB())
+
+    # Train the model
+    model.fit(X_train, y_train)
+
+    # Function to make predictions
+    def predict_emotion(text):
+        prediction = model.predict([text])
+        return prediction[0]
+
+    # Streamlit app
+    st.title("Emotion Classification App")
+
+    # User input for prediction
+    user_input = st.text_area("Enter a sentence:")
+
+    if st.button("Predict"):
+        if user_input:
+            # Make prediction
+            prediction = predict_emotion(user_input)
+            st.success(f"Predicted Emotion: {prediction}")
+        else:
+            st.warning("Please enter a sentence for prediction.")
+
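Because model3() reloads the CSV and refits the Naive Bayes pipeline on every Streamlit rerun (every widget interaction), caching the fitted pipeline is worth considering. A minimal sketch using st.cache_resource, assuming a reasonably recent Streamlit release:

import pandas as pd
import streamlit as st
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.naive_bayes import MultinomialNB
from sklearn.pipeline import make_pipeline

@st.cache_resource
def load_emotion_pipeline(csv_path: str = "Emotion_classify_Data.csv"):
    # Fit once per process; Streamlit reuses the cached pipeline on later reruns.
    df = pd.read_csv(csv_path)
    pipeline = make_pipeline(CountVectorizer(), MultinomialNB())
    pipeline.fit(df["Comment"], df["Emotion"])
    return pipeline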
requirements.txt
ADDED
Binary file (2.25 kB).
tweet_emotions.csv
ADDED
The diff for this file is too large to render.