Commit 555cfd1 · 1 parent: 5109f73
model UI

Files changed:
- .streamlit/config.toml (+9 -0)
- app.py (+108 -0)
- style.css (+3 -0)
.streamlit/config.toml
ADDED
@@ -0,0 +1,9 @@
[theme]
primaryColor="#FF6B6B"
backgroundColor="#F0E6ED"  # Elegant contrast background color
secondaryBackgroundColor="#FFFFFF"
textColor="#000000"
font="sans-serif"
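The [theme] values above are applied automatically when Streamlit starts; nothing in app.py has to read this file. A minimal sketch (assuming only the streamlit==1.26.0 pinned in app.py's install notes) to confirm the theme is picked up at runtime:

# Sketch: print the active theme options loaded from .streamlit/config.toml.
import streamlit as st

st.write("primaryColor:", st.get_option("theme.primaryColor"))
st.write("backgroundColor:", st.get_option("theme.backgroundColor"))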
app.py
ADDED
@@ -0,0 +1,108 @@
# In a new conda env, install the packages below:
# pip install tensorflow==2.13.0
# pip install -U "tensorflow-text==2.13.*"
# pip install -q streamlit==1.26.0
# pip install openai==0.28.0

import openai
import streamlit as st
import tensorflow as tf
import tensorflow_text  # noqa: F401 -- registers the ops the saved text model needs
import numpy as np

###############################################
# Setting up styles for the app
###############################################
# Set page title and icon
# st.set_page_config(page_title="Bard ChatBot",
#                    page_icon=":robot_face:",
#                    initial_sidebar_state="collapsed",)

# # Custom css styles
# with open('style.css') as f:
#     st.markdown(f'<style>{f.read()}</style>', unsafe_allow_html=True)


# st.title("ChatGPT-like clone")

openai.api_key = "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"  # NOTE: hard-coded key; better kept in st.secrets or an env var
reloaded_model = tf.saved_model.load('one_2')  # fine-tuned emotion classifier

emotion_categories = {
    0: 'anger',
    1: 'fear',
    2: 'joy',
    3: 'love',
    4: 'neutral',
    5: 'sadness',
    6: 'surprise'
}

if "openai_model" not in st.session_state:
    st.session_state["openai_model"] = "gpt-3.5-turbo"

if "messages" not in st.session_state:
    st.session_state.messages = []

# col1, col2 = st.columns([4, 3])

# with col1:
#     st.markdown('helow')
#     for message in st.session_state.messages:
#         with st.chat_message(message["role"]):
#             st.markdown(message["content"])


# with col2:
#     a = st.session_state.messages[-1]['content']
#     st.markdown(len(st.session_state.messages) > 1)
#     if len(st.session_state.messages) != 0:
#         st.markdown('prediction:')
#         st.markdown(st.session_state.messages[-2])
#         q = st.session_state.messages[-2]['content']
#         emotion = reloaded_model([q])
#         true_classes = np.argmax(emotion, axis=1)
#         emotion_category = emotion_categories.get(int(true_classes))
#         st.markdown(emotion_category)


if prompt := st.chat_input("What is up?"):
    st.session_state.messages.append({"role": "user", "content": prompt})
    # USER
    with st.chat_message("user"):
        st.markdown(prompt)

    # EMOTION: classify the latest user message and show the predicted label
    with st.chat_message("Emotion", avatar='😶'):
        emotion = reloaded_model([prompt])
        true_classes = np.argmax(emotion, axis=1)
        emotion_category = emotion_categories.get(int(true_classes))
        st.write("Emotion: {}".format(emotion_category))

    # AI BOT: stream the assistant reply chunk by chunk
    with st.chat_message("assistant"):
        message_placeholder = st.empty()
        full_response = ""
        for response in openai.ChatCompletion.create(
            model=st.session_state["openai_model"],
            messages=[
                {"role": m["role"], "content": m["content"]}
                for m in st.session_state.messages
            ],
            stream=True,
        ):
            full_response += response.choices[0].delta.get("content", "")
            message_placeholder.markdown(full_response + "▌")

        message_placeholder.markdown(full_response)
        st.session_state.messages.append(
            {"role": "assistant", "content": full_response})


uploaded_files = st.file_uploader(
    "Choose a CSV file", accept_multiple_files=True)
for uploaded_file in uploaded_files:
    bytes_data = uploaded_file.read()
    st.write("filename:", uploaded_file.name)
    st.write(bytes_data)
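For reference, a standalone sketch of the emotion-classification step used in the # EMOTION block above. It assumes the SavedModel in one_2 takes a list of raw strings and returns one score per class, in the same order as emotion_categories (shape [batch, 7]); the input sentence is illustrative only:

# Hedged sketch: run the classifier outside Streamlit under the assumptions above.
import numpy as np
import tensorflow as tf
import tensorflow_text  # registers the text-preprocessing ops the SavedModel needs

emotion_categories = {0: 'anger', 1: 'fear', 2: 'joy', 3: 'love',
                      4: 'neutral', 5: 'sadness', 6: 'surprise'}

model = tf.saved_model.load('one_2')
scores = model(["I can't believe how well this turned out!"])
predicted = int(np.argmax(scores, axis=1)[0])
print("Emotion:", emotion_categories[predicted])

The chat section itself follows the openai==0.28.0 streaming interface pinned in the install comments: each streamed chunk's choices[0].delta may or may not carry a "content" field, so the loop appends delta.get("content", "") to full_response and re-renders the placeholder on every chunk, which is what makes the reply appear to stream in.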
style.css
ADDED
@@ -0,0 +1,3 @@
.block-container.css-1y4p8pa.ea3mdgi4 {
    padding-top: 2rem;
}
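The selector targets Streamlit's auto-generated class names (css-1y4p8pa, ea3mdgi4), which change between Streamlit releases, so this rule is tied to the pinned streamlit==1.26.0. It also only takes effect once the stylesheet is injected into the page, as the commented-out "Custom css styles" block in app.py does; a minimal sketch of that injection:

# Sketch mirroring the commented-out CSS-injection block in app.py.
import streamlit as st

with open("style.css") as f:
    st.markdown(f"<style>{f.read()}</style>", unsafe_allow_html=True)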