Archisman Karmakar committed · Commit 2c0d348 · Parent(s): b4e0bee
2025.03.18.post3

Browse files:
- app_main_hf.py +136 -8
- dashboard.py +35 -0
- imports.py +1 -0
- poetry.lock +362 -1
- pyproject.toml +4 -1
- pyprojectOLD.toml +3 -1
- requirements.txt +24 -2
- requirements_windows.txt +215 -0
- sentiment_analysis/config/stage1_models.json +17 -0
- sentiment_analysis/hmv_cfg_base_stage1/__pycache__/model1.cpython-312.pyc +0 -0
- sentiment_analysis/hmv_cfg_base_stage1/model1.py +10 -8
- sentiment_analysis/hmv_cfg_base_stage1/model2.py +117 -110
- sentiment_analysis/hmv_cfg_base_stage1/model3.py +94 -14
- sentiment_analysis/sentiment_analysis_main.py +320 -8
app_main_hf.py
CHANGED
@@ -1,7 +1,19 @@
+from streamlit_extras.bottom_container import bottom
+from streamlit_extras.app_logo import add_logo
+from streamlit_extras.add_vertical_space import add_vertical_space
+from streamlit_extras.stylable_container import stylable_container
+from emotion_analysis import show_emotion_analysis
+from sentiment_analysis.sentiment_analysis_main import show_sentiment_analysis
+from dashboard import show_dashboard
+from imports import *
 import streamlit as st
+from streamlit_option_menu import option_menu
 import os
 import asyncio
 import sys
+import shutil
+import gc
+from transformers.utils.hub import TRANSFORMERS_CACHE
 
 if sys.platform == "win32":
     asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
@@ -18,36 +30,152 @@ import importlib.util
 
 sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), )))
 
-from imports import *
 
 
+# from text_transformation import show_text_transformation
 
+st.set_page_config(
+    page_title="Tachygraphy Microtext Analysis & Normalization",
+    # layout="wide"
+)
-
-
-
-
 
 
+def free_memory():
+    # """Free up CPU & GPU memory before loading a new model."""
+    global current_model, current_tokenizer
+
+    if current_model is not None:
+        del current_model  # Delete the existing model
+        current_model = None  # Reset reference
+
+    if current_tokenizer is not None:
+        del current_tokenizer  # Delete the tokenizer
+        current_tokenizer = None
+
+    gc.collect()  # Force garbage collection for CPU memory
+
+    if torch.cuda.is_available():
+        torch.cuda.empty_cache()  # Free GPU memory
+        torch.cuda.ipc_collect()  # Clean up PyTorch GPU cache
+
+    # If running on CPU, reclaim memory using OS-level commands
+    try:
+        if torch.cuda.is_available() is False:
+            psutil.virtual_memory()  # Refresh memory stats
+    except Exception as e:
+        print(f"Memory cleanup error: {e}")
+
+    # Delete cached Hugging Face models
+    try:
+        cache_dir = TRANSFORMERS_CACHE
+        if os.path.exists(cache_dir):
+            shutil.rmtree(cache_dir)
+            print("Cache cleared!")
+    except Exception as e:
+        print(f"❌ Cache cleanup error: {e}")
+
 
 
 def main():
+    # selection = option_menu(
+    #     menu_title="Navigation",
+    #     options=[
+    #         "Dashboard",
+    #         "Stage 1: Sentiment Polarity Analysis",
+    #         "Stage 2: Emotion Mood-tag Analysis",
+    #         "Stage 3: Text Transformation & Normalization"
+    #     ],
+    #     icons=["joystick", "bar-chart", "emoji-laughing", "pencil"],
+    #     styles={
+    #         "container": {}},
+    #     menu_icon="menu-button-wide-fill",
+    #     default_index=0,
+    #     orientation="horizontal"
+    # )
+
     st.sidebar.title("Navigation")
-
+    with st.sidebar:
+        selection = option_menu(
+            menu_title=None,  # No title for a sleek look
+            options=["Dashboard", "Stage 1: Sentiment Polarity Analysis", "Stage 2: Emotion Mood-tag Analysis", "Stage 3: Text Transformation & Normalization"],
+            icons=None,
+            menu_icon="cast",  # Main menu icon
+            default_index=0,  # Highlight the first option
+            orientation="vertical",
+            styles={
+                "container": {"padding": "0!important", "background-color": "#f8f9fa"},
+                "icon": {"color": "#6c757d", "font-size": "18px"},
+                "nav-link": {
+                    "font-size": "16px",
+                    "text-align": "center",
+                    "margin": "0px",
+                    "color": "#6c757d",
+                    "transition": "0.3s",
+                },
+                "nav-link-selected": {
+                    "background-color": "#FF4B4B",
+                    "color": "white",
+                    "font-weight": "bold",
+                    "border-radius": "8px",
+                },
+            }
+        )
+
+    # st.sidebar.title("Navigation")
+    # selection = st.sidebar.radio("Go to", ["Dashboard", "Stage 1: Sentiment Polarity Analysis", "Stage 2: Emotion Mood-tag Analysis", "Stage 3: Text Transformation & Normalization"])
+
+    # if selection == "Dashboard":
+    #     show_dashboard()
+    # elif selection == "Stage 1: Sentiment Polarity Analysis":
+    #     show_sentiment_analysis()
+    # elif selection == "Stage 2: Emotion Mood-tag Analysis":
+    #     # show_emotion_analysis()
+    #     st.write("This section is under development.")
+    # elif selection == "Stage 3: Text Transformation & Normalization":
+    #     # show_text_transformation()
+    #     st.write("This section is under development.")
 
     if selection == "Dashboard":
+        st.cache_resource.clear()
+        free_memory()
         show_dashboard()
+
     elif selection == "Stage 1: Sentiment Polarity Analysis":
+        st.cache_resource.clear()
+        free_memory()
         show_sentiment_analysis()
+
     elif selection == "Stage 2: Emotion Mood-tag Analysis":
+        st.cache_resource.clear()
+        free_memory()
         # show_emotion_analysis()
         st.write("This section is under development.")
+
     elif selection == "Stage 3: Text Transformation & Normalization":
+        st.cache_resource.clear()
+        free_memory()
         # show_text_transformation()
         st.write("This section is under development.")
 
+
+
+    # st.sidebar.title("Navigation")
+    # selection = st.sidebar.radio("Go to", ["Dashboard", "Stage 1: Sentiment Polarity Analysis", "Stage 2: Emotion Mood-tag Analysis", "Stage 3: Text Transformation & Normalization"])
+
+    # if selection == "Dashboard":
+    #     show_dashboard()
+    # elif selection == "Stage 1: Sentiment Polarity Analysis":
+    #     show_sentiment_analysis()
+    # elif selection == "Stage 2: Emotion Mood-tag Analysis":
+    #     # show_emotion_analysis()
+    #     st.write("This section is under development.")
+    # elif selection == "Stage 3: Text Transformation & Normalization":
+    #     # show_text_transformation()
+    #     st.write("This section is under development.")
+
     st.sidebar.title("About")
     st.sidebar.info("""
     **Contributors:**
-    - Archisman Karmakar
+    - Archisman Karmakar
       - [LinkedIn](https://www.linkedin.com/in/archismankarmakar/)
       - [GitHub](https://www.github.com/ArchismanKarmakar)
       - [Kaggle](https://www.kaggle.com/archismancoder)
@@ -70,4 +198,4 @@ def main():
     """)
 
 if __name__ == "__main__":
-    main()
+    main()
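Note on the new free_memory(): it references current_model, current_tokenizer, torch, and psutil without defining or importing them in this file, so it presumably relies on the star import from imports.py; if those globals were never set, the `is not None` checks raise NameError. Also, deleting TRANSFORMERS_CACHE with shutil.rmtree forces every model to be re-downloaded on the next load. A minimal defensive sketch, not part of this commit, that tolerates unset globals:

# Hypothetical hardening sketch; assumes torch comes from `from imports import *`.
import gc

import torch


def free_memory_safe() -> None:
    """Release model references, tolerating globals that were never assigned."""
    g = globals()
    for name in ("current_model", "current_tokenizer"):
        if g.get(name) is not None:
            g[name] = None  # Drop the only strong reference

    gc.collect()  # Reclaim CPU-side objects

    if torch.cuda.is_available():
        torch.cuda.empty_cache()   # Return cached GPU blocks to the driver
        torch.cuda.ipc_collect()   # Clean up inter-process CUDA handles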
dashboard.py
CHANGED
@@ -40,6 +40,39 @@ def free_memory():
     except Exception as e:
         print(f"❌ Cache cleanup error: {e}")
 
+
+def create_footer():
+    st.divider()
+
+    # 🛠️ Layout using Streamlit columns
+    col1, col2, col3 = st.columns([1, 1, 1])
+
+    # 🚀 Contributors Section
+    with col1:
+        st.markdown("### 🚀 Contributors")
+        st.write("**Archisman Karmakar**")
+        st.write("[🔗 LinkedIn](https://www.linkedin.com/in/archismankarmakar/) | [🐙 GitHub](https://www.github.com/ArchismanKarmakar) | [📊 Kaggle](https://www.kaggle.com/archismancoder)")
+
+        st.write("**Sumon Chatterjee**")
+        st.write("[🔗 LinkedIn](https://www.linkedin.com/in/sumon-chatterjee-3b3b43227) | [🐙 GitHub](https://github.com/Sumon670) | [📊 Kaggle](https://www.kaggle.com/sumonchatterjee)")
+
+    # 🎓 Mentors Section
+    with col2:
+        st.markdown("### 🎓 Mentors")
+        st.write("**Prof. Anupam Mondal**")
+        st.write("[🔗 LinkedIn](https://www.linkedin.com/in/anupam-mondal-ph-d-8a7a1a39/) | [📚 Google Scholar](https://scholar.google.com/citations?user=ESRR9o4AAAAJ&hl=en) | [🌐 Website](https://sites.google.com/view/anupammondal/home)")
+
+        st.write("**Prof. Sainik Kumar Mahata**")
+        st.write("[🔗 LinkedIn](https://www.linkedin.com/in/mahatasainikk) | [📚 Google Scholar](https://scholar.google.co.in/citations?user=OcJDM50AAAAJ&hl=en) | [🌐 Website](https://sites.google.com/view/sainik-kumar-mahata/home)")
+
+    # 📌 Research Project Info Section
+    with col3:
+        st.markdown("### 📝 About the Project")
+        st.write("This is our research project for our **B.Tech final year** and a **journal** which is yet to be published.")
+        st.write("Built with 💙 using **Streamlit**.")
+
+    # 🚀 Display Footer
+
 def show_dashboard():
     # free_memory()
     st.title("Tachygraphy Micro-text Analysis & Normalization")
@@ -50,6 +83,8 @@ def show_dashboard():
     3. Text Transformation & Normalization
     """)
 
+    create_footer()
+
 
 def __main__():
     show_dashboard()
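Note: `def __main__():` only defines an ordinary function named __main__; nothing calls it when the module runs, so show_dashboard() is never invoked through it. The conventional entry-point guard would be (sketch, not part of the commit):

# Runs only when the file is executed directly, not when imported.
if __name__ == "__main__":
    show_dashboard()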
imports.py
CHANGED
@@ -14,6 +14,7 @@ import importlib.util
 import asyncio
 import sys
 import pytorch_lightning as pl
+from transformers.utils.hub import TRANSFORMERS_CACHE
 
 
 sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), )))
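TRANSFORMERS_CACHE is a constant exported by transformers that points at the local model cache, typically ~/.cache/huggingface/hub unless HF_HOME or related environment variables override it. A quick check of where it resolves on a given machine (illustrative sketch):

# Print the resolved Transformers cache directory on this machine.
from transformers.utils.hub import TRANSFORMERS_CACHE

print(TRANSFORMERS_CACHE)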
poetry.lock
CHANGED
@@ -1116,6 +1116,18 @@ files = [
 [package.extras]
 dev = ["coverage", "pytest (>=7.4.4)"]
 
+[[package]]
+name = "entrypoints"
+version = "0.4"
+description = "Discover and load entry points from installed packages."
+optional = false
+python-versions = ">=3.6"
+groups = ["main"]
+files = [
+    {file = "entrypoints-0.4-py3-none-any.whl", hash = "sha256:f174b5ff827504fd3cd97cc3f8649f3693f51538c7e4bdf3ef002c8429d42f9f"},
+    {file = "entrypoints-0.4.tar.gz", hash = "sha256:b706eddaa9218a19ebcd67b56818f05bb27589b1ca9e8d797b74affad4ccacd4"},
+]
+
 [[package]]
 name = "et-xmlfile"
 version = "2.0.0"
@@ -1179,6 +1191,21 @@ files = [
 [package.extras]
 tests = ["asttokens (>=2.1.0)", "coverage", "coverage-enable-subprocess", "ipython", "littleutils", "pytest", "rich ; python_version >= \"3.11\""]
 
+[[package]]
+name = "faker"
+version = "37.0.0"
+description = "Faker is a Python package that generates fake data for you."
+optional = false
+python-versions = ">=3.9"
+groups = ["main"]
+files = [
+    {file = "faker-37.0.0-py3-none-any.whl", hash = "sha256:2598f78b76710a4ed05e197dda5235be409b4c291ba5c9c7514989cfbc7a5144"},
+    {file = "faker-37.0.0.tar.gz", hash = "sha256:d2e4e2a30d459a8ec0ae52a552aa51c48973cb32cf51107dee90f58a8322a880"},
+]
+
+[package.dependencies]
+tzdata = "*"
+
 [[package]]
 name = "fastjsonschema"
 version = "2.21.1"
@@ -1194,6 +1221,22 @@ files = [
 [package.extras]
 devel = ["colorama", "json-spec", "jsonschema", "pylint", "pytest", "pytest-benchmark", "pytest-cache", "validictory"]
 
+[[package]]
+name = "favicon"
+version = "0.7.0"
+description = "Get a website's favicon."
+optional = false
+python-versions = "*"
+groups = ["main"]
+files = [
+    {file = "favicon-0.7.0-py2.py3-none-any.whl", hash = "sha256:7fec0617c73dcb8521ea788e1d38cdc7226c7cb8e28c81e11625d85fa1534880"},
+    {file = "favicon-0.7.0.tar.gz", hash = "sha256:6d6b5a78de2a0d0084589f687f384b2ecd6a6527093fec564403b1a30605d7a8"},
+]
+
+[package.dependencies]
+beautifulsoup4 = ">=4.7.0"
+requests = ">=2.21.0"
+
 [[package]]
 name = "filelock"
 version = "3.18.0"
@@ -1652,6 +1695,17 @@ files = [
 [package.dependencies]
 numpy = ">=1.19.3"
 
+[[package]]
+name = "htbuilder"
+version = "0.9.0"
+description = "A purely-functional HTML builder for Python. Think JSX rather than templates."
+optional = false
+python-versions = ">=3.7"
+groups = ["main"]
+files = [
+    {file = "htbuilder-0.9.0.tar.gz", hash = "sha256:58c0bc5502c1a46b42ae9e074c43ec0f6fdc24ed334936cb17e1ed5a8938aee2"},
+]
+
 [[package]]
 name = "httpcore"
 version = "1.0.7"
@@ -2497,6 +2551,27 @@ profiling = ["gprof2dot"]
 rtd = ["jupyter_sphinx", "mdit-py-plugins", "myst-parser", "pyyaml", "sphinx", "sphinx-copybutton", "sphinx-design", "sphinx_book_theme"]
 testing = ["coverage", "pytest", "pytest-cov", "pytest-regressions"]
 
+[[package]]
+name = "markdownlit"
+version = "0.0.7"
+description = "markdownlit adds a couple of lit Markdown capabilities to your Streamlit apps"
+optional = false
+python-versions = ">=3.6"
+groups = ["main"]
+files = [
+    {file = "markdownlit-0.0.7-py3-none-any.whl", hash = "sha256:b58bb539dcb52e0b040ab2fed32f1f3146cbb2746dc3812940d9dd359c378bb6"},
+    {file = "markdownlit-0.0.7.tar.gz", hash = "sha256:553e2db454e2be4567caebef5176c98a40a7e24f7ea9c2fe8a1f05c1d9ea4005"},
+]
+
+[package.dependencies]
+favicon = "*"
+htbuilder = "*"
+lxml = "*"
+markdown = "*"
+pymdown-extensions = "*"
+streamlit = "*"
+streamlit-extras = "*"
+
 [[package]]
 name = "markupsafe"
 version = "3.0.2"
@@ -3835,6 +3910,21 @@ docs = ["sphinx (>=1.7.1)"]
 redis = ["redis"]
 tests = ["pytest (>=5.4.1)", "pytest-cov (>=2.8.1)", "pytest-mypy (>=0.8.0)", "pytest-rerunfailures (>=15.0)", "pytest-timeout (>=2.1.0)", "redis", "sphinx (>=6.0.0)", "types-redis"]
 
+[[package]]
+name = "prometheus-client"
+version = "0.21.1"
+description = "Python client for the Prometheus monitoring system."
+optional = false
+python-versions = ">=3.8"
+groups = ["main"]
+files = [
+    {file = "prometheus_client-0.21.1-py3-none-any.whl", hash = "sha256:594b45c410d6f4f8888940fe80b5cc2521b305a1fafe1c58609ef715a001f301"},
+    {file = "prometheus_client-0.21.1.tar.gz", hash = "sha256:252505a722ac04b0456be05c05f75f45d760c2911ffc45f2a06bcaed9f3ae3fb"},
+]
+
+[package.extras]
+twisted = ["twisted"]
+
 [[package]]
 name = "prompt-toolkit"
 version = "3.0.50"
@@ -4175,6 +4265,25 @@ files = [
 [package.extras]
 windows-terminal = ["colorama (>=0.4.6)"]
 
+[[package]]
+name = "pymdown-extensions"
+version = "10.14.3"
+description = "Extension pack for Python Markdown."
+optional = false
+python-versions = ">=3.8"
+groups = ["main"]
+files = [
+    {file = "pymdown_extensions-10.14.3-py3-none-any.whl", hash = "sha256:05e0bee73d64b9c71a4ae17c72abc2f700e8bc8403755a00580b49a4e9f189e9"},
+    {file = "pymdown_extensions-10.14.3.tar.gz", hash = "sha256:41e576ce3f5d650be59e900e4ceff231e0aed2a88cf30acaee41e02f063a061b"},
+]
+
+[package.dependencies]
+markdown = ">=3.6"
+pyyaml = "*"
+
+[package.extras]
+extra = ["pygments (>=2.19.1)"]
+
 [[package]]
 name = "pyparsing"
 version = "3.2.1"
@@ -5264,6 +5373,36 @@ files = [
     {file = "soupsieve-2.6.tar.gz", hash = "sha256:e2e68417777af359ec65daac1057404a3c8a5455bb8abc36f1a9866ab1a51abb"},
 ]
 
+[[package]]
+name = "st-annotated-text"
+version = "4.0.2"
+description = "A simple component to display annotated text in Streamlit apps."
+optional = false
+python-versions = ">=3.5"
+groups = ["main"]
+files = [
+    {file = "st_annotated_text-4.0.2-py3-none-any.whl", hash = "sha256:712c45821f020eccafad3a58c10b9d2d51d449f4db999a06308ecf39a753a4df"},
+    {file = "st_annotated_text-4.0.2.tar.gz", hash = "sha256:b0134dcf734697cc3dbdb11862c4174071e7fb6f365af55a37c710d357fd88a5"},
+]
+
+[package.dependencies]
+htbuilder = "*"
+
+[[package]]
+name = "st-theme"
+version = "1.2.3"
+description = "A component that returns the active theme of the Streamlit app."
+optional = false
+python-versions = ">=3.8"
+groups = ["main"]
+files = [
+    {file = "st-theme-1.2.3.tar.gz", hash = "sha256:ca97aece1a48ded6e83fd742c27cb0851e1bce2100ab4b6c37c7b6e003b65b42"},
+    {file = "st_theme-1.2.3-py3-none-any.whl", hash = "sha256:0a54d9817dd5f8a6d7b0d071b25ae72eacf536c63a5fb97374923938021b1389"},
+]
+
+[package.dependencies]
+streamlit = ">=1.33"
+
 [[package]]
 name = "stack-data"
 version = "0.6.3"
@@ -5319,6 +5458,213 @@ watchdog = {version = ">=2.1.5,<7", markers = "platform_system != \"Darwin\""}
 [package.extras]
 snowflake = ["snowflake-connector-python (>=3.3.0) ; python_version < \"3.12\"", "snowflake-snowpark-python[modin] (>=1.17.0) ; python_version < \"3.12\""]
 
+[[package]]
+name = "streamlit-avatar"
+version = "0.1.3"
+description = "Component to display avatar icon in Streamlit"
+optional = false
+python-versions = ">=3.7"
+groups = ["main"]
+files = [
+    {file = "streamlit_avatar-0.1.3-py3-none-any.whl", hash = "sha256:873e6f0022635eda29d6b76e570adcbc095d8e6d23f18dd1e288720b4736364d"},
+    {file = "streamlit_avatar-0.1.3.tar.gz", hash = "sha256:023893bd80db5a923d397fc64d27c7a972107da4f0bea0feb99a32f9363f2691"},
+]
+
+[package.dependencies]
+streamlit = ">=0.63"
+
+[package.extras]
+devel = ["wheel"]
+
+[[package]]
+name = "streamlit-camera-input-live"
+version = "0.2.0"
+description = "Alternative version of st.camera_input which returns the webcam images live, without any button press needed"
+optional = false
+python-versions = ">=3.7"
+groups = ["main"]
+files = [
+    {file = "streamlit-camera-input-live-0.2.0.tar.gz", hash = "sha256:20ceb952b98410084176fcfeb9148e02ea29033a88d4a923161ac7890cedae0f"},
+    {file = "streamlit_camera_input_live-0.2.0-py3-none-any.whl", hash = "sha256:dacb56cdedbb0d6c07e35a66b755b9145b5023e5c855c64193c3d3e73198e9be"},
+]
+
+[package.dependencies]
+jinja2 = "*"
+streamlit = ">=1.2"
+
+[[package]]
+name = "streamlit-card"
+version = "1.0.2"
+description = "A streamlit component, to make UI cards"
+optional = false
+python-versions = ">=3.8"
+groups = ["main"]
+files = [
+    {file = "streamlit_card-1.0.2-py3-none-any.whl", hash = "sha256:f5d01ce57d6481eb3ba44e504146f56a7b82907d6700f0c19266ed6381a9c58f"},
+    {file = "streamlit_card-1.0.2.tar.gz", hash = "sha256:8001cd5edd8a6e2db36ee81f37dc645f08f78c21a2ba968403176c68b4f33cb1"},
+]
+
+[package.dependencies]
+streamlit = ">=0.63"
+
+[[package]]
+name = "streamlit-elements"
+version = "0.1.0"
+description = "React Components for Streamlit."
+optional = false
+python-versions = ">=3.6"
+groups = ["main"]
+files = [
+    {file = "streamlit-elements-0.1.0.tar.gz", hash = "sha256:5f9f116f22df3ce4a8636b1dee7c2fd3dc3cb0c66267fd28c3e0314aa1d303a7"},
+    {file = "streamlit_elements-0.1.0-py3-none-any.whl", hash = "sha256:593c4b88c399c55879aa76f7f42970f30106f66acaa4baada6338ae5571790df"},
+]
+
+[package.dependencies]
+streamlit = ">=1.4.0"
+
+[[package]]
+name = "streamlit-embedcode"
+version = "0.1.2"
+description = "Streamlit component for embedded code snippets"
+optional = false
+python-versions = ">=3.6"
+groups = ["main"]
+files = [
+    {file = "streamlit-embedcode-0.1.2.tar.gz", hash = "sha256:22a50eb43407bab3d0ed2d4b58e89819da477cd0592ef87edbd373c286712e3a"},
+    {file = "streamlit_embedcode-0.1.2-py3-none-any.whl", hash = "sha256:b3c9520c1b48f2eef3c702b5a967f64c9a8ff2ea8e74ebb26c0e9195965bb923"},
+]
+
+[package.dependencies]
+streamlit = ">=0.63"
+
+[[package]]
+name = "streamlit-extras"
+version = "0.6.0"
+description = "A library to discover, try, install and share Streamlit extras"
+optional = false
+python-versions = "!=2.7.*,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,!=3.7.*,>=3.8"
+groups = ["main"]
+files = [
+    {file = "streamlit_extras-0.6.0-py3-none-any.whl", hash = "sha256:b80134871046586a55dd155079c2971d2be47024b6a330fd077a7b505b81d470"},
+    {file = "streamlit_extras-0.6.0.tar.gz", hash = "sha256:3cfa5cf7f17428b53a1d963a8c84903a1ba6f21173364d1664d58b6a58ee91b9"},
+]
+
+[package.dependencies]
+entrypoints = ">=0.4"
+htbuilder = ">=0.6.2"
+markdownlit = ">=0.0.5"
+plotly = ">=1.0.0"
+prometheus-client = ">=0.14.0"
+protobuf = "!=3.20.2"
+st-annotated-text = ">=3.0.0"
+st-theme = ">=1.0.1"
+streamlit = ">=1.0.0"
+streamlit-avatar = ">=0.1.3"
+streamlit-camera-input-live = ">=0.2.0"
+streamlit-card = ">=0.0.4"
+streamlit-embedcode = ">=0.1.2"
+streamlit-faker = ">=0.0.2"
+streamlit-image-coordinates = ">=0.1.1,<0.2.0"
+streamlit-keyup = ">=0.1.9"
+streamlit-toggle-switch = ">=1.0.2"
+streamlit-vertical-slider = ">=2.5.5"
+validators = ">=0.20.0"
+
+[[package]]
+name = "streamlit-faker"
+version = "0.0.3"
+description = "streamlit-faker is a library to very easily fake Streamlit commands"
+optional = false
+python-versions = ">=3.6"
+groups = ["main"]
+files = [
+    {file = "streamlit_faker-0.0.3-py3-none-any.whl", hash = "sha256:caf410867b55b4877d8fe73cc987d089e1938f8e63594f1eb579e28015844215"},
+    {file = "streamlit_faker-0.0.3.tar.gz", hash = "sha256:bff0f053aa514a99313a3699746183b41111891c82d6e9b41b1c69a7d719bf2f"},
+]
+
+[package.dependencies]
+faker = "*"
+matplotlib = "*"
+streamlit = "*"
+streamlit-extras = "*"
+
+[[package]]
+name = "streamlit-image-coordinates"
+version = "0.1.9"
+description = "Streamlit component that displays an image and returns the coordinates when you click on it"
+optional = false
+python-versions = ">=3.7"
+groups = ["main"]
+files = [
+    {file = "streamlit_image_coordinates-0.1.9-py3-none-any.whl", hash = "sha256:e577d475707ce8a3f7be1825027af6b4d7b609a456f4b25b794756ed2436ab06"},
+    {file = "streamlit_image_coordinates-0.1.9.tar.gz", hash = "sha256:825e1f49053f1363913014a4e9130a03b9ca01fb5f7bd269b17afe8162d2ba37"},
+]
+
+[package.dependencies]
+jinja2 = "*"
+streamlit = ">=1.2"
+
+[[package]]
+name = "streamlit-keyup"
+version = "0.3.0"
+description = "Text input that renders on keyup"
+optional = false
+python-versions = ">=3.7"
+groups = ["main"]
+files = [
+    {file = "streamlit_keyup-0.3.0-py3-none-any.whl", hash = "sha256:ec7221617b1c832526db52859196c417578d6b4285942fbd10a0b2ff313899b3"},
+    {file = "streamlit_keyup-0.3.0.tar.gz", hash = "sha256:8595a14892423243669e5d50e982853ffb7eb201b65952a48676133ab9bbc937"},
+]
+
+[package.dependencies]
+jinja2 = "*"
+streamlit = ">=1.2"
+
+[[package]]
+name = "streamlit-option-menu"
+version = "0.4.0"
+description = "streamlit-option-menu is a simple Streamlit component that allows users to select a single item from a list of options in a menu."
+optional = false
+python-versions = ">=3.6"
+groups = ["main"]
+files = [
+    {file = "streamlit-option-menu-0.4.0.tar.gz", hash = "sha256:48ec69d59e547fa2fa4bfae001620df8af56a80de2f765ddbb9fcbfb84017129"},
+    {file = "streamlit_option_menu-0.4.0-py3-none-any.whl", hash = "sha256:a55fc7554047b6db371595af2182e435b8a2c715ee6124e8543685bd4670b07e"},
+]
+
+[package.dependencies]
+streamlit = ">=1.36"
+
+[[package]]
+name = "streamlit-toggle-switch"
+version = "1.0.2"
+description = "Creates a customizable toggle"
+optional = false
+python-versions = ">=3.6"
+groups = ["main"]
+files = [
+    {file = "streamlit_toggle_switch-1.0.2-py3-none-any.whl", hash = "sha256:0081212d80d178bda337acf2432425e2016d757f57834b18645d4c5b928d4c0f"},
+    {file = "streamlit_toggle_switch-1.0.2.tar.gz", hash = "sha256:991b103cd3448b0f6507f8051777b996a17b4630956d5b6fa13344175b20e572"},
+]
+
+[package.dependencies]
+streamlit = ">=0.63"
+
+[[package]]
+name = "streamlit-vertical-slider"
+version = "2.5.5"
+description = "Creates a customizable vertical slider"
+optional = false
+python-versions = ">=3.8"
+groups = ["main"]
+files = [
+    {file = "streamlit_vertical_slider-2.5.5-py3-none-any.whl", hash = "sha256:8182e861444fcd69e05c05e7109a636d459560c249f1addf78b58e525a719cb6"},
+    {file = "streamlit_vertical_slider-2.5.5.tar.gz", hash = "sha256:d6854cf81a606f5c021df2037d2c49036df2d03ce5082a5227a2acca8322ca74"},
+]
+
+[package.dependencies]
+streamlit = ">=1.22.0"
+
 [[package]]
 name = "sympy"
 version = "1.13.1"
@@ -5941,6 +6287,21 @@ h2 = ["h2 (>=4,<5)"]
 socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"]
 zstd = ["zstandard (>=0.18.0)"]
 
+[[package]]
+name = "validators"
+version = "0.34.0"
+description = "Python Data Validation for Humans™"
+optional = false
+python-versions = ">=3.8"
+groups = ["main"]
+files = [
+    {file = "validators-0.34.0-py3-none-any.whl", hash = "sha256:c804b476e3e6d3786fa07a30073a4ef694e617805eb1946ceee3fe5a9b8b1321"},
+    {file = "validators-0.34.0.tar.gz", hash = "sha256:647fe407b45af9a74d245b943b18e6a816acf4926974278f6dd617778e1e781f"},
+]
+
+[package.extras]
+crypto-eth-addresses = ["eth-hash[pycryptodome] (>=0.7.0)"]
+
 [[package]]
 name = "virtualenv"
 version = "20.29.3"
@@ -6517,4 +6878,4 @@ cffi = ["cffi (>=1.11)"]
 [metadata]
 lock-version = "2.1"
 python-versions = ">=3.12"
-content-hash = "
+content-hash = "0a2b37985beb5aa45fe5004098180bab465570c2e58665ae770b0793c37b4e3d"
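Poetry lock stanzas are plain TOML, so the additions above can be inspected programmatically. A sketch listing the newly locked Streamlit-component pins, assuming Python 3.11+ for the stdlib tomllib module:

# List locked versions of the streamlit-*/st-* packages from poetry.lock.
import tomllib

with open("poetry.lock", "rb") as f:
    lock = tomllib.load(f)

for pkg in lock["package"]:
    if pkg["name"].startswith(("streamlit-", "st-")):
        print(f'{pkg["name"]}=={pkg["version"]}')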
pyproject.toml
CHANGED
@@ -1,6 +1,6 @@
 [project]
 name = "tachygraphy-microtext-analysis-and-normalization"
-version = "2025.03.18.
+version = "2025.03.18.post3"
 description = ""
 authors = [
     { name = "Archisman Karmakar", email = "[email protected]" },
@@ -194,6 +194,9 @@ dependencies = [
     "zstandard (>=0.23.0,<0.24.0)",
     "asyncio (>=3.4.3,<4.0.0)",
     "pytorch-lightning (>=2.5.0.post0,<3.0.0)",
+    "streamlit-option-menu (>=0.4.0,<0.5.0)",
+    "streamlit-elements (>=0.1.0,<0.2.0)",
+    "streamlit-extras (>=0.6.0,<0.7.0)",
 ]
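The version bump to 2025.03.18.post3 uses a PEP 440 post-release suffix, which sorts after the base date release. A quick check of that ordering, assuming the `packaging` library (already pinned in requirements.txt) is installed:

# PEP 440 post-releases sort after the base release.
from packaging.version import Version

assert Version("2025.03.18.post3") > Version("2025.03.18.post2")
assert Version("2025.03.18.post2") > Version("2025.03.18")
print("post-release ordering OK")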
pyprojectOLD.toml
CHANGED
@@ -1,6 +1,8 @@
 [project]
 name = "tachygraphy-microtext-analysis-and-normalization"
-version = "2025.03.
+version = "2025.03.18.post2"
+# version = "2025.03.18.post1"
+# version = "2025.03.17.post1"
 # version = "2025.03.16.post3"
 # version = "2025.03.16.post2"
 # version = "2025.03.16.post1"
requirements.txt
CHANGED
@@ -37,10 +37,13 @@ distlib==0.3.9 ; python_version >= "3.12"
 distributed==2025.2.0 ; python_version >= "3.12"
 dulwich==0.22.8 ; python_version >= "3.12"
 emoji==2.14.1 ; python_version >= "3.12"
+entrypoints==0.4 ; python_version >= "3.12"
 et-xmlfile==2.0.0 ; python_version >= "3.12"
 evaluate==0.4.3 ; python_version >= "3.12"
 executing==2.2.0 ; python_version >= "3.12"
+faker==37.0.0 ; python_version >= "3.12"
 fastjsonschema==2.21.1 ; python_version >= "3.12"
+favicon==0.7.0 ; python_version >= "3.12"
 filelock==3.18.0 ; python_version >= "3.12"
 findpython==0.6.3 ; python_version >= "3.12"
 flatbuffers==25.2.10 ; python_version >= "3.12"
@@ -55,6 +58,7 @@ grpcio==1.71.0 ; python_version >= "3.12"
 h11==0.14.0 ; python_version >= "3.12"
 h2o==3.46.0.6 ; python_version >= "3.12"
 h5py==3.13.0 ; python_version >= "3.12"
+htbuilder==0.9.0 ; python_version >= "3.12"
 httpcore==1.0.7 ; python_version >= "3.12"
 httpx==0.28.1 ; python_version >= "3.12"
 huggingface-hub==0.29.3 ; python_version >= "3.12"
@@ -86,6 +90,7 @@ locket==1.0.0 ; python_version >= "3.12"
 lxml==5.3.1 ; python_version >= "3.12"
 markdown-it-py==3.0.0 ; python_version >= "3.12"
 markdown==3.7 ; python_version >= "3.12"
+markdownlit==0.0.7 ; python_version >= "3.12"
 markupsafe==3.0.2 ; python_version >= "3.12"
 matplotlib-inline==0.1.7 ; python_version >= "3.12"
 matplotlib==3.10.1 ; python_version >= "3.12"
@@ -132,6 +137,7 @@ pkginfo==1.12.1.2 ; python_version >= "3.12"
 platformdirs==4.3.6 ; python_version >= "3.12"
 plotly==6.0.0 ; python_version >= "3.12"
 portalocker==3.1.1 ; python_version >= "3.12"
+prometheus-client==0.21.1 ; python_version >= "3.12"
 prompt-toolkit==3.0.50 ; python_version >= "3.12"
 propcache==0.3.0 ; python_version >= "3.12"
 protobuf==5.29.3 ; python_version >= "3.12"
@@ -143,13 +149,14 @@ pycparser==2.22 ; (sys_platform == "linux" or implementation_name == "pypy" or p
 pycurl==7.45.6 ; python_version >= "3.12"
 pydeck==0.9.1 ; python_version >= "3.12"
 pygments==2.19.1 ; python_version >= "3.12"
+pymdown-extensions==10.14.3 ; python_version >= "3.12"
 pyparsing==3.2.1 ; python_version >= "3.12"
 pyproject-hooks==1.2.0 ; python_version >= "3.12"
 python-dateutil==2.9.0.post0 ; python_version >= "3.12"
 pytorch-lightning==2.5.0.post0 ; python_version >= "3.12"
 pytz==2025.1 ; python_version >= "3.12"
-pywin32-ctypes==0.2.3 ; python_version >= "3.12"
-pywin32==309 ; python_version >= "3.12"
+pywin32-ctypes==0.2.3 ; python_version >= "3.12" and sys_platform == "win32"
+pywin32==309 ; python_version >= "3.12" and sys_platform == "win32"
 pyyaml==6.0.2 ; python_version >= "3.12"
 pyzmq==26.3.0 ; python_version >= "3.12"
 rapidfuzz==3.12.2 ; python_version >= "3.12"
@@ -174,7 +181,21 @@ smmap==5.0.2 ; python_version >= "3.12"
 sniffio==1.3.1 ; python_version >= "3.12"
 sortedcontainers==2.4.0 ; python_version >= "3.12"
 soupsieve==2.6 ; python_version >= "3.12"
+st-annotated-text==4.0.2 ; python_version >= "3.12"
+st-theme==1.2.3 ; python_version >= "3.12"
 stack-data==0.6.3 ; python_version >= "3.12"
+streamlit-avatar==0.1.3 ; python_version >= "3.12"
+streamlit-camera-input-live==0.2.0 ; python_version >= "3.12"
+streamlit-card==1.0.2 ; python_version >= "3.12"
+streamlit-elements==0.1.0 ; python_version >= "3.12"
+streamlit-embedcode==0.1.2 ; python_version >= "3.12"
+streamlit-extras==0.6.0 ; python_version >= "3.12"
+streamlit-faker==0.0.3 ; python_version >= "3.12"
+streamlit-image-coordinates==0.1.9 ; python_version >= "3.12"
+streamlit-keyup==0.3.0 ; python_version >= "3.12"
+streamlit-option-menu==0.4.0 ; python_version >= "3.12"
+streamlit-toggle-switch==1.0.2 ; python_version >= "3.12"
+streamlit-vertical-slider==2.5.5 ; python_version >= "3.12"
 streamlit==1.43.2 ; python_version >= "3.12"
 sympy==1.13.1 ; python_version >= "3.12"
 tabulate==0.9.0 ; python_version >= "3.12"
@@ -202,6 +223,7 @@ trove-classifiers==2025.3.13.13 ; python_version >= "3.12"
 typing-extensions==4.12.2 ; python_version >= "3.12"
 tzdata==2025.1 ; python_version >= "3.12"
 urllib3==2.3.0 ; python_version >= "3.12"
+validators==0.34.0 ; python_version >= "3.12"
 virtualenv==20.29.3 ; python_version >= "3.12"
 watchdog==6.0.0 ; python_version >= "3.12"
 wcwidth==0.2.13 ; python_version >= "3.12"
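The notable fix here is scoping pywin32 and pywin32-ctypes to Windows with `sys_platform == "win32"`, so pip no longer tries (and fails) to install them on Linux deploy targets. A sketch of how such PEP 508 environment markers evaluate, assuming the `packaging` library:

# Evaluate an environment marker against the current interpreter/platform.
from packaging.markers import Marker

marker = Marker('python_version >= "3.12" and sys_platform == "win32"')
print(marker.evaluate())  # True only on a Windows Python >= 3.12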
requirements_windows.txt
ADDED
@@ -0,0 +1,215 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
absl-py==2.1.0 ; python_version >= "3.12"
|
2 |
+
accelerate==1.5.2 ; python_version >= "3.12"
|
3 |
+
aiohappyeyeballs==2.6.1 ; python_version >= "3.12"
|
4 |
+
aiohttp==3.11.14 ; python_version >= "3.12"
|
5 |
+
aiosignal==1.3.2 ; python_version >= "3.12"
|
6 |
+
altair==5.5.0 ; python_version >= "3.12"
|
7 |
+
anyio==4.9.0 ; python_version >= "3.12"
|
8 |
+
appnope==0.1.4 ; python_version >= "3.12" and platform_system == "Darwin"
|
9 |
+
asttokens==3.0.0 ; python_version >= "3.12"
|
10 |
+
astunparse==1.6.3 ; python_version >= "3.12"
|
11 |
+
asyncio==3.4.3 ; python_version >= "3.12"
|
12 |
+
attrs==25.3.0 ; python_version >= "3.12"
|
13 |
+
autograd==1.7.0 ; python_version >= "3.12"
|
14 |
+
beautifulsoup4==4.13.3 ; python_version >= "3.12"
|
15 |
+
blinker==1.9.0 ; python_version >= "3.12"
|
16 |
+
bs4==0.0.2 ; python_version >= "3.12"
|
17 |
+
build==1.2.2.post1 ; python_version >= "3.12"
|
18 |
+
cachecontrol==0.14.2 ; python_version >= "3.12"
|
19 |
+
cachetools==5.5.2 ; python_version >= "3.12"
|
20 |
+
certifi==2025.1.31 ; python_version >= "3.12"
|
21 |
+
cffi==1.17.1 ; (sys_platform == "linux" or implementation_name == "pypy" or platform_python_implementation == "PyPy") and python_version >= "3.12"
|
22 |
+
charset-normalizer==3.4.1 ; python_version >= "3.12"
|
23 |
+
click==8.1.8 ; python_version >= "3.12"
|
24 |
+
cloudpickle==3.1.1 ; python_version >= "3.12"
|
25 |
+
colorama==0.4.6 ; python_version >= "3.12"
|
26 |
+
comm==0.2.2 ; python_version >= "3.12"
|
27 |
+
contourpy==1.3.1 ; python_version >= "3.12"
|
28 |
+
cryptography==44.0.2 ; python_version >= "3.12" and sys_platform == "linux"
|
29 |
+
cycler==0.12.1 ; python_version >= "3.12"
|
30 |
+
dask==2025.2.0 ; python_version >= "3.12"
|
31 |
+
datasets==3.4.0 ; python_version >= "3.12"
|
32 |
+
debugpy==1.8.13 ; python_version >= "3.12"
|
33 |
+
decorator==5.2.1 ; python_version >= "3.12"
|
34 |
+
diffusers==0.32.2 ; python_version >= "3.12"
|
35 |
+
dill==0.3.8 ; python_version >= "3.12"
|
36 |
+
distlib==0.3.9 ; python_version >= "3.12"
|
37 |
+
distributed==2025.2.0 ; python_version >= "3.12"
|
38 |
+
dulwich==0.22.8 ; python_version >= "3.12"
|
39 |
+
emoji==2.14.1 ; python_version >= "3.12"
|
40 |
+
et-xmlfile==2.0.0 ; python_version >= "3.12"
|
41 |
+
evaluate==0.4.3 ; python_version >= "3.12"
|
42 |
+
executing==2.2.0 ; python_version >= "3.12"
|
43 |
+
fastjsonschema==2.21.1 ; python_version >= "3.12"
|
44 |
+
filelock==3.18.0 ; python_version >= "3.12"
|
45 |
+
findpython==0.6.3 ; python_version >= "3.12"
|
46 |
+
flatbuffers==25.2.10 ; python_version >= "3.12"
|
47 |
+
fonttools==4.56.0 ; python_version >= "3.12"
|
48 |
+
frozenlist==1.5.0 ; python_version >= "3.12"
|
49 |
+
fsspec==2024.12.0 ; python_version >= "3.12"
|
50 |
+
gast==0.6.0 ; python_version >= "3.12"
|
51 |
+
gitdb==4.0.12 ; python_version >= "3.12"
|
52 |
+
gitpython==3.1.44 ; python_version >= "3.12"
|
53 |
+
google-pasta==0.2.0 ; python_version >= "3.12"
|
54 |
+
grpcio==1.71.0 ; python_version >= "3.12"
|
55 |
+
h11==0.14.0 ; python_version >= "3.12"
|
56 |
+
h2o==3.46.0.6 ; python_version >= "3.12"
|
57 |
+
h5py==3.13.0 ; python_version >= "3.12"
|
58 |
+
httpcore==1.0.7 ; python_version >= "3.12"
|
59 |
+
httpx==0.28.1 ; python_version >= "3.12"
|
60 |
+
huggingface-hub==0.29.3 ; python_version >= "3.12"
|
61 |
+
idna==3.10 ; python_version >= "3.12"
|
62 |
+
importlib-metadata==8.6.1 ; python_version >= "3.12"
|
63 |
+
importlib==1.0.4 ; python_version >= "3.12"
|
64 |
+
installer==0.7.0 ; python_version >= "3.12"
|
65 |
+
ipykernel==6.29.5 ; python_version >= "3.12"
|
66 |
+
ipython-pygments-lexers==1.1.1 ; python_version >= "3.12"
|
67 |
+
ipython==9.0.2 ; python_version >= "3.12"
|
68 |
+
jaraco-classes==3.4.0 ; python_version >= "3.12"
|
69 |
+
jaraco-context==6.0.1 ; python_version >= "3.12"
|
70 |
+
jaraco-functools==4.1.0 ; python_version >= "3.12"
|
71 |
+
jedi==0.19.2 ; python_version >= "3.12"
|
72 |
+
jeepney==0.9.0 ; python_version >= "3.12" and sys_platform == "linux"
|
73 |
+
jinja2==3.1.6 ; python_version >= "3.12"
|
74 |
+
joblib==1.4.2 ; python_version >= "3.12"
|
75 |
+
jsonschema-specifications==2024.10.1 ; python_version >= "3.12"
|
76 |
+
jsonschema==4.23.0 ; python_version >= "3.12"
|
77 |
+
jupyter-client==8.6.3 ; python_version >= "3.12"
|
78 |
+
jupyter-core==5.7.2 ; python_version >= "3.12"
|
79 |
+
kagglehub==0.3.10 ; python_version >= "3.12"
|
80 |
+
keras==3.9.0 ; python_version >= "3.12"
|
81 |
+
keyring==25.6.0 ; python_version >= "3.12"
|
82 |
+
kiwisolver==1.4.8 ; python_version >= "3.12"
|
83 |
+
libclang==18.1.1 ; python_version >= "3.12"
|
84 |
+
lightning-utilities==0.14.1 ; python_version >= "3.12"
|
85 |
+
locket==1.0.0 ; python_version >= "3.12"
|
86 |
+
lxml==5.3.1 ; python_version >= "3.12"
|
87 |
+
markdown-it-py==3.0.0 ; python_version >= "3.12"
|
88 |
+
markdown==3.7 ; python_version >= "3.12"
|
89 |
+
markupsafe==3.0.2 ; python_version >= "3.12"
|
90 |
+
matplotlib-inline==0.1.7 ; python_version >= "3.12"
|
91 |
+
matplotlib==3.10.1 ; python_version >= "3.12"
|
92 |
+
mdurl==0.1.2 ; python_version >= "3.12"
|
93 |
+
ml-dtypes==0.5.1 ; python_version >= "3.12"
|
94 |
+
more-itertools==10.6.0 ; python_version >= "3.12"
|
95 |
+
mpi4py==4.0.3 ; python_version >= "3.12"
|
96 |
+
mpmath==1.3.0 ; python_version >= "3.12"
|
97 |
+
msgpack==1.1.0 ; python_version >= "3.12"
|
98 |
+
multidict==6.1.0 ; python_version >= "3.12"
|
99 |
+
multiprocess==0.70.16 ; python_version >= "3.12"
|
100 |
+
namex==0.0.8 ; python_version >= "3.12"
|
101 |
+
narwhals==1.30.0 ; python_version >= "3.12"
|
102 |
+
nest-asyncio==1.6.0 ; python_version >= "3.12"
|
103 |
+
networkx==3.4.2 ; python_version >= "3.12"
|
104 |
+
nltk==3.9.1 ; python_version >= "3.12"
|
105 |
+
numpy==2.1.3 ; python_version >= "3.12"
|
106 |
+
nvidia-cublas-cu12==12.4.5.8 ; platform_system == "Linux" and platform_machine == "x86_64" and python_version >= "3.12"
|
107 |
+
nvidia-cuda-cupti-cu12==12.4.127 ; platform_system == "Linux" and platform_machine == "x86_64" and python_version >= "3.12"
|
108 |
+
nvidia-cuda-nvrtc-cu12==12.4.127 ; platform_system == "Linux" and platform_machine == "x86_64" and python_version >= "3.12"
|
109 |
+
nvidia-cuda-runtime-cu12==12.4.127 ; platform_system == "Linux" and platform_machine == "x86_64" and python_version >= "3.12"
|
110 |
+
nvidia-cudnn-cu12==9.1.0.70 ; platform_system == "Linux" and platform_machine == "x86_64" and python_version >= "3.12"
|
111 |
+
nvidia-cufft-cu12==11.2.1.3 ; platform_system == "Linux" and platform_machine == "x86_64" and python_version >= "3.12"
|
112 |
+
nvidia-curand-cu12==10.3.5.147 ; platform_system == "Linux" and platform_machine == "x86_64" and python_version >= "3.12"
|
113 |
+
nvidia-cusolver-cu12==11.6.1.9 ; platform_system == "Linux" and platform_machine == "x86_64" and python_version >= "3.12"
|
114 |
+
nvidia-cusparse-cu12==12.3.1.170 ; platform_system == "Linux" and platform_machine == "x86_64" and python_version >= "3.12"
|
115 |
+
nvidia-cusparselt-cu12==0.6.2 ; platform_system == "Linux" and platform_machine == "x86_64" and python_version >= "3.12"
|
116 |
+
nvidia-nccl-cu12==2.21.5 ; platform_system == "Linux" and platform_machine == "x86_64" and python_version >= "3.12"
|
117 |
+
nvidia-nvjitlink-cu12==12.4.127 ; platform_system == "Linux" and platform_machine == "x86_64" and python_version >= "3.12"
|
118 |
+
nvidia-nvtx-cu12==12.4.127 ; platform_system == "Linux" and platform_machine == "x86_64" and python_version >= "3.12"
|
119 |
+
onnx==1.17.0 ; python_version >= "3.12"
|
120 |
+
openpyxl==3.1.5 ; python_version >= "3.12"
|
121 |
+
opt-einsum==3.4.0 ; python_version >= "3.12"
|
122 |
+
optree==0.14.1 ; python_version >= "3.12"
|
123 |
+
packaging==24.2 ; python_version >= "3.12"
|
124 |
+
pandas==2.2.3 ; python_version >= "3.12"
|
125 |
+
parso==0.8.4 ; python_version >= "3.12"
|
+partd==1.4.2 ; python_version >= "3.12"
+pbs-installer==2025.3.11 ; python_version >= "3.12"
+peft==0.14.0 ; python_version >= "3.12"
+pexpect==4.9.0 ; python_version >= "3.12" and sys_platform != "win32" and sys_platform != "emscripten"
+pillow==11.1.0 ; python_version >= "3.12"
+pkginfo==1.12.1.2 ; python_version >= "3.12"
+platformdirs==4.3.6 ; python_version >= "3.12"
+plotly==6.0.0 ; python_version >= "3.12"
+portalocker==3.1.1 ; python_version >= "3.12"
+prompt-toolkit==3.0.50 ; python_version >= "3.12"
+propcache==0.3.0 ; python_version >= "3.12"
+protobuf==5.29.3 ; python_version >= "3.12"
+psutil==7.0.0 ; python_version >= "3.12"
+ptyprocess==0.7.0 ; python_version >= "3.12" and sys_platform != "win32" and sys_platform != "emscripten"
+pure-eval==0.2.3 ; python_version >= "3.12"
+pyarrow==19.0.1 ; python_version >= "3.12"
+pycparser==2.22 ; (sys_platform == "linux" or implementation_name == "pypy" or platform_python_implementation == "PyPy") and python_version >= "3.12"
+pycurl==7.45.6 ; python_version >= "3.12"
+pydeck==0.9.1 ; python_version >= "3.12"
+pygments==2.19.1 ; python_version >= "3.12"
+pyparsing==3.2.1 ; python_version >= "3.12"
+pyproject-hooks==1.2.0 ; python_version >= "3.12"
+python-dateutil==2.9.0.post0 ; python_version >= "3.12"
+pytorch-lightning==2.5.0.post0 ; python_version >= "3.12"
+pytz==2025.1 ; python_version >= "3.12"
+pywin32-ctypes==0.2.3 ; python_version >= "3.12"
+pywin32==309 ; python_version >= "3.12"
+pyyaml==6.0.2 ; python_version >= "3.12"
+pyzmq==26.3.0 ; python_version >= "3.12"
+rapidfuzz==3.12.2 ; python_version >= "3.12"
+referencing==0.36.2 ; python_version >= "3.12"
+regex==2024.11.6 ; python_version >= "3.12"
+requests-toolbelt==1.0.0 ; python_version >= "3.12"
+requests==2.32.3 ; python_version >= "3.12"
+rich==13.9.4 ; python_version >= "3.12"
+rouge-score==0.1.2 ; python_version >= "3.12"
+rpds-py==0.23.1 ; python_version >= "3.12"
+sacrebleu==2.5.1 ; python_version >= "3.12"
+safetensors==0.5.3 ; python_version >= "3.12"
+scikit-learn==1.6.1 ; python_version >= "3.12"
+scipy==1.15.2 ; python_version >= "3.12"
+secretstorage==3.3.3 ; python_version >= "3.12" and sys_platform == "linux"
+sentence-transformers==3.4.1 ; python_version >= "3.12"
+sentencepiece==0.2.0 ; python_version >= "3.12"
+setuptools==76.0.0 ; python_version >= "3.12"
+shellingham==1.5.4 ; python_version >= "3.12"
+six==1.17.0 ; python_version >= "3.12"
+smmap==5.0.2 ; python_version >= "3.12"
+sniffio==1.3.1 ; python_version >= "3.12"
+sortedcontainers==2.4.0 ; python_version >= "3.12"
+soupsieve==2.6 ; python_version >= "3.12"
+stack-data==0.6.3 ; python_version >= "3.12"
+streamlit==1.43.2 ; python_version >= "3.12"
+sympy==1.13.1 ; python_version >= "3.12"
+tabulate==0.9.0 ; python_version >= "3.12"
+tblib==3.0.0 ; python_version >= "3.12"
+tenacity==9.0.0 ; python_version >= "3.12"
+tensorboard-data-server==0.7.2 ; python_version >= "3.12"
+tensorboard==2.19.0 ; python_version >= "3.12"
+tensorflow==2.19.0 ; python_version >= "3.12"
+termcolor==2.5.0 ; python_version >= "3.12"
+threadpoolctl==3.6.0 ; python_version >= "3.12"
+tiktoken==0.9.0 ; python_version >= "3.12"
+tokenizers==0.21.1 ; python_version >= "3.12"
+toml==0.10.2 ; python_version >= "3.12"
+tomlkit==0.13.2 ; python_version >= "3.12"
+toolz==1.0.0 ; python_version >= "3.12"
+torch==2.6.0 ; python_version >= "3.12"
+torchmetrics==1.6.3 ; python_version >= "3.12"
+torchvision==0.21.0 ; python_version >= "3.12"
+tornado==6.4.2 ; python_version >= "3.12"
+tqdm==4.67.1 ; python_version >= "3.12"
+traitlets==5.14.3 ; python_version >= "3.12"
+transformers==4.49.0 ; python_version >= "3.12"
+triton==3.2.0 ; platform_system == "Linux" and platform_machine == "x86_64" and python_version >= "3.12"
+trove-classifiers==2025.3.13.13 ; python_version >= "3.12"
+typing-extensions==4.12.2 ; python_version >= "3.12"
+tzdata==2025.1 ; python_version >= "3.12"
+urllib3==2.3.0 ; python_version >= "3.12"
+virtualenv==20.29.3 ; python_version >= "3.12"
+watchdog==6.0.0 ; python_version >= "3.12"
+wcwidth==0.2.13 ; python_version >= "3.12"
+werkzeug==3.1.3 ; python_version >= "3.12"
+wheel==0.45.1 ; python_version >= "3.12"
+wrapt==1.17.2 ; python_version >= "3.12"
+xxhash==3.5.0 ; python_version >= "3.12"
+yarl==1.18.3 ; python_version >= "3.12"
+zict==3.0.0 ; python_version >= "3.12"
+zipp==3.21.0 ; python_version >= "3.12"
+zstandard==0.23.0 ; python_version >= "3.12"
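Every pin in this export carries a PEP 508 environment marker, so `pip install -r requirements_windows.txt` skips any package whose marker is false on the target interpreter. A minimal sketch of evaluating such a marker programmatically with the `packaging` library (the marker string is copied from the `pexpect` pin above):

# Evaluate a PEP 508 environment marker against the current interpreter.
from packaging.markers import Marker

marker = Marker('python_version >= "3.12" and sys_platform != "win32" and sys_platform != "emscripten"')
print(marker.evaluate())  # False on Windows or under Pyodide, True on CPython 3.12+ elsewhere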
sentiment_analysis/config/stage1_models.json
CHANGED
@@ -8,6 +8,7 @@
     "model_class": "DebertaV2ForSequenceClassification",
     "problem_type": "multi_label_classification",
     "base_model": "microsoft/deberta-v3-base",
+    "base_model_class": "DebertaV2ForSequenceClassification",
     "num_labels": 3,
     "device": "cpu",
     "load_function": "load_model",
@@ -22,6 +23,22 @@
     "model_class": "SentimentModel",
     "problem_type": "multi_label_classification",
     "base_model": "microsoft/deberta-v3-base",
+    "base_model_class": "DebertaV2Model",
+    "num_labels": 3,
+    "device": "cpu",
+    "load_function": "load_model",
+    "predict_function": "predict"
+  },
+  "3": {
+    "name": "BERT Base Uncased Custom Model",
+    "type": "bert_base_uncased_custom",
+    "module_path": "hmv_cfg_base_stage1.model3",
+    "hf_location": "https://huggingface.co/tachygraphy-microtrext-norm-org/BERT-LV1-SentimentPolarities/resolve/main/saved_weights.pt",
+    "tokenizer_class": "AutoTokenizer",
+    "model_class": "BERT_architecture",
+    "problem_type": "multi_label_classification",
+    "base_model": "bert-base-uncased",
+    "base_model_class": "AutoModel",
     "num_labels": 3,
     "device": "cpu",
     "load_function": "load_model",
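The new entry "3" follows the same registry shape the loader already consumes: `module_path` plus `load_function` name what to import and call at runtime, and `hf_location` here points at a raw `.pt` checkpoint instead of a Hub repo id. A minimal sketch of resolving such an entry (illustrative only; the app's real resolution happens in `load_selected_model` in `sentiment_analysis_main.py`):

# Sketch: resolve a stage1_models.json registry entry via importlib.
import importlib
import json

with open("sentiment_analysis/config/stage1_models.json") as f:
    registry = json.load(f)

entry = registry["3"]
module = importlib.import_module(entry["module_path"])  # hmv_cfg_base_stage1.model3
load_fn = getattr(module, entry["load_function"])       # load_model
model, tokenizer = load_fn()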
sentiment_analysis/hmv_cfg_base_stage1/__pycache__/model1.cpython-312.pyc
CHANGED
Binary files a/sentiment_analysis/hmv_cfg_base_stage1/__pycache__/model1.cpython-312.pyc and b/sentiment_analysis/hmv_cfg_base_stage1/__pycache__/model1.cpython-312.pyc differ
sentiment_analysis/hmv_cfg_base_stage1/model1.py
CHANGED
@@ -16,6 +16,7 @@ MODEL_OPTIONS = {
         "model_class": "DebertaV2ForSequenceClassification",
         "problem_type": "multi_label_classification",
         "base_model": "microsoft/deberta-v3-base",
+        "base_model_class": "DebertaV2ForSequenceClassification",
         "num_labels": 3,
         "device": "cpu",
         "load_function": "load_model",
@@ -24,15 +25,16 @@ MODEL_OPTIONS = {
 }
 
 
-def load_model():
-    model_key = "1"
-    model_info = MODEL_OPTIONS[model_key]
-    hf_location = model_info["hf_location"]
-
-    tokenizer_class = globals()[model_info["tokenizer_class"]]
-    model_class = globals()[model_info["model_class"]]
+model_key = "1"
+model_info = MODEL_OPTIONS[model_key]
+hf_location = model_info["hf_location"]
+
+tokenizer_class = globals()[model_info["tokenizer_class"]]
+model_class = globals()[model_info["model_class"]]
 
+
+@st.cache_resource
+def load_model():
     tokenizer = tokenizer_class.from_pretrained(hf_location)
     print("Loading model 1")
     model = model_class.from_pretrained(hf_location,
@@ -71,5 +73,5 @@ def predict(text, model, tokenizer, device, max_len=128):
 
 
 if __name__ == "__main__":
-    model, tokenizer = load_model(
+    model, tokenizer = load_model()
     print("Model and tokenizer loaded successfully.")
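The substantive change in model1.py is caching: the registry lookups move to module scope and `load_model()` gains `@st.cache_resource`, so Streamlit reruns reuse the already-loaded weights instead of reloading them on every widget interaction; the `load_model(` call at the bottom also gets its missing closing parenthesis. A self-contained sketch of the caching pattern (the checkpoint name below is illustrative):

# Sketch of the @st.cache_resource pattern adopted above.
import streamlit as st
from transformers import AutoTokenizer, AutoModelForSequenceClassification

@st.cache_resource  # one copy per process, shared across script reruns
def load_model(checkpoint: str = "microsoft/deberta-v3-base"):
    tokenizer = AutoTokenizer.from_pretrained(checkpoint)
    model = AutoModelForSequenceClassification.from_pretrained(checkpoint, num_labels=3)
    return model, tokenizer

model, tokenizer = load_model()  # loads once, then served from the cache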
sentiment_analysis/hmv_cfg_base_stage1/model2.py
CHANGED
@@ -30,6 +30,7 @@ MODEL_OPTIONS = {
         "model_class": "SentimentModel",
         "problem_type": "multi_label_classification",
         "base_model": "microsoft/deberta-v3-base",
+        "base_model_class": "DebertaV2Model",
         "num_labels": 3,
         "device": "cpu",
         "load_function": "load_model",
@@ -37,83 +38,6 @@ MODEL_OPTIONS = {
     }
 }
 
-
-# class SentimentModel(nn.Module):
-#     def __init__(self, roberta_model=DebertaV2Model.from_pretrained(
-#         'microsoft/deberta-v3-base',
-#         device_map=torch.device("cuda" if torch.cuda.is_available() else "cpu")
-#     ), n_classes=3, dropout_rate=0.2):
-#         super(SentimentModel, self).__init__()
-
-#         self.roberta = roberta_model
-#         self.drop = nn.Dropout(p=dropout_rate)
-#         self.fc1 = nn.Linear(self.roberta.config.hidden_size, 256)  # Reduced neurons
-#         self.relu = nn.ReLU()
-#         self.out = nn.Linear(256, n_classes)
-
-#     def forward(self, input_ids, attention_mask):
-#         output = self.roberta(input_ids=input_ids, attention_mask=attention_mask)
-#         cls_token_state = output.last_hidden_state[:, 0, :]
-#         output = self.drop(cls_token_state)
-#         output = self.relu(self.fc1(output))
-#         return self.out(output)
-
-#     def save_pretrained(self, save_directory):
-#         os.makedirs(save_directory, exist_ok=True)
-
-#         # Save model weights using safetensors
-#         model_weights = self.state_dict()
-#         save_file(model_weights, os.path.join(save_directory, "model.safetensors"))
-
-#         # Save model config
-#         config = {
-#             "hidden_size": self.roberta.config.hidden_size,
-#             "num_labels": self.out.out_features,
-#             "dropout_rate": self.drop.p,
-#             "roberta_model": self.roberta.name_or_path
-#         }
-#         with open(os.path.join(save_directory, "config.json"), "w") as f:
-#             json.dump(config, f)
-
-#         print(f"Model saved in {save_directory}")
-
-#     @classmethod
-#     def load_pretrained(cls, model_path_or_repo, roberta_model):
-#         # if model_path_or_repo.startswith("http") or "/" not in model_path_or_repo:
-#         #     # Load from Hugging Face Hub
-#         #     model_config_path = hf_hub_download(model_path_or_repo, "config.json")
-#         #     model_weights_path = hf_hub_download(model_path_or_repo, "model.safetensors")
-#         # else:
-#         #     # Load from local directory
-#         #     model_config_path = os.path.join(model_path_or_repo, "config.json")
-#         #     model_weights_path = os.path.join(model_path_or_repo, "model.safetensors")
-
-#         model_config_path = hf_hub_download(model_path_or_repo, "config.json")
-#         model_weights_path = hf_hub_download(model_path_or_repo, "model.safetensors")
-
-#         # Load model config
-#         with open(model_config_path, "r") as f:
-#             config = json.load(f)
-
-#         # Load RoBERTa model
-#         roberta_model = DebertaV2Model.from_pretrained(config["roberta_model"])
-
-#         # Initialize SentimentModel
-#         model = cls(
-#             roberta_model,
-#             n_classes=config["num_labels"],
-#             dropout_rate=config["dropout_rate"]
-#         )
-
-#         # Load safetensors weights
-#         with safe_open(model_weights_path, framework="pt", device="cpu") as f:
-#             model_weights = {key: f.get_tensor(key) for key in f.keys()}
-#         model.load_state_dict(model_weights)
-
-#         print(f"Model loaded from {model_path_or_repo}")
-#         return model
-
-
 class SentimentModel(nn.Module):
     def __init__(self, roberta_model, n_classes=3, dropout_rate=0.2):
         super(SentimentModel, self).__init__()
@@ -176,42 +100,19 @@ class SentimentModel(nn.Module):
 
     print(f"Model loaded from {model_path_or_repo}")
     return model
+
 
-
-
-# def load_pretrained(model_path_or_repo):
-
-#     model_config_path = hf_hub_download(model_path_or_repo, "config.json")
-#     model_weights_path = hf_hub_download(model_path_or_repo, "model.safetensors")
-
-#     with open(model_config_path, "r") as f:
-#         config = json.load(f)
-
-#     roberta_model = DebertaV2Model.from_pretrained(
-#         config["roberta_model"],
-#     )
-
-#     model = SentimentModel(
-#         roberta_model, n_classes=config["num_labels"], dropout_rate=config["dropout_rate"]
-#     )
-
-#     with safe_open(model_weights_path, framework="pt", device="cpu") as f:
-#         model_weights = {key: f.get_tensor(key) for key in f.keys()}
-#     model.load_state_dict(model_weights)
-
-#     print(f"Model loaded from {model_path_or_repo}")
-#     return model
+model_key = "2"
+model_info = MODEL_OPTIONS[model_key]
+hf_location = model_info["hf_location"]
+base_model = model_info["base_model"]
 
+tokenizer_class = globals()[model_info["tokenizer_class"]]
+model_class = globals()[model_info["model_class"]]
 
-
+
+@st.cache_resource
 def load_model():
-    model_key = "2"
-    model_info = MODEL_OPTIONS[model_key]
-    hf_location = model_info["hf_location"]
-
-    tokenizer_class = globals()[model_info["tokenizer_class"]]
-    model_class = globals()[model_info["model_class"]]
     tokenizer = tokenizer_class.from_pretrained(hf_location)
     print("Loading model 2")
     model = SentimentModel.load_pretrained(hf_location)
@@ -227,7 +128,6 @@ def predict(text, model, tokenizer, device, max_len=128):
     # Tokenize and pad the input text
     inputs = tokenizer(
         text,
-        None,
         add_special_tokens=True,
         padding=True,
         truncation=False,
@@ -246,5 +146,112 @@ def predict(text, model, tokenizer, device, max_len=128):
 
 
 if __name__ == "__main__":
-    model, tokenizer = load_model(
+    model, tokenizer = load_model()
     print("Model and tokenizer loaded successfully.")
+
+
+### COMMENTED CODE ###
+
+
+# @st.cache_resource
+
+# def load_pretrained(model_path_or_repo):
+
+#     model_config_path = hf_hub_download(model_path_or_repo, "config.json")
+#     model_weights_path = hf_hub_download(model_path_or_repo, "model.safetensors")
+
+#     with open(model_config_path, "r") as f:
+#         config = json.load(f)
+
+#     roberta_model = DebertaV2Model.from_pretrained(
+#         config["roberta_model"],
+#     )
+
+#     model = SentimentModel(
+#         roberta_model, n_classes=config["num_labels"], dropout_rate=config["dropout_rate"]
+#     )
+
+#     with safe_open(model_weights_path, framework="pt", device="cpu") as f:
+#         model_weights = {key: f.get_tensor(key) for key in f.keys()}
+#     model.load_state_dict(model_weights)
+
+#     print(f"Model loaded from {model_path_or_repo}")
+#     return model
+
+
+
+
+# class SentimentModel(nn.Module):
+#     def __init__(self, roberta_model=DebertaV2Model.from_pretrained(
+#         'microsoft/deberta-v3-base',
+#         device_map=torch.device("cuda" if torch.cuda.is_available() else "cpu")
+#     ), n_classes=3, dropout_rate=0.2):
+#         super(SentimentModel, self).__init__()
+
+#         self.roberta = roberta_model
+#         self.drop = nn.Dropout(p=dropout_rate)
+#         self.fc1 = nn.Linear(self.roberta.config.hidden_size, 256)  # Reduced neurons
+#         self.relu = nn.ReLU()
+#         self.out = nn.Linear(256, n_classes)
+
+#     def forward(self, input_ids, attention_mask):
+#         output = self.roberta(input_ids=input_ids, attention_mask=attention_mask)
+#         cls_token_state = output.last_hidden_state[:, 0, :]
+#         output = self.drop(cls_token_state)
+#         output = self.relu(self.fc1(output))
+#         return self.out(output)
+
+#     def save_pretrained(self, save_directory):
+#         os.makedirs(save_directory, exist_ok=True)
+
+#         # Save model weights using safetensors
+#         model_weights = self.state_dict()
+#         save_file(model_weights, os.path.join(save_directory, "model.safetensors"))
+
+#         # Save model config
+#         config = {
+#             "hidden_size": self.roberta.config.hidden_size,
+#             "num_labels": self.out.out_features,
+#             "dropout_rate": self.drop.p,
+#             "roberta_model": self.roberta.name_or_path
+#         }
+#         with open(os.path.join(save_directory, "config.json"), "w") as f:
+#             json.dump(config, f)
+
+#         print(f"Model saved in {save_directory}")
+
+#     @classmethod
+#     def load_pretrained(cls, model_path_or_repo, roberta_model):
+#         # if model_path_or_repo.startswith("http") or "/" not in model_path_or_repo:
+#         #     # Load from Hugging Face Hub
+#         #     model_config_path = hf_hub_download(model_path_or_repo, "config.json")
+#         #     model_weights_path = hf_hub_download(model_path_or_repo, "model.safetensors")
+#         # else:
+#         #     # Load from local directory
+#         #     model_config_path = os.path.join(model_path_or_repo, "config.json")
+#         #     model_weights_path = os.path.join(model_path_or_repo, "model.safetensors")
+
+#         model_config_path = hf_hub_download(model_path_or_repo, "config.json")
+#         model_weights_path = hf_hub_download(model_path_or_repo, "model.safetensors")
+
+#         # Load model config
+#         with open(model_config_path, "r") as f:
+#             config = json.load(f)
+
+#         # Load RoBERTa model
+#         roberta_model = DebertaV2Model.from_pretrained(config["roberta_model"])
+
+#         # Initialize SentimentModel
+#         model = cls(
+#             roberta_model,
+#             n_classes=config["num_labels"],
+#             dropout_rate=config["dropout_rate"]
+#         )
+
+#         # Load safetensors weights
+#         with safe_open(model_weights_path, framework="pt", device="cpu") as f:
+#             model_weights = {key: f.get_tensor(key) for key in f.keys()}
+#         model.load_state_dict(model_weights)
+
+#         print(f"Model loaded from {model_path_or_repo}")
+#         return model
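model2.py gets the same treatment (module-level registry lookup, `@st.cache_resource`, the fixed `load_model()` call), drops the redundant `None` passed to the tokenizer (its optional `text_pair` argument), and moves the dead drafts below a `### COMMENTED CODE ###` marker instead of deleting them. The live `SentimentModel` keeps its safetensors persistence; a minimal sketch of that round trip (paths and the linear head are illustrative stand-ins):

# Sketch: the safetensors save/load round trip SentimentModel relies on.
import torch.nn as nn
from safetensors.torch import save_file, load_file

head = nn.Linear(768, 3)  # stand-in for the classifier head

# Save: state_dict -> flat tensor file (no pickle, safe to fetch from the Hub).
save_file(head.state_dict(), "model.safetensors")

# Load: back onto CPU, then into the module.
state = load_file("model.safetensors", device="cpu")
head.load_state_dict(state)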
sentiment_analysis/hmv_cfg_base_stage1/model3.py
CHANGED
@@ -8,7 +8,7 @@ import joblib
 import torch
 import torch.nn as nn
 import torch.functional as F
-from transformers import DebertaV2Model, DebertaV2Tokenizer
+from transformers import DebertaV2Model, DebertaV2Tokenizer, AutoModel, AutoTokenizer
 import safetensors
 # from safetensors import load_file, save_file
 import json
@@ -23,13 +23,14 @@ CONFIG_STAGE1 = os.path.join(BASE_DIR, "..", "config", "stage1_models.json")
 MODEL_OPTIONS = {
     "3": {
         "name": "BERT Base Uncased Custom Model",
-        "type": "
-        "module_path": "hmv_cfg_base_stage1.
-        "hf_location": "tachygraphy-microtrext-norm-org/
-        "tokenizer_class": "
+        "type": "bert_base_uncased_custom",
+        "module_path": "hmv_cfg_base_stage1.model3",
+        "hf_location": "https://huggingface.co/tachygraphy-microtrext-norm-org/BERT-LV1-SentimentPolarities/resolve/main/saved_weights.pt",
+        "tokenizer_class": "AutoTokenizer",
         "model_class": "BERT_architecture",
         "problem_type": "multi_label_classification",
-        "base_model": "
+        "base_model": "bert-base-uncased",
+        "base_model_class": "AutoModel",
         "num_labels": 3,
         "device": "cpu",
         "load_function": "load_model",
@@ -37,26 +38,23 @@ MODEL_OPTIONS = {
     }
 }
 
-
 class BERT_architecture(nn.Module):
 
-    def __init__(self, bert
-                 device_map=torch.device("cuda" if torch.cuda.is_available() else "cpu"))):
+    def __init__(self, bert):
        super(BERT_architecture, self).__init__()
        self.bert = bert
 
        self.dropout = nn.Dropout(0.3)  # Increased dropout for regularization
        self.layer_norm = nn.LayerNorm(768)  # Layer normalization
 
        self.fc1 = nn.Linear(768, 256)  # Dense layer
        self.fc2 = nn.Linear(256, 3)  # Output layer with 3 classes
 
        self.relu = nn.ReLU()
        self.softmax = nn.LogSoftmax(dim=1)
 
    def forward(self, sent_id, mask, token_type_ids):
-        _, cls_hs = self.bert(sent_id, attention_mask=mask,
-                              token_type_ids=token_type_ids, return_dict=False)
+        _, cls_hs = self.bert(sent_id, attention_mask=mask, token_type_ids=token_type_ids, return_dict=False)
        x = self.layer_norm(cls_hs)
        x = self.fc1(x)
        x = self.relu(x)
@@ -64,3 +62,85 @@ class BERT_architecture(nn.Module):
        x = self.fc2(x)
        x = self.softmax(x)
        return x
+
+
+model_key = "3"
+model_info = MODEL_OPTIONS[model_key]
+hf_location = model_info["hf_location"]
+base_model = model_info["base_model"]
+base_model_class = globals()[model_info["base_model_class"]]
+
+tokenizer_class = globals()[model_info["tokenizer_class"]]
+model_class = globals()[model_info["model_class"]]
+
+
+@st.cache_resource
+def load_model():
+    bert = base_model_class.from_pretrained(base_model)
+    tokenizer = tokenizer_class.from_pretrained(base_model)
+    print("Loading model 3")
+
+    model = BERT_architecture(bert)
+    state_dict = torch.hub.load_state_dict_from_url(
+        hf_location,
+        map_location = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+    )
+    model.load_state_dict(state_dict)
+    print("Model 3 loaded")
+
+    return model, tokenizer
+
+
+def predict(input_text, model, tokenizer, device, max_seq_len=128):
+    inputs = tokenizer(
+        input_text,
+        add_special_tokens=True,
+        padding=True,
+        truncation=True,  # Ensure dynamic length truncation
+        max_length=max_seq_len,
+        return_attention_mask=True,
+        return_token_type_ids=True,
+        return_tensors='pt',
+    ).to(device)
+
+    with torch.no_grad():
+        # outputs = model(**inputs)
+
+        outputs = model(
+            sent_id=inputs["input_ids"],             # input_ids → sent_id
+            mask=inputs["attention_mask"],           # attention_mask → mask
+            token_type_ids=inputs["token_type_ids"]  # token_type_ids → token_type_ids
+        )
+        # preds = outputs.cpu().numpy()
+        # pred = np.argmax(preds, axis=1)
+
+        # return pred
+
+        # Ensure output shape consistency
+        # if outputs.dim() == 1:
+        #     # Reshape to [1, num_classes] if it's a single prediction
+        #     outputs = outputs.unsqueeze(0)
+
+        # Apply softmax here if you need probabilities
+        # probabilities = torch.softmax(outputs, dim=1).cpu().numpy()
+
+        ## TO RETURN ARGMAX VALUE
+        # pred_class = torch.argmax(outputs, dim=1).cpu().numpy()
+
+        # num_classes = 3
+
+        # probabilities = np.zeros((1, num_classes))
+        # probabilities[0, pred_class] = 1.0
+
+        ## TO RETURN PROBABILITIES FROM LOG SOFTMAX OF MODEL
+        probabilities = torch.exp(outputs).cpu().numpy()
+
+    print(probabilities)
+
+    return probabilities
+
+
+if __name__ == "__main__":
+    model, tokenizer = load_model()
+    print("Model and tokenizer loaded successfully.")
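model3.py wires in the third registry entry. Because `saved_weights.pt` is a bare `state_dict`, `load_model()` first rebuilds `BERT_architecture` around a fresh `bert-base-uncased` backbone, then pulls the tensors with `torch.hub.load_state_dict_from_url`. And since the network ends in `nn.LogSoftmax`, `predict()` recovers proper probabilities with `torch.exp`. A quick self-contained check of that identity:

# exp(log_softmax(x)) == softmax(x): each row is a valid probability distribution.
import torch

logits = torch.randn(2, 3)                    # batch of 2, 3 sentiment classes
log_probs = torch.log_softmax(logits, dim=1)  # what BERT_architecture outputs
probs = torch.exp(log_probs)                  # what predict() returns

assert torch.allclose(probs.sum(dim=1), torch.ones(2))
assert torch.allclose(probs, torch.softmax(logits, dim=1))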
sentiment_analysis/sentiment_analysis_main.py
CHANGED
@@ -29,7 +29,7 @@ current_tokenizer = None
 
 # Enabling Resource caching
 
-@st.cache_resource
+# @st.cache_resource
 def load_model_config():
     with open(CONFIG_STAGE1, "r") as f:
         model_data = json.load(f)
@@ -91,9 +91,9 @@ def free_memory():
 def load_selected_model(model_name):
     global current_model, current_tokenizer
 
-    st.cache_resource.clear()
+    # st.cache_resource.clear()
 
-    free_memory()
+    # free_memory()
 
     # st.write("DEBUG: Available Models:", MODEL_OPTIONS.keys())  # ✅ See available models
     # st.write("DEBUG: Selected Model:", MODEL_OPTIONS[model_name])  # ✅ Check selected model
@@ -123,6 +123,41 @@ def load_selected_model(model_name):
     return model, tokenizer, predict_func
 
 
+def disable_ui():
+    st.components.v1.html(
+        """
+        <style>
+        #ui-disable-overlay {
+            position: fixed;
+            top: 0;
+            left: 0;
+            width: 100vw;
+            height: 100vh;
+            background-color: rgba(200, 200, 200, 0.5);
+            z-index: 9999;
+        }
+        </style>
+        <div id="ui-disable-overlay"></div>
+        """,
+        height=0,
+        scrolling=False
+    )
+
+
+def enable_ui():
+    st.components.v1.html(
+        """
+        <script>
+        var overlay = document.getElementById("ui-disable-overlay");
+        if (overlay) {
+            overlay.parentNode.removeChild(overlay);
+        }
+        </script>
+        """,
+        height=0,
+        scrolling=False
+    )
+
+
 # Function to increment progress dynamically
 def update_progress(progress_bar, start, end, delay=0.1):
     for i in range(start, end + 1, 5):  # Increment in steps of 5%
@@ -156,12 +191,10 @@ if "model_changed" not in st.session_state:
 if "text_changed" not in st.session_state:
     st.session_state.text_changed = False
 if "processing" not in st.session_state:
-    st.session_state.
+    st.session_state.disabled = False
 
 
 def show_sentiment_analysis():
-    st.cache_resource.clear()
-    free_memory()
 
     st.title("Stage 1: Sentiment Polarity Analysis")
     st.write("This section handles sentiment analysis.")
@@ -182,6 +215,9 @@ def show_sentiment_analysis():
     # 2. The text has changed OR the model has changed
     if user_input.strip() and (st.session_state.text_changed or st.session_state.model_changed):
 
+        # disable_ui()
+
+
         # Reset session state flags
         st.session_state.last_processed_input = user_input
         st.session_state.model_changed = False
@@ -228,10 +264,11 @@ def show_sentiment_analysis():
         # Display raw predictions
         st.write(f"**Predicted Sentiment Scores:** {predictions_array}")
 
+        # enable_ui()
+
         # Display binary classification result
         st.write(f"**Predicted Sentiment:**")
-        st.write(
-            f"**NEGATIVE:** {binary_predictions[0]}, **NEUTRAL:** {binary_predictions[1]}, **POSITIVE:** {binary_predictions[2]}")
+        st.write(f"**NEGATIVE:** {binary_predictions[0]}, **NEUTRAL:** {binary_predictions[1]}, **POSITIVE:** {binary_predictions[2]}")
         # st.write(f"**NEUTRAL:** {binary_predictions[1]}")
         # st.write(f"**POSITIVE:** {binary_predictions[2]}")
 
@@ -272,6 +309,281 @@ if __name__ == "__main__":
     show_sentiment_analysis()
 
 
+
+
+
+
+
+
+
+#########
+
+
+# def show_sentiment_analysis():
+#     st.cache_resource.clear()
+#     free_memory()
+
+#     st.title("Stage 1: Sentiment Polarity Analysis")
+#     st.write("This section handles sentiment analysis.")
+
+#     # Model selection with change detection
+#     selected_model = st.selectbox(
+#         "Choose a model:", list(MODEL_OPTIONS.keys()), key="selected_model", on_change=on_model_change
+#     )
+
+#     # Text input with change detection
+#     user_input = st.text_input(
+#         "Enter text for sentiment analysis:", key="user_input", on_change=on_text_change
+#     )
+#     user_input_copy = user_input
+
+#     # Only run inference if:
+#     # 1. The text is NOT empty
+#     # 2. The text has changed OR the model has changed
+#     if user_input.strip() and (st.session_state.text_changed or st.session_state.model_changed):
+
+#         # Reset session state flags
+#         st.session_state.last_processed_input = user_input
+#         st.session_state.model_changed = False
+#         st.session_state.text_changed = False  # Store selected model
+
+#         # ADD A DYNAMIC PROGRESS BAR
+#         progress_bar = st.progress(0)
+#         update_progress(progress_bar, 0, 10)
+#         # status_text = st.empty()
+
+#         # update_progress(0, 10)
+#         # status_text.text("Loading model...")
+
+#         # Make prediction
+
+#         # model, tokenizer = load_model()
+#         # model, tokenizer = load_selected_model(selected_model)
+#         with st.spinner("Please wait..."):
+#             model, tokenizer, predict_func = load_selected_model(selected_model)
+#             device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+
+#             if model is None:
+#                 st.error(
+#                     "⚠️ Error: Model failed to load! Check model selection or configuration.")
+#                 st.stop()
+
+#             model.to(device)
+
+#             # predictions = predict(user_input, model, tokenizer, device)
+
+#             predictions = predict_func(user_input, model, tokenizer, device)
+
+#         # Squeeze predictions to remove extra dimensions
+#         predictions_array = predictions.squeeze()
+
+#         # Convert to binary predictions (argmax)
+#         binary_predictions = np.zeros_like(predictions_array)
+#         max_indices = np.argmax(predictions_array)
+#         binary_predictions[max_indices] = 1
+
+#         # Update progress bar for prediction and model loading
+#         update_progress(progress_bar, 10, 100)
+
+#         # Display raw predictions
+#         st.write(f"**Predicted Sentiment Scores:** {predictions_array}")
+
+#         # Display binary classification result
+#         st.write(f"**Predicted Sentiment:**")
+#         st.write(
+#             f"**NEGATIVE:** {binary_predictions[0]}, **NEUTRAL:** {binary_predictions[1]}, **POSITIVE:** {binary_predictions[2]}")
+#         # st.write(f"**NEUTRAL:** {binary_predictions[1]}")
+#         # st.write(f"**POSITIVE:** {binary_predictions[2]}")
+
+#         # 1️⃣ **Polar Plot (Plotly)**
+#         sentiment_polarities = predictions_array.tolist()
+#         fig_polar = px.line_polar(
+#             pd.DataFrame(dict(r=sentiment_polarities,
+#                               theta=SENTIMENT_POLARITY_LABELS)),
+#             r='r', theta='theta', line_close=True
+#         )
+#         st.plotly_chart(fig_polar)
+
+#         # 2️⃣ **Normalized Horizontal Bar Chart (Matplotlib)**
+#         normalized_predictions = predictions_array / predictions_array.sum()
+
+#         fig, ax = plt.subplots(figsize=(8, 2))
+#         left = 0
+#         for i in range(len(normalized_predictions)):
+#             ax.barh(0, normalized_predictions[i], color=plt.cm.tab10(
+#                 i), left=left, label=SENTIMENT_POLARITY_LABELS[i])
+#             left += normalized_predictions[i]
+
+#         # Configure the chart
+#         ax.set_xlim(0, 1)
+#         ax.set_yticks([])
+#         ax.set_xticks(np.arange(0, 1.1, 0.1))
+#         ax.legend(loc='upper center', bbox_to_anchor=(
+#             0.5, -0.15), ncol=len(SENTIMENT_POLARITY_LABELS))
+#         plt.title("Sentiment Polarity Prediction Distribution")
+
+#         # Display in Streamlit
+#         st.pyplot(fig)
+
+#         progress_bar.empty()
+
+######
+########
+
+
+# def show_sentiment_analysis():
+#     st.cache_resource.clear()
+#     free_memory()
+
+#     st.title("Stage 1: Sentiment Polarity Analysis")
+#     st.write("This section handles sentiment analysis.")
+
+#     # Model selection with change detection
+#     selected_model = st.selectbox(
+#         "Choose a model:", list(MODEL_OPTIONS.keys()), key="selected_model", on_change=on_model_change, disabled=st.session_state.disabled
+#     )
+
+#     # Text input with change detection
+#     user_input = st.text_input(
+#         "Enter text for sentiment analysis:", key="user_input", on_change=on_text_change, disabled=st.session_state.disabled
+#     )
+#     user_input_copy = user_input
+
+#     # progress_bar = st.progress(0)
+#     progress_bar = st.empty()
+
+#     if st.session_state.disabled is False and st.session_state.predictions is not None:
+#         st.write(f"**Predicted Sentiment Scores:** {st.session_state.predictions}")
+#         st.write(f"**NEGATIVE:** {st.session_state.binary_predictions[0]}, **NEUTRAL:** {st.session_state.binary_predictions[1]}, **POSITIVE:** {st.session_state.binary_predictions[2]}")
+#         st.plotly_chart(st.session_state.polar_plot)
+#         st.pyplot(st.session_state.bar_chart)
+
+#         update_progress(progress_bar, 95, 100)
+
+#         st.session_state.predictions = None
+#         st.session_state.binary_predictions = None
+#         st.session_state.polar_plot = None
+#         st.session_state.bar_chart = None
+
+#         st.session_state.disabled = False
+
+#         progress_bar.empty()
+
+
+#     if user_input.strip() and (st.session_state.text_changed or st.session_state.model_changed) and st.session_state.disabled is False:
+#         st.session_state.disabled = True
+#         st.rerun()
+#         return
+
+
+#     if user_input.strip() and (st.session_state.text_changed or st.session_state.model_changed) and st.session_state.disabled is True:
+#         # Mark processing as True to
+
+#         # Reset session state flags
+#         st.session_state.last_processed_input = user_input
+#         st.session_state.model_changed = False
+#         st.session_state.text_changed = False  # Store selected model
+
+#         # ADD A DYNAMIC PROGRESS BAR
+#         progress_bar = st.progress(0)
+#         update_progress(progress_bar, 0, 10)
+#         # status_text = st.empty()
+
+#         # update_progress(0, 10)
+#         # status_text.text("Loading model...")
+
+#         # Make prediction
+
+#         # model, tokenizer = load_model()
+#         # model, tokenizer = load_selected_model(selected_model)
+#         with st.spinner("Please wait..."):
+#             model, tokenizer, predict_func = load_selected_model(selected_model)
+#             device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+
+#             if model is None:
+#                 st.error(
+#                     "⚠️ Error: Model failed to load! Check model selection or configuration.")
+#                 st.session_state.disabled = False
+#                 st.rerun()
+#                 st.stop()
+#                 return
+
+#             model.to(device)
+
+#             # predictions = predict(user_input, model, tokenizer, device)
+
+#             predictions = predict_func(user_input, model, tokenizer, device)
+
+#         # Squeeze predictions to remove extra dimensions
+#         predictions_array = predictions.squeeze()
+
+#         # Convert to binary predictions (argmax)
+#         binary_predictions = np.zeros_like(predictions_array)
+#         max_indices = np.argmax(predictions_array)
+#         binary_predictions[max_indices] = 1
+
+#         # Update progress bar for prediction and model loading
+#         update_progress(progress_bar, 10, 75)
+
+#         # Display raw predictions
+#         # st.write(f"**Predicted Sentiment Scores:** {predictions_array}")
+#         st.session_state.predictions = predictions_array
+
+#         # Display binary classification result
+#         # st.write(f"**Predicted Sentiment:**")
+#         # st.write(f"**NEGATIVE:** {binary_predictions[0]}, **NEUTRAL:** {binary_predictions[1]}, **POSITIVE:** {binary_predictions[2]}")
+#         st.session_state.binary_predictions = binary_predictions
+
+
+#         # 1️⃣ **Polar Plot (Plotly)**
+#         sentiment_polarities = predictions_array.tolist()
+#         fig_polar = px.line_polar(
+#             pd.DataFrame(dict(r=sentiment_polarities,
+#                               theta=SENTIMENT_POLARITY_LABELS)),
+#             r='r', theta='theta', line_close=True
+#         )
+#         # st.plotly_chart(fig_polar)
+#         st.session_state.polar_plot = fig_polar
+
+#         # 2️⃣ **Normalized Horizontal Bar Chart (Matplotlib)**
+#         normalized_predictions = predictions_array / predictions_array.sum()
+
+#         fig, ax = plt.subplots(figsize=(8, 2))
+#         left = 0
+#         for i in range(len(normalized_predictions)):
+#             ax.barh(0, normalized_predictions[i], color=plt.cm.tab10(
+#                 i), left=left, label=SENTIMENT_POLARITY_LABELS[i])
+#             left += normalized_predictions[i]
+
+#         # Configure the chart
+#         ax.set_xlim(0, 1)
+#         ax.set_yticks([])
+#         ax.set_xticks(np.arange(0, 1.1, 0.1))
+#         ax.legend(loc='upper center', bbox_to_anchor=(
+#             0.5, -0.15), ncol=len(SENTIMENT_POLARITY_LABELS))
+#         # plt.title("Sentiment Polarity Prediction Distribution")
+#         # st.pyplot(fig)
+#         st.session_state.bar_chart = fig
+#         update_progress(progress_bar, 75, 95)
+
+#         # progress_bar.empty()
+
+#         if st.session_state.disabled is True:
+#             st.session_state.disabled = False
+#             st.rerun()
+#             return
+#         else:
+#             return
+
+
+
+#####
+
+
 ### COMMENTED OUT CODE ###
 
 
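In sentiment_analysis_main.py the aggressive `st.cache_resource.clear()` / `free_memory()` calls are commented out (they would defeat the new per-model caching), the `processing` session flag is now initialized as `st.session_state.disabled = False`, and two experimental `disable_ui()` / `enable_ui()` helpers are added but left disabled. Note that `st.components.v1.html` renders inside an iframe, so an overlay injected there cannot actually cover the parent page, which is likely why the commented-out alternative gates widgets through session state and `st.rerun()` instead. A minimal sketch of that session-state pattern (the `sleep` is an illustrative stand-in for model inference):

# Sketch of the disable-while-processing pattern from the commented block above.
import time
import streamlit as st

if "disabled" not in st.session_state:
    st.session_state.disabled = False

st.text_input("Enter text:", disabled=st.session_state.disabled)

if st.button("Analyze", disabled=st.session_state.disabled):
    st.session_state.disabled = True   # lock the widgets...
    st.rerun()                         # ...and redraw in the locked state

if st.session_state.disabled:
    time.sleep(2)                      # stand-in for model inference
    st.session_state.disabled = False
    st.rerun()                         # unlock once the work is done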