import gc
import psutil
import torch
import shutil
from transformers.utils.hub import TRANSFORMERS_CACHE
import streamlit as st
import os
import sys

sys.path.append(os.path.abspath(os.path.dirname(__file__)))

# Module-level handles to the currently loaded model and tokenizer;
# free_memory() below declares them global and releases them.
current_model = None
current_tokenizer = None


def free_memory():
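    """Release the loaded model/tokenizer, run garbage collection, and clear the CUDA and Hugging Face disk caches."""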
    global current_model, current_tokenizer

    if current_model is not None:
        del current_model
        current_model = None

    if current_tokenizer is not None:
        del current_tokenizer
        current_tokenizer = None

    gc.collect()

    if torch.cuda.is_available():
        torch.cuda.empty_cache()
        torch.cuda.ipc_collect()

    try:
        if not torch.cuda.is_available():
            # CPU-only environment: query system memory usage (the return value
            # is currently unused).
            psutil.virtual_memory()
    except Exception as e:
        print(f"Memory cleanup error: {e}")

    try:
        # Removing TRANSFORMERS_CACHE deletes every downloaded checkpoint;
        # models will be re-downloaded on next use.
        cache_dir = TRANSFORMERS_CACHE
        if os.path.exists(cache_dir):
            shutil.rmtree(cache_dir)
            print("Cache cleared!")
    except Exception as e:
        print(f"Cache cleanup error: {e}")
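

# Usage sketch (an assumption, not part of the original module): pages that load
# a checkpoint would call free_memory() before swapping models, for example:
#
#   free_memory()
#   model = AutoModelForSequenceClassification.from_pretrained(model_name)  # hypothetical names

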
def create_footer():
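    """Render the page footer: contributors, mentors, and project info in three columns."""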
    st.divider()

    col1, col2, col3 = st.columns([1, 1, 1])

    with col1:
        st.markdown("### Contributors")
        st.write("**Archisman Karmakar**")
        st.write("[LinkedIn](https://www.linkedin.com/in/archismankarmakar/) | [GitHub](https://www.github.com/ArchismanKarmakar) | [Kaggle](https://www.kaggle.com/archismancoder)")

        st.write("**Sumon Chatterjee**")
        st.write("[LinkedIn](https://www.linkedin.com/in/sumon-chatterjee-3b3b43227) | [GitHub](https://github.com/Sumon670) | [Kaggle](https://www.kaggle.com/sumonchatterjee)")

    with col2:
        st.markdown("### Mentors")
        st.write("**Prof. Anupam Mondal**")
        st.write("[LinkedIn](https://www.linkedin.com/in/anupam-mondal-ph-d-8a7a1a39/) | [Google Scholar](https://scholar.google.com/citations?user=ESRR9o4AAAAJ&hl=en) | [Website](https://sites.google.com/view/anupammondal/home)")

        st.write("**Prof. Sainik Kumar Mahata**")
        st.write("[LinkedIn](https://www.linkedin.com/in/mahatasainikk) | [Google Scholar](https://scholar.google.co.in/citations?user=OcJDM50AAAAJ&hl=en) | [Website](https://sites.google.com/view/sainik-kumar-mahata/home)")

    with col3:
        st.markdown("### About the Project")
        st.write("This is our **B.Tech final year** research project; the accompanying **journal** paper is yet to be published.")
        st.write("Built using **Streamlit**.")


def show_dashboard():
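    """Render the dashboard page: title, project overview, resource links, and the footer."""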
    st.title("Tachygraphy Micro-text Analysis & Normalization")
    st.write("""
Welcome to the Tachygraphy Micro-text Analysis & Normalization Project. This application is designed to analyze text data through three stages:
1. Sentiment Polarity Analysis
2. Emotion Mood-tag Analysis
3. Text Transformation & Normalization
""")

    st.write("""
- Training Source: [GitHub @ Tachygraphy Micro-text Analysis & Normalization](https://github.com/ArchismanKarmakar/Tachygraphy-Microtext-Analysis-And-Normalization)
- Kaggle Collections: [Kaggle @ Tachygraphy Micro-text Analysis & Normalization](https://www.kaggle.com/datasets/archismancoder/dataset-tachygraphy/data?select=Tachygraphy_MicroText-AIO-V3.xlsx)
- Hugging Face Org: [Hugging Face @ Tachygraphy Micro-text Analysis & Normalization](https://huggingface.co/tachygraphy-microtrext-norm-org)
- Deployment: [Streamlit + Hugging Face @ GitHub](https://github.com/ArchismanKarmakar/Tachygraphy-Microtext-Analysis-And-Normalization-Deployment-Source-HuggingFace_Streamlit_JPX14032025)
""")

    create_footer()


def main():
    show_dashboard()
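

# Assumed entry point: when this script is run directly (e.g. via `streamlit run`),
# render the dashboard.
if __name__ == "__main__":
    main()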