import base64
import json
import os
import time

import requests
import streamlit as st
import streamlit.components.v1 as components
from PIL import Image

# set_page_config must be the first Streamlit command executed in the script.
st.set_page_config(layout="wide")

# Load environment variables from a local .env file when python-dotenv is
# available (local development); hosted deployments inject the variables
# directly, so a missing package or .env is not an error.
try:
    from dotenv import load_dotenv
    load_dotenv()
except Exception:
    pass

# Hugging Face API token used to authorize Inference API requests.
# May be None when unset; requests will then be sent with "Bearer None".
HF_TOKEN = os.environ.get("HF_TOKEN")
# One-time initialisation of every session-state slot the app relies on.
_STATE_DEFAULTS = {
    "framework": "gen",          # which page to render: landing ("gen") or dashboard ("dash")
    "menu": "class",             # active dashboard tab
    "show_overlay": True,        # whether the prompt/suggestion overlay is visible
    "models": [],                # model catalogue for the active task
    "save_path": "",             # path of the most recently uploaded image
    "messages": [],              # chat transcript for the current chat
    "input_text": "",            # payload queued for the next model call
    "input_task": "",            # "text" or "image" for the queued payload
    "generate_response": False,  # flag: a model response still needs generating
    "chat_id": "chat_1",         # default chat-history identifier
}
for _name, _default in _STATE_DEFAULTS.items():
    if _name not in st.session_state:
        st.session_state[_name] = _default

# CSS offsets (in unspecified units, consumed by injected styles) that move
# the layout depending on whether the overlay is currently shown.
if st.session_state.show_overlay:
    top = -6.75
    left = -5
else:
    left = -9
    top = -10

# Folder holding one JSON file per saved chat.
CHAT_DIR = "chat_histories"
os.makedirs(CHAT_DIR, exist_ok=True)
# Save messages to a file
def save_chat_history():
    """Persist the current transcript to CHAT_DIR/<chat_id>.json.

    No file is written when the transcript is empty, so chats that never
    received a message leave no history file behind.
    """
    if not st.session_state.messages:
        return
    target = f"{CHAT_DIR}/{st.session_state.chat_id}.json"
    with open(target, "w", encoding="utf-8") as fh:
        json.dump(st.session_state.messages, fh, ensure_ascii=False, indent=4)
#####################################################################################################
# Function to load data
def query_huggingface_model(selected_model: dict, input_data, input_type="text",
                            max_tokens=512, task="text-classification",
                            temperature=0.7, top_p=0.9, timeout=60):
    """Call a Hugging Face Inference API endpoint and return its parsed JSON.

    Parameters
    ----------
    selected_model : dict
        Catalogue entry with "url" (endpoint) and "model" (model id) keys.
    input_data : str
        The prompt text, or a path to an image file when input_type == "image".
    input_type : str
        "text" or "image"; any other value returns an {"error": ...} dict.
    max_tokens, temperature, top_p :
        Sampling settings, only included for the "text-generation" task.
    task : str
        Selects the payload shape for text inputs.
    timeout : float
        Seconds to wait for the HTTP request (prevents indefinite hangs).

    Returns
    -------
    dict | list
        The endpoint's JSON response, or {"error": message} on any failure.
    """
    API_URL = selected_model.get("url")
    headers = {"Authorization": f"Bearer {HF_TOKEN}"}
    try:
        if input_type == "text":
            if task == "text-generation":
                # Chat-completion style payload expected by the router
                # "/chat/completions" endpoints.
                payload = {
                    "messages": [
                        {
                            "role": "user",
                            "content": input_data
                        }
                    ],
                    "max_tokens": max_tokens,
                    "temperature": temperature,
                    "top_p": top_p,
                    "model": selected_model.get("model")
                }
            else:
                # Classification-style endpoints take a bare "inputs" field.
                payload = {
                    "inputs": input_data,
                }
            response = requests.post(API_URL, headers=headers, json=payload, timeout=timeout)
        elif input_type == "image":
            # Image endpoints accept the raw bytes as the request body.
            with open(input_data, "rb") as f:
                data = f.read()
            response = requests.post(API_URL, headers=headers, data=data, timeout=timeout)
        else:
            return {"error": f"Unsupported input_type: {input_type}"}
        response.raise_for_status()
        return response.json()
    except (OSError, requests.exceptions.RequestException) as e:
        # Uniform error contract: unreadable image files and network/HTTP
        # failures are both reported as an error dict instead of crashing.
        return {"error": str(e)}
def extract_response_content(response):
    """Normalize the various Hugging Face response shapes into display data.

    Recognized shapes:
      * generation / image captioning: [{"generated_text": ...}]
      * text classification:           [[{"label": ..., "score": ...}, ...]]
      * OpenAI-style chat completion:  {"choices": [{"message": {"content": ...}}]}
      * error payload:                 {"error": ...}

    Returns a string for generated text and errors, a list of
    (label, score-rounded-to-3) tuples for classification, or
    "Unknown response format" for anything else.
    """
    if isinstance(response, list):
        # Text generation or image captioning.
        if response and isinstance(response[0], dict) and "generated_text" in response[0]:
            return response[0]["generated_text"]
        # Text classification: outer list wraps one inner list of
        # {"label", "score"} dicts.  Guard against an empty inner list
        # before probing its first element (previously raised IndexError).
        if (
            response
            and isinstance(response[0], list)
            and response[0]
            and "label" in response[0][0]
        ):
            return [(item["label"], round(item["score"], 3)) for item in response[0]]
    elif isinstance(response, dict):
        # OpenAI-style chat responses.
        if "choices" in response and isinstance(response["choices"], list):
            try:
                return response["choices"][0]["message"]["content"]
            except (KeyError, IndexError, TypeError):
                return "Error: Could not extract message from choices"
        if "error" in response:
            return f"Error: {response['error']}"
    return "Unknown response format"
# Load a specific chat
def load_chat_history(chat_id):
    """Replace the in-memory transcript with the saved history for chat_id.

    When no history file exists, shows a warning and hides the overlay
    instead of loading anything.
    """
    history_file = f"{CHAT_DIR}/{chat_id}.json"
    if not os.path.exists(history_file):
        st.warning(f"No history found for {chat_id}.")
        st.session_state.show_overlay = False
        return
    with open(history_file, "r", encoding="utf-8") as fh:
        st.session_state.messages = json.load(fh)
    st.session_state.chat_id = chat_id
# Default model catalogue: text-classification endpoints on the HF inference
# router.  The per-menu blocks in the dashboard reassign this list on every
# rerun, so these values only matter before a menu branch runs.
st.session_state.models = [
    {
        "model": "distilbert-base-uncased-finetuned-sst-2-english",
        "url": "https://router.huggingface.co/hf-inference/models/distilbert/distilbert-base-uncased-finetuned-sst-2-english"
    },
    {
        "model": "openai-community/roberta-base-openai-detector",
        "url": "https://router.huggingface.co/hf-inference/models/openai-community/roberta-base-openai-detector"
    },
    {
        "model": "nlptown/bert-base-multilingual-uncased-sentiment",
        "url": "https://router.huggingface.co/hf-inference/models/nlptown/bert-base-multilingual-uncased-sentiment"
    },
    {
        "model": "BAAI/bge-reranker-base",
        "url": "https://router.huggingface.co/hf-inference/models/BAAI/bge-reranker-base"
    },
    {
        "model": "SamLowe/roberta-base-go_emotions",
        "url": "https://router.huggingface.co/hf-inference/models/SamLowe/roberta-base-go_emotions"
    }
]
if st.session_state.framework == "gen":
    # ---- Landing page ("gen" view) ----
    # Asset filenames referenced by the injected CSS/HTML below.
    encoded_logo = "hugging.png"
    main_bg_ext = "png"
    main_bg = "picturebg.jfif"
    # Page-wide styles.
    # NOTE(review): the markdown payloads here are empty f-strings in this
    # copy of the file — the CSS/HTML markup appears to have been stripped.
    st.markdown(
        f"""
    """,
        unsafe_allow_html=True,
    )
    # Overlay container (payload stripped in this copy).
    st.markdown(
        f"""
    """,
        unsafe_allow_html=True,
    )
    # Hero title.
    st.markdown(
        f"""
    Hugging face
    transformers pipeline
    """,
        unsafe_allow_html=True,
    )
    # Tagline / feature description.
    st.markdown(
        f"""
    This intelligent assistant enables you to generate insightful text and vivid imagery description from simple prompts.
    Whether you're brainstorming ideas,
    drafting content, or visualizing concepts — everything is saved, so your creative flow never skips a beat.
    """,
        unsafe_allow_html=True,
    )
    # Call-to-action: switch to the dashboard and show the prompt overlay.
    with st.container(key="content-container-3"):
        if st.button("Try it now ",key="try"):
            st.session_state.framework = "dash"
            st.session_state.show_overlay = True
            st.rerun()
if st.session_state.framework == "dash":
    # ---- Dashboard view: per-menu configuration ----
    if st.session_state.menu == "class":
        # Suggestion-card prompts for the text-classification demo.
        choice1 = '"Win a free vacation to the Bahamas! Click the link below to claim your prize.This is an exclusive offer for a limited time only, don’t miss out!"'
        choice2 = '"I can’t believe my phone just died right before an important call. Ugh! Now I have to wait for hours to get it fixed and miss out on everything."'
        choice3 = '"Apple unveils a new AI chip with groundbreaking technology in the latest iPhone.This innovation is set to redefine the way we use smartphones in everyday life."'
        choice4 = '"I made a delicious homemade food last night with fresh ingredients. It was so good, I can’t wait to make it again for dinner tomorrow."'
        text_h1 = "🏷️ Text Classification"
        # Card thumbnails (classification cards all reuse the same image).
        # A duplicate `images = "images.png"` assignment was removed here.
        images = "images.png"
        image1 = 'images.png'
        image2 = 'images.png'
        image3 = 'images.png'
        # Layout knobs — presumably consumed by the injected CSS; confirm.
        margin = 0
        margintop = -20  # was the string "-20"; now an int like the other menus
        display = "none"
        # Classification endpoints on the HF inference router.
        st.session_state.models = [
            {
                "model": "distilbert-base-uncased-finetuned-sst-2-english",
                "url": "https://router.huggingface.co/hf-inference/models/distilbert/distilbert-base-uncased-finetuned-sst-2-english"
            },
            {
                "model": "openai-community/roberta-base-openai-detector",
                "url": "https://router.huggingface.co/hf-inference/models/openai-community/roberta-base-openai-detector"
            },
            {
                "model": "nlptown/bert-base-multilingual-uncased-sentiment",
                "url": "https://router.huggingface.co/hf-inference/models/nlptown/bert-base-multilingual-uncased-sentiment"
            },
            {
                "model": "BAAI/bge-reranker-base",
                "url": "https://router.huggingface.co/hf-inference/models/BAAI/bge-reranker-base"
            },
            {
                "model": "SamLowe/roberta-base-go_emotions",
                "url": "https://router.huggingface.co/hf-inference/models/SamLowe/roberta-base-go_emotions"
            }
        ]
        task = "text-classification"
    if st.session_state.menu == "gen":
        # Suggestion-card prompts for the text-generation demo.
        choice1 = 'Write a poem about the ocean at night. Include imagery and emotion to bring it to life.'
        choice2 = 'Generate a product description for a futuristic smartwatch with health monitoring features.'
        choice3 = 'Complete this sentence: "As she opened the old journal, she found..."'
        choice4 = 'Write a motivational quote that could be used in a fitness app.'
        text_h1 = "✍️ Text Generation"
        # Card thumbnails.
        images = 'images.png'
        image1 = 'images.png'
        image2 = 'images.png'
        image3 = 'images.png'
        # Layout knobs — presumably consumed by the injected CSS; confirm.
        margin = 0
        margintop = -20
        task = "text-generation"
        display = "block"
        # Chat-completion endpoints on the HF inference router.
        st.session_state.models = [
            {
                "model": "deepseek-ai/DeepSeek-V3",
                "url": "https://router.huggingface.co/nebius/v1/chat/completions"
            },
            {
                "model": "mistralai/mistral-7b-instruct",
                "url": "https://router.huggingface.co/novita/v3/openai/chat/completions"
            },
            {
                "model": "meta-llama/llama-3.1-8b-instruct",
                "url":"https://router.huggingface.co/novita/v3/openai/chat/completions"
            },
            {
                "model": "qwen/qwq-32b",
                "url": "https://router.huggingface.co/novita/v3/openai/chat/completions"
            },
            {
                "model": "google/gemma-2-2b-it-fast",
                "url": "https://router.huggingface.co/nebius/v1/chat/completions"
            }
        ]
        # NOTE(review): this Keras filename looks like a leftover from another
        # project — nothing visible in this file loads it; confirm before use.
        st.session_state.model = "best_bilstm_model.h5"
    if st.session_state.menu == "image":
        # Image-to-text cards carry no text prompts — the thumbnails below
        # act as the sample inputs instead.
        choice1 = ''
        choice2 = ''
        choice3 = ''
        choice4 = ''
        text_h1 = "🖼️ Image to Text "
        # Sample images used by the suggestion cards.
        images = 'images.jfif'
        image1 = 'logo2.png'
        image2 = 'hugging.png'
        margintop = -90
        image3 = 'Captured.png'
        display = "none"
        # Captioning endpoints on the HF inference router.
        st.session_state.models = [
            {
                "model": "Salesforce/blip-image-captioning-large",
                "url": "https://router.huggingface.co/hf-inference/models/Salesforce/blip-image-captioning-large"
            },
            {
                "model": "nlpconnect/vit-gpt2-image-captioning",
                "url": "https://router.huggingface.co/hf-inference/models/nlpconnect/vit-gpt2-image-captioning"
            }
        ]
        task = "image-to-text"
        margin = 120
        # NOTE(review): this Keras filename looks like a leftover from another
        # project — nothing visible in this file loads it; confirm before use.
        st.session_state.model = "best_bilstm_model.h5"
st.markdown(
"""
""",
unsafe_allow_html=True
)
st.markdown(
f"""
""",
unsafe_allow_html=True
)
loading_html = """
"""
# Sort chat files by last modified time (most recent first)
chat_files = sorted(
os.listdir(CHAT_DIR),
key=lambda x: os.path.getmtime(os.path.join(CHAT_DIR, x)),
reverse=True
)
# Sidebar buttons
with st.sidebar:
if st.button(" Text Classification",key="nav-1"):
st.session_state.menu ="class" # Store state
st.rerun()
if st.button(" Text Generation",key="nav-2"):
st.session_state.menu ="gen" # Store state
st.rerun()
if st.button(" Image to text Generator",key="nav-3"):
st.session_state.menu ="image" # Store state
st.rerun()
st.markdown("### 💬 Previous Chats")
# List existing chats as buttons
# List existing chats as buttons with a delete icon
for chat_file in chat_files:
chat_id = chat_file.replace(".json", "")
file_path = os.path.join(CHAT_DIR, chat_file)
try:
with open(file_path, "r", encoding="utf-8") as f:
data = json.load(f)
if data:
# Get first user message
first_user_message = next(
(msg["content"] for msg in data if msg["role"] == "user"), "🆕 New Chat"
)
preview_line = first_user_message.strip().split("\n")[0]
truncated_preview = preview_line[:50] + ("..." if len(preview_line) > 50 else "")
# Generate keys
val = f"hist_{chat_id}"
delete_key = f"del_{chat_id}"
# Create side-by-side buttons
col1, col2 = st.columns([5, 1])
key = f"col_{chat_id}"
with st.container(key=key):
with col1:
if st.button(f"🗂 {truncated_preview}", key=val):
load_chat_history(chat_id)
st.session_state.show_overlay = False
st.rerun()
except (json.JSONDecodeError, FileNotFoundError):
pass # Skip invalid files
# Add your model description logic here
if st.button("",key ="btn-new"):
st.session_state.show_overlay = True
# Set new ID based on existing files
new_id = f"chat_{len(os.listdir(CHAT_DIR)) + 1}"
st.session_state.chat_id = new_id
st.session_state.messages = []
save_chat_history() # Save empty history to create the file
st.rerun()
    # Secondary "create/new" control that simply re-opens the overlay.
    with st.container(key="content_1"):
        if st.button("",key ="create"):
            st.session_state.show_overlay = True
            st.rerun()
    # Model picker over the catalogue registered for the current menu/task.
    model_names = [m["model"] for m in st.session_state.models]
    selected_model_name = st.selectbox(task, model_names)
    selected_model = next((m for m in st.session_state.models if m["model"] == selected_model_name), None)
    with st.expander("⚙️ Advanced Settings"):
        # Sampling controls; query_huggingface_model only sends them for the
        # "text-generation" task.
        temperature = st.slider("Temperature", 0.0, 2.0, 0.7, step=0.1)
        top_p = st.slider("Top-p ", 0.0, 1.0, 0.9, step=0.05)
        max_tokens = st.slider("Max tokens", 50, 1024, 512, step=50)
    if st.session_state.show_overlay == True:
        # ---- Overlay: first-prompt screen ----
        st.header(f"{text_h1} – What can I help you with?")
        user_input = ''
        if st.session_state.menu != "image":
            # Plain text prompt for classification / generation tasks.
            if user_input := st.chat_input("Ask anything",key="imupload"):
                st.session_state.user_input = user_input
                st.session_state.show_overlay = False
                # Add user message to history
                st.session_state.messages.append({
                    "role": "user",
                    "content": user_input,
                    "image": st.session_state.save_path if st.session_state.menu == "image" else ""
                })
                # Queue the prompt so the chat page generates the reply
                # after the rerun.
                st.session_state.input_text = st.session_state.user_input
                st.session_state.input_task = "text"
                st.session_state.generate_response = True
                st.rerun()
        if st.session_state.menu == "image":
            # Prompt that also accepts an image attachment.
            if prompts := st.chat_input(
                "Say something and/or attach an image",
                accept_file=True,
                file_type=["jpg", "jpeg", "png"],key = "uploads"
            ):
                files = prompts["files"]
                text_input = prompts["text"]
                # Optional image handling: save the upload to disk so the
                # inference call can later read it back as raw bytes.
                image_path = ""
                if files:
                    file = files[0]
                    image = Image.open(file)
                    save_dir = "saved_images"
                    os.makedirs(save_dir, exist_ok=True)
                    save_path = os.path.join(save_dir, file.name)
                    image.save(save_path)
                    st.session_state.save_path = save_path
                    image_path = save_path  # So you can add it to messages
                st.session_state.show_overlay = False
                # Add user message
                st.session_state.messages.append({
                    "role": "user",
                    "content": image_path,
                    "image": st.session_state.save_path
                })
                st.session_state.input_text = st.session_state.save_path
                st.session_state.input_task = "image"
                # Queue response generation for the next rerun.
                st.session_state.generate_response = True
                st.rerun()
        # Suggestion cards: clicking one queues it exactly like a typed
        # prompt (text menus) or queues a bundled sample image (image menu).
        with st.container(key="choices"):
            col1,col2,col3,col4 = st.columns([1.5,0.1,1.5,1])
            with col1:
                if st.button(choice1,key="choice-1"):
                    st.session_state.user_input = choice1
                    if st.session_state.menu == "image":
                        # The card stands for the sample image `images`.
                        st.session_state.messages.append({
                            "role": "user",
                            "content": images,
                            "image": images
                        })
                        st.session_state.input_task = "image"
                        st.session_state.input_text = images
                    else:
                        st.session_state.input_text = choice1
                        st.session_state.input_task = "text"
                        st.session_state.messages.append({
                            "role": "user",
                            "content": choice1,
                            "image":""
                        })
                    # Leave the overlay and generate the reply on rerun.
                    st.session_state.show_overlay = False
                    st.session_state.generate_response = True
                    st.rerun()
                if st.button(choice2,key="choice-2"):
                    st.session_state.user_input = choice2
                    if st.session_state.menu == "image":
                        st.session_state.input_task = "image"
                        st.session_state.input_text = image1
                        st.session_state.messages.append({
                            "role": "user",
                            "content": image1,
                            "image": image1
                        })
                    else:
                        st.session_state.input_text = choice2
                        st.session_state.input_task = "text"
                        st.session_state.messages.append({
                            "role": "user",
                            "content": choice2,
                            "image":""
                        })
                    st.session_state.generate_response = True
                    st.session_state.show_overlay = False
                    st.rerun()
            with col3:
                if st.button(choice3,key="choice-5"):
                    st.session_state.user_input = choice3
                    if st.session_state.menu == "image":
                        st.session_state.input_task = "image"
                        st.session_state.input_text = image2
                        st.session_state.messages.append({
                            "role": "user",
                            "content": image2,
                            "image": image2
                        })
                    else:
                        st.session_state.input_text = choice3
                        st.session_state.input_task = "text"
                        st.session_state.messages.append({
                            "role": "user",
                            "content": choice3,
                            "image":""
                        })
                    st.session_state.generate_response = True
                    st.session_state.show_overlay = False
                    st.rerun()
                if st.button(choice4,key="choice-6"):
                    st.session_state.user_input = choice4
                    if st.session_state.menu == "image":
                        st.session_state.input_task = "image"
                        st.session_state.input_text = image3
                        st.session_state.messages.append({
                            "role": "user",
                            "content": image3,
                            "image": image3
                        })
                    else:
                        st.session_state.input_text = choice4
                        st.session_state.input_task = "text"
                        st.session_state.messages.append({
                            "role": "user",
                            "content": choice4,
                            "image":""
                        })
                    st.session_state.generate_response = True
                    st.session_state.show_overlay = False
                    st.rerun()
if st.session_state.show_overlay == False:
def generate_stream_response(text):
# Yield the string one character at a time (for streaming)
for char in text:
yield char
time.sleep(0.02)
# Display chat messages from history on app rerun
if st.session_state.get("generate_response", True):
with st.spinner("generating output..."):
response = query_huggingface_model(
selected_model,
st.session_state.input_text,
input_type=st.session_state.input_task,
task=task,
temperature=temperature,top_p=top_p,max_tokens=max_tokens
)
if st.session_state.menu == "gen":
temper = temperature,
topp = top_p
else:
temper = "",
topp = ""
st.session_state.messages.append({
"role": "assistant",
"content": extract_response_content(response),
"image": "",
"model": selected_model['model'],
"temp":temper,
"top_p" : topp,
})
st.session_state.generate_response = False # Reset the flag
save_chat_history()
st.rerun()
        # Replay the stored transcript, one container per message.
        for i, message in enumerate(st.session_state.messages):
            con = message["role"]
            container_key = f"{con}-{i}"
            with st.container(key=container_key):
                with st.chat_message(message["role"]):
                    if message["role"] == "assistant":
                        # Show which model (and, when recorded, which sampling
                        # settings) produced this reply.
                        st.markdown(
                            f"**Model:** `{message['model']}`"
                            + (
                                f" **Temperature:** `{message['temp']}` **Top-p:** `{message['top_p']}`"
                                if message.get('temp') and message.get('top_p') else ""
                            ),
                            unsafe_allow_html=True
                        )
                    if message["image"] != "":
                        # Inline image markup (payload stripped in this copy).
                        st.markdown(f"""
    """,unsafe_allow_html=True,)
                    st.markdown(message["content"])
if st.session_state.menu == "image":
if prompts := st.chat_input(
"Say something and/or attach an image",
accept_file=True,
file_type=["jpg", "jpeg", "png"],key = "upload"
):
files = prompts["files"]
text_input = prompts["text"]
# Optional image handling
image_path = ""
if files:
file = files[0]
image = Image.open(file)
save_dir = "saved_images"
os.makedirs(save_dir, exist_ok=True)
save_path = os.path.join(save_dir, file.name)
image.save(save_path)
st.session_state.save_path = save_path
image_path = save_path # So you can add it to messages
# Show in chat window
with st.container(key="user-k"):
with st.chat_message("user"):
st.markdown(text_input)
if image_path:
st.markdown(f"""
""",unsafe_allow_html=True,)
with st.container(key="assistant-k"):
# Display assistant response in chat message container
with st.chat_message("assistant"):
with st.spinner("Model is generating a response..."):
# Add message to history
result = query_huggingface_model(selected_model, st.session_state.save_path , input_type="image")
st.session_state.messages.append({
"role": "user",
"content": text_input,
"image": image_path
})
response = extract_response_content(result)
st.markdown(
f"**Model:** `{selected_model['model'] if isinstance(selected_model, dict) else selected_model}`"
f"{': temperature' + str(temperature) if st.session_state.menu == 'gen' else ''} "
f"{ 'Top-p:'+str( top_p) if st.session_state.menu == 'gen' else ''}"
)
print(response)
st.write_stream(generate_stream_response(response)) # This will stream the text one character at a time
# Add assistant response to chat history
if st.session_state.menu == "gen":
temper = temperature,
topp = top_p
else:
temper = "",
topp = ""
st.session_state.messages.append({"role": "assistant", "content": response,"image":"","model":selected_model['model'],"temp":temper,"top_p":topp})
save_chat_history()
else:
# Accept user input
if prompt := st.chat_input("Ask anything"):
# Display user message in chat message container
with st.container(key="user-k"):
with st.chat_message("user"):
st.markdown(prompt)
# Add user message to chat history
with st.container(key="assistant-k"):
# Display assistant response in chat message container
with st.chat_message("assistant"):
with st.spinner("Model is generating a response..."):
st.session_state.messages.append({"role": "user", "content": prompt,"image":""})
result = query_huggingface_model(selected_model, prompt , input_type="text",task=task,temperature=temperature,top_p=top_p,max_tokens=max_tokens)
st.markdown(
f"**Model:** `{selected_model['model'] if isinstance(selected_model, dict) else selected_model}`"
f"{':temperature ' + str(temperature) if st.session_state.menu == 'gen' else ''} "
f"{ 'Top-p:'+str( top_p) if st.session_state.menu == 'gen' else ''}"
)
response = extract_response_content(result)
st.write_stream(generate_stream_response(response)) # Add assistant response to chat history
if st.session_state.menu == "gen":
temper = temperature,
topp = top_p
else:
temper = "",
topp = ""
st.session_state.messages.append({"role": "assistant", "content": response,"image":"","model":selected_model['model'],"temp":temper,"top_p":topp})
save_chat_history()