import streamlit as st
from openai import OpenAI
import os
from dotenv import load_dotenv
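# Load environment variables (e.g. HUGGINGFACEHUB_API_TOKEN) from a local .env file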
load_dotenv()
# Initialize the OpenAI client
client = OpenAI(
    base_url="https://api-inference.huggingface.co/v1",
    api_key=os.environ.get('HUGGINGFACEHUB_API_TOKEN'),  # read from the environment (set in .env)
)
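# Note: the Hugging Face Inference API exposes an OpenAI-compatible /v1 route,
# which is why the stock OpenAI client can be pointed at it via base_url.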
# Supported models (a single entry for now)
model_links = {
    "Zephyr-7B": "HuggingFaceH4/zephyr-7b-beta"
}
# Info about the model to display in the sidebar
model_info = {
    "Zephyr-7B-β": {
        'description': """**Zephyr 7B β** is a **GPT-style large language model (LLM)** fine-tuned from Mistral-7B-v0.1, with 7 billion parameters. In this app it is put to work on educational, science-related Q&A.\n"""
    }
}
# Reset the conversation
def reset_conversation():
    st.session_state.conversation = []
    st.session_state.messages = []
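# A minimal sketch of how this could be exposed in the UI (hypothetical; the
# original app does not wire it up):
#
#     st.sidebar.button("Reset chat 🔄", on_click=reset_conversation)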
# App title and description
st.title("Sci-Mom 👩🏫 ")
st.subheader("AI chatbot for Solving your doubts 📚 :)")
# Custom description for SciMom in the sidebar
st.sidebar.write("Built for my mom, with love ❤️.")
st.sidebar.markdown(model_info["Zephyr-7B-β"]['description'])
st.sidebar.markdown("""
### Zephyr 7B β 🤖
Your personal science assistant, built with **7 billion parameters** to help with all your science Q&As.
- **Trained using Ultrachat Feedbacks**!
- **Quick & Smart**: Handles easy to tough topics like a pro.
- **Accurate**: Reliable answers every time.
Need help with science? Zephyr’s got your back! 🔬📘
""")
st.sidebar.markdown("By Gokulnath ♔")
# Model selection has been removed from the UI; only one model remains
selected_model = "Zephyr-7B"

if "prev_option" not in st.session_state:
    st.session_state.prev_option = selected_model

# Kept in case model selection is reintroduced: clears history on a model switch
if st.session_state.prev_option != selected_model:
    st.session_state.messages = []
    st.session_state.prev_option = selected_model
    reset_conversation()
# Pull in the model we want to use
repo_id = model_links[selected_model]
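# repo_id is the Hub model id (e.g. "HuggingFaceH4/zephyr-7b-beta"); it is passed
# as the `model` field in the chat.completions.create call below.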
# Initialize chat history
if "messages" not in st.session_state:
st.session_state.messages = []
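# st.session_state persists across Streamlit reruns, so the history built up
# here survives each interaction within a browser session.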
# Display chat messages from history on app rerun
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])
# Accept user input
if prompt := st.chat_input("Ask Scimom!"):
    # Display the user message in a chat message container
    with st.chat_message("user"):
        st.markdown(prompt)
    st.session_state.messages.append({"role": "user", "content": prompt})

    # Display the assistant response in a chat message container
    with st.chat_message("assistant"):
        try:
            stream = client.chat.completions.create(
                model=repo_id,
                messages=[
                    {"role": m["role"], "content": m["content"]}
                    for m in st.session_state.messages
                ],
                temperature=0.5,  # moderate randomness; raise for more varied answers
                stream=True,
                max_tokens=3000,
            )
            # write_stream renders tokens as they arrive and returns the full text
            response = st.write_stream(stream)
        except Exception as e:
            response = "😵‍💫 Something went wrong. Please try again later."
            st.write(response)
            st.write("This was the error message:")
            st.write(e)

    st.session_state.messages.append({"role": "assistant", "content": response})