import streamlit as st
from transformers import AutoTokenizer, AutoModelForCausalLM

@st.cache_resource
def load_model():
    # Cache the tokenizer and model so they load once per session, not on every Streamlit rerun.
    model_name = "iamthehimansh/3dAiWeb-gguff"
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    # If this repo ships only GGUF weights, from_pretrained may also need a gguf_file=... argument.
    model = AutoModelForCausalLM.from_pretrained(model_name)
    return tokenizer, model
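
# The original code passed model_type="llama" to from_pretrained, which is an
# argument from the ctransformers library rather than transformers. If the
# checkpoint turns out to be GGUF-only, a hedged sketch of the ctransformers
# route (an assumption; requires `pip install ctransformers`) would be:
#
#     from ctransformers import AutoModelForCausalLM as CTAutoModel
#     model = CTAutoModel.from_pretrained(model_name, model_type="llama")
#     text = model(prompt, max_new_tokens=100)  # ctransformers models are called directly on a string
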
tokenizer, model = load_model()

def generate_response(prompt):
    inputs = tokenizer(prompt, return_tensors="pt")
    # Generate up to 100 new tokens; num_return_sequences=1 keeps a single candidate.
    outputs = model.generate(**inputs, max_new_tokens=100, num_return_sequences=1)
    # Slice off the prompt tokens so the reply does not echo the user's input.
    new_tokens = outputs[0][inputs["input_ids"].shape[-1]:]
    return tokenizer.decode(new_tokens, skip_special_tokens=True)
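
# generate_response only sees the latest prompt, so the model has no memory of
# earlier turns. A sketch of multi-turn prompting, assuming the tokenizer ships
# a chat template (true for most instruction-tuned Llama checkpoints):
#
#     def generate_from_history(messages):
#         prompt_ids = tokenizer.apply_chat_template(
#             messages, add_generation_prompt=True, return_tensors="pt"
#         )
#         outputs = model.generate(prompt_ids, max_new_tokens=100)
#         return tokenizer.decode(outputs[0][prompt_ids.shape[-1]:], skip_special_tokens=True)
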
st.title("3D AI Web Chat Interface")

# Initialize chat history
if "messages" not in st.session_state:
    st.session_state.messages = []

# Display chat messages from history on app rerun
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])

# React to user input
if prompt := st.chat_input("What would you like to know about 3D scenes?"):
    # Display the user message in a chat message container
    st.chat_message("user").markdown(prompt)
    # Add the user message to the chat history
    st.session_state.messages.append({"role": "user", "content": prompt})

    # Generate and display the assistant response
    with st.chat_message("assistant"):
        with st.spinner("Thinking..."):
            response_text = generate_response(prompt)
        st.markdown(response_text)

    # Add the assistant response to the chat history
    st.session_state.messages.append({"role": "assistant", "content": response_text})

st.sidebar.markdown("## About")
st.sidebar.info(
    "This app provides a chat interface to interact with the "
    "iamthehimansh/3dAiWeb-gguff model. You can ask questions or discuss "
    "topics related to 3D scenes."
)
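
# To run this app locally (assumes streamlit, transformers, and torch are installed):
#     streamlit run app.py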