Update app.py
app.py CHANGED
@@ -1,45 +1,45 @@
 import streamlit as st
 from transformers import pipeline
 
 
 @st.cache_resource
 def load_model():
     # Load the model once and cache it
     return pipeline("text-generation", model="deepseek-ai/deepseek-coder-1.3b-instruct")
 
 
 # App UI
 st.title("🤖 DeepSeek Coder Chat")
 st.write("Ask questions to the DeepSeek Coder AI model!")
 
 # User input
 user_input = st.text_input("Enter your question:", value="Who are you?")
 
 if st.button("Generate Response"):
     # Format messages in chat format
     messages = [{"role": "user", "content": user_input}]
 
     # Load cached model
     pipe = load_model()
 
     # Generate response with loading indicator
     with st.spinner("Generating response..."):
         try:
             response = pipe(messages)
 
             # Display formatted output
             st.subheader("Response:")
-            st.write(response[0]['generated_text'])
+            st.write(response[0]['generated_text'][1]["content"])
 
         except Exception as e:
             st.error(f"An error occurred: {str(e)}")
 
 # Sidebar with info
 with st.sidebar:
     st.markdown("### Model Information")
     st.write("This app uses the deepseek-ai/deepseek-coder-1.3b-instruct model")
     st.markdown("### System Requirements")
     st.write("⚠️ Note: This model requires significant computational resources:")
     st.write("- ~3GB RAM minimum")
     st.write("- ~5GB disk space for model weights")
     st.write("- May take 30-60 seconds to load initially")
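Context for the change at line 32: when a transformers text-generation pipeline is given chat-style messages, generated_text is typically returned as the full conversation (user turn first, assistant turn appended) rather than a plain string, so the assistant's reply sits at index 1. A minimal sketch of that assumption, outside Streamlit:

from transformers import pipeline

# Assumption: with a list of chat messages, the pipeline echoes the conversation
# and appends the assistant turn, e.g.
# [{"generated_text": [{"role": "user", ...}, {"role": "assistant", ...}]}]
pipe = pipeline("text-generation", model="deepseek-ai/deepseek-coder-1.3b-instruct")
response = pipe([{"role": "user", "content": "Who are you?"}])
print(response[0]["generated_text"][1]["content"])  # assistant reply only

If the pipeline were instead called with a plain string prompt, generated_text would be a string and the old st.write(response[0]['generated_text']) form would apply.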