Create app.py
app.py
ADDED
@@ -0,0 +1,22 @@
+import streamlit as st
+from transformers import pipeline, AutoTokenizer, AutoModelForCausalLM
+
+# Load model and tokenizer
+@st.cache_resource  # Cache the resources to avoid reloading on every run
+def load_model():
+    tokenizer = AutoTokenizer.from_pretrained("mistralai/Codestral-22B-v0.1")
+    model = AutoModelForCausalLM.from_pretrained("mistralai/Codestral-22B-v0.1")
+    return tokenizer, model
+
+tokenizer, model = load_model()
+generator = pipeline("text-generation", model=model, tokenizer=tokenizer)
+
+st.title("Chat with Codestral-22B")
+st.write("Ask a question and get a response from the Codestral-22B model.")
+
+user_input = st.text_input("You:", placeholder="Type your question here...")
+
+if st.button("Send") and user_input:
+    with st.spinner("Generating response..."):
+        # max_new_tokens bounds the generated continuation regardless of prompt length
+        response = generator(user_input, max_new_tokens=100, num_return_sequences=1)
+    st.write("Codestral-22B: " + response[0]["generated_text"])
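Note: with default settings, from_pretrained loads the 22B-parameter weights in float32 (on the order of 80 GB of RAM), which will not fit on typical Space hardware. A minimal sketch of a lower-memory load_model variant, assuming torch and accelerate are installed and that you have accepted the model's access terms on the Hub if the checkpoint is gated:

    import torch
    import streamlit as st
    from transformers import AutoTokenizer, AutoModelForCausalLM

    @st.cache_resource
    def load_model():
        tokenizer = AutoTokenizer.from_pretrained("mistralai/Codestral-22B-v0.1")
        # bfloat16 halves the memory footprint vs. float32;
        # device_map="auto" spreads the weights across available GPUs (via accelerate)
        model = AutoModelForCausalLM.from_pretrained(
            "mistralai/Codestral-22B-v0.1",
            torch_dtype=torch.bfloat16,
            device_map="auto",
        )
        return tokenizer, model

To try the app locally: streamlit run app.py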