Update app.py
app.py CHANGED
@@ -1,25 +1,34 @@
 import streamlit as st
 from transformers import pipeline, AutoTokenizer, AutoModelForCausalLM
-from huggingface_hub import snapshot_download
+from huggingface_hub import snapshot_download, login
 from pathlib import Path

 def main():
     st.title("Codestral Inference with Hugging Face")

+    # Get the Hugging Face API token from the user
+    hf_token = st.text_input("Enter your Hugging Face API token", type="password")
+    if not hf_token:
+        st.warning("Please enter your Hugging Face API token to proceed.")
+        st.stop()
+
+    # Login to Hugging Face Hub
+    login(hf_token)
+
     # Download the model files
     st.text("Downloading model...")
     model_id = "mistralai/Codestral-22B-v0.1"
     local_model_path = Path.home().joinpath('mistral_models', model_id)
     local_model_path.mkdir(parents=True, exist_ok=True)

-    snapshot_download(repo_id=model_id, allow_patterns=["*.bin", "*.json", "*.model"], local_dir=local_model_path)
+    snapshot_download(repo_id=model_id, allow_patterns=["*.bin", "*.json", "*.model"], local_dir=local_model_path, use_auth_token=hf_token)
     st.success("Model downloaded successfully!")

     # Load the model and tokenizer
     st.text("Loading model...")
-    tokenizer = AutoTokenizer.from_pretrained(local_model_path)
-    model = AutoModelForCausalLM.from_pretrained(local_model_path)
-    generator = pipeline("text-generation", model=model, tokenizer=tokenizer)
+    tokenizer = AutoTokenizer.from_pretrained(local_model_path, use_auth_token=hf_token)
+    model = AutoModelForCausalLM.from_pretrained(local_model_path, use_auth_token=hf_token)
+    generator = pipeline("text-generation", model=model, tokenizer=tokenizer, use_auth_token=hf_token)
     st.success("Model loaded successfully!")

     user_input = st.text_area("Enter your instruction", "Explain Machine Learning to me in a nutshell.")
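A note on the auth plumbing in this commit: huggingface_hub.login() caches the token, so subsequent hub calls pick it up even without an explicit argument, and recent versions of huggingface_hub and transformers deprecate the use_auth_token= keyword in favor of token=. A minimal sketch of the same download-then-load flow on current releases (reading the token from an environment variable is this sketch's assumption, not part of the commit):

import os
from pathlib import Path

from huggingface_hub import login, snapshot_download
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

hf_token = os.environ["HF_TOKEN"]  # assumption for this sketch: token supplied via env
login(token=hf_token)              # caches the token for later hub calls

model_id = "mistralai/Codestral-22B-v0.1"
local_model_path = Path.home() / "mistral_models" / model_id
local_model_path.mkdir(parents=True, exist_ok=True)

# `token=` replaces the deprecated `use_auth_token=` keyword
snapshot_download(
    repo_id=model_id,
    allow_patterns=["*.bin", "*.json", "*.model"],
    local_dir=local_model_path,
    token=hf_token,
)

# Loading from a local directory does not need a token at all
tokenizer = AutoTokenizer.from_pretrained(local_model_path)
model = AutoModelForCausalLM.from_pretrained(local_model_path)
generator = pipeline("text-generation", model=model, tokenizer=tokenizer)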
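The hunk ends at the user_input text area, so the generation step itself is not shown. A hypothetical continuation (the button label and sampling parameters below are illustrative, not from this commit) might look like:

# Hypothetical continuation -- not part of the diff above
if st.button("Generate"):
    st.text("Generating...")
    # text-generation pipelines return a list of dicts keyed by "generated_text"
    outputs = generator(user_input, max_new_tokens=256, do_sample=True)
    st.write(outputs[0]["generated_text"])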