Update app.py
app.py CHANGED
@@ -2,6 +2,7 @@ import streamlit as st
 from transformers import pipeline, AutoTokenizer, AutoModelForCausalLM
 from PIL import Image
 import requests
+import os
 
 # Load the image classification pipeline
 @st.cache_resource
@@ -13,8 +14,16 @@ pipe_classification = load_image_classification_pipeline()
 # Load the Meta-Llama model and tokenizer for text generation
 @st.cache_resource
 def load_llama_pipeline():
-    tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-3.2-3B-Instruct")
-    model = AutoModelForCausalLM.from_pretrained("meta-llama/Llama-3.2-3B-Instruct")
+    # Retrieve Hugging Face token from environment variables
+    token = os.getenv("HF_AUTH_TOKEN")
+    tokenizer = AutoTokenizer.from_pretrained(
+        "meta-llama/Llama-3.2-3B-Instruct",
+        use_auth_token=token
+    )
+    model = AutoModelForCausalLM.from_pretrained(
+        "meta-llama/Llama-3.2-3B-Instruct",
+        use_auth_token=token
+    )
     return pipeline("text-generation", model=model, tokenizer=tokenizer)
 
 pipe_llama = load_llama_pipeline()
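For anyone reusing this pattern outside the Space, here is a minimal sketch of the same token-based loading flow, assuming HF_AUTH_TOKEN has been set in the environment (for example as a Space secret) and that the token's account has been granted access to the gated meta-llama/Llama-3.2-3B-Instruct repo. Recent transformers releases accept token= in place of the deprecated use_auth_token= argument used in the commit; the prompt and generation parameters below are illustrative assumptions, not part of the commit.

    import os
    from transformers import pipeline, AutoTokenizer, AutoModelForCausalLM

    # Read the access token exported beforehand, e.g. as a Space secret:
    #   HF_AUTH_TOKEN=hf_...   (placeholder; never hardcode real tokens)
    token = os.getenv("HF_AUTH_TOKEN")

    # Authenticate both downloads; token= is the current name for the
    # deprecated use_auth_token= argument used in the commit.
    tokenizer = AutoTokenizer.from_pretrained(
        "meta-llama/Llama-3.2-3B-Instruct", token=token
    )
    model = AutoModelForCausalLM.from_pretrained(
        "meta-llama/Llama-3.2-3B-Instruct", token=token
    )
    pipe_llama = pipeline("text-generation", model=model, tokenizer=tokenizer)

    # Illustrative call; the prompt and max_new_tokens are assumptions.
    result = pipe_llama("Hello, Llama!", max_new_tokens=32)
    print(result[0]["generated_text"])

Reading the token with os.getenv, rather than hardcoding it in app.py, keeps the secret out of the repository history and lets the Space supply it through its settings.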