Update app.py
app.py CHANGED
@@ -65,11 +65,15 @@
 
 import gradio as gr
 from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
+import os
 
-#
+# Access the Hugging Face API token from the environment variables
+api_token = os.getenv("HF_API_TOKEN")
+
+# Load the model and tokenizer with authentication
 model_name = "meta-llama/Llama-2-7b-hf"
-tokenizer = AutoTokenizer.from_pretrained(model_name)
-model = AutoModelForCausalLM.from_pretrained(model_name)
+tokenizer = AutoTokenizer.from_pretrained(model_name, use_auth_token=api_token)
+model = AutoModelForCausalLM.from_pretrained(model_name, use_auth_token=api_token)
 generator = pipeline("text-generation", model=model, tokenizer=tokenizer)
 
 def respond(
@@ -112,3 +116,5 @@ demo = gr.ChatInterface(
 
 if __name__ == "__main__":
     demo.launch()
+
+
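For context, the change reads a token from the environment so the gated meta-llama/Llama-2-7b-hf checkpoint can be downloaded; on Spaces, HF_API_TOKEN would be set as a repository secret. Below is a minimal standalone sketch of the updated loading code, assuming a recent transformers release where the use_auth_token argument is deprecated in favor of token:

import os
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

# Read the Hugging Face token from the environment (on Spaces, add a
# secret named HF_API_TOKEN in the Space settings).
api_token = os.getenv("HF_API_TOKEN")

model_name = "meta-llama/Llama-2-7b-hf"

# Llama 2 is a gated repo, so the token must be passed when downloading.
# token= is the current argument name; use_auth_token= (as in the diff
# above) still works on older transformers releases, with a deprecation warning.
tokenizer = AutoTokenizer.from_pretrained(model_name, token=api_token)
model = AutoModelForCausalLM.from_pretrained(model_name, token=api_token)

generator = pipeline("text-generation", model=model, tokenizer=tokenizer)

Note that the token only authenticates the download; generation itself runs locally in the Space.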