Update app.py
app.py CHANGED

@@ -2,7 +2,7 @@ import gradio as gr
 import os
 from huggingface_hub import InferenceClient
 
-
+
 
 client = InferenceClient(model="HuggingFaceH4/zephyr-7b-beta", token=HF_TOKEN)
 
@@ -67,7 +67,23 @@ demo = gr.ChatInterface(
 
 
 
+
+
+
+if __name__ == "__main__":
+    demo.launch()
+
+
+
+
+
+import os
+from transformers import AutoModelForCasualLM, AutoTokenizer
+from peft import PeftModel
+import torch
+
 if not api_token:
+    api_token = os.getenv("HUGGINGFACEHUB_API_TOKEN")
     raise ValueError("❌ ERROR: Hugging Face API token is not set. Please set it as an environment variable.")
 
 # Define model names
@@ -87,8 +103,4 @@ base_model = AutoModelForCausalLM.from_pretrained(
 model = PeftModel.from_pretrained(base_model, peft_model_name, token=api_token)
 
 # Load tokenizer
-tokenizer = AutoTokenizer.from_pretrained(base_model_name, token=api_token)
-
-
-if __name__ == "__main__":
-    demo.launch()
+tokenizer = AutoTokenizer.from_pretrained(base_model_name, token=api_token)
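A few issues are visible in the hunks above. The commit moves "if __name__ == '__main__': demo.launch()" up to just after the gr.ChatInterface definition and appends the PEFT model-loading code below it, so that code is unlikely to run while the app is being served. The new import spells the class AutoModelForCasualLM, while the class transformers exports, and the one the later from_pretrained call in this file uses, is AutoModelForCausalLM. Finally, api_token is tested with "if not api_token:" before the os.getenv("HUGGINGFACEHUB_API_TOKEN") assignment, which sits inside that block directly above an unconditional raise. Below is a minimal sketch of how the added block could be reordered to avoid these problems; it is not the committed code. The model names are placeholders (the real ones live in the elided "# Define model names" section), and demo is assumed to be the gr.ChatInterface constructed earlier in app.py.

# Sketch only: a reordered version of the block added in this commit.
import os

import torch  # kept from the commit; not used directly in this sketch
from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer  # "Causal", not "Casual"

# Read the token before checking it, so the check can actually pass.
api_token = os.getenv("HUGGINGFACEHUB_API_TOKEN")
if not api_token:
    raise ValueError("❌ ERROR: Hugging Face API token is not set. Please set it as an environment variable.")

# Placeholders; the real names are defined in the "# Define model names" section of app.py.
base_model_name = "org/base-model"
peft_model_name = "org/peft-adapter"

base_model = AutoModelForCausalLM.from_pretrained(base_model_name, token=api_token)
model = PeftModel.from_pretrained(base_model, peft_model_name, token=api_token)

# Load tokenizer
tokenizer = AutoTokenizer.from_pretrained(base_model_name, token=api_token)

# Launch last, once everything the app depends on exists.
# (demo is the gr.ChatInterface defined earlier in app.py.)
if __name__ == "__main__":
    demo.launch()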