attempt1 fix
app.py CHANGED
@@ -21,33 +21,33 @@ LICENSE = """
 ---.
 """
 
-if not torch.cuda.is_available():
-    DESCRIPTION += "\n<p>Running on CPU 🥶 This demo does not work on CPU.</p>"
-
-MODELS = {}
-
 def load_model(model_id):
-    if model_id in MODELS:
-        return MODELS[model_id]
     model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", load_in_4bit=True)
     tokenizer = AutoTokenizer.from_pretrained(model_id)
     tokenizer.use_default_system_prompt = False
-    MODELS[model_id] = (model, tokenizer)
     return model, tokenizer
 
+if not torch.cuda.is_available():
+    DESCRIPTION += "\n<p>Running on CPU 🥶 This demo does not work on CPU.</p>"
+
+if torch.cuda.is_available():
+    model_id = "Nekochu/Luminia-13B-v3"
+    model, tokenizer = load_model(model_id)
+
+
 @spaces.GPU(duration=120)
 def generate(
-    model_id: str,
     message: str,
     chat_history: list[tuple[str, str]],
     system_prompt: str,
+    model_id: str = "Nekochu/Luminia-13B-v3",
     max_new_tokens: int = 1024,
     temperature: float = 0.6,
     top_p: float = 0.9,
     top_k: int = 50,
     repetition_penalty: float = 1.2,
 ) -> Iterator[str]:
-    model, tokenizer = load_model(model_id)
+    model, tokenizer = load_model(model_id)
     conversation = []
     if system_prompt:
         conversation.append({"role": "system", "content": system_prompt})
@@ -81,13 +81,12 @@ def generate(
         outputs.append(text)
         yield "".join(outputs)
 
-MODEL_IDS = ["Nekochu/Luminia-13B-v3", "Nekochu/Llama-2-13B-German-ORPO"]  # Add more model ids as needed
 
 chat_interface = gr.ChatInterface(
     fn=generate,
     additional_inputs=[
-        gr.Dropdown(MODEL_IDS, label="Model ID"),  # Add this line
         gr.Textbox(label="System prompt", lines=6),
+        gr.Textbox(label="Model ID", default="Nekochu/Luminia-13B-v3"),
         gr.Slider(
             label="Max new tokens",
             minimum=1,
@@ -138,4 +137,4 @@ with gr.Blocks(css="style.css") as demo:
     gr.Markdown(LICENSE)
 
 if __name__ == "__main__":
-    demo.queue(max_size=20).launch()
+    demo.queue(max_size=20).launch()
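
For context, here is a minimal sketch of how app.py fits together after this commit. Only the lines visible in the diff above are taken from the Space; the imports, the token-streaming loop, the DESCRIPTION/LICENSE markdown, and the @spaces.GPU(duration=120) decorator are simplified, omitted, or assumed here, so treat this as an illustration of the flow rather than the Space's actual file. The ordering of additional_inputs matters: gr.ChatInterface passes their values to fn after the message and chat history, in list order, so the "System prompt" box feeds system_prompt and the new "Model ID" box feeds model_id.

# Sketch of the post-commit flow; assumes torch, transformers, bitsandbytes and gradio are installed.
from collections.abc import Iterator

import gradio as gr
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

DEFAULT_MODEL_ID = "Nekochu/Luminia-13B-v3"  # helper name for this sketch; default used in the diff


def load_model(model_id: str):
    # Per-call load (the MODELS cache was removed in this commit), quantized to 4-bit.
    model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", load_in_4bit=True)
    tokenizer = AutoTokenizer.from_pretrained(model_id)
    tokenizer.use_default_system_prompt = False
    return model, tokenizer


if torch.cuda.is_available():
    # Warm-load the default checkpoint at startup, as the new module-level block does.
    model, tokenizer = load_model(DEFAULT_MODEL_ID)


def generate(
    message: str,
    chat_history: list[tuple[str, str]],
    system_prompt: str,
    model_id: str = DEFAULT_MODEL_ID,
    max_new_tokens: int = 1024,
    temperature: float = 0.6,
    top_p: float = 0.9,
    top_k: int = 50,
    repetition_penalty: float = 1.2,
) -> Iterator[str]:
    model, tokenizer = load_model(model_id)  # reload whichever ID the textbox holds
    conversation = []
    if system_prompt:
        conversation.append({"role": "system", "content": system_prompt})
    for user, assistant in chat_history:
        conversation += [{"role": "user", "content": user}, {"role": "assistant", "content": assistant}]
    conversation.append({"role": "user", "content": message})

    input_ids = tokenizer.apply_chat_template(conversation, add_generation_prompt=True, return_tensors="pt")
    input_ids = input_ids.to(model.device)
    output_ids = model.generate(
        input_ids,
        max_new_tokens=max_new_tokens,
        do_sample=True,
        temperature=temperature,
        top_p=top_p,
        top_k=top_k,
        repetition_penalty=repetition_penalty,
    )
    # The Space streams partial text (outputs.append(text); yield "".join(outputs));
    # this sketch yields the finished reply once instead.
    yield tokenizer.decode(output_ids[0, input_ids.shape[-1]:], skip_special_tokens=True)


chat_interface = gr.ChatInterface(
    fn=generate,
    additional_inputs=[
        gr.Textbox(label="System prompt", lines=6),                                      # -> system_prompt
        gr.Textbox(label="Model ID", value=DEFAULT_MODEL_ID),                            # -> model_id
        gr.Slider(label="Max new tokens", minimum=1, maximum=2048, step=1, value=1024),  # -> max_new_tokens
    ],
)

with gr.Blocks(css="style.css") as demo:
    chat_interface.render()

if __name__ == "__main__":
    demo.queue(max_size=20).launch()

The sketch uses value= for the Model ID textbox's initial contents, which is what recent Gradio releases expect; the default= keyword in the diff may depend on the Gradio version pinned by the Space. The slider's maximum, step, and value are placeholders, since the diff only shows minimum=1.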