attempt6 fix
app.py CHANGED
@@ -24,12 +24,13 @@ LICENSE = """
 if not torch.cuda.is_available():
     DESCRIPTION += "\n<p>Running on CPU 🥶 This demo does not work on CPU.</p>"
 
-
-
-
-
-
-
+
+if torch.cuda.is_available():
+    model_id = "Nekochu/Luminia-13B-v3"
+    model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", load_in_4bit=True)
+    tokenizer = AutoTokenizer.from_pretrained(model_id)
+    tokenizer.use_default_system_prompt = False
+
 
 @spaces.GPU(duration=120)
 def generate(
@@ -43,11 +44,9 @@ def generate(
     top_k: int = 50,
     repetition_penalty: float = 1.2,
 ) -> Iterator[str]:
-    # Load the model and tokenizer based on the selected model ID
     model = AutoModelForCausalLM.from_pretrained(model_id, device_map="auto", load_in_4bit=True)
     tokenizer = AutoTokenizer.from_pretrained(model_id)
     tokenizer.use_default_system_prompt = False
-
     conversation = []
     if system_prompt:
         conversation.append({"role": "system", "content": system_prompt})
@@ -81,17 +80,11 @@ def generate(
         outputs.append(text)
         yield "".join(outputs)
 
-# Add a dropdown for model selection
-model_dropdown = gr.Dropdown(
-    label="Select Model",
-    choices=[model["name"] for model in MODELS],
-    value=MODELS[0]["name"],  # Default to the first model
-)
 
 chat_interface = gr.ChatInterface(
     fn=generate,
     additional_inputs=[
-
+        gr.Dropdown(label="Model ID", choices=["Nekochu/Luminia-13B-v3", "Nekochu/Llama-2-13B-German-ORPO"]),
         gr.Textbox(label="System prompt", lines=6),
         gr.Slider(
             label="Max new tokens",
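Side note on load_in_4bit=True: it requires bitsandbytes at runtime, and recent transformers releases prefer spelling the same thing as an explicit BitsAndBytesConfig passed via quantization_config. A sketch under that assumption:

import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

# Explicit 4-bit config, equivalent in effect to load_in_4bit=True.
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_compute_dtype=torch.float16,
)

model = AutoModelForCausalLM.from_pretrained(
    "Nekochu/Luminia-13B-v3",
    device_map="auto",
    quantization_config=bnb_config,
)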
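The second hunk also leaves the from_pretrained call inside generate(), so every chat turn reloads the checkpoint even though the same model is now loaded again at import time. A common fix is to memoize loading per model_id; load_model below is a hypothetical helper, not part of this commit:

from functools import lru_cache

from transformers import AutoModelForCausalLM, AutoTokenizer

# Memoize loading so switching models in the dropdown pays the load cost
# once per model_id rather than once per chat message.
@lru_cache(maxsize=2)
def load_model(model_id: str):
    model = AutoModelForCausalLM.from_pretrained(
        model_id, device_map="auto", load_in_4bit=True
    )
    tokenizer = AutoTokenizer.from_pretrained(model_id)
    tokenizer.use_default_system_prompt = False
    return model, tokenizer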