Spaces: Running on Zero
Update app.py
app.py CHANGED
@@ -8,7 +8,7 @@ import torch
 from transformers import AutoModelForCausalLM, GemmaTokenizerFast, TextIteratorStreamer
 
 DESCRIPTION = """\
-# Prompt Generator with Gemma 2
+# Prompt Generator with Gemma 2 9B IT
 ### Uses OpenAI's leaked meta prompt for optimizing GPT prompts!
 ## The Prompt Generator can turn a task description into a high quality prompt. For best results, be sure to describe your task in as much detail as possible, including what input data the prompt should expect as well as how the output should be formatted.
 """
@@ -19,7 +19,7 @@ MAX_INPUT_TOKEN_LENGTH = int(os.getenv("MAX_INPUT_TOKEN_LENGTH", "4096"))
 
 device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
 
-model_id = "unsloth/gemma-2-
+model_id = "unsloth/gemma-2-9b-it"
 tokenizer = GemmaTokenizerFast.from_pretrained(model_id)
 model = AutoModelForCausalLM.from_pretrained(
     model_id,
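
For context, this commit points the Space at the unsloth/gemma-2-9b-it checkpoint and updates the title to match. Below is a minimal, hypothetical sketch of how a Space like this typically loads that checkpoint and streams generations with the classes imported in the diff (AutoModelForCausalLM, GemmaTokenizerFast, TextIteratorStreamer). The generate() helper, its parameters, and the device_map/torch_dtype choices are illustrative assumptions, not lines taken from the actual app.py.

# Sketch only: loading the updated checkpoint and streaming a response.
# Only model_id, the tokenizer/model construction, the device line, and
# MAX_INPUT_TOKEN_LENGTH are confirmed by the diff above; everything else
# is an assumption following the usual transformers pattern.
import os
from threading import Thread

import torch
from transformers import AutoModelForCausalLM, GemmaTokenizerFast, TextIteratorStreamer

MAX_INPUT_TOKEN_LENGTH = int(os.getenv("MAX_INPUT_TOKEN_LENGTH", "4096"))

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

model_id = "unsloth/gemma-2-9b-it"  # checkpoint introduced by this commit
tokenizer = GemmaTokenizerFast.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    device_map="auto",           # assumption: place the model on the available GPU
    torch_dtype=torch.bfloat16,  # assumption: a common dtype choice for Gemma 2
)
model.eval()

def generate(message: str, max_new_tokens: int = 1024):
    # Build a chat-formatted prompt and truncate overly long inputs.
    conversation = [{"role": "user", "content": message}]
    input_ids = tokenizer.apply_chat_template(
        conversation, add_generation_prompt=True, return_tensors="pt"
    )
    if input_ids.shape[1] > MAX_INPUT_TOKEN_LENGTH:
        input_ids = input_ids[:, -MAX_INPUT_TOKEN_LENGTH:]
    input_ids = input_ids.to(device)

    # Stream tokens as they are produced by model.generate in a background thread.
    streamer = TextIteratorStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True)
    generate_kwargs = dict(
        input_ids=input_ids,
        streamer=streamer,
        max_new_tokens=max_new_tokens,
        do_sample=True,
    )
    Thread(target=model.generate, kwargs=generate_kwargs).start()

    for text in streamer:
        yield text

In the real Space this generator would presumably be wired into a Gradio chat UI; the sketch only illustrates how the renamed checkpoint is consumed by the code visible in the diff.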