Update app.py
app.py CHANGED
@@ -11,15 +11,17 @@ base_model = "meta-llama/Meta-Llama-3-8B-Instruct"
 
 llama_model = transformers.AutoModelForCausalLM.from_pretrained(base_model)
 
-peft_model = PeftModel.from_pretrained(llama_model, model_id)
 
 pipeline = transformers.pipeline(
     "text-generation",
-    model=
+    model=llama_model,
+    tokenizer=base_model,
     model_kwargs={"torch_dtype": torch.bfloat16},
     device="cuda",
 )
 
+pipeline.model = PeftModel.from_pretrained(llama_model, model_id)
+
 def chat_function(message, history, system_prompt, max_new_tokens, temperature):
     messages = [{"role":"system","content":system_prompt},
                 {"role":"user", "content":message}]
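For context, a minimal sketch of how this section of app.py reads after the change. The imports and the `model_id` value are assumptions here (they sit outside this hunk); the rest mirrors the diff above.

import torch
import transformers
from peft import PeftModel

base_model = "meta-llama/Meta-Llama-3-8B-Instruct"
model_id = "my-org/llama-3-8b-lora-adapter"  # assumed: LoRA adapter repo, defined earlier in app.py

llama_model = transformers.AutoModelForCausalLM.from_pretrained(base_model)

# Build the pipeline around the base model; passing the repo name as `tokenizer`
# lets transformers load the matching tokenizer by name.
pipeline = transformers.pipeline(
    "text-generation",
    model=llama_model,
    tokenizer=base_model,
    model_kwargs={"torch_dtype": torch.bfloat16},
    device="cuda",
)

# Swap the pipeline's model for the adapter-wrapped one so generation uses
# the fine-tuned PEFT weights instead of the plain base model.
pipeline.model = PeftModel.from_pretrained(llama_model, model_id)

Note that `PeftModel.from_pretrained` wraps the already-loaded `llama_model` rather than loading a second copy, so only one set of base weights stays in memory.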