Update README.md
Browse files
README.md
CHANGED
@@ -29,7 +29,9 @@ More information needed
 ### Testing results
 
 import torch
+
 from peft import AutoPeftModelForCausalLM
+
 from transformers import AutoTokenizer, pipeline
 
 peft_model_id = "frankmorales2020/Mistral-7B-text-to-sql-without-flash-attention-2"
@@ -43,7 +45,7 @@ model = AutoPeftModelForCausalLM.from_pretrained(
 tokenizer = AutoTokenizer.from_pretrained(peft_model_id)
 pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)
 
-
+CASE Number 1:
 prompt='What was the first album Beyoncé released as a solo artist?'
 prompt = f"Instruct: generate a SQL query.\n{prompt}\nOutput:\n" # for dataset b-mc2/sql-create-context
 outputs = pipe(prompt, max_new_tokens=1024, do_sample=True, temperature=0.9, top_k=50, top_p=0.1, eos_token_id=pipe.tokenizer.eos_token_id, pad_token_id=pipe.tokenizer.eos_token_id)