---
license: apache-2.0
library_name: peft
base_model: unsloth/mistral-7b
---
|
# LoRA Adapter for RBI Notifications Dataset
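
This repository provides a LoRA adapter trained with PEFT on a dataset of Reserve Bank of India (RBI) notifications, meant to be loaded on top of the `unsloth/mistral-7b` base model.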
|
|
|
## Directions for Usage |
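
The snippet below installs Unsloth (Colab/Ampere build) and a source build of `transformers`, loads the 4-bit base checkpoint `unsloth/mistral-7b-bnb-4bit`, attaches this LoRA adapter, and generates an answer to a sample question from the RBI notifications domain. A CUDA GPU is assumed, since the base checkpoint is 4-bit quantized (bitsandbytes) and the inputs are moved to `"cuda"`.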
|
|
|
```python
# Install Unsloth (Colab/Ampere build) and a source build of transformers
!pip install "unsloth[colab_ampere] @ git+https://github.com/unslothai/unsloth.git"
!pip install "git+https://github.com/huggingface/transformers.git"

from peft import PeftModel, PeftConfig
from transformers import AutoModelForCausalLM, AutoTokenizer

# Load the adapter config and the 4-bit base model, attach the LoRA adapter,
# then load the tokenizer
config = PeftConfig.from_pretrained("AISimplyExplained/RBI-Notif64")
model = AutoModelForCausalLM.from_pretrained("unsloth/mistral-7b-bnb-4bit")
model = PeftModel.from_pretrained(model, "AISimplyExplained/RBI-Notif64")
tokenizer = AutoTokenizer.from_pretrained("unsloth/mistral-7b-bnb-4bit")

# Alpaca-style template: the first slot is the instruction, the second the response
alpaca_prompt = """Below is an instruction. Write a response that appropriately completes the request.

### Instruction:
{}

### Response:
{}"""


def formatting_prompts_func(examples):
    """Format a batch of rows with "input"/"output" columns into Alpaca-style texts."""
    inputs = examples["input"]
    outputs = examples["output"]
    texts = []
    for input_text, output_text in zip(inputs, outputs):
        texts.append(alpaca_prompt.format(input_text, output_text))
    return {"text": texts}


# Build a single prompt; the response slot is left empty so the model fills it in
inputs = tokenizer(
    [
        alpaca_prompt.format(
            "What is the reference for the procedure to be followed by RRBs "
            "for implementation of Section 51A of UAPA, 1967?",
            "",
        )
    ],
    return_tensors="pt",
).to("cuda")

outputs = model.generate(**inputs, max_new_tokens=128, use_cache=True)
output = tokenizer.batch_decode(outputs)[0]
print(output)
```
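
Note that `tokenizer.batch_decode` returns the full sequence, prompt included; to keep only the generated answer, slice off the prompt tokens (e.g. `outputs[:, inputs["input_ids"].shape[1]:]`) before decoding.

`formatting_prompts_func` is only needed when preparing training data, not at inference time. As a minimal sketch of how such a batched formatter is typically applied with the Hugging Face `datasets` library, assuming a dataset with `input` and `output` columns (the dataset id below is hypothetical):

```python
from datasets import load_dataset

# Hypothetical dataset id -- substitute the actual RBI notifications dataset
dataset = load_dataset("AISimplyExplained/rbi-notifications", split="train")

# batched=True hands each column to formatting_prompts_func as a list,
# matching its examples["input"] / examples["output"] access pattern
dataset = dataset.map(formatting_prompts_func, batched=True)
print(dataset[0]["text"])  # one Alpaca-formatted training example
```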