AISimplyExplained committed on
Commit
d041b8d
1 Parent(s): 4cd8613

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +31 -0
README.md CHANGED
@@ -20,5 +20,36 @@ model = AutoModelForCausalLM.from_pretrained("unsloth/mistral-7b-bnb-4bit")
20
  model = PeftModel.from_pretrained(model, "AISimplyExplained/RBI-Notif64")
21
  tokenizer= AutoTokenizer.from_pretrained("unsloth/mistral-7b-bnb-4bit")
22
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
23
 
24
  ```
 
20
  model = PeftModel.from_pretrained(model, "AISimplyExplained/RBI-Notif64")
21
  tokenizer= AutoTokenizer.from_pretrained("unsloth/mistral-7b-bnb-4bit")
22
 
23
# Alpaca-style prompt template: the first slot takes the instruction,
# the second the (possibly empty) response the model should complete.
alpaca_prompt = """Below is an instruction. Write a response that appropriately completes the request.
### Instruction:
{}

### Response:
{}"""
def formatting_prompts_func(examples):
    """Build Alpaca-style training texts from a batch of examples.

    Parameters
    ----------
    examples : mapping with equal-length "input" and "output" sequences
        (the usual batched payload from `datasets.Dataset.map(batched=True)`
        — TODO confirm against the caller).

    Returns
    -------
    dict
        A single "text" key mapping to one formatted prompt per example.
    """
    # Comprehension over zipped pairs; renamed from the original
    # `input`/`output` loop variables, which shadowed Python builtins.
    texts = [
        alpaca_prompt.format(instruction, response)
        for instruction, response in zip(examples["input"], examples["output"])
    ]
    return {"text": texts}
# Build a one-element batch: the question goes into the instruction slot,
# the response slot is left empty so the model completes it.
# NOTE(review): assumes a CUDA device is available — confirm before running.
question = """What is the reference for the procedure to be followed by RRBs for implementation of Section 51A of UAPA, 1967?
"""
inputs = tokenizer(
    # Original wrapped this in `[...] * 1` and used an f-string with no
    # placeholders — both removed; the prompt text is unchanged.
    [alpaca_prompt.format(question, "")],
    return_tensors="pt",
).to("cuda")

# Generate up to 128 new tokens with the KV cache enabled, then decode
# and print the first (and only) sequence in the batch.
outputs = model.generate(**inputs, max_new_tokens=128, use_cache=True)
output = tokenizer.batch_decode(outputs)[0]
print(output)
54
 
55
  ```