Update README.md
Browse files
README.md
CHANGED
@@ -8,8 +8,7 @@ model-index:
|
|
8 |
results: []
|
9 |
---
|
10 |
|
11 |
<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->
|
13 |
|
14 |
# multichoice-question-generator
|
15 |
|
@@ -22,8 +21,35 @@ It achieves the following results on the evaluation set:
|
|
22 |
More information needed
|
23 |
|
24 |
## Intended uses & limitations
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
25 |
|
26 |
-
More information needed
|
27 |
|
28 |
## Training and evaluation data
|
29 |
|
|
|
8 |
results: []
|
9 |
---
|
10 |
|
11 |
+
|
|
|
12 |
|
13 |
# multichoice-question-generator
|
14 |
|
|
|
21 |
More information needed
|
22 |
|
23 |
## Intended uses & limitations
|
24 |
+
This is an early version of a model meant to generate multiple-choice questions from text.
|
25 |
+
To load the model:
|
26 |
+
# Load the tokenizer and model for the multichoice question generator.
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
import torch

# Select the compute device once, before loading the model.
device = "cuda" if torch.cuda.is_available() else "cpu"

tokenizer = AutoTokenizer.from_pretrained("Gachomba/multichoice-question-generator")
# Fix: the tokenized inputs are moved to `device` later, so the model must be
# moved there too — otherwise `generate` raises a device-mismatch error
# whenever CUDA is available.
model = AutoModelForSeq2SeqLM.from_pretrained("Gachomba/multichoice-question-generator").to(device)
|
33 |
+
|
34 |
+
def tokenize_input(input_text):
    """Tokenize *input_text* and return (input_ids, attention_mask) tensors on *device*.

    Inputs are truncated/padded to a fixed length of 1024 tokens.
    """
    encoded = tokenizer(
        input_text,
        return_tensors="pt",
        truncation=True,
        padding='max_length',
        max_length=1024,
    )
    ids = encoded.input_ids.to(device)
    mask = encoded.attention_mask.to(device)
    return ids, mask
|
37 |
+
|
38 |
+
# generate output from the model
|
39 |
+
def generate_output(input_text):
    """Generate text from *input_text* with the model and return the decoded string."""
    ids, mask = tokenize_input(input_text)
    generated = model.generate(
        input_ids=ids,
        attention_mask=mask,
        max_length=512,
    )
    # Decode only the first (and only) sequence, dropping special tokens.
    first_sequence = generated[0]
    return tokenizer.decode(first_sequence, skip_special_tokens=True)
|
43 |
+
|
44 |
+
# get user input and generate a response
|
45 |
+
def get_response():
    """Read one line of text from the user and print the model's generated output."""
    text = input("Enter your text: ")
    answer = generate_output(text)
    print("Generated Output:", answer)


get_response()
|
51 |
+
|
52 |
|
|
|
53 |
|
54 |
## Training and evaluation data
|
55 |
|