prithivMLmods committed on
Commit c84ccf0 · verified
1 Parent(s): 5702455

Update README.md

Files changed (1)
  1. README.md +15 -13
README.md CHANGED
@@ -47,21 +47,23 @@ An autoencoder is a special type of neural network that learns to encode an inpu
 
 Implementing a pre-trained BART model for automatic text completion:
 
-'''
-from transformers import BartForConditionalGeneration, BartTokenizer
-
-bart_model = BartForConditionalGeneration.from_pretrained("facebook/bart-large",
-forced_bos_token_id=0) # takes a while to load
-tokenizer = BartTokenizer.from_pretrained("facebook/bart-large")
+Base from facebook bart /
 
+from transformers import BartForConditionalGeneration, BartTokenizer
+
+# Load pre-trained BART model
+bart_model = BartForConditionalGeneration.from_pretrained("facebook/bart-large", forced_bos_token_id=0) # takes a while to load
+tokenizer = BartTokenizer.from_pretrained("facebook/bart-large")
+
+# Input sentence
 sent = "GeekforGeeks has a <mask> article on BART."
-
 
-tokenized_sent = tokenizer(sent, return_tensors='pt')
-
+# Tokenize the input sentence
+tokenized_sent = tokenizer(sent, return_tensors='pt')
+
+# Generate the output sequence
+generated_encoded = bart_model.generate(tokenized_sent['input_ids'])
 
-generated_encoded = bart_model.generate(tokenized_sent['input_ids'])
-
-print(tokenizer.batch_decode(generated_encoded, skip_special_tokens=True)[0])
+# Decode the generated sequence and print
+print(tokenizer.batch_decode(generated_encoded, skip_special_tokens=True)[0])
 
-'''
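For comparison with the snippet this commit documents: the same mask completion can also be obtained through the transformers fill-mask pipeline. The sketch below is illustrative only and is not part of the commit; the facebook/bart-large checkpoint and the example sentence are taken from the README above, while the top_k value and the print format are assumptions.

```python
from transformers import pipeline

# Minimal sketch (not from the commit): fill the <mask> token with the
# fill-mask pipeline instead of calling generate() directly.
# Assumes the facebook/bart-large checkpoint used in the README.
fill_mask = pipeline("fill-mask", model="facebook/bart-large")

sent = "GeekforGeeks has a <mask> article on BART."

# top_k and the output formatting are illustrative choices.
for prediction in fill_mask(sent, top_k=3):
    # Each prediction carries the completed sentence and its score.
    print(f"{prediction['score']:.3f}  {prediction['sequence']}")
```

Both paths fill the <mask> token in the sentence; the pipeline additionally reports a score per candidate, while the generate()-based version in the README returns the decoder's full rewritten sentence.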