ierhon committed on
Commit
f8fed98
·
1 Parent(s): a72f0f8

Move inp_len to model_settings.py

Browse files
Files changed (1) hide show
  1. train.py +0 -1
train.py CHANGED
@@ -16,7 +16,6 @@ tokenizer = Tokenizer() # a tokenizer is a thing to split text into words, it mi
16
  tokenizer.fit_on_texts(list(dset.keys()))
17
 
18
  vocab_size = len(tokenizer.get_vocabulary())
19
- inp_len = 10 # limit of the input length, after 10 words the
20
 
21
  model = Sequential()
22
  model.add(Embedding(input_dim=vocab_size, output_dim=emb_size, input_length=inp_len))
 
16
  tokenizer.fit_on_texts(list(dset.keys()))
17
 
18
  vocab_size = len(tokenizer.get_vocabulary())
 
19
 
20
  model = Sequential()
21
  model.add(Embedding(input_dim=vocab_size, output_dim=emb_size, input_length=inp_len))