Spaces:
Running
Running
Commit: debugging slow inference pt1 (changing max_len)
Browse files — interfaces/cap.py (+1 −1)
interfaces/cap.py
CHANGED
@@ -100,7 +100,7 @@ def predict(text, model_id, tokenizer_id):
     model.to(device)

     inputs = tokenizer(text,
-                       max_length=
+                       max_length=4,
                        truncation=True,
                        padding="do_not_pad",
                        return_tensors="pt").to(device)

(Note: the pre-change value of `max_length` was not captured in this page scrape; the removed line is shown exactly as rendered.)