#!/usr/bin/env bash
# Build TensorRT engines for the topic-clustering sentence-embedding models.
#
# NOTE(review): the original file had all six commands fused onto a single
# line with no separators, which is not runnable as a script. They are
# separated here with every argument preserved verbatim.
#
# The two `docker run` lines below are alternative interactive sessions
# (-it --rm): start ONE of them manually, then run the trtexec builds
# inside the container. They are not meant to execute sequentially.

# Interactive container from the stock NVIDIA TensorRT 22.07 image.
# Mounts the project dir at the same path; host port 8635 -> container 8633.
docker run --gpus all -it --rm \
  -v /home/vietle/topic-clustering:/home/vietle/topic-clustering \
  --name topic_clustering \
  -p 8635:8633 \
  nvcr.io/nvidia/tensorrt:22.07-py3

# Alternative: interactive container from the project's own image,
# publishing container port 8633 on the same host port.
docker run --gpus all -it --rm \
  -v /home/vietle/topic-clustering:/home/vietle/topic-clustering \
  --name topic_clustering_trt \
  -p 8633:8633 \
  topic-clustering-trt

# --- Engine builds (run inside the container) -------------------------------
# Dynamic batch size 1..32 (opt 32), fixed sequence length 512 (256 for the
# simcse model).
# NOTE(review): model files use a ".onx" extension rather than the usual
# ".onnx" — confirm the files on disk are really named that way.
# NOTE(review): --explicitBatch is deprecated and implied when --*Shapes are
# supplied on recent TensorRT releases; harmless here but can be dropped.

# paraphrase-mpnet-base-v2: inputs are input_ids + attention_mask only.
/usr/src/tensorrt/bin/trtexec \
  --onnx=tensorRT/models/paraphrase-mpnet-base-v2.onx \
  --minShapes=input_ids:1x512,attention_mask:1x512 \
  --optShapes=input_ids:32x512,attention_mask:32x512 \
  --maxShapes=input_ids:32x512,attention_mask:32x512 \
  --verbose --explicitBatch \
  --saveEngine=tensorRT/models/paraphrase-mpnet-base-v2.engine \
  --device=4

# distiluse-base-multilingual-cased-v2: same two-input signature.
/usr/src/tensorrt/bin/trtexec \
  --onnx=tensorRT/models/distiluse-base-multilingual-cased-v2.onx \
  --minShapes=input_ids:1x512,attention_mask:1x512 \
  --optShapes=input_ids:32x512,attention_mask:32x512 \
  --maxShapes=input_ids:32x512,attention_mask:32x512 \
  --verbose --explicitBatch \
  --saveEngine=tensorRT/models/distiluse-base-multilingual-cased-v2.engine \
  --device=4

# paraphrase-multilingual-MiniLM-L12-v2: this export also takes
# token_type_ids, so all three inputs get shape profiles.
/usr/src/tensorrt/bin/trtexec \
  --onnx=tensorRT/models/paraphrase-multilingual-MiniLM-L12-v2.onx \
  --minShapes=input_ids:1x512,attention_mask:1x512,token_type_ids:1x512 \
  --optShapes=input_ids:32x512,attention_mask:32x512,token_type_ids:32x512 \
  --maxShapes=input_ids:32x512,attention_mask:32x512,token_type_ids:32x512 \
  --verbose --explicitBatch \
  --saveEngine=tensorRT/models/paraphrase-multilingual-MiniLM-L12-v2.engine \
  --device=4

# model-sup-simcse-vn: shorter sequence length (256) and GPU 0.
# NOTE(review): the engine is saved as "...-vn2.engine" while the ONNX input
# is "...-vn.onx" — verify this name mismatch is intentional (e.g. a second
# build variant) and not a typo.
/usr/src/tensorrt/bin/trtexec \
  --onnx=tensorRT/models/model-sup-simcse-vn.onx \
  --minShapes=input_ids:1x256,attention_mask:1x256,token_type_ids:1x256 \
  --optShapes=input_ids:32x256,attention_mask:32x256,token_type_ids:32x256 \
  --maxShapes=input_ids:32x256,attention_mask:32x256,token_type_ids:32x256 \
  --verbose --explicitBatch \
  --saveEngine=tensorRT/models/model-sup-simcse-vn2.engine \
  --device=0