{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"id": "08fe0c59",
"metadata": {},
"outputs": [],
"source": [
"from pathlib import Path\n",
"\n",
"import torch\n",
"from timesformer.models.vit import TimeSformer"
]
},
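{
"cell_type": "markdown",
"id": "b7e4c2d1",
"metadata": {},
"source": [
"An optional sanity check before loading the checkpoint: print the installed torch build and whether CUDA is visible. TimeSformer runs on CPU as well, just slowly."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "9a8f3e21",
"metadata": {},
"outputs": [],
"source": [
"print(torch.__version__)\n",
"print(torch.cuda.is_available())  # False is fine for this smoke test; inference just runs on CPU"
]
},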
{
"cell_type": "code",
"execution_count": 2,
"id": "10239d32",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"True"
]
},
"execution_count": 2,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"model_file = Path.home()/'TimeSformer/models/TimeSformer_divST_8x32_224_K600.pyth'\n",
"model_file.exists()"
]
},
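{
"cell_type": "markdown",
"id": "d4c1a7f0",
"metadata": {},
"source": [
"The checkpoint name encodes the configuration that the constructor in the next cell must match: `divST` is divided space-time attention (`attention_type='divided_space_time'`), `8x32` means 8 input frames (sampled at a temporal stride of 32, in the TimeSformer repo's naming), `224` is the spatial resolution, and `K600` marks the 600 Kinetics-600 classes."
]
},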
{
"cell_type": "code",
"execution_count": 3,
"id": "652fb03e",
"metadata": {},
"outputs": [],
"source": [
"model = TimeSformer(img_size=224, num_classes=600, num_frames=8, attention_type='divided_space_time', pretrained_model=str(model_file))\n",
"\n",
"dummy_video = torch.randn(2, 3, 8, 224, 224) # (batch x channels x frames x height x width)\n",
"\n",
"pred = model(dummy_video,) # (2, 600)"
]
},
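{
"cell_type": "markdown",
"id": "5e2b9c6a",
"metadata": {},
"source": [
"The forward pass above returns raw class logits. A minimal follow-up sketch: apply a softmax to turn them into probabilities and rank the top-5 classes per clip. With the random `dummy_video` the ranking is meaningless, but the same code applies to real clips; mapping indices to names would additionally need the Kinetics-600 label list, which is not loaded here."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "1f6a8d3e",
"metadata": {},
"outputs": [],
"source": [
"with torch.no_grad():\n",
"    probs = torch.softmax(pred, dim=-1)  # (2, 600); each row sums to 1\n",
"    top_probs, top_idx = probs.topk(5, dim=-1)  # top-5 scores and class indices per clip\n",
"top_idx"
]
},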
{
"cell_type": "code",
"execution_count": 6,
"id": "83de13c5-791c-4db7-aba4-6d29ce88584e",
"metadata": {},
"outputs": [],
"source": [
"assert pred.shape == (2,600)"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.4"
}
},
"nbformat": 4,
"nbformat_minor": 5
}