File size: 4,029 Bytes
15d6c34
 
 
 
 
 
 
 
 
 
 
 
 
 
fa0aa6d
 
 
 
 
15d6c34
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
fa0aa6d
15d6c34
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
# --- user-tunable knobs (override on the command line, e.g. `make SEED=7 train`) ---
# get git root; := so the shell command runs once at parse time, not per use
ROOT_DIR := $(shell git rev-parse --show-toplevel)/text2motion
PYTHON_BIN := python3
EXP := motiondiffuse
SEED := 42
MODEL_DIR := checkpoints/grab/demo
# MODEL_DIR := checkpoints/grab/md_fulem_2g_excl_196_seed42
EPOCH := latest
PROMPT := a person walking happily
# PROMPT := happiness airplane pass
# ground-truth sequence file (comment on its own line so no trailing
# whitespace is captured into the value)
GT_FILE := s2/cubesmall_lift
FRAMES := 60
MEAN_EMOTION := surprise

# Every target in this file is a command, not a file it produces -- declare
# them phony so a stray file with a matching name can never shadow them.
.PHONY: huggingface mean-mesh expl-train eval train gen-npy play-gen-gif \
        play-gen-gui play-gt-gif play-gt-gui gen aug print-data queue \
        w_stats w_custom stat gpu space hog env_setup

# Run the Hugging Face demo app locally.
# To push changes to the Space itself: 'git push space main'
# (one-time setup: git remote add space https://huggingface.co/spaces/ellemac/Text2EMotionDiffuse)
huggingface:
	cd $(ROOT_DIR) && $(PYTHON_BIN) -m app

# Compute/display the mean mesh for MEAN_EMOTION over the given split file.
# (trailing backslash into a blank line removed -- it made the recipe fragile)
mean-mesh:
	cd $(ROOT_DIR) && vglrun $(PYTHON_BIN) -m datasets.mean_mesh \
	--emotion $(MEAN_EMOTION) \
	--file train.txt
# Explore the training split with the dataset explorer tool.
expl-train:
	cd $(ROOT_DIR) && $(PYTHON_BIN) -m datasets.train_explorer
# Run evaluation using the options file of the model in MODEL_DIR.
eval:
	cd $(ROOT_DIR) && $(PYTHON_BIN) -m tools.evaluation $(MODEL_DIR)/opt.txt
# Train the diffusion model (writes dataset statistics first via w_stats).
# Experiment is named md_$(EXP)_seed$(SEED) both locally and on wandb.
train: w_stats
	echo "experiment name md_$(EXP)_seed$(SEED)"
	cd $(ROOT_DIR) && $(PYTHON_BIN) -m tools.train \
	--name md_$(EXP)_seed$(SEED) \
	--batch_size 128 \
	--times 50 \
	--num_epochs 50 \
	--dataset_name grab \
	--num_layers 8 \
	--diffusion_steps 1000 \
	--data_parallel \
	--gpu_id 0 1 \
	--wandb_user "elles" \
	--experiment_name md_$(EXP)_seed$(SEED) \
	--log_every 50 \
	--seed $(SEED) \
	--use_wandb
# gen-npy makes the model generate a sequence from PROMPT and writes the
# result to an npy file under MODEL_DIR/outputs.
# Seed now follows the SEED variable instead of a hard-coded 42.
gen-npy:
	cd $(ROOT_DIR) && $(PYTHON_BIN) -m tools.inference \
	--opt_path $(MODEL_DIR)/opt.txt \
	--which_epoch $(EPOCH) \
	--text "$(PROMPT)" \
	--npy_path $(MODEL_DIR)/outputs \
	--seed $(SEED) \
	--motion_length $(FRAMES)
# Put the model you trained in MODEL_DIR (set at top of file) and render the
# poses it generated for PROMPT; the SMPL-X model displays them as meshes and
# saves a gif.
# WARNING: run 'make gen-npy' first to generate the npy files this consumes.
play-gen-gif:
	echo "make sure to run on hpc dtu gui with graphics support and that you use 'vglrun' before python3 call!"
	echo "WARNING: run 'make gen' first to generate the npy files for make gen"
	cd $(ROOT_DIR) && vglrun $(PYTHON_BIN) -m datasets.motionx_explorer \
	--model_path $(MODEL_DIR) \
	--which_epoch $(EPOCH) \
	--prompt "$(PROMPT)" \
	--display_mesh \
	--save_gif \
	--max_t $(FRAMES)
# Same as play-gen-gif but interactive (no gif written).
# NOTE(review): the old warning referenced a nonexistent 'make infer' target;
# the npy files are produced by 'make gen-npy'.
play-gen-gui:
	echo "make sure to run on hpc dtu gui with graphics support and that you use 'vglrun' before python3 call!"
	echo "WARNING: run 'make infer' first to generate the npy files for make gen"
	cd $(ROOT_DIR) && $(PYTHON_BIN) -m datasets.motionx_explorer \
	--model_path $(MODEL_DIR) \
	--which_epoch $(EPOCH) \
	--prompt "$(PROMPT)" \
	--display_mesh \
	--max_t $(FRAMES)

# SMPL-X displays the ground-truth poses from GT_FILE as meshes and saves a gif.
play-gt-gif:
	cd $(ROOT_DIR) && vglrun $(PYTHON_BIN) -m datasets.motionx_explorer \
	--seq_file $(GT_FILE) \
	--display_mesh \
	--save_gif \
	--max_t $(FRAMES)

# Same as play-gt-gif but interactive (no gif written).
play-gt-gui:
	cd $(ROOT_DIR) && vglrun $(PYTHON_BIN) -m datasets.motionx_explorer \
	--seq_file $(GT_FILE) \
	--display_mesh \
	--max_t $(FRAMES)

# Full pipeline: generate the npy sequence, then render it to a gif.
gen: gen-npy play-gen-gif

# Run face-motion augmentation from the Motion-X preprocessing scripts.
# NOTE(review): 'python -m' takes a dotted *module* path, and module names
# cannot contain hyphens -- 'Motion-X.mocap-dataset-process....' looks like a
# filesystem path pasted in. Verify this target actually runs; it may need to
# be invoked as a script path instead.
aug:
	cd $(ROOT_DIR) && $(PYTHON_BIN) -m Motion-X.mocap-dataset-process.face_motion_augmentation

# Print dataset info via the Motion-X explorer (no mesh display flags).
print-data:
	cd $(ROOT_DIR) && $(PYTHON_BIN) -m datasets.motionx_explorer
# Submit the training jobscript to the LSF batch queue.
queue:
	cd $(ROOT_DIR) && bsub < jobscript.sh

# Write dataset statistics (a prerequisite of 'train').
w_stats:
	cd $(ROOT_DIR) && $(PYTHON_BIN) -m datasets.statistics_writer
# Write the custom dataset files.
w_custom:
	cd $(ROOT_DIR) && $(PYTHON_BIN) -m datasets.custom_data_writer
# Show the newest LSF job log files (ls -v sorts by the numeric job id
# embedded in the filename). $$ passes a literal $ through to the shell.
stat:
	@err_file=$$(ls -v gpu_*.err | tail -n 1); \
	out_file=$$(ls -v gpu_*.out | tail -n 1); \
	echo "Latest .err file: $$err_file"; \
	echo "Latest .out file: $$out_file"
# Check GPU utilization of the latest job: extract the job number from the
# newest gpu_<N>.err file name and pass it to bnvtop.
gpu:
	@err_file=$$(ls -v gpu_*.err | tail -n 1); \
	err_number=$$(echo $$err_file | grep -oP 'gpu_\K\d+(?=\.err)'); \
	echo "Latest .err file: $$err_file with number $$err_number"; \
	bnvtop $$err_number
# Show the remaining disk quota on work3 (cluster-provided helper script).
space:
	getquota_work3.sh

# Report disk usage one directory level deep under the work3 home,
# using apparent sizes rather than blocks allocated.
hog:
	du -h --max-depth=1 --apparent /work3/s222376/

# Print the 'module load' line needed to set up the cluster build
# environment (echoed for the user to run, not executed here).
env_setup:
	@echo "module load cuda/10.1 cudnn/v7.6.5.32-prod-cuda-10.1 gcc/5.4.0"