Universe = vanilla

RequestCpus = 4
RequestMemory = 30G
+RequestWallTime = 30000
request_GPUs = 1

Requirements = (Has_avx) \
    && ((GPUs_GlobalMemoryMb >= 15000) || (CUDAGlobalMemoryMb >= 15000))
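# Note: the Requirements expression above restricts the job to machines that
# advertise AVX support and at least ~15 GB of GPU memory. Both attribute
# names are checked, presumably because not every machine in the pool
# advertises its GPU memory under the same ClassAd attribute.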
NiceUser = true
initialdir = /users/spraak/jponcele/whisper/finetune_event/whisper_runs

Executable = /esat/spchtemp/scratch/jponcele/anaconda3/envs/whisper/bin/python
Arguments = "run_eval_whisper_streaming_local.py --model_size=$(size) --dataset=$(dataset) --device=gpu --batch_size=$(bs)"
Log = /esat/audioslave/jponcele/whisper/finetuning_event/CGN/condor/condor-eval-$(dataset)-$(size).log
Output = /esat/audioslave/jponcele/whisper/finetuning_event/CGN/condor/condor-eval-$(dataset)-$(size).out
Error = /esat/audioslave/jponcele/whisper/finetuning_event/CGN/condor/condor-eval-$(dataset)-$(size).err

Queue
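# Since the macros above are undefined in this file, they presumably have to
# be passed on the command line at submit time, e.g. (file name and values are
# only illustrative):
#
#   condor_submit size=large dataset=cgn bs=16 eval.sub
#
# The bare Queue statement queues a single job per submission.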
|