Sbatch2_pre_cscd_r_128_from_bert.sbatch
#!/bin/bash
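# Slurm resource request: 4 nodes on the "normal" partition, 4 tasks per
# node (one per DCU accelerator), 8 CPUs per task; stdout is written to
# pre_cscd_r_128_from_bert.out.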
#SBATCH -p normal
#SBATCH -N 4
#SBATCH -J pre_bert
#SBATCH -o pre_cscd_r_128_from_bert.out
#SBATCH --ntasks-per-node=4
#SBATCH --cpus-per-task=8
#SBATCH --gres=dcu:4
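
# Build an MPI hostfile listing each allocated node with 4 slots (one per DCU).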
DIR=$(pwd)
hostfile=${DIR}/tmp
scontrol show hostnames "$SLURM_JOB_NODELIST" > "${hostfile}"
for i in $(cat "${hostfile}"); do
    echo "${i} slots=4"
done > "${DIR}/hostfile-tmp"
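
# Count the allocated nodes and derive the total number of DCUs (4 per node).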
num_node=$(sort -u "${hostfile}" | wc -l)
num_DCU=$((num_node * 4))
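
# Per-user MIOpen kernel-cache location, find-db disabled, and Horovod's
# hierarchical allreduce enabled for the ROCm/DCU stack.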
export MIOPEN_USER_DB_PATH=/tmp/tensorflow-miopen-${USER}-2.8
export MIOPEN_DEBUG_DISABLE_FIND_DB=1
export HOROVOD_HIERARCHICAL_ALLREDUCE=1
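
# Launch one training process per DCU across all nodes; Open MPI uses the
# UCX transport and Horovod coordinates the distributed BERT pretraining.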
mpirun -np ${num_DCU} --hostfile ${DIR}/hostfile-tmp \
    -mca pml ucx -x UCX_TLS=sm,rc,rocm_cpy,rocm_gdr,rocm_ipc \
    -x LD_LIBRARY_PATH -mca coll_hcoll_enable 0 --bind-to none \
    python run_pretraining_hvd.py \
        --input_file /public/home/zzx6320/lh/Projects/bert/data/cscibert_pre_training/pre_training_R_cscd_128.tfrecord \
        --bert_config_file /public/home/zzx6320/lh/Projects/bert/models/chinese_L-12_H-768_A-12/chinese_L-12_H-768_A-12/bert_config.json \
        --init_checkpoint /public/home/zzx6320/lh/Projects/bert/models/chinese_L-12_H-768_A-12/chinese_L-12_H-768_A-12/bert_model.ckpt \
        --output_dir output/Pre2_cscd_R_128_64_from_bert \
        --max_seq_length 128 \
        --do_train True \
        --do_eval True \
        --train_batch_size 64 \
        --learning_rate 2e-5 \
        --num_train_steps 500000 \
        --save_checkpoints_steps 1000
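
# A minimal usage sketch (assumes this file is saved under its repo name in
# the project directory):
#   sbatch Sbatch2_pre_cscd_r_128_from_bert.sbatch
#   squeue -u $USER    # monitor the job; logs land in pre_cscd_r_128_from_bert.out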