#!/bin/bash
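# ./move.sh is a repo-local helper (its exact role is assumed here to be a setup
# step); training itself is driven from the src/r1-v directory.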
./move.sh
cd src/r1-v
export DEBUG_MODE="true"
export LOG_PATH="./vllm_run.txt"
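# The two exports above follow the R1-V convention (an assumption here): with
# DEBUG_MODE=true, the reward functions append sampled completions and their
# rewards to LOG_PATH for inspection during training.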
QWEN_PATH='Qwen/Qwen2.5-VL-3B-Instruct'
HF_DATASET="./Video-R1-data/Train_QA_10k_noFreeForm.json"
OUTPUT_DIR="./log/Qwen2.5-VL-3B-Video-GRPO-Self-Eval-Train-QA10K"
mkdir -p "$OUTPUT_DIR"  # -p makes this a no-op if the directory already exists
RUN_NAME="Qwen2.5-VL-3B-Video-GRPO-COT-SelfEval-QA10K"
DS_CONFIG="local_scripts/zero3.json"
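# zero3.json is assumed to be a standard DeepSpeed ZeRO stage-3 config, sharding
# parameters, gradients, and optimizer states across the training ranks.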
# Set --temporal to choose between T-GRPO and GRPO; --len_control enables or disables the length-control reward.
# NOTE: use X + 1 GPUs in total: X for the training processes plus 1 dedicated to vLLM.
# e.g., with 5 cards the visible devices should be 0,1,2,3,4 and --nproc_per_node="4".
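# A minimal sketch (not in the original script) of deriving these counts from an
# exported CUDA_VISIBLE_DEVICES, reserving the last visible card for vLLM:
#   NUM_GPUS=$(awk -F',' '{print NF}' <<< "$CUDA_VISIBLE_DEVICES")
#   NPROC=$((NUM_GPUS - 1))               # X training processes
#   VLLM_DEVICE="cuda:$((NUM_GPUS - 1))"  # the remaining card runs vLLM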
CUDA_VISIBLE_DEVICES="0,1,2,3,4,5,6,7" torchrun \
--nproc_per_node="7" \
--nnodes="1" \
--node_rank="0" \
--master_addr="127.0.0.1" \
--master_port="12345" \
src/open_r1/grpo-cot-selfEval.py \
--use_vllm true \
--output_dir "${OUTPUT_DIR}" \
--model_name_or_path "${QWEN_PATH}" \
--dataset_name "${HF_DATASET}" \
--max_prompt_length 16384 \
--max_completion_length 976 \
--per_device_train_batch_size 1 \
--gradient_accumulation_steps 16 \
--learning_rate 1e-6 \
--lr_scheduler_type "cosine" \
--weight_decay 0.01 \
--logging_steps 1 \
--bf16 true \
--gradient_checkpointing true \
--attn_implementation flash_attention_2 \
--min_pixels 3136 \
--max_pixels 501760 \
--num_train_epochs 2 \
--run_name "${RUN_NAME}" \
--save_steps 30 \
--save_only_model false \
--temporal true \
--len_control true \
--report_to wandb \
--beta 0.04 \
--max_grad_norm 5 \
--temperature 1.0 \
--num_generations 8 \
--vllm_device "cuda:7" \
--vllm_gpu_memory_utilization 0.7 \
--deepspeed "${DS_CONFIG}" \
2>&1 | tee "${OUTPUT_DIR}/training_log.txt"
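# Post-training step; the script name suggests it keeps the reserved GPUs busy
# (e.g., to hold a cluster allocation). The path below is cluster- and user-specific.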
python /apdcephfs_sh2/share_300000800/user/zongxia/Video-R1/gpu_burn.py