#!/bin/bash
./move.sh
cd src/r1-v
export DEBUG_MODE="true"          # when true, the reward functions log each step's completions
export LOG_PATH="./vllm_run.txt"  # destination file for that debug log
## Stage 2: start from a stage-1 checkpoint, then run the no-des-eval training pass.
# Stage-1 self-eval checkpoint and earlier data chunks, kept for reference:
# QWEN_PATH='/apdcephfs_sh2/share_300000800/user/zongxia/Video-R1/src/r1-v/log/3B-Video-GRPO-SelfEval-Train/pool_numerical_chunk_01/checkpoint-25'
# DATA_FILE='video_pool_multiple_choice_chunk_01'
# DATA_FILE='pool_numerical_chunk_01'
# DATA_FILE='pool_numerical_chunk_02'
# Active configuration: AnswerBERT checkpoint, multiple-choice chunk 02.
QWEN_PATH='/apdcephfs_sh2/share_300000800/user/zongxia/Video-R1/src/r1-v/log/3B-Video-GRPO-AnswerBERT/video_pool_multiple_choice_chunk_01/checkpoint-46'
DATA_FILE="pool_multiple_choice_chunk_02"
HF_DATASET="./Video-R1-data/${DATA_FILE}.json"
OUTPUT_DIR="./log/3B-Video-GRPO-selfEval-ThenNoDesEval/${DATA_FILE}"
mkdir -p "$OUTPUT_DIR"  # no-op if the directory already exists
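# Optional fail-fast guard (an addition, not part of the original workflow):
# torchrun only reports a missing checkpoint or dataset after all workers
# spin up, so check both paths before launching.
for required in "$QWEN_PATH" "$HF_DATASET"; do
    if [ ! -e "$required" ]; then
        echo "Missing required path: $required" >&2
        exit 1
    fi
done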
RUN_NAME="3B-Video-GRPO-selfEval-ThenNoDesEval"
DS_CONFIG="local_scripts/zero3.json"
# Set --temporal to choose between T-GRPO and GRPO, and --len_control to enable or disable the length-control reward.
# NOTE: use X + 1 cards for X training procs plus 1 vLLM proc,
# e.g., visible devices 0,1,2,3,4 for 5 cards with --nproc_per_node="4"; see the sketch just below.
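# Illustrative sketch (an addition; the launch command below does not read these
# variables): derive the training-proc count from a device list under the
# X + 1 rule, reserving the last card for the vLLM worker. GPUS and NPROC are
# hypothetical names local to this sketch.
GPUS="0,1,2,3,4,5,6,7"
NPROC=$(( $(awk -F',' '{print NF}' <<< "$GPUS") - 1 ))  # 8 cards -> 7 train procs + 1 vLLM card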
export WANDB_API_KEY="<your-wandb-api-key>"  # supply via the environment; do not hard-code a real key
CUDA_VISIBLE_DEVICES="0,1,2,3,4,5,6,7" torchrun \
--nproc_per_node="7" \
--nnodes="1" \
--node_rank="0" \
--master_addr="127.0.0.1" \
--master_port="12345" \
src/open_r1/grpo-cot-noDesEval.py \
--use_vllm true \
--output_dir ${OUTPUT_DIR} \
--model_name_or_path ${QWEN_PATH} \
--dataset_name ${HF_DATASET} \
--max_prompt_length 16384 \
--max_completion_length 1600 \
--per_device_train_batch_size 1 \
--gradient_accumulation_steps 32 \
--learning_rate 1e-6 \
--lr_scheduler_type "cosine" \
--weight_decay 0.01 \
--logging_steps 1 \
--bf16 true \
--gradient_checkpointing true \
--attn_implementation flash_attention_2 \
--min_pixels 3136 \
--max_pixels 501760 \
--num_train_epochs 1 \
--run_name ${RUN_NAME} \
--save_steps 14 \
--save_only_model false \
--temporal true \
--len_control false \
--report_to wandb \
--beta 0.04 \
--max_grad_norm 5 \
--temperature 1.0 \
--num_generations 8 \
--vllm_device "cuda:7" \
--vllm_gpu_memory_utilization 0.7 \
--deepspeed ${DS_CONFIG} \
2>&1 | tee "${OUTPUT_DIR}/training_log.txt"
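# For reference (arithmetic only; nothing here changes the run): under standard
# Trainer accounting, the effective batch per optimizer step is
# per_device_train_batch_size (1) x gradient_accumulation_steps (32)
# x 7 training processes = 224 samples, which is divisible by
# num_generations (8) as GRPO's per-prompt generation groups require.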
python /cq_1/share_1603164/user/zongxia/gpu_burn.py  # keeps the allocated GPUs occupied after training ends