# ===== LoRA SFT runs (results/lora_sft_2) =====

# FORCE_TORCHRUN=1 llamafactory-cli train results/lora_sft_2/Llama2-7B/llama2_lora_sft_1.yaml | tee results/lora_sft_2/Llama2-7B/llama2_lora_sft_1_log.txt
# FORCE_TORCHRUN=1 llamafactory-cli train results/lora_sft_2/Llama2-7B/llama2_lora_sft_2.yaml | tee results/lora_sft_2/Llama2-7B/llama2_lora_sft_2_log.txt
# FORCE_TORCHRUN=1 llamafactory-cli train results/lora_sft_2/Llama2-7B/llama2_lora_sft_3.yaml | tee results/lora_sft_2/Llama2-7B/llama2_lora_sft_3_log.txt
# CUDA_VISIBLE_DEVICES=0 llamafactory-cli train results/lora_sft_2/Llama2-7B/llama2_lora_sft_1_single.yaml | tee results/lora_sft_2/Llama2-7B/llama2_lora_sft_1_single_log.txt
# CUDA_VISIBLE_DEVICES=0 llamafactory-cli train results/lora_sft_2/Llama2-7B/llama2_lora_sft_2_single.yaml | tee results/lora_sft_2/Llama2-7B/llama2_lora_sft_2_single_log.txt
# CUDA_VISIBLE_DEVICES=0 llamafactory-cli train results/lora_sft_2/Llama2-7B/llama2_lora_sft_3_single.yaml | tee results/lora_sft_2/Llama2-7B/llama2_lora_sft_3_single_log.txt

# FORCE_TORCHRUN=1 llamafactory-cli train results/lora_sft_2/Baichuan2-7B/Baichuan2_lora_sft_1.yaml | tee results/lora_sft_2/Baichuan2-7B/Baichuan2_lora_sft_1_log.txt
# FORCE_TORCHRUN=1 llamafactory-cli train results/lora_sft_2/Baichuan2-7B/Baichuan2_lora_sft_2.yaml | tee results/lora_sft_2/Baichuan2-7B/Baichuan2_lora_sft_2_log.txt
# FORCE_TORCHRUN=1 llamafactory-cli train results/lora_sft_2/Baichuan2-7B/Baichuan2_lora_sft_3.yaml | tee results/lora_sft_2/Baichuan2-7B/Baichuan2_lora_sft_3_log.txt
# CUDA_VISIBLE_DEVICES=0 llamafactory-cli train results/lora_sft_2/Baichuan2-7B/Baichuan2_lora_sft_1_single.yaml | tee results/lora_sft_2/Baichuan2-7B/Baichuan2_lora_sft_1_single_log.txt
# CUDA_VISIBLE_DEVICES=0 llamafactory-cli train results/lora_sft_2/Baichuan2-7B/Baichuan2_lora_sft_2_single.yaml | tee results/lora_sft_2/Baichuan2-7B/Baichuan2_lora_sft_2_single_log.txt
# CUDA_VISIBLE_DEVICES=0 llamafactory-cli train results/lora_sft_2/Baichuan2-7B/Baichuan2_lora_sft_3_single.yaml | tee results/lora_sft_2/Baichuan2-7B/Baichuan2_lora_sft_3_single_log.txt

# FORCE_TORCHRUN=1 llamafactory-cli train results/lora_sft_2/ChatGLM2-6B/ChatGLM2_lora_sft_1.yaml | tee results/lora_sft_2/ChatGLM2-6B/ChatGLM2_lora_sft_1_log.txt
# FORCE_TORCHRUN=1 llamafactory-cli train results/lora_sft_2/ChatGLM2-6B/ChatGLM2_lora_sft_2.yaml | tee results/lora_sft_2/ChatGLM2-6B/ChatGLM2_lora_sft_2_log.txt
# FORCE_TORCHRUN=1 llamafactory-cli train results/lora_sft_2/ChatGLM2-6B/ChatGLM2_lora_sft_3.yaml | tee results/lora_sft_2/ChatGLM2-6B/ChatGLM2_lora_sft_3_log.txt
# CUDA_VISIBLE_DEVICES=0 llamafactory-cli train results/lora_sft_2/ChatGLM2-6B/ChatGLM2_lora_sft_1_single.yaml | tee results/lora_sft_2/ChatGLM2-6B/ChatGLM2_lora_sft_1_single_log.txt
# CUDA_VISIBLE_DEVICES=0 llamafactory-cli train results/lora_sft_2/ChatGLM2-6B/ChatGLM2_lora_sft_2_single.yaml | tee results/lora_sft_2/ChatGLM2-6B/ChatGLM2_lora_sft_2_single_log.txt
# CUDA_VISIBLE_DEVICES=0 llamafactory-cli train results/lora_sft_2/ChatGLM2-6B/ChatGLM2_lora_sft_3_single.yaml | tee results/lora_sft_2/ChatGLM2-6B/ChatGLM2_lora_sft_3_single_log.txt

# FORCE_TORCHRUN=1 llamafactory-cli train results/lora_sft_2/Qwen-7B/Qwen_lora_sft_1.yaml | tee results/lora_sft_2/Qwen-7B/Qwen_lora_sft_1_log.txt
# FORCE_TORCHRUN=1 llamafactory-cli train results/lora_sft_2/Qwen-7B/Qwen_lora_sft_2.yaml | tee results/lora_sft_2/Qwen-7B/Qwen_lora_sft_2_log.txt
# FORCE_TORCHRUN=1 llamafactory-cli train results/lora_sft_2/Qwen-7B/Qwen_lora_sft_3.yaml | tee results/lora_sft_2/Qwen-7B/Qwen_lora_sft_3_log.txt
# CUDA_VISIBLE_DEVICES=0 llamafactory-cli train results/lora_sft_2/Qwen-7B/Qwen_lora_sft_1_single.yaml | tee results/lora_sft_2/Qwen-7B/Qwen_lora_sft_1_single_log.txt
# CUDA_VISIBLE_DEVICES=0 llamafactory-cli train results/lora_sft_2/Qwen-7B/Qwen_lora_sft_2_single.yaml | tee results/lora_sft_2/Qwen-7B/Qwen_lora_sft_2_single_log.txt
# CUDA_VISIBLE_DEVICES=0 llamafactory-cli train results/lora_sft_2/Qwen-7B/Qwen_lora_sft_3_single.yaml | tee results/lora_sft_2/Qwen-7B/Qwen_lora_sft_3_single_log.txt

# ===== Batch prediction runs (results/inference) =====

# FORCE_TORCHRUN=1 llamafactory-cli train results/inference/Llama2-7B/llama2_predict_1.yaml | tee results/inference/Llama2-7B/llama2_predict_1_log.txt
# FORCE_TORCHRUN=1 llamafactory-cli train results/inference/Llama2-7B/llama2_predict_2.yaml | tee results/inference/Llama2-7B/llama2_predict_2_log.txt
# FORCE_TORCHRUN=1 llamafactory-cli train results/inference/Llama2-7B/llama2_predict_3.yaml | tee results/inference/Llama2-7B/llama2_predict_3_log.txt
# CUDA_VISIBLE_DEVICES=0 llamafactory-cli train results/inference/Llama2-7B/llama2_predict_1_single.yaml | tee results/inference/Llama2-7B/llama2_predict_1_single_log.txt
# CUDA_VISIBLE_DEVICES=0 llamafactory-cli train results/inference/Llama2-7B/llama2_predict_2_single.yaml | tee results/inference/Llama2-7B/llama2_predict_2_single_log.txt
# CUDA_VISIBLE_DEVICES=0 llamafactory-cli train results/inference/Llama2-7B/llama2_predict_3_single.yaml | tee results/inference/Llama2-7B/llama2_predict_3_single_log.txt

# FORCE_TORCHRUN=1 llamafactory-cli train results/inference/Qwen-7B/Qwen_predict_1.yaml | tee results/inference/Qwen-7B/Qwen_predict_1_log.txt
# FORCE_TORCHRUN=1 llamafactory-cli train results/inference/Qwen-7B/Qwen_predict_2.yaml | tee results/inference/Qwen-7B/Qwen_predict_2_log.txt
# FORCE_TORCHRUN=1 llamafactory-cli train results/inference/Qwen-7B/Qwen_predict_3.yaml | tee results/inference/Qwen-7B/Qwen_predict_3_log.txt
# CUDA_VISIBLE_DEVICES=0 llamafactory-cli train results/inference/Qwen-7B/Qwen_predict_1_single.yaml | tee results/inference/Qwen-7B/Qwen_predict_1_single_log.txt
# CUDA_VISIBLE_DEVICES=0 llamafactory-cli train results/inference/Qwen-7B/Qwen_predict_2_single.yaml | tee results/inference/Qwen-7B/Qwen_predict_2_single_log.txt
# CUDA_VISIBLE_DEVICES=0 llamafactory-cli train results/inference/Qwen-7B/Qwen_predict_3_single.yaml | tee results/inference/Qwen-7B/Qwen_predict_3_single_log.txt

FORCE_TORCHRUN=1 CUDA_VISIBLE_DEVICES=0,1,4,5,6 llamafactory-cli train results/inference/ChatGLM2-6B/ChatGLM2_predict_1.yaml | tee results/inference/ChatGLM2-6B/ChatGLM2_predict_1_log.txt
FORCE_TORCHRUN=1 CUDA_VISIBLE_DEVICES=0,1,4,5,6 llamafactory-cli train results/inference/ChatGLM2-6B/ChatGLM2_predict_2.yaml | tee results/inference/ChatGLM2-6B/ChatGLM2_predict_2_log.txt
FORCE_TORCHRUN=1 CUDA_VISIBLE_DEVICES=0,1,4,5,6 llamafactory-cli train results/inference/ChatGLM2-6B/ChatGLM2_predict_3.yaml | tee results/inference/ChatGLM2-6B/ChatGLM2_predict_3_log.txt
CUDA_VISIBLE_DEVICES=6 llamafactory-cli train results/inference/ChatGLM2-6B/ChatGLM2_predict_1_single.yaml | tee results/inference/ChatGLM2-6B/ChatGLM2_predict_1_single_log.txt
CUDA_VISIBLE_DEVICES=6 llamafactory-cli train results/inference/ChatGLM2-6B/ChatGLM2_predict_2_single.yaml | tee results/inference/ChatGLM2-6B/ChatGLM2_predict_2_single_log.txt
CUDA_VISIBLE_DEVICES=6 llamafactory-cli train results/inference/ChatGLM2-6B/ChatGLM2_predict_3_single.yaml | tee results/inference/ChatGLM2-6B/ChatGLM2_predict_3_single_log.txt

# FORCE_TORCHRUN=1 llamafactory-cli train results/inference/Baichuan2-7B/Baichuan2_predict_1.yaml | tee results/inference/Baichuan2-7B/Baichuan2_predict_1_log.txt
# FORCE_TORCHRUN=1 llamafactory-cli train results/inference/Baichuan2-7B/Baichuan2_predict_2.yaml | tee results/inference/Baichuan2-7B/Baichuan2_predict_2_log.txt
# FORCE_TORCHRUN=1 llamafactory-cli train results/inference/Baichuan2-7B/Baichuan2_predict_3.yaml | tee results/inference/Baichuan2-7B/Baichuan2_predict_3_log.txt
# CUDA_VISIBLE_DEVICES=0 llamafactory-cli train results/inference/Baichuan2-7B/Baichuan2_predict_1_single.yaml | tee results/inference/Baichuan2-7B/Baichuan2_predict_1_single_log.txt
# CUDA_VISIBLE_DEVICES=0 llamafactory-cli train results/inference/Baichuan2-7B/Baichuan2_predict_2_single.yaml | tee results/inference/Baichuan2-7B/Baichuan2_predict_2_single_log.txt
# CUDA_VISIBLE_DEVICES=0 llamafactory-cli train results/inference/Baichuan2-7B/Baichuan2_predict_3_single.yaml | tee results/inference/Baichuan2-7B/Baichuan2_predict_3_single_log.txt
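
# Note: every run above follows the same "<config>.yaml -> <config>_log.txt" pattern.
# The loop below is only a convenience sketch, not part of the recorded runs: it assumes
# the same results/<stage>/<model>/*.yaml layout used above and chooses a single-GPU or
# torchrun launch from the "_single" suffix. Uncomment and point it at the desired
# directory before using; adjust CUDA_VISIBLE_DEVICES to match the target machine.
# for cfg in results/lora_sft_2/Llama2-7B/*.yaml; do
#     log="${cfg%.yaml}_log.txt"
#     if [[ "$cfg" == *_single.yaml ]]; then
#         # single-GPU run
#         CUDA_VISIBLE_DEVICES=0 llamafactory-cli train "$cfg" | tee "$log"
#     else
#         # multi-GPU run via torchrun
#         FORCE_TORCHRUN=1 llamafactory-cli train "$cfg" | tee "$log"
#     fi
# done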