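#!/bin/bash
# LLaMA-Factory runs: LoRA SFT and prediction jobs for Llama2-7B, Baichuan2-7B,
# ChatGLM2-6B, and Qwen-7B. Each command reads a YAML config under results/ and
# redirects stdout to a matching .log file (stderr is not redirected).
# FORCE_TORCHRUN=1 makes llamafactory-cli launch the job with torchrun (multi-GPU);
# the *_single variants pin the job to GPU 0 via CUDA_VISIBLE_DEVICES=0.

# ---- Llama2-7B LoRA SFT, multi-GPU (torchrun) ----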
FORCE_TORCHRUN=1 llamafactory-cli train results/lora_sft/Llama2-7B/llama2_lora_sft_1.yaml > results/lora_sft/Llama2-7B/llama2_lora_sft_1.log
FORCE_TORCHRUN=1 llamafactory-cli train results/lora_sft/Llama2-7B/llama2_lora_sft_2.yaml > results/lora_sft/Llama2-7B/llama2_lora_sft_2.log
FORCE_TORCHRUN=1 llamafactory-cli train results/lora_sft/Llama2-7B/llama2_lora_sft_3.yaml > results/lora_sft/Llama2-7B/llama2_lora_sft_3.log
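
# ---- Llama2-7B LoRA SFT, single GPU (CUDA_VISIBLE_DEVICES=0) ----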
CUDA_VISIBLE_DEVICES=0 llamafactory-cli train results/lora_sft/Llama2-7B/llama2_lora_sft_1_single.yaml > results/lora_sft/Llama2-7B/llama2_lora_sft_1_single.log
CUDA_VISIBLE_DEVICES=0 llamafactory-cli train results/lora_sft/Llama2-7B/llama2_lora_sft_2_single.yaml > results/lora_sft/Llama2-7B/llama2_lora_sft_2_single.log
CUDA_VISIBLE_DEVICES=0 llamafactory-cli train results/lora_sft/Llama2-7B/llama2_lora_sft_3_single.yaml > results/lora_sft/Llama2-7B/llama2_lora_sft_3_single.log
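
# ---- Llama2-7B prediction (predict_*.yaml), multi-GPU (torchrun) ----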
FORCE_TORCHRUN=1 llamafactory-cli train results/inference/Llama2-7B/llama2_predict_1.yaml > results/inference/Llama2-7B/llama2_predict_1.log
FORCE_TORCHRUN=1 llamafactory-cli train results/inference/Llama2-7B/llama2_predict_2.yaml > results/inference/Llama2-7B/llama2_predict_2.log
FORCE_TORCHRUN=1 llamafactory-cli train results/inference/Llama2-7B/llama2_predict_3.yaml > results/inference/Llama2-7B/llama2_predict_3.log
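
# ---- Llama2-7B prediction, single GPU ----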
CUDA_VISIBLE_DEVICES=0 llamafactory-cli train results/inference/Llama2-7B/llama2_predict_1_single.yaml > results/inference/Llama2-7B/llama2_predict_1_single.log
CUDA_VISIBLE_DEVICES=0 llamafactory-cli train results/inference/Llama2-7B/llama2_predict_2_single.yaml > results/inference/Llama2-7B/llama2_predict_2_single.log
CUDA_VISIBLE_DEVICES=0 llamafactory-cli train results/inference/Llama2-7B/llama2_predict_3_single.yaml > results/inference/Llama2-7B/llama2_predict_3_single.log
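
# ---- Baichuan2-7B LoRA SFT, multi-GPU then single-GPU variants (disabled; uncomment to run) ----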
# FORCE_TORCHRUN=1 llamafactory-cli train results/lora_sft/Baichuan2-7B/Baichuan2_lora_sft_1.yaml > results/lora_sft/Baichuan2-7B/Baichuan2_lora_sft_1.log
# FORCE_TORCHRUN=1 llamafactory-cli train results/lora_sft/Baichuan2-7B/Baichuan2_lora_sft_2.yaml > results/lora_sft/Baichuan2-7B/Baichuan2_lora_sft_2.log
# FORCE_TORCHRUN=1 llamafactory-cli train results/lora_sft/Baichuan2-7B/Baichuan2_lora_sft_3.yaml > results/lora_sft/Baichuan2-7B/Baichuan2_lora_sft_3.log
# CUDA_VISIBLE_DEVICES=0 llamafactory-cli train results/lora_sft/Baichuan2-7B/Baichuan2_lora_sft_1_single.yaml > results/lora_sft/Baichuan2-7B/Baichuan2_lora_sft_1_single.log
# CUDA_VISIBLE_DEVICES=0 llamafactory-cli train results/lora_sft/Baichuan2-7B/Baichuan2_lora_sft_2_single.yaml > results/lora_sft/Baichuan2-7B/Baichuan2_lora_sft_2_single.log
# CUDA_VISIBLE_DEVICES=0 llamafactory-cli train results/lora_sft/Baichuan2-7B/Baichuan2_lora_sft_3_single.yaml > results/lora_sft/Baichuan2-7B/Baichuan2_lora_sft_3_single.log
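
# ---- ChatGLM2-6B LoRA SFT, multi-GPU then single-GPU variants (disabled) ----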
# FORCE_TORCHRUN=1 llamafactory-cli train results/lora_sft/ChatGLM2-6B/ChatGLM2_lora_sft_1.yaml > results/lora_sft/ChatGLM2-6B/ChatGLM2_lora_sft_1.log
# FORCE_TORCHRUN=1 llamafactory-cli train results/lora_sft/ChatGLM2-6B/ChatGLM2_lora_sft_2.yaml > results/lora_sft/ChatGLM2-6B/ChatGLM2_lora_sft_2.log
# FORCE_TORCHRUN=1 llamafactory-cli train results/lora_sft/ChatGLM2-6B/ChatGLM2_lora_sft_3.yaml > results/lora_sft/ChatGLM2-6B/ChatGLM2_lora_sft_3.log
# CUDA_VISIBLE_DEVICES=0 llamafactory-cli train results/lora_sft/ChatGLM2-6B/ChatGLM2_lora_sft_1_single.yaml > results/lora_sft/ChatGLM2-6B/ChatGLM2_lora_sft_1_single.log
# CUDA_VISIBLE_DEVICES=0 llamafactory-cli train results/lora_sft/ChatGLM2-6B/ChatGLM2_lora_sft_2_single.yaml > results/lora_sft/ChatGLM2-6B/ChatGLM2_lora_sft_2_single.log
# CUDA_VISIBLE_DEVICES=0 llamafactory-cli train results/lora_sft/ChatGLM2-6B/ChatGLM2_lora_sft_3_single.yaml > results/lora_sft/ChatGLM2-6B/ChatGLM2_lora_sft_3_single.log
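
# ---- Qwen-7B LoRA SFT, multi-GPU then single-GPU variants (disabled) ----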
# FORCE_TORCHRUN=1 llamafactory-cli train results/lora_sft/Qwen-7B/Qwen_lora_sft_1.yaml > results/lora_sft/Qwen-7B/Qwen_lora_sft_1.log
# FORCE_TORCHRUN=1 llamafactory-cli train results/lora_sft/Qwen-7B/Qwen_lora_sft_2.yaml > results/lora_sft/Qwen-7B/Qwen_lora_sft_2.log
# FORCE_TORCHRUN=1 llamafactory-cli train results/lora_sft/Qwen-7B/Qwen_lora_sft_3.yaml > results/lora_sft/Qwen-7B/Qwen_lora_sft_3.log
# CUDA_VISIBLE_DEVICES=0 llamafactory-cli train results/lora_sft/Qwen-7B/Qwen_lora_sft_1_single.yaml > results/lora_sft/Qwen-7B/Qwen_lora_sft_1_single.log
# CUDA_VISIBLE_DEVICES=0 llamafactory-cli train results/lora_sft/Qwen-7B/Qwen_lora_sft_2_single.yaml > results/lora_sft/Qwen-7B/Qwen_lora_sft_2_single.log
# CUDA_VISIBLE_DEVICES=0 llamafactory-cli train results/lora_sft/Qwen-7B/Qwen_lora_sft_3_single.yaml > results/lora_sft/Qwen-7B/Qwen_lora_sft_3_single.log
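
# ---- Qwen-7B prediction, multi-GPU then single-GPU variants (disabled) ----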
# FORCE_TORCHRUN=1 llamafactory-cli train results/inference/Qwen-7B/Qwen_predict_1.yaml > results/inference/Qwen-7B/Qwen_predict_1.log
# FORCE_TORCHRUN=1 llamafactory-cli train results/inference/Qwen-7B/Qwen_predict_2.yaml > results/inference/Qwen-7B/Qwen_predict_2.log
# FORCE_TORCHRUN=1 llamafactory-cli train results/inference/Qwen-7B/Qwen_predict_3.yaml > results/inference/Qwen-7B/Qwen_predict_3.log
# CUDA_VISIBLE_DEVICES=0 llamafactory-cli train results/inference/Qwen-7B/Qwen_predict_1_single.yaml > results/inference/Qwen-7B/Qwen_predict_1_single.log
# CUDA_VISIBLE_DEVICES=0 llamafactory-cli train results/inference/Qwen-7B/Qwen_predict_2_single.yaml > results/inference/Qwen-7B/Qwen_predict_2_single.log
# CUDA_VISIBLE_DEVICES=0 llamafactory-cli train results/inference/Qwen-7B/Qwen_predict_3_single.yaml > results/inference/Qwen-7B/Qwen_predict_3_single.log
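
# ---- ChatGLM2-6B prediction, multi-GPU then single-GPU variants (disabled) ----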
# FORCE_TORCHRUN=1 llamafactory-cli train results/inference/ChatGLM2-6B/ChatGLM2_predict_1.yaml > results/inference/ChatGLM2-6B/ChatGLM2_predict_1.log
# FORCE_TORCHRUN=1 llamafactory-cli train results/inference/ChatGLM2-6B/ChatGLM2_predict_2.yaml > results/inference/ChatGLM2-6B/ChatGLM2_predict_2.log
# FORCE_TORCHRUN=1 llamafactory-cli train results/inference/ChatGLM2-6B/ChatGLM2_predict_3.yaml > results/inference/ChatGLM2-6B/ChatGLM2_predict_3.log
# CUDA_VISIBLE_DEVICES=0 llamafactory-cli train results/inference/ChatGLM2-6B/ChatGLM2_predict_1_single.yaml > results/inference/ChatGLM2-6B/ChatGLM2_predict_1_single.log
# CUDA_VISIBLE_DEVICES=0 llamafactory-cli train results/inference/ChatGLM2-6B/ChatGLM2_predict_2_single.yaml > results/inference/ChatGLM2-6B/ChatGLM2_predict_2_single.log
# CUDA_VISIBLE_DEVICES=0 llamafactory-cli train results/inference/ChatGLM2-6B/ChatGLM2_predict_3_single.yaml > results/inference/ChatGLM2-6B/ChatGLM2_predict_3_single.log
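
# ---- Baichuan2-7B prediction, multi-GPU then single-GPU variants (disabled) ----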
# FORCE_TORCHRUN=1 llamafactory-cli train results/inference/Baichuan2-7B/Baichuan2_predict_1.yaml > results/inference/Baichuan2-7B/Baichuan2_predict_1.log
# FORCE_TORCHRUN=1 llamafactory-cli train results/inference/Baichuan2-7B/Baichuan2_predict_2.yaml > results/inference/Baichuan2-7B/Baichuan2_predict_2.log
# FORCE_TORCHRUN=1 llamafactory-cli train results/inference/Baichuan2-7B/Baichuan2_predict_3.yaml > results/inference/Baichuan2-7B/Baichuan2_predict_3.log
# CUDA_VISIBLE_DEVICES=0 llamafactory-cli train results/inference/Baichuan2-7B/Baichuan2_predict_1_single.yaml > results/inference/Baichuan2-7B/Baichuan2_predict_1_single.log
# CUDA_VISIBLE_DEVICES=0 llamafactory-cli train results/inference/Baichuan2-7B/Baichuan2_predict_2_single.yaml > results/inference/Baichuan2-7B/Baichuan2_predict_2_single.log
# CUDA_VISIBLE_DEVICES=0 llamafactory-cli train results/inference/Baichuan2-7B/Baichuan2_predict_3_single.yaml > results/inference/Baichuan2-7B/Baichuan2_predict_3_single.log