Add NPU support for model export

This commit is contained in:
MengqingCao 2024-06-05 07:06:40 +00:00
parent 82a565362c
commit 07045c876a
2 changed files with 3 additions and 3 deletions

View File

@ -145,9 +145,9 @@ class ModelArguments:
default=1,
metadata={"help": "The file shard size (in GB) of the exported model."},
)
export_device: Literal["cpu", "cuda"] = field(
export_device: Literal["cpu", "cuda", "npu"] = field(
default="cpu",
metadata={"help": "The device used in model export, use cuda to avoid addmm errors."},
metadata={"help": "The device used in model export, use cuda to avoid addmm errors; use npu/cuda to speed up exporting."},
)
export_quantization_bit: Optional[int] = field(
default=None,

View File

@ -89,7 +89,7 @@ def create_export_tab(engine: "Engine") -> Dict[str, "Component"]:
export_size = gr.Slider(minimum=1, maximum=100, value=1, step=1)
export_quantization_bit = gr.Dropdown(choices=["none"] + GPTQ_BITS, value="none")
export_quantization_dataset = gr.Textbox(value="data/c4_demo.json")
export_device = gr.Radio(choices=["cpu", "cuda"], value="cpu")
export_device = gr.Radio(choices=["cpu", "cuda", "npu"], value="cpu")
export_legacy_format = gr.Checkbox()
with gr.Row():