forked from p04798526/LLaMA-Factory-Mirror

fix #4242

parent: 656b2bbdaf
commit: 577de2fa07
Dockerfile

@@ -32,7 +32,7 @@ RUN EXTRA_PACKAGES="metrics"; \
         EXTRA_PACKAGES="${EXTRA_PACKAGES},deepspeed"; \
     fi; \
     pip install -e .[$EXTRA_PACKAGES] && \
-    pip uninstall -y transformer-engine
+    pip uninstall -y transformer-engine flash-attn
 
 # Set up volumes
 VOLUME [ "/root/.cache/huggingface/", "/app/data", "/app/output" ]
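Note on this hunk: after pip install -e .[$EXTRA_PACKAGES], the image now uninstalls flash-attn along with transformer-engine. Given the referenced issue #4242, the likely intent is to drop prebuilt wheels that ship with the base image and conflict with the freshly installed dependencies; the commit itself does not state the motivation, so this reading is an inference.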
src/llamafactory/chat/vllm_engine.py

@@ -13,10 +13,11 @@ from .base_engine import BaseEngine, Response
 if is_vllm_available():
     from vllm import AsyncEngineArgs, AsyncLLMEngine, RequestOutput, SamplingParams
     from vllm.lora.request import LoRARequest
+
     try:
-        from vllm.multimodal import MultiModalData  # vllm==0.5.0
+        from vllm.multimodal import MultiModalData  # type: ignore (for vllm>=0.5.0)
     except ImportError:
-        from vllm.sequence import MultiModalData  # vllm<0.5.0
+        from vllm.sequence import MultiModalData  # for vllm<0.5.0
 
 
 if TYPE_CHECKING:
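For context, a minimal, self-contained sketch of the version-tolerant import this hunk settles on: probe for vllm first, then resolve MultiModalData from whichever module the installed release provides. The is_vllm_available helper below is an illustrative stand-in, not the repository's actual implementation.

import importlib.util


def is_vllm_available() -> bool:
    """Return True when the vllm package can be imported (illustrative stand-in)."""
    return importlib.util.find_spec("vllm") is not None


if is_vllm_available():
    try:
        # vllm >= 0.5.0 exposes MultiModalData from vllm.multimodal
        from vllm.multimodal import MultiModalData  # type: ignore
    except ImportError:
        # older releases keep it in vllm.sequence
        from vllm.sequence import MultiModalData

Either way, downstream code can refer to MultiModalData under a single name without pinning a specific vllm version.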