# InfiniTensor/examples/distributed/placement.py

from typing import Optional


class Placement:
    # base class Placement type
    # convenient utils to check for placement types
    def is_shard(self, dim: Optional[int] = None) -> bool:
        if dim is not None and isinstance(self, Shard):
            return self.dim == dim
        else:
            return isinstance(self, Shard)

    def is_replicate(self) -> bool:
        return isinstance(self, Replicate)

    def is_partial(self) -> bool:
        return isinstance(self, _Partial)


class Replicate(Placement):
    def __eq__(self, other: object) -> bool:
        if not isinstance(other, Replicate):
            return False
        return True

    def __repr__(self) -> str:
        """
        machine readable representation of the Replicate placement
        """
        return "Replicate()"


class Shard(Placement):
    # shard placement, shard on a dim
    def __init__(self, dim):
        self.dim = dim

    def __eq__(self, other: object) -> bool:
        if not isinstance(other, Shard):
            return False
        return self.dim == other.dim

    def __repr__(self) -> str:
        """
        machine readable representation of the Shard placement
        """
        return f"Shard(dim={self.dim})"


class _Partial(Placement):
    def __init__(self, reduce_op: str = "sum"):
        self.reduce_op: str = reduce_op

    def __eq__(self, other: object) -> bool:
        if not isinstance(other, _Partial):
            return False
        return self.reduce_op == other.reduce_op

    def __repr__(self) -> str:
        """
        machine readable representation of the Partial placement
        """
        return f"_Partial(reduce_op={self.reduce_op})"
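

# --- Illustrative usage (a minimal sketch, not part of the original module) ---
# Shows how the placement types describe how a tensor is distributed across
# devices: fully replicated, sharded along a dimension, or held as a partial
# result awaiting a reduction. Names below are only for demonstration.
if __name__ == "__main__":
    placements = [Replicate(), Shard(dim=0), _Partial(reduce_op="sum")]

    for p in placements:
        print(
            p,
            "is_shard(dim=0):", p.is_shard(0),
            "is_replicate():", p.is_replicate(),
            "is_partial():", p.is_partial(),
        )

    # __eq__ compares placements by value, not identity
    assert Shard(0) == Shard(0)
    assert Shard(0) != Shard(1)
    assert _Partial("sum") == _Partial("sum")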