forked from jiuyuan/InfiniTensor
Merge branch 'master' into xpu
commit a69390e310
@ -0,0 +1,13 @@
# Changelog

All notable changes to this project will be documented in this file.

The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).

## Unreleased

### Added

### Modified

### Fixed
@ -270,6 +270,7 @@ function(build_test files)
endfunction()

if(BUILD_TEST)
add_compile_definitions(BUILD_TEST=1)
enable_testing()
if(USE_TRACE)
build_test(test/trace/*.cc)
Makefile
@ -1,12 +1,13 @@
.PHONY : build clean install-python test-cpp test-onnx
.PHONY : build clean format install-python test-cpp test-onnx

TYPE ?= release
TYPE ?= Release
CUDA ?= OFF
BANG ?= OFF
XPU ?= OFF
INTELCPU ?= off
BACKTRACE ?= ON
TEST ?= ON
FORMAT_ORIGIN ?=

CMAKE_OPT = -DCMAKE_BUILD_TYPE=$(TYPE)
CMAKE_OPT += -DUSE_CUDA=$(CUDA)
@ -26,11 +27,14 @@ build:
clean:
	rm -rf build

format:
	@python3 scripts/format.py $(FORMAT_ORIGIN)

install-python: build
	cp build/$(TYPE)/backend*.so pyinfinitensor/src/pyinfinitensor
	pip install pyinfinitensor/
	pip install -e pyinfinitensor/

test-cpp: build
test-cpp:
	@echo
	cd build/$(TYPE) && make test
README.md
@ -1,19 +1,14 @@
# InfiniTensor

## Compilation on Lotus
# Compilation for cuda
``` bash
# Enter the root of InfiniTensor
source test/script/env_lotus.sh
make CUDA=ON
```
## Compilation for intelcpu
``` bash
# Enter the root of InfiniTensor
source test/script/env_lotus.sh intelcpu
mkdir build && cd build
cmake -DUSE_INTELCPU=ON -DCMAKE_CXX_COMPILER=dpcpp .. && make -j 12
```
[中文项目简介](/README_CN.md) | Documentation | [中文文档](/docs/INDEX.md)

[](https://github.com/InfiniTensor/InfiniTensor/actions)
[](https://github.com/InfiniTensor/InfiniTensor/issues)


InfiniTensor is a high-performance inference engine tailored for GPUs and AI accelerators. Its design focuses on effective deployment and swift academic validation.

## Get started

### Make Commands
@ -30,12 +25,22 @@ cmake -DUSE_INTELCPU=ON -DCMAKE_CXX_COMPILER=dpcpp .. && make -j 12

### CMake Options

There are several configurable CMake options, see the [CMakeLists.txt file](/CMakeLists.txt#L5).
There are several configurable CMake options, see the [CMakeLists.txt](/CMakeLists.txt#L5) file.

- If `USE_BACKTRACE` is `ON`, `libdw-dev` has to be installed. See the README of [backward-cpp](https://github.com/bombela/backward-cpp) for details.
- If `USE_PROTOBUF` is `ON`, `protobuf` has to be installed. See the README of [protobuf](https://github.com/protocolbuffers/protobuf) for details.
- If `USE_CUDA` is `ON`, `cuda` has to be installed.

## Roadmap

- [EinNet](https://github.com/InfiniTensor/InfiniTensor/tree/NNET_e2e) is going to be merged into the main branch.
- Integration of [PET](https://github.com/thu-pacman/PET), a tensor program optimizer supporting partially equivalent transformations.
- Supported hardware
  - ✔ NVIDIA GPU
  - ✔ Cambricon MLU
  - ⬜ Ascend NPU
  - ⬜ Kunlunxin XPU

## Contributor Guide

InfiniTensor development is based on pull requests on GitHub. Before requesting a merge, a PR should satisfy the following requirements:
@ -46,9 +51,24 @@ InfiniTensor development is based on the pull request on Github. Before requesti
2. Receive at least one approval from reviewers.
3. PR title should be concise since it is going to be the commit message in the main branch after merging and squashing.

## Dependencies
## Reference

- [backward-cpp](https://github.com/bombela/backward-cpp): [v1.6](https://github.com/bombela/backward-cpp/releases/tag/v1.6)
- [googletest](https://github.com/google/googletest): [v1.13.0](https://github.com/google/googletest/releases/tag/v1.13.0)
- [nlohmann_json_cmake_fetchcontent](https://github.com/ArthurSonzogni/nlohmann_json_cmake_fetchcontent): [v3.10.5](https://github.com/ArthurSonzogni/nlohmann_json_cmake_fetchcontent/releases/tag/v3.10.5)
- [pybind11](https://github.com/pybind/pybind11): [v2.10.3](https://github.com/pybind/pybind11/releases/tag/v2.10.3)

Please cite EinNet or PET in your publications if it helps your research:

```plaintext
@inproceedings{zheng2023einnet,
  title={EINNET: Optimizing Tensor Programs with Derivation-Based Transformations},
  author={Zheng, Liyan and Wang, Haojie and Zhai, Jidong and Hu, Muyan and Ma, Zixuan and Wang, Tuowei and Huang, Shuhong and Miao, Xupeng and Tang, Shizhi and Huang, Kezhao and Jia, Zhihao},
  booktitle={17th USENIX Symposium on Operating Systems Design and Implementation (OSDI 23)},
  pages={739--755},
  year={2023}
}

@inproceedings{wang2021pet,
  title={PET: Optimizing tensor programs with partially equivalent transformations and automated corrections},
  author={Wang, Haojie and Zhai, Jidong and Gao, Mingyu and Ma, Zixuan and Tang, Shizhi and Zheng, Liyan and Li, Yuanzhi and Rong, Kaiyuan and Chen, Yuanyong and Jia, Zhihao},
  booktitle={15th USENIX Symposium on Operating Systems Design and Implementation (OSDI 21)},
  pages={37--54},
  year={2021}
}
```
README_CN.md
|
@ -1,221 +1,13 @@
|
|||
# 使用指南
|
||||
# Infinitensor
|
||||
|
||||
## 目录
|
||||
## 项目简介
|
||||
|
||||
- [编译](#编译)
|
||||
- [使用](#使用)
|
||||
- [python-前端应用指南](#python-前端应用指南)
|
||||
- [导入-onnx-模型](#导入-onnx-模型)
|
||||
- [导出-onnx-模型](#导出-onnx-模型)
|
||||
- [执行推理](#执行推理)
|
||||
- [测试](#测试)
|
||||
本项目是深度学习领域的一个编译器集合,旨在缩小深度学习应用与后端硬件之间的鸿沟。本项目通过编译器超优化技术对神经网络模型进行优化,从而获得更好的性能;同时与深度学习框架相互配合,为不同的硬件后端提供端到端的编译,方便用户迁移部署。
|
||||
|
||||
## 编译
|
||||
## 项目设计
|
||||
|
||||
推荐使用 Ubuntu-22.04,本文以此环境为例。
|
||||
本项目的设计是前后端解耦合的,主要有三个模块,分别为:
|
||||
|
||||
1. 使用 apt 安装依赖
|
||||
|
||||
> 如果不使用 Ubuntu-22.04,部分软件版本可能不够高。
|
||||
|
||||
```bash
|
||||
sudo apt-get install make cmake build-essential python-is-python3 python-dev-is-python3 python3-pip libdw-dev
|
||||
```
|
||||
|
||||
2. 更新 pip 并换清华源
|
||||
|
||||
```bash
|
||||
python -m pip install -i https://pypi.tuna.tsinghua.edu.cn/simple --upgrade pip
|
||||
pip config set global.index-url https://pypi.tuna.tsinghua.edu.cn/simple
|
||||
```
|
||||
|
||||
3. 编译并安装 python 库
|
||||
|
||||
> 第一次执行会同时安装 python 依赖库,比较慢
|
||||
|
||||
仅编译 CPU 部分:
|
||||
|
||||
```bash
|
||||
make install-python
|
||||
```
|
||||
|
||||
编译 GPU 部分:
|
||||
|
||||
```bash
|
||||
make install-python CUDA=ON
|
||||
```
|
||||
|
||||
## 使用
|
||||
|
||||
项目管理功能已写到 [Makefile](Makefile),支持下列功能:
|
||||
|
||||
- 编译项目:`make`/`make build`
|
||||
- 清理生成文件:`make clean`
|
||||
- 安装 python 库:`make install-python`
|
||||
- 测试 c++ 后端:`make test-cpp`
|
||||
- 测试 python 前端:`make test-onnx`
|
||||
|
||||
并使用下列环境变量传递选项参数:
|
||||
|
||||
- `TYPE`:编译模式(`debug`/`release`),默认值为 `release`
|
||||
- `CUDA`:是否编译 CUDA 后端,默认为 `OFF`,`ON` 打开
|
||||
- `BANG`:是否编译寒武纪后端,默认为 `OFF`,`ON` 打开
|
||||
- `BACKTRACE`:是否启用栈回溯,默认为 `ON`,`OFF` 关闭,建议调试时打开
|
||||
- `TEST`:是否编译 `googletest`,默认为 `ON`,`OFF` 关闭,只有 `test-cpp` 时必要
|
||||
|
||||
## python 前端应用指南
|
||||
|
||||
`make install-python` 会将项目的 python 前端以 `pyinfinitensor` 为名字安装到系统目录,可以直接 `import pyinfinitensor` 来使用。现阶段,项目的主要用法是从 onnx 导入模型进行优化,然后可以再导出优化后的模型到 onnx,也可以直接运行推理。
|
||||
|
||||
### 导入 onnx 模型
|
||||
|
||||
支持的模型:
|
||||
|
||||
- [x] [ResNet18-v2](https://github.com/onnx/models/blob/main/vision/classification/resnet/model/resnet18-v2-7.onnx)
|
||||
- [x] [DenseNet-121-12](https://github.com/onnx/models/blob/main/vision/classification/densenet-121/model/densenet-12.onnx)
|
||||
- [x] [Inception-2](https://github.com/onnx/models/blob/main/vision/classification/inception_and_googlenet/inception_v2/model/inception-v2-9.onnx)
|
||||
- [x] [EfficientNet-Lite4](https://github.com/onnx/models/blob/main/vision/classification/efficientnet-lite4/model/efficientnet-lite4-11.onnx)
|
||||
|
||||
```python
|
||||
import onnx
|
||||
from pyinfinitensor.onnx import OnnxStub
|
||||
from pyinfinitensor import backend
|
||||
|
||||
stub = OnnxStub(onnx.load("model_file"), backend.cpu_runtime())
|
||||
```
|
||||
|
||||
[`onnx.load`](https://onnx.ai/onnx/api/serialization.html#load-a-model) 是 onnx 提供的加载函数,将 onnx 文件读取为保存在内存中的 onnx 模型。
|
||||
|
||||
`OnnxStub` 是 onnx 模型在项目中的表示,通过构造这个对象,将 onnx 模型导入到项目中。其构造器的第一个参数是 onnx 模型文件;第二个参数是模型运行的后端运行时,可以是 `backend.cpu_runtime()`、`backend.cuda_runtime()` 或 `backend.bang_runtime()`。
|
||||
|
||||
构造出的 stub 对象可以用于操作项目中的模型和运行时。
|
||||
|
||||
### 优化
|
||||
|
||||
TODO
|
||||
|
||||
### 导出 onnx 模型
|
||||
|
||||
优化后的模型可以导出成 onnx 文件提供给其他运行时。
|
||||
|
||||
```python
|
||||
with open("optimized.onnx", "wb") as f:
|
||||
f.write(stub.to_onnx("optimized").SerializeToString())
|
||||
```
|
||||
|
||||
`stub.to_onnx(<name>)` 将模型转换为 onnx 模型对象,`<name>` 将填写到 onnx 模型的 `name` 字段。序列化到文件的代码见[官方示例](https://onnx.ai/onnx/intro/python.html#model-serialization)。
|
||||
|
||||
要可视化检查导出的模型文件,可以利用 [onnx 提供的功能](https://onnx.ai/onnx/api/shape_inference.html#infer-shapes)将所有的张量的形状推理出来再导出:
|
||||
|
||||
```python
|
||||
from onnx.shape_inference import infer_shapes
|
||||
|
||||
with open("optimized.onnx", "wb") as f:
|
||||
f.write(infer_shapes(stub.to_onnx("optimized")).SerializeToString())
|
||||
```
|
||||
|
||||
然后用 [Netron](https://netron.app/) 绘制计算图。
|
||||
|
||||
### 执行推理
|
||||
|
||||
也可以使用项目的运行时执行推理。
|
||||
|
||||
第一步是将数据传入计算图。`OnnxStub.inputs` 是一个 `Dict[str, Tensor]`,保存着模型的所有输入的名字和对象。可以用 [`items()`](https://docs.python.org/zh-cn/3/library/stdtypes.html#dict.items) 来遍历。
|
||||
|
||||
这个代码片段显示了如何打印出模型所有输入张量的名字、形状和对象指针:
|
||||
|
||||
```python
|
||||
for name, tensor in stub.inputs.items():
|
||||
print(name, tensor.shape(), tensor)
|
||||
```
|
||||
|
||||
对于 [resnet18-v2-7.onnx](https://github.com/onnx/models/blob/main/vision/classification/resnet/model/resnet18-v2-7.onnx),会打印出:
|
||||
|
||||
```plaintext
|
||||
data [1, 3, 224, 224] <backend.Tensor object at 0x7efeb828e3b0>
|
||||
```
|
||||
|
||||
当然,地址是随机的。这个输出表明需要输入一个名为 “data”,形为 1×3×224×224 的数据。通常来说,这表示一张 224×224 的 rgb 图片。而这个模型是一个 1000 分类的图像分类模型。
|
||||
|
||||
为了方便,这里我们向模型传入一个随机的数据。
|
||||
|
||||
```python
|
||||
import numpy
|
||||
|
||||
stub.init()
|
||||
for name, tensor in stub.inputs.items():
|
||||
print(name, tensor.shape(), tensor)
|
||||
input = numpy.random.random(tensor.shape()).astype(numpy.float32)
|
||||
tensor.copyin_float(input.flatten().tolist())
|
||||
```
|
||||
|
||||
`stub.init()` 为所有张量分配空间。空间是预分配的,所以不支持动态 size 的模型。
|
||||
|
||||
`tensor.copyin_float(<data>)` 向张量传入数据。其参数必须是一个 `List[float]`,即压平的数据。类似的函数还有 `copyin_int32(<data>)` 和 `copyin_int64(<data>)`
|
||||
|
||||
然后,调用 `stub.run()` 执行推理:
|
||||
|
||||
```python
|
||||
stub.run()
|
||||
```
|
||||
|
||||
最后,将结果拷贝出来,传入类似:
|
||||
|
||||
```python
|
||||
stub.init()
|
||||
for name, tensor in stub.outputs.items():
|
||||
print(name, tensor.shape(), tensor)
|
||||
print(tensor.copyout_float())
|
||||
```
|
||||
|
||||
## 测试
|
||||
|
||||
除了单元测试 `make test-cpp` 和 `make test-onnx` 之外,还可以用其他方式来测试单个模型导入导出和优化的正确性。
|
||||
|
||||
这个脚本利用 onnxruntime 来测试导出的模型是否与导入的模型等价:
|
||||
|
||||
```python
|
||||
import onnx
|
||||
import numpy
|
||||
import sys
|
||||
from onnx import ModelProto, ValueInfoProto
|
||||
from pyinfinitensor.onnx import OnnxStub
|
||||
from pyinfinitensor import backend
|
||||
from onnxruntime import InferenceSession
|
||||
|
||||
|
||||
def infer(model: ModelProto, input) -> dict:
|
||||
collection = set()
|
||||
for node in model.graph.node:
|
||||
for output in node.output:
|
||||
collection.add(output)
|
||||
model.graph.output.extend([ValueInfoProto(name=x) for x in collection])
|
||||
session = InferenceSession(model.SerializeToString())
|
||||
i = session.get_inputs()[0].name
|
||||
return dict(
|
||||
zip(
|
||||
[x.name for x in session.get_outputs()],
|
||||
[x.flatten() for x in session.run(None, {i: input})],
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
model0 = onnx.load(sys.argv[1])
|
||||
model1 = OnnxStub(model0, backend.cpu_runtime()).to_onnx("new")
|
||||
|
||||
input_shape = [x.dim_value for x in model1.graph.input[0].type.tensor_type.shape.dim]
|
||||
input = numpy.random.random(input_shape).astype(numpy.float32)
|
||||
|
||||
output0 = infer(model0, input)[model0.graph.output[0].name]
|
||||
output1 = infer(model1, input)[model1.graph.output[0].name]
|
||||
|
||||
print("error =", sum((output1 - output0) ** 2) / len(output0))
|
||||
```
|
||||
|
||||
要运行脚本,先安装 onnxruntime:
|
||||
|
||||
```bash
|
||||
pip install onnxruntime
|
||||
```
|
||||
|
||||
打印出的 `error = ...` 是两个模型输出张量的均方误差。对于不同的模型,这个误差最小为 0,最大不超过 1e-9。
|
||||
- Runtime 模块:该模块负责对不同的加速卡后端进行包装与支持,支撑后端运行。另外提供统一的向上接口,方便上层建设。
|
||||
- Compiler 模块:该模块负责对神经网络模型进行优化变换,获得更加高效的等价模型。
|
||||
- Interface 模块:该模块负责给用户提供编程与交互的接口,方便用户使用本系统。
|
||||
|
|
|
@ -0,0 +1,5 @@
# 项目文档

- [安装部署指南](INSTALL_GUIDE_CN.md)
- [硬件支持](SUPPORT_MATRIX_CN.md)
- [使用指南](USER_GUIDE_CN.md)
@ -0,0 +1,142 @@
|
|||
# 安装部署指南
|
||||
|
||||
## 目录
|
||||
|
||||
- [环境准备](#环境准备)
|
||||
- [编译本项目](#编译本项目)
|
||||
- [技术支持](#技术支持)
|
||||
|
||||
## 环境准备
|
||||
|
||||
目前的软硬件环境支持矩阵
|
||||
|
||||
| Host CPU | Device | OS | Support |
|
||||
| -------- | ------------ | ----------- | ---------- |
|
||||
| X86-64 | Nvidia GPU | Ubuntu-22.04 | Yes |
|
||||
| X86-64 | Cambricon MLU | Ubuntu-22.04 | Yes |
|
||||
|
||||
推荐使用 X86-64 机器以及 Ubuntu-22.04,本文以此环境为例。
|
||||
|
||||
1. 确认 GCC 版本为 11.3 及以上的稳定版本,如若您的机器 GCC 版本不满足此条件,请自行编译安装,下述方式二选一:
|
||||
|
||||
- [GCC 官方文档](https://gcc.gnu.org/onlinedocs/gcc-11.3.0/gcc/)
|
||||
|
||||
- [网友安装分享](https://zhuanlan.zhihu.com/p/509695395)
|
||||
|
||||
2. 确认 CMake 版本为 3.17 及以上的稳定版本, 如若您的机器 CMake 版本不满足此条件,请自行编译安装,下述方式二选一:
|
||||
|
||||
- [CMake 官方文档](https://cmake.org/install/)
|
||||
|
||||
- [网友安装分享](https://zhuanlan.zhihu.com/p/110793004)
|
||||
|
||||
3. 第三方加速卡软件资源安装,目前本项目已经适配了如下的第三方加速卡:
|
||||
|
||||
- 如您的第三方加速卡为英伟达 GPU,请参考英伟达官方文档进行:
|
||||
|
||||
> [驱动安装](https://www.nvidia.cn/geforce/drivers/),
|
||||
> [CUDA Toolkit 安装](https://developer.nvidia.com/cuda-toolkit),
|
||||
> [Cudnn 安装](https://developer.nvidia.com/rdp/cudnn-download),
|
||||
> [Cublas 安装](https://developer.nvidia.com/cublas),
|
||||
> 安装完成后请进行相应的环境变量配置,将可执行文件目录与库目录添加到操作系统识别的路径中,例如
|
||||
>
|
||||
> ```bash
|
||||
> # 将如下内容写入到你的 bashrc 文件并 source 该文件
|
||||
> export CUDA_HOME="/PATH/TO/YOUR/CUDA_HOME"
|
||||
> export CUDNN_HOME="/PATH/TO/YOUR/CUDNN_HOME"
|
||||
> export PATH="${CUDA_HOME}/bin:${PATH}"
|
||||
> export LD_LIBRARY_PATH="${CUDA_HOME}/lib64:${LD_LIBRARY_PATH}"
|
||||
> # 如您不方便将上述环境变量配置到 bashrc 文件中进行长期使用,你也可以在我们提供的 env.sh 文件中进行正确配置并激活,作为临时使用
|
||||
> source env.sh
|
||||
> ```
|
||||
|
||||
我们强烈建议您规范安装,统一到一个目录下,以免不必要的麻烦。
|
||||
|
||||
- 如您的第三方加速卡为寒武纪 MLU,请参考寒武纪官方文档进行:
|
||||
> [驱动安装](https://www.cambricon.com/docs/sdk_1.11.0/driver_5.10.6/user_guide_5.10.6/index.html),
|
||||
> [CNToolkit 安装](https://www.cambricon.com/docs/sdk_1.11.0/cntoolkit_3.4.1/cntoolkit_install_3.4.1/index.html),
|
||||
> [CNNL 安装](https://www.cambricon.com/docs/sdk_1.11.0/cambricon_cnnl_1.16.1/user_guide/index.html),
|
||||
> 安装完成后请进行相应的环境变量配置,将可执行文件目录与库目录添加到操作系统识别的路径中,例如
|
||||
>
|
||||
> ```bash
|
||||
> # 将如下内容写入到你的 bashrc 文件并 source 该文件
|
||||
> export NEUWARE_HOME="/usr/local/neuware"
|
||||
> export PATH="${NEUWARE_HOME}/bin:${PATH}"
|
||||
> export LD_LIBRARY_PATH="${NEUWARE_HOME}/lib64:${LD_LIBRARY_PATH}"
|
||||
> # 如您不方便将上述环境变量配置到 bashrc 文件中进行长期使用,你也可以在我们提供的 env.sh 文件中进行正确配置并激活,作为临时使用
|
||||
> source env.sh
|
||||
> ```
|
||||
|
||||
我们强烈建议您规范安装,统一到一个目录下,以免不必要的麻烦。另外请注意,由于 MLU 上层软件建设适配程度有限,如您在其覆盖的机器,操作系统之外运行,需要在安装驱动之后使用上层软件的 Docker。
|
||||
|
||||
4. 确认您安装了 make,build-essential, python-is-python3, python-dev-is-python3, python3-pip, libdw-dev,如您的机器没有上述基础依赖,请自行按需安装。
|
||||
|
||||
- 在使用 apt-get 工具情况下,您可以这样执行
|
||||
|
||||
```bash
|
||||
sudo apt-get install make cmake build-essential python-is-python3 python-dev-is-python3 python3-pip libdw-dev
|
||||
```
|
||||
|
||||
5. 更新pip并切换到清华源
|
||||
|
||||
```bash
|
||||
python -m pip install -i https://pypi.tuna.tsinghua.edu.cn/simple --upgrade pip
|
||||
pip config set global.index-url https://pypi.tuna.tsinghua.edu.cn/simple
|
||||
```
|
||||
|
||||
6. 安装一些非必需的辅助项目(可选)
|
||||
|
||||
- 如您需要运行本项目下的 example 代码,您需要安装一些辅助项目。请注意这些项目不是必要的,若您不需要运行样例代码,这些项目无需安装。
|
||||
|
||||
> [Pytorch](https://pytorch.org/get-started/locally/):业界内流行的神经网络编程框架
|
||||
> [ONNX](https://onnx.ai/get-started.html):业界内流行的神经网络模型存储文件与转换器
|
||||
> [onnxsim](https://pypi.org/project/onnxsim/):一个简化onnx模型的小工具
|
||||
> [onnx2torch](https://github.com/ENOT-AutoDL/onnx2torch):一个将onnx模型转换pytorch模型的小工具
|
||||
> [tqdm](https://pypi.org/project/tqdm/):一个显示程序运行进度条的小工具
|
||||
|
||||
- 如您需要使用本项目下的 InfiniTest 测试工具,你还需要安装如下的项目:
|
||||
|
||||
> [protobuf](https://github.com/protocolbuffers/protobuf): 一种序列化文件的格式及其编译、序列化、解析工具
|
||||
|
||||
## 编译本项目
|
||||
|
||||
推荐使用 X86-64 机器以及 Ubuntu-22.04,本文以此环境为例。
|
||||
|
||||
1. 配置环境
|
||||
|
||||
打开 env.sh 文件进行环境变量配置,之后执行
|
||||
|
||||
```bash
|
||||
source env.sh
|
||||
```
|
||||
|
||||
2. 编译本项目并打包成 Python 库进行安装
|
||||
|
||||
我们提供了一键编译参数,您可以在项目根目录下执行下面的命令。第一次执行会同时安装 python 依赖库,耗时略长,请耐心等待。
|
||||
|
||||
仅编译 CPU 部分,不编译第三方计算卡:
|
||||
|
||||
```bash
|
||||
make install-python
|
||||
```
|
||||
|
||||
编译 CPU 部分,同时编译英伟达 GPU 部分:
|
||||
|
||||
```bash
|
||||
export CUDA_HOME=/path/to/your/cuda_home
|
||||
make install-python CUDA=ON
|
||||
```
|
||||
|
||||
编译 CPU 部分,同时编译寒武纪 MLU 部分:
|
||||
|
||||
```bash
|
||||
export NEUWARE_HOME=/path/to/your/neuware_home
|
||||
make install-python BANG=ON
|
||||
```
|
||||
|
||||
3. 使用方法
|
||||
|
||||
安装成功后,您就可以使用本项目的 Python 接口进行编码并运行。具体使用方式可以参考项目样例代码 example/Resnet/resnet.py 以及用户使用手册
|
||||
|
||||
## 技术支持
|
||||
|
||||
如遇到问题,请联系我们技术支持团队
|
|
@ -0,0 +1,29 @@
|
|||
# 支持矩阵
|
||||
|
||||
## 目录
|
||||
|
||||
- [环境支持](#环境支持)
|
||||
- [神经网络支持](#神经网络支持)
|
||||
- [技术支持](#技术支持)
|
||||
|
||||
## 环境支持
|
||||
|
||||
目前的软硬件环境支持矩阵
|
||||
|
||||
| Host CPU | Device | OS | Support |
|
||||
| -------- | ------------ | ----------- | ---------- |
|
||||
| X86-64 | Nvidia GPU | Ubuntu-22.04 | Yes |
|
||||
| X86-64 | Cambricon MLU | Ubuntu-22.04 | Yes |
|
||||
|
||||
## 神经网络支持
|
||||
|
||||
目前已经验证过的神经网络模型有
|
||||
|
||||
- [x] [ResNet18-v2](https://github.com/onnx/models/blob/main/vision/classification/resnet/model/resnet18-v2-7.onnx)
|
||||
- [x] [DenseNet-121-12](https://github.com/onnx/models/blob/main/vision/classification/densenet-121/model/densenet-12.onnx)
|
||||
- [x] [Inception-2](https://github.com/onnx/models/blob/main/vision/classification/inception_and_googlenet/inception_v2/model/inception-v2-9.onnx)
|
||||
- [x] [EfficientNet-Lite4](https://github.com/onnx/models/blob/main/vision/classification/efficientnet-lite4/model/efficientnet-lite4-11.onnx)
|
||||
|
||||
## 技术支持
|
||||
|
||||
如若您遇到了本项目的问题,请联系我们的技术支持团队
|
|
@ -0,0 +1 @@
|
|||
|
|
@ -0,0 +1,201 @@
|
|||
# 使用指南
|
||||
|
||||
## 目录
|
||||
|
||||
- [使用方法](#使用方法)
|
||||
- [python-前端应用指南](#python-前端应用指南)
|
||||
- [导入-onnx-模型](#导入-onnx-模型)
|
||||
- [导出-onnx-模型](#导出-onnx-模型)
|
||||
- [执行推理](#执行推理)
|
||||
- [样例代码](#样例代码)
|
||||
- [技术支持](#技术支持)
|
||||
- [测试](#测试)
|
||||
|
||||
## 使用方法
|
||||
|
||||
项目管理功能已写到 [Makefile](Makefile),支持下列功能:
|
||||
|
||||
- 编译项目:`make`/`make build`
|
||||
- 清理生成文件:`make clean`
|
||||
- 安装 python 库:`make install-python`
|
||||
- 测试 c++ 后端:`make test-cpp`
|
||||
- 测试 python 前端:`make test-onnx`
|
||||
|
||||
并使用下列环境变量传递选项参数:
|
||||
|
||||
- `TYPE`:编译模式(`debug`/`release`),默认值为 `release`
|
||||
- `CUDA`:是否编译 CUDA 后端,默认为 `OFF`,`ON` 打开
|
||||
- `BANG`:是否编译寒武纪后端,默认为 `OFF`,`ON` 打开
|
||||
- `BACKTRACE`:是否启用栈回溯,默认为 `ON`,`OFF` 关闭,建议调试时打开
|
||||
- `TEST`:是否编译 `googletest`,默认为 `ON`,`OFF` 关闭,只有 `test-cpp` 时必要
|
||||
|
||||
## python 前端应用指南
|
||||
|
||||
`make install-python` 会将项目的 python 前端以 `pyinfinitensor` 为名字安装到系统目录,可以直接 `import pyinfinitensor` 来使用。现阶段,项目的主要用法是从 onnx 导入模型进行优化,然后可以再导出优化后的模型到 onnx,也可以直接运行推理。
|
||||
|
||||
### 导入 onnx 模型
|
||||
|
||||
支持的模型:
|
||||
|
||||
- [x] [ResNet18-v2](https://github.com/onnx/models/blob/main/vision/classification/resnet/model/resnet18-v2-7.onnx)
|
||||
- [x] [DenseNet-121-12](https://github.com/onnx/models/blob/main/vision/classification/densenet-121/model/densenet-12.onnx)
|
||||
- [x] [Inception-2](https://github.com/onnx/models/blob/main/vision/classification/inception_and_googlenet/inception_v2/model/inception-v2-9.onnx)
|
||||
- [x] [EfficientNet-Lite4](https://github.com/onnx/models/blob/main/vision/classification/efficientnet-lite4/model/efficientnet-lite4-11.onnx)
|
||||
|
||||
```python
|
||||
import onnx
|
||||
from pyinfinitensor.onnx import OnnxStub
|
||||
from pyinfinitensor import backend
|
||||
|
||||
stub = OnnxStub(onnx.load("model_file"), backend.cpu_runtime())
|
||||
```
|
||||
|
||||
[`onnx.load`](https://onnx.ai/onnx/api/serialization.html#load-a-model) 是 onnx 提供的加载函数,将 onnx 文件读取为保存在内存中的 onnx 模型。
|
||||
|
||||
`OnnxStub` 是 onnx 模型在项目中的表示,通过构造这个对象,将 onnx 模型导入到项目中。其构造器的第一个参数是 onnx 模型文件;第二个参数是模型运行的后端运行时,可以是 `backend.cpu_runtime()`、`backend.cuda_runtime()` 或 `backend.bang_runtime()`。
|
||||
|
||||
构造出的 stub 对象可以用于操作项目中的模型和运行时。
|
||||
|
||||
### 优化
|
||||
|
||||
TODO
|
||||
|
||||
### 导出 onnx 模型
|
||||
|
||||
优化后的模型可以导出成 onnx 文件提供给其他运行时。
|
||||
|
||||
```python
|
||||
with open("optimized.onnx", "wb") as f:
|
||||
f.write(stub.to_onnx("optimized").SerializeToString())
|
||||
```
|
||||
|
||||
`stub.to_onnx(<name>)` 将模型转换为 onnx 模型对象,`<name>` 将填写到 onnx 模型的 `name` 字段。序列化到文件的代码见[官方示例](https://onnx.ai/onnx/intro/python.html#model-serialization)。
|
||||
|
||||
要可视化检查导出的模型文件,可以利用 [onnx 提供的功能](https://onnx.ai/onnx/api/shape_inference.html#infer-shapes)将所有的张量的形状推理出来再导出:
|
||||
|
||||
```python
|
||||
from onnx.shape_inference import infer_shapes
|
||||
|
||||
with open("optimized.onnx", "wb") as f:
|
||||
f.write(infer_shapes(stub.to_onnx("optimized")).SerializeToString())
|
||||
```
|
||||
|
||||
然后用 [Netron](https://netron.app/) 绘制计算图。
|
||||
|
||||
### 执行推理
|
||||
|
||||
也可以使用项目的运行时执行推理。
|
||||
|
||||
第一步是将数据传入计算图。`OnnxStub.inputs` 是一个 `Dict[str, Tensor]`,保存着模型的所有输入的名字和对象。可以用 [`items()`](https://docs.python.org/zh-cn/3/library/stdtypes.html#dict.items) 来遍历。
|
||||
|
||||
这个代码片段显示了如何打印出模型所有输入张量的名字、形状和对象指针:
|
||||
|
||||
```python
|
||||
for name, tensor in stub.inputs.items():
|
||||
print(name, tensor.shape(), tensor)
|
||||
```
|
||||
|
||||
对于 [resnet18-v2-7.onnx](https://github.com/onnx/models/blob/main/vision/classification/resnet/model/resnet18-v2-7.onnx),会打印出:
|
||||
|
||||
```plaintext
|
||||
data [1, 3, 224, 224] <backend.Tensor object at 0x7efeb828e3b0>
|
||||
```
|
||||
|
||||
当然,地址是随机的。这个输出表明需要输入一个名为 “data”,形为 1×3×224×224 的数据。通常来说,这表示一张 224×224 的 rgb 图片。而这个模型是一个 1000 分类的图像分类模型。
|
||||
|
||||
为了方便,这里我们向模型传入一个随机的数据。
|
||||
|
||||
```python
|
||||
import numpy
|
||||
|
||||
stub.init()
|
||||
for name, tensor in stub.inputs.items():
|
||||
print(name, tensor.shape(), tensor)
|
||||
input = numpy.random.random(tensor.shape()).astype(numpy.float32)
|
||||
tensor.copyin_float(input.flatten().tolist())
|
||||
```
|
||||
|
||||
`stub.init()` 为所有张量分配空间。空间是预分配的,所以不支持动态 size 的模型。
|
||||
|
||||
`tensor.copyin_float(<data>)` 向张量传入数据。其参数必须是一个 `List[float]`,即压平的数据。类似的函数还有 `copyin_int32(<data>)` 和 `copyin_int64(<data>)`
|
||||
|
||||
然后,调用 `stub.run()` 执行推理:
|
||||
|
||||
```python
|
||||
stub.run()
|
||||
```
|
||||
|
||||
最后,将结果拷贝出来,传入类似:
|
||||
|
||||
```python
|
||||
stub.init()
|
||||
for name, tensor in stub.outputs.items():
|
||||
print(name, tensor.shape(), tensor)
|
||||
print(tensor.copyout_float())
|
||||
```
|
||||
|
||||
### 样例代码
|
||||
|
||||
您可以参照 [./example/ResNet/resnet.py](./example/ResNet/resnet.py) 的样例代码进行了解,并尝试运行。在这个文件中,我们使用 PyTorch 构建了 resnet 网络。您可以通过以下命令查阅该脚本的使用方式:
|
||||
|
||||
```bash
|
||||
python resnet.py -h
|
||||
```
|
||||
|
||||
在样例代码中,我们对定义的网络进行了序列化操作,并存储为模型文件。之后加载该模型文件,并转换为本项目的模型进行优化操作,再进行推理。您可以关注一下代码中 242 行之后的代码。请注意,您可以按照您的需求来进行操作,通常来说,您所需要撰写的代码就是加载模型,转换为本项目的模型进行优化,推理运行。
|
||||
|
||||
## 技术支持
|
||||
|
||||
如若您遇到了本项目的问题,请联系我们的技术支持团队
|
||||
|
||||
## 测试
|
||||
|
||||
除了单元测试 `make test-cpp` 和 `make test-onnx` 之外,还可以用其他方式来测试单个模型导入导出和优化的正确性。
|
||||
|
||||
这个脚本利用 onnxruntime 来测试导出的模型是否与导入的模型等价:
|
||||
|
||||
```python
|
||||
import onnx
|
||||
import numpy
|
||||
import sys
|
||||
from onnx import ModelProto, ValueInfoProto
|
||||
from pyinfinitensor.onnx import OnnxStub
|
||||
from pyinfinitensor import backend
|
||||
from onnxruntime import InferenceSession
|
||||
|
||||
|
||||
def infer(model: ModelProto, input) -> dict:
|
||||
collection = set()
|
||||
for node in model.graph.node:
|
||||
for output in node.output:
|
||||
collection.add(output)
|
||||
model.graph.output.extend([ValueInfoProto(name=x) for x in collection])
|
||||
session = InferenceSession(model.SerializeToString())
|
||||
i = session.get_inputs()[0].name
|
||||
return dict(
|
||||
zip(
|
||||
[x.name for x in session.get_outputs()],
|
||||
[x.flatten() for x in session.run(None, {i: input})],
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
model0 = onnx.load(sys.argv[1])
|
||||
model1 = OnnxStub(model0, backend.cpu_runtime()).to_onnx("new")
|
||||
|
||||
input_shape = [x.dim_value for x in model1.graph.input[0].type.tensor_type.shape.dim]
|
||||
input = numpy.random.random(input_shape).astype(numpy.float32)
|
||||
|
||||
output0 = infer(model0, input)[model0.graph.output[0].name]
|
||||
output1 = infer(model1, input)[model1.graph.output[0].name]
|
||||
|
||||
print("error =", sum((output1 - output0) ** 2) / len(output0))
|
||||
```
|
||||
|
||||
要运行脚本,先安装 onnxruntime:
|
||||
|
||||
```bash
|
||||
pip install onnxruntime
|
||||
```
|
||||
|
||||
打印出的 `error = ...` 是两个模型输出张量的均方误差。对于不同的模型,这个误差最小为 0,最大不超过 1e-9。
|
|
@ -0,0 +1,38 @@
|
|||
# 配置英伟达 CUDA 的 HOME 路径,请注意安装 CUDA Toolkit, CUDNN 并将路径配置到下述环境变量。
|
||||
export CUDA_HOME=/PATH/TO/YOUR/CUDA/HOME
|
||||
export CUDNN_HOME=/PATH/TO/YOUR/CUDNN/HOME
|
||||
export PATH="${CUDA_HOME}/bin:${PATH}"
|
||||
export LD_LIBRARY_PATH="${CUDA_HOME}/lib64:${LD_LIBRARY_PATH}"
|
||||
|
||||
# 配置寒武纪 BANG 的 HOME 路径,请注意 /usr/local/neuware 是寒武纪软件栈建议的,同时也是默认的安装路径。
|
||||
# 如若用户有其他的路径安装方式,请自行配置正确的路径。
|
||||
# 这里是 neuware 目录下一个可能的结构图,请参考。
|
||||
# .
|
||||
# ├── bin
|
||||
# ├── cmake
|
||||
# ├── data
|
||||
# ├── edge
|
||||
# ├── include
|
||||
# ├── lib
|
||||
# ├── lib64
|
||||
# ├── LICENSE
|
||||
# ├── mlvm
|
||||
# ├── README
|
||||
# ├── samples
|
||||
# ├── share
|
||||
# └── version.txt
|
||||
export NEUWARE_HOME=/usr/local/neuware
|
||||
export PATH="${NEUWARE_HOME}/bin:${PATH}"
|
||||
export LD_LIBRARY_PATH="${NEUWARE_HOME}/lib64:${LD_LIBRARY_PATH}"
|
||||
|
||||
# 配置昆仑芯 XPU 的 HOME 路径,请注意 /usr/local/xpu 是昆仑芯软件栈提供的软件包路径。
|
||||
# 如若用户有其他的路径安装方式,请自行配置正确的路径。
|
||||
# 这里是 xpu 目录下一个可能的结构图,请参考。
|
||||
# .
|
||||
# ├── bin
|
||||
# ├── include
|
||||
# ├── lib64
|
||||
# ├── tools
|
||||
# ├── version
|
||||
# └── XTDK
|
||||
export XPU_HOME=/usr/local/xpu
|
example
|
@ -1 +1 @@
|
|||
Subproject commit d6ac8c8c73bf83833a71b41e95820d4eb7741fa9
|
||||
Subproject commit 51d3105277f3774ed31c02ed4cd11fa92925af77
|
|
@ -4,19 +4,50 @@ namespace infini {
|
|||
|
||||
class DataType {
|
||||
public:
|
||||
// legacy
|
||||
static const DataType Float32;
|
||||
static const DataType UInt32;
|
||||
// These are just aligned with the type and index of onnx:
|
||||
// <https://onnx.ai/onnx/intro/concepts.html#element-type>
|
||||
static const DataType UInt8, Int8, UInt16, Int16, Int32, Int64;
|
||||
static constexpr size_t sizePerElement[]{
|
||||
sizeof(float), sizeof(uint32_t), sizeof(uint8_t), sizeof(int8_t),
|
||||
sizeof(uint16_t), sizeof(int16_t), sizeof(int32_t), sizeof(int64_t)};
|
||||
static const DataType Undefine;
|
||||
static const DataType Float32;
|
||||
static const DataType UInt8;
|
||||
static const DataType Int8;
|
||||
static const DataType UInt16;
|
||||
static const DataType Int16;
|
||||
static const DataType Int32;
|
||||
static const DataType Int64;
|
||||
static const DataType String;
|
||||
static const DataType Bool;
|
||||
static const DataType Float16;
|
||||
static const DataType Double;
|
||||
static const DataType UInt32;
|
||||
static const DataType UInt64;
|
||||
static const DataType BFloat16;
|
||||
// "sizePerElement" show the DType to cpu_type
|
||||
// DataType::Bool -> int8_t DataType::Float16 -> uint16_t
|
||||
static constexpr size_t sizePerElement[]{0,
|
||||
sizeof(float),
|
||||
sizeof(uint8_t),
|
||||
sizeof(int8_t),
|
||||
sizeof(uint16_t),
|
||||
sizeof(int16_t),
|
||||
sizeof(int32_t),
|
||||
sizeof(int64_t),
|
||||
sizeof(std::string),
|
||||
sizeof(int8_t),
|
||||
sizeof(uint16_t),
|
||||
sizeof(double),
|
||||
sizeof(uint32_t),
|
||||
sizeof(uint64_t),
|
||||
0,
|
||||
0,
|
||||
sizeof(uint16_t)};
|
||||
|
||||
static constexpr std::string_view names[]{"Float32", "UInt32", "UInt8",
|
||||
"Int8", "UInt16", "Int16",
|
||||
"Int32", "Int64"};
|
||||
static constexpr std::string_view names[]{
|
||||
"Undefine", "Float32", "UInt8", "Int8", "UInt16",
|
||||
"Int16", "Int32", "Int64", "String", "Bool",
|
||||
"Float16", "Double", "UInt32", "UInt64", "PlaceHolder",
|
||||
"PlaceHolder", "BFloat16"};
|
||||
|
||||
static constexpr int cpuType[]{-1, 0, 2, 3, 4, 5, 6, 7, -1,
|
||||
3, 4, 9, 1, 8, -1, -1, 4};
|
||||
|
||||
private:
|
||||
int index;
|
||||
|
@ -29,37 +60,60 @@ class DataType {
|
|||
bool operator==(const DataType &rhs) const { return index == rhs.index; }
|
||||
bool operator<(const DataType &rhs) const { return index < rhs.index; }
|
||||
|
||||
template <typename T> static DataType get() {
|
||||
template <typename T> static int get() {
|
||||
IT_TODO_HALT_MSG("Unsupported data type");
|
||||
}
|
||||
size_t getSize() const { return sizePerElement[index]; }
|
||||
string toString() const { return string(names[index]); }
|
||||
int cpuTypeInt() const { return cpuType[index]; }
|
||||
int getIndex() const { return index; }
|
||||
};
|
||||
|
||||
inline const DataType DataType::Float32(0);
|
||||
inline const DataType DataType::UInt32(1);
|
||||
inline const DataType DataType::UInt8(2), DataType::Int8(3),
|
||||
DataType::UInt16(4), DataType::Int16(5), DataType::Int32(6),
|
||||
DataType::Int64(7);
|
||||
// to be consistent with onnx
|
||||
// https://github.com/onnx/onnx/blob/aeb21329122b96df1d3ef33b500a35ca140b1431/onnx/onnx.proto#L484
|
||||
inline const DataType DataType::Undefine(0);
|
||||
inline const DataType DataType::Float32(1);
|
||||
inline const DataType DataType::UInt8(2);
|
||||
inline const DataType DataType::Int8(3);
|
||||
inline const DataType DataType::UInt16(4);
|
||||
inline const DataType DataType::Int16(5);
|
||||
inline const DataType DataType::Int32(6);
|
||||
inline const DataType DataType::Int64(7);
|
||||
inline const DataType DataType::String(8);
|
||||
inline const DataType DataType::Bool(9);
|
||||
inline const DataType DataType::Float16(10);
|
||||
inline const DataType DataType::Double(11);
|
||||
inline const DataType DataType::UInt32(12);
|
||||
inline const DataType DataType::UInt64(13);
|
||||
inline const DataType DataType::BFloat16(16);
|
||||
// Method definitions are out of the declaration due to GCC bug:
|
||||
// https://stackoverflow.com/questions/49707184/explicit-specialization-in-non-namespace-scope-does-not-compile-in-gcc
|
||||
template <> inline DataType DataType::get<float>() { return Float32; }
|
||||
template <> inline DataType DataType::get<uint32_t>() { return UInt32; }
|
||||
template <> inline DataType DataType::get<uint8_t>() { return UInt8; }
|
||||
template <> inline DataType DataType::get<int8_t>() { return Int8; }
|
||||
template <> inline DataType DataType::get<uint16_t>() { return UInt16; }
|
||||
template <> inline DataType DataType::get<int16_t>() { return Int16; }
|
||||
template <> inline DataType DataType::get<int32_t>() { return Int32; }
|
||||
template <> inline DataType DataType::get<int64_t>() { return Int64; }
|
||||
template <> inline int DataType::get<float>() { return 0; }
|
||||
template <> inline int DataType::get<uint32_t>() { return 1; }
|
||||
template <> inline int DataType::get<uint8_t>() { return 2; }
|
||||
template <> inline int DataType::get<int8_t>() { return 3; }
|
||||
template <> inline int DataType::get<uint16_t>() { return 4; }
|
||||
template <> inline int DataType::get<int16_t>() { return 5; }
|
||||
template <> inline int DataType::get<int32_t>() { return 6; }
|
||||
template <> inline int DataType::get<int64_t>() { return 7; }
|
||||
template <> inline int DataType::get<uint64_t>() { return 8; }
|
||||
template <> inline int DataType::get<double>() { return 9; }
|
||||
|
||||
template <int index> struct DT {};
|
||||
template <> struct DT<0> { using t = float; };
|
||||
template <> struct DT<1> { using t = uint32_t; };
|
||||
template <> struct DT<0> { using t = bool; };
|
||||
template <> struct DT<1> { using t = float; };
|
||||
template <> struct DT<2> { using t = uint8_t; };
|
||||
template <> struct DT<3> { using t = int8_t; };
|
||||
template <> struct DT<4> { using t = uint16_t; };
|
||||
template <> struct DT<5> { using t = int16_t; };
|
||||
template <> struct DT<6> { using t = int32_t; };
|
||||
template <> struct DT<7> { using t = int64_t; };
|
||||
template <> struct DT<8> { using t = char; };
|
||||
template <> struct DT<9> { using t = int8_t; };
|
||||
template <> struct DT<10> { using t = uint16_t; };
|
||||
template <> struct DT<11> { using t = double; };
|
||||
template <> struct DT<12> { using t = uint32_t; };
|
||||
template <> struct DT<13> { using t = uint64_t; };
|
||||
template <> struct DT<16> { using t = uint16_t; };
|
||||
|
||||
} // namespace infini
|
||||
|
|
|
@ -1,4 +1,5 @@
|
|||
#pragma once
|
||||
#include "core/lazy_allocator.h"
|
||||
#include "core/operator.h"
|
||||
#include "core/tensor.h"
|
||||
|
||||
|
@ -9,9 +10,11 @@ class GraphObj : public Object {
|
|||
Runtime runtime;
|
||||
TensorVec tensors;
|
||||
OpVec ops;
|
||||
LazyAllocator allocator;
|
||||
|
||||
public:
|
||||
explicit GraphObj(Runtime runtime) : runtime(runtime), sorted(false){};
|
||||
explicit GraphObj(Runtime runtime)
|
||||
: runtime(runtime), allocator(runtime), sorted(false){};
|
||||
GraphObj(Runtime runtime, OpVec ops_in);
|
||||
string toString() const override;
|
||||
Runtime getRuntime() const { return runtime; }
|
||||
|
|
|
@ -7,30 +7,6 @@
|
|||
|
||||
namespace infini {
|
||||
|
||||
// Use the indices from onnx to reduce delivery overhead,
|
||||
// which comes from onnx but may be not only used for onnx.
|
||||
//
|
||||
// see https://onnx.ai/onnx/intro/concepts.html#element-type
|
||||
enum OnnxDType : int {
|
||||
UNDEFINED = 0,
|
||||
FLOAT,
|
||||
UINT8,
|
||||
INT8,
|
||||
UINT16,
|
||||
INT16,
|
||||
INT32,
|
||||
INT64,
|
||||
STRING,
|
||||
BOOL,
|
||||
FLOAT16,
|
||||
DOUBLE,
|
||||
UINT32,
|
||||
UINT64,
|
||||
COMPLEX64,
|
||||
COMPLEX128,
|
||||
BFLOAT16,
|
||||
};
|
||||
|
||||
class GraphHandlerObj {
|
||||
Graph g;
|
||||
|
||||
|
@ -51,9 +27,9 @@ class GraphHandlerObj {
|
|||
int opw);
|
||||
Tensor matmul(Tensor a, Tensor b, Tensor y, bool transA, bool transB,
|
||||
Tensor bias, ActType act);
|
||||
Tensor batchNorm(Tensor input, Tensor output, Tensor mean, Tensor var,
|
||||
Tensor scale, Tensor bias, float momentum, float eps,
|
||||
bool training);
|
||||
Tensor batchNormalization(Tensor input, Tensor output, Tensor mean,
|
||||
Tensor var, Tensor scale, Tensor bias,
|
||||
float momentum, float eps, bool training);
|
||||
|
||||
Tensor maxPool(Tensor input, Tensor output, int kh, int kw, int dh, int dw,
|
||||
int ph, int pw, int sh, int sw);
|
||||
|
@ -71,6 +47,7 @@ class GraphHandlerObj {
|
|||
Tensor tanh(Tensor x, Tensor y);
|
||||
Tensor softmax(Tensor x, Tensor y, int axis);
|
||||
Tensor abs(Tensor x, Tensor y);
|
||||
Tensor sqrt(Tensor x, Tensor y);
|
||||
Tensor shape(Tensor x, Tensor y);
|
||||
Tensor identity(Tensor x, Tensor y);
|
||||
Tensor flatten(Tensor s, Tensor y, int axis);
|
||||
|
@ -90,6 +67,7 @@ class GraphHandlerObj {
|
|||
const optional<vector<int>> &steps);
|
||||
Tensor pad(Tensor input, Tensor output, const vector<int> &pads,
|
||||
const optional<vector<int>> &axes);
|
||||
Tensor cast(Tensor input, Tensor output, int to);
|
||||
|
||||
//------ modifiers
|
||||
|
||||
|
@ -101,7 +79,11 @@ class GraphHandlerObj {
|
|||
|
||||
inline void data_malloc() { g->dataMalloc(); }
|
||||
|
||||
inline void tune() { g->getRuntime()->run(g, true); }
|
||||
|
||||
inline void run() { g->getRuntime()->run(g); }
|
||||
|
||||
inline double get_perf_time() { return g->getRuntime()->getPerfTime(g); }
|
||||
};
|
||||
|
||||
} // namespace infini
|
||||
|
|
|
@ -105,8 +105,8 @@ class KernelRegistry {
|
|||
IT_ASSERT(it != kernels.end(),
|
||||
"Kernel not found for key {" +
|
||||
to_string(enum_to_underlying(std::get<0>(kernelAttrs))) +
|
||||
", " + OpRegistry::getOpName(std::get<1>(kernelAttrs)) +
|
||||
", " + std::get<2>(kernelAttrs).toString() + "}");
|
||||
", " + std::to_string(std::get<1>(kernelAttrs)) + ", " +
|
||||
std::get<2>(kernelAttrs).toString() + "}");
|
||||
return std::get<0>(it->second);
|
||||
}
|
||||
const KernelRecord &getKernelItem(const KernelAttrs &kernelAttrs) const {
|
||||
|
|
|
@ -0,0 +1,84 @@
|
|||
#pragma once
|
||||
#include "core/runtime.h"
|
||||
#include "core/tensor.h"
|
||||
#ifdef BUILD_TEST
|
||||
#include "gtest/gtest.h"
|
||||
#endif
|
||||
#include <cstddef>
|
||||
#include <map>
|
||||
#include <unordered_set>
|
||||
|
||||
namespace infini {
|
||||
|
||||
class LazyAllocator {
|
||||
private:
|
||||
#ifdef BUILD_TEST
|
||||
FRIEND_TEST(LazyAllocator, testMergeFreeBlocks);
|
||||
|
||||
FRIEND_TEST(LazyAllocator, testAllocWithEndFreeBlock);
|
||||
#endif
|
||||
|
||||
Runtime runtime;
|
||||
|
||||
size_t used;
|
||||
|
||||
size_t peak;
|
||||
|
||||
size_t alignment;
|
||||
|
||||
// pointer to the memory actually allocated
|
||||
void *ptr;
|
||||
|
||||
struct freeBlockInfo {
|
||||
size_t addr;
|
||||
size_t blockSize;
|
||||
};
|
||||
|
||||
struct cmpFreeBlockInfo {
|
||||
bool operator()(const freeBlockInfo &a, const freeBlockInfo &b) const {
|
||||
return (a.blockSize != b.blockSize) ? (a.blockSize < b.blockSize)
|
||||
: (a.addr < b.addr);
|
||||
}
|
||||
};
|
||||
|
||||
// free balanced tree, maintains all free memory blocks
|
||||
std::set<freeBlockInfo, cmpFreeBlockInfo> freeBlocks;
|
||||
|
||||
// key: head address offset of the free memory block
|
||||
// value: blockSize of the block
|
||||
std::unordered_map<size_t, size_t> headAddrToBlockSize;
|
||||
|
||||
// key: tail address offset of the free memory block
|
||||
// value: blockSize of the block
|
||||
std::unordered_map<size_t, size_t> tailAddrToBlockSize;
|
||||
|
||||
public:
|
||||
LazyAllocator(Runtime runtime);
|
||||
|
||||
virtual ~LazyAllocator();
|
||||
|
||||
// function: simulate memory allocation
|
||||
// arguments:
|
||||
// size: size of memory block to be allocated
|
||||
// return: head address offset of the allocated memory block
|
||||
size_t alloc(size_t size);
|
||||
|
||||
// function: simulate memory free
|
||||
// arguments:
|
||||
// addr: head address offset of memory block to be free
|
||||
// size: size of memory block to be freed
|
||||
void free(size_t addr, size_t size);
|
||||
|
||||
// function: perform actual memory allocation
|
||||
// return: pointer to the head address of the allocated memory
|
||||
void *getPtr();
|
||||
|
||||
void info();
|
||||
|
||||
private:
|
||||
// function: memory alignment, rounded up
|
||||
// return: size of the aligned memory block
|
||||
size_t getAlignedSize(size_t size);
|
||||
};
|
||||
|
||||
} // namespace infini
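
A hypothetical usage sketch of the two-phase flow this header describes: `alloc`/`free` only simulate the memory plan and return offsets, and a single later `getPtr()` call backs the whole plan with real device memory. How the `Runtime` is obtained is left to the caller, since its construction is outside this diff.

```cpp
// Sketch only; alloc/free/getPtr come from the header above, the rest is
// illustrative.
#include "core/lazy_allocator.h"
#include "core/runtime.h"

void plan_and_bind(infini::Runtime runtime) {
    infini::LazyAllocator allocator(runtime);

    // Phase 1: simulate allocation; only offsets are handed out.
    size_t a = allocator.alloc(1024); // offset for tensor A
    size_t b = allocator.alloc(4096); // offset for tensor B
    allocator.free(a, 1024);          // A's lifetime ends, its block is reusable
    size_t c = allocator.alloc(512);  // may be placed inside A's freed block

    // Phase 2: one real allocation of the peak size; offsets become addresses.
    char *base = static_cast<char *>(allocator.getPtr());
    void *tensorB = base + b;
    void *tensorC = base + c;
    (void)tensorB;
    (void)tensorC;
}
```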
|
|
@ -0,0 +1,253 @@
|
|||
#pragma once
|
||||
#ifndef OP_TYPE_H
|
||||
#define OP_TYPE_H
|
||||
|
||||
#include <string>
|
||||
#include <unordered_set>
|
||||
|
||||
namespace infini {
|
||||
|
||||
struct OpType {
|
||||
using underlying_t = uint16_t;
|
||||
|
||||
// Clang-format is ambiguous in formatting of comment alignment.
|
||||
// In order to disambiguate, it is necessary to comment all enum
|
||||
// elements.
|
||||
enum : underlying_t {
|
||||
Unknown,
|
||||
Abs, // Unary
|
||||
Acos, // Unary
|
||||
Acosh, // Unary
|
||||
Add, // Binary
|
||||
And, // Binary
|
||||
ArgMax, //
|
||||
Asin, // Binary
|
||||
Asinh, // Binary
|
||||
Atan, // Binary
|
||||
Atanh, // Binary
|
||||
AveragePool, // Pool
|
||||
BatchNormalization, //
|
||||
Bernoulli, //
|
||||
BitShift, // Binary
|
||||
BitwiseAnd, // Binary
|
||||
BitwiseNot, // Binary
|
||||
BitwiseOr, // Binary
|
||||
BitwiseXor, // Binary
|
||||
BlackmanWindow, //
|
||||
Cast, // Unary
|
||||
CastLike, //
|
||||
Ceil, // Unary
|
||||
Celu, //
|
||||
CenterCropPad, //
|
||||
Clip, // Unary
|
||||
Col2lm,
|
||||
Compress,
|
||||
Concat,
|
||||
ConcatFromSequence,
|
||||
ConstantOfShape,
|
||||
Conv, // ComputationIntensive
|
||||
ConvInteger, // ComputationIntensive
|
||||
ConvTranspose, // ComputationIntensive
|
||||
Cos, // Unary
|
||||
Cosh, // Unary
|
||||
CumSum,
|
||||
DFT,
|
||||
DeformConv, // ComputationIntensive
|
||||
DepthToSpace,
|
||||
DequantizeLinear,
|
||||
Det,
|
||||
Div, // Binary
|
||||
Dropout,
|
||||
DynamicQuantizeLinear,
|
||||
Einsum,
|
||||
Elu,
|
||||
Equal, // Compair
|
||||
Erf, // Unary
|
||||
Exp, // Unary
|
||||
Expand,
|
||||
EyeLike,
|
||||
Flatten,
|
||||
Floor, // Unary
|
||||
GRU,
|
||||
Gather,
|
||||
GatherElements,
|
||||
GatherND,
|
||||
Gemm,
|
||||
GlobalAveragePool, // GlobalPool
|
||||
GlobalLpPool, // GlobalPool
|
||||
GlobalMaxPool, // GlobalPool
|
||||
Greater, // Compair
|
||||
GreaterOrEqual, // Compair
|
||||
GridSample,
|
||||
GroupNormalization,
|
||||
HammingWindow,
|
||||
HannWindow,
|
||||
HardSigmoid,
|
||||
HardSwish,
|
||||
Hardmax,
|
||||
Identity,
|
||||
If,
|
||||
InstanceNormalization,
|
||||
IsInf,
|
||||
IsNaN,
|
||||
LRN,
|
||||
LSTM,
|
||||
LayerNormalization,
|
||||
LeakyRelu,
|
||||
Less, // Compair
|
||||
LessOrEqual, // Compair
|
||||
Log, // Unary
|
||||
LogSoftmax,
|
||||
Loop,
|
||||
LpNormalization,
|
||||
LpPool,
|
||||
MatMul, // ComputationIntensive
|
||||
MatMulInteger, // ComputationIntensive
|
||||
Max,
|
||||
MaxPool,
|
||||
MaxRoiPool,
|
||||
MaxUnpool,
|
||||
Mean,
|
||||
MeanVarianceNormalization,
|
||||
MelWeightMatrix,
|
||||
Min,
|
||||
Mish,
|
||||
Mod, // Binary
|
||||
Mul, // Binary
|
||||
Multinomial, //
|
||||
Neg, // Unary
|
||||
NegativeLogLikelihoodLoss,
|
||||
NonMaxSuppression,
|
||||
NonZero,
|
||||
Not, // Unary
|
||||
OneHot,
|
||||
Optional,
|
||||
OptionalGetElement,
|
||||
OptionalHasElement,
|
||||
Or, // Binary
|
||||
PRelu, //
|
||||
Pad, //
|
||||
Pow, // Binary
|
||||
QLinearConv, // ComputationIntensive
|
||||
QLinearMatMul, // ComputationIntensive
|
||||
QuantizeLinear,
|
||||
RNN,
|
||||
RandomNormal,
|
||||
RandomNormalLike,
|
||||
RandomUniform,
|
||||
RandomUniformLike,
|
||||
Range,
|
||||
Reciprocal,
|
||||
ReduceL1, // Reduce
|
||||
ReduceL2, // Reduce
|
||||
ReduceLogSum, // Reduce
|
||||
ReduceLogSumExp, // Reduce
|
||||
ReduceMax, // Reduce
|
||||
ReduceMean, // Reduce
|
||||
ReduceMin, // Reduce
|
||||
ReduceProd, // Reduce
|
||||
ReduceSum, // Reduce
|
||||
ReduceSumSquare, // Reduce
|
||||
Relu, // Unary
|
||||
Reshape,
|
||||
Resize,
|
||||
ReverseSequence,
|
||||
RoiAlign,
|
||||
Round, // Unary
|
||||
STFT,
|
||||
Scan,
|
||||
Scatter,
|
||||
ScatterElements,
|
||||
ScatterND,
|
||||
Selu,
|
||||
SequenceAt,
|
||||
SequenceConstruct,
|
||||
SequenceEmpty,
|
||||
SequenceErase,
|
||||
SequenceInsert,
|
||||
SequenceLength,
|
||||
SequenceMap,
|
||||
Shape,
|
||||
Shrink,
|
||||
Sigmoid,
|
||||
Sign,
|
||||
Sin, // Unary
|
||||
Sinh, // Unary
|
||||
Size,
|
||||
Slice,
|
||||
Softmax,
|
||||
SoftmaxCrossEntropyLoss,
|
||||
Softplus,
|
||||
Softsign,
|
||||
SpaceToDepth,
|
||||
Split,
|
||||
SplitToSequence,
|
||||
Sqrt,
|
||||
Squeeze,
|
||||
StringNormalizer,
|
||||
Sub, // Binary
|
||||
Sum, //
|
||||
Tan, // Unary
|
||||
Tanh, // unary
|
||||
TfIdfVectorizer,
|
||||
ThresholdedRelu,
|
||||
Tile,
|
||||
TopK,
|
||||
Transpose,
|
||||
Trilu,
|
||||
Unique,
|
||||
Unsqueeze,
|
||||
Upsample,
|
||||
Where,
|
||||
Xor, // Binary
|
||||
// CUSTOM DEFINED
|
||||
G2BMM,
|
||||
GBMM,
|
||||
MemBound,
|
||||
// TODO
|
||||
ConvTransNHWC,
|
||||
ConvBackwardFilter,
|
||||
ReluBackward,
|
||||
SigmoidBackward,
|
||||
TanhBackward,
|
||||
|
||||
Fill,
|
||||
Extend,
|
||||
MSELoss,
|
||||
Hardtanh,
|
||||
L2Loss,
|
||||
Rsqrt,
|
||||
FloorDiv,
|
||||
FloorMod,
|
||||
Square,
|
||||
SquaredDifference,
|
||||
} type;
|
||||
|
||||
constexpr OpType(decltype(type) t) : type(t) {}
|
||||
constexpr explicit OpType(underlying_t val) : type((decltype(type))val) {}
|
||||
constexpr underlying_t underlying() const { return type; }
|
||||
|
||||
bool operator==(OpType others) const { return type == others.type; }
|
||||
bool operator!=(OpType others) const { return type != others.type; }
|
||||
bool operator<(OpType others) const { return type < others.type; }
|
||||
|
||||
const char *toString() const;
|
||||
bool isUnary() const;
|
||||
bool isBinary() const;
|
||||
bool isElementWise() const;
|
||||
bool isCompair() const;
|
||||
bool isPool() const;
|
||||
bool isGlobalPool() const;
|
||||
bool isMatMulOrConv() const;
|
||||
};
|
||||
|
||||
enum class ActType {
|
||||
None,
|
||||
Relu,
|
||||
Sigmoid,
|
||||
Tanh,
|
||||
};
|
||||
|
||||
} // namespace infini
|
||||
|
||||
#endif // OP_TYPE_H
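
A small sketch (assumed include path, not part of the commit) showing how the new plain-struct `OpType` is used: values convert implicitly from the enum, can be rebuilt from their `underlying_t`, and expose the predicates declared above.

```cpp
// Sketch only; relies solely on members declared in the header above.
#include "core/op_type.h"
#include <cstdint>
#include <iostream>

void describe(infini::OpType op) {
    std::cout << op.toString() << ": unary=" << op.isUnary()
              << " binary=" << op.isBinary() << " raw=" << op.underlying()
              << "\n";
}

int main() {
    describe(infini::OpType::Relu);        // implicit conversion from the enum
    describe(infini::OpType(uint16_t(4))); // rebuilt from a raw underlying value
    return 0;
}
```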
|
|
@ -1,231 +1,14 @@
|
|||
#pragma once
|
||||
|
||||
#include "core/op_type.h"
|
||||
#include "core/tensor.h"
|
||||
|
||||
namespace infini {
|
||||
|
||||
enum class OpType {
|
||||
Unknown = 0,
|
||||
// linear
|
||||
Conv = 100,
|
||||
ConvBackwardFilter,
|
||||
ConvBackwardData,
|
||||
Matmul,
|
||||
ConvTrans,
|
||||
ConvTransNHWC,
|
||||
G2BMM,
|
||||
GBMM,
|
||||
Pad,
|
||||
Slice,
|
||||
Concat,
|
||||
Split,
|
||||
Transpose,
|
||||
Extend,
|
||||
MaxPool,
|
||||
AvgPool,
|
||||
Add,
|
||||
Sub,
|
||||
Mul,
|
||||
Div,
|
||||
Pow,
|
||||
Gather,
|
||||
ReduceMean,
|
||||
Reshape,
|
||||
Flatten,
|
||||
Identity,
|
||||
// element wise
|
||||
BatchNorm = 200,
|
||||
Softmax,
|
||||
Activation,
|
||||
Relu,
|
||||
ReluBackward,
|
||||
PRelu,
|
||||
Sigmoid,
|
||||
SigmoidBackward,
|
||||
Tanh,
|
||||
TanhBackward,
|
||||
Abs,
|
||||
Sin,
|
||||
Cos,
|
||||
Tan,
|
||||
ASin,
|
||||
ACos,
|
||||
ATan,
|
||||
SinH,
|
||||
CosH,
|
||||
TanH,
|
||||
ASinH,
|
||||
ACosH,
|
||||
ATanH,
|
||||
Resize,
|
||||
Arange,
|
||||
Shape,
|
||||
Copy,
|
||||
Ceil,
|
||||
Floor,
|
||||
Clip,
|
||||
Erf,
|
||||
Exp,
|
||||
Fill,
|
||||
Log,
|
||||
L2Loss,
|
||||
Maximum,
|
||||
Minimum,
|
||||
MSELoss,
|
||||
Neg,
|
||||
Power,
|
||||
Reciprocal,
|
||||
Sqrt,
|
||||
Rsqrt,
|
||||
Cast,
|
||||
FloorDiv,
|
||||
FloorMod,
|
||||
Det,
|
||||
Round,
|
||||
Square,
|
||||
SquaredDifference,
|
||||
Hardtanh,
|
||||
Equal,
|
||||
NotEqual,
|
||||
GreaterThan,
|
||||
GreaterEqual,
|
||||
LessThan,
|
||||
LessEqual,
|
||||
And,
|
||||
Or,
|
||||
Xor,
|
||||
Not,
|
||||
BitAnd,
|
||||
BitOr,
|
||||
BitXor,
|
||||
BitNot,
|
||||
BitLeftShift,
|
||||
BitRightShift,
|
||||
Dropout,
|
||||
//
|
||||
MemBound = 300,
|
||||
};
|
||||
|
||||
using KernelAttrs = std::tuple<Device, OpType, DataType>;
|
||||
|
||||
class OpRegistry {
|
||||
public:
|
||||
static std::string getOpName(OpType opType) {
|
||||
#define FOP(op) \
|
||||
case OpType::op: \
|
||||
return #op
|
||||
|
||||
switch (opType) {
|
||||
FOP(Unknown);
|
||||
// linear
|
||||
FOP(Conv);
|
||||
FOP(ConvBackwardFilter);
|
||||
FOP(ConvBackwardData);
|
||||
FOP(Matmul);
|
||||
FOP(ConvTrans);
|
||||
FOP(G2BMM);
|
||||
FOP(GBMM);
|
||||
FOP(Pad);
|
||||
FOP(Slice);
|
||||
FOP(Concat);
|
||||
FOP(Split);
|
||||
FOP(Transpose);
|
||||
FOP(Extend);
|
||||
FOP(MaxPool);
|
||||
FOP(AvgPool);
|
||||
FOP(Add);
|
||||
FOP(Sub);
|
||||
FOP(Mul);
|
||||
FOP(Div);
|
||||
FOP(Pow);
|
||||
FOP(Gather);
|
||||
FOP(ReduceMean);
|
||||
FOP(Reshape);
|
||||
FOP(Identity);
|
||||
FOP(Shape);
|
||||
// element wise
|
||||
FOP(BatchNorm);
|
||||
FOP(Softmax);
|
||||
FOP(Activation);
|
||||
FOP(Relu);
|
||||
FOP(ReluBackward);
|
||||
FOP(PRelu);
|
||||
FOP(Sigmoid);
|
||||
FOP(SigmoidBackward);
|
||||
FOP(Tanh);
|
||||
FOP(TanhBackward);
|
||||
FOP(Abs);
|
||||
FOP(Sin);
|
||||
FOP(Cos);
|
||||
FOP(Tan);
|
||||
FOP(ASin);
|
||||
FOP(ACos);
|
||||
FOP(ATan);
|
||||
FOP(SinH);
|
||||
FOP(CosH);
|
||||
FOP(TanH);
|
||||
FOP(ASinH);
|
||||
FOP(ACosH);
|
||||
FOP(ATanH);
|
||||
FOP(Copy);
|
||||
FOP(Ceil);
|
||||
FOP(Floor);
|
||||
FOP(Clip);
|
||||
FOP(Erf);
|
||||
FOP(Exp);
|
||||
FOP(Fill);
|
||||
FOP(Log);
|
||||
FOP(L2Loss);
|
||||
FOP(Maximum);
|
||||
FOP(Minimum);
|
||||
FOP(MSELoss);
|
||||
FOP(Neg);
|
||||
FOP(Power);
|
||||
FOP(Reciprocal);
|
||||
FOP(Sqrt);
|
||||
FOP(Rsqrt);
|
||||
FOP(Cast);
|
||||
FOP(FloorDiv);
|
||||
FOP(FloorMod);
|
||||
FOP(Det);
|
||||
FOP(Round);
|
||||
FOP(Square);
|
||||
FOP(SquaredDifference);
|
||||
FOP(Hardtanh);
|
||||
FOP(Equal);
|
||||
FOP(NotEqual);
|
||||
FOP(GreaterThan);
|
||||
FOP(GreaterEqual);
|
||||
FOP(LessThan);
|
||||
FOP(LessEqual);
|
||||
FOP(And);
|
||||
FOP(Or);
|
||||
FOP(Xor);
|
||||
FOP(Not);
|
||||
FOP(BitAnd);
|
||||
FOP(BitOr);
|
||||
FOP(BitXor);
|
||||
FOP(BitNot);
|
||||
FOP(BitLeftShift);
|
||||
FOP(BitRightShift);
|
||||
//
|
||||
FOP(MemBound);
|
||||
default:
|
||||
IT_ASSERT(false);
|
||||
break;
|
||||
}
|
||||
#undef FOP
|
||||
}
|
||||
};
|
||||
|
||||
enum class ActType {
|
||||
None,
|
||||
Relu,
|
||||
Sigmoid,
|
||||
Tanh,
|
||||
};
|
||||
using KernelAttrs = std::tuple<Device, OpType::underlying_t, DataType>;
|
||||
|
||||
struct OpPerfKey {
|
||||
HashType hash;
|
||||
OpType opType;
|
||||
OpType::underlying_t opType;
|
||||
vector<int> attrs;
|
||||
|
||||
public:
|
||||
|
@ -233,7 +16,7 @@ struct OpPerfKey {
|
|||
// https://github.com/nlohmann/json#how-can-i-use-get-for-non-default-constructiblenon-copyable-types
|
||||
OpPerfKey() = default;
|
||||
OpPerfKey(HashType hash, OpType opType, vector<int> attrs = {})
|
||||
: hash(hash), opType(opType), attrs(attrs) {}
|
||||
: hash(hash), opType(opType.underlying()), attrs(attrs) {}
|
||||
bool operator==(const OpPerfKey &rhs) const {
|
||||
if (hash != rhs.hash)
|
||||
return false;
|
||||
|
@ -290,16 +73,7 @@ class OperatorObj : public Object {
|
|||
*/
|
||||
HashType hash() const;
|
||||
|
||||
public: // check Op type
|
||||
bool isLinearOp() const;
|
||||
bool isElementWiseOp() const;
|
||||
bool isSplitOp() const;
|
||||
bool isConcatOp() const;
|
||||
bool isComputeOp() const;
|
||||
bool isTransposeOp() const;
|
||||
bool isReshapeOp() const;
|
||||
bool isMemBoundOp() const;
|
||||
|
||||
public:
|
||||
public: // getter and setter
|
||||
const TensorVec &getInputs() const { return inputs; }
|
||||
const TensorVec &getOutputs() const { return outputs; }
|
||||
|
|
|
@ -1,5 +1,6 @@
|
|||
#pragma once
|
||||
#include "core/common.h"
|
||||
#include "core/op_type.h"
|
||||
#include "core/ref.h"
|
||||
#include <memory>
|
||||
|
||||
|
@ -21,7 +22,6 @@ using Graph = Ref<GraphObj>;
|
|||
using GraphHandler = Ref<GraphHandlerObj>;
|
||||
using Runtime = Ref<RuntimeObj>;
|
||||
using Blob = Ref<BlobObj>;
|
||||
enum class OpType;
|
||||
|
||||
using TensorVec = vector<Tensor>;
|
||||
using OpVec = vector<Operator>;
|
||||
|
|
|
@ -1,5 +1,6 @@
|
|||
#pragma once
|
||||
#include "core/tensor_base.h"
|
||||
#include "utils/data_convert.h"
|
||||
#include <cmath>
|
||||
#include <cstring>
|
||||
|
||||
|
@ -35,6 +36,7 @@ class TensorObj : public TensorBaseObj {
|
|||
size_t getBytes() const { return _size * dtype.getSize(); }
|
||||
|
||||
Shape getDims() const { return shape; }
|
||||
size_t getRank() const { return shape.size(); }
|
||||
vector<size_t> getStride() const;
|
||||
size_t getOffset(const vector<int> &ds) const;
|
||||
void dataMalloc();
|
||||
|
@ -45,20 +47,20 @@ class TensorObj : public TensorBaseObj {
|
|||
|
||||
// Copy elements from `data`.
|
||||
template <typename T> void copyin(const vector<T> &data) {
|
||||
IT_ASSERT(DataType::get<T>() == dtype);
|
||||
IT_ASSERT(DataType::get<T>() == dtype.cpuTypeInt());
|
||||
IT_ASSERT(data.size() >= _size);
|
||||
copyin(data.data(), getBytes());
|
||||
}
|
||||
// Copy all the elements to a vector.
|
||||
template <typename T> auto copyout() const {
|
||||
IT_ASSERT(DataType::get<T>() == dtype);
|
||||
IT_ASSERT(DataType::get<T>() == dtype.cpuTypeInt());
|
||||
std::vector<T> ans(_size);
|
||||
copyout(ans.data(), getBytes());
|
||||
return ans;
|
||||
}
|
||||
// Copy the element at `pos`.
|
||||
template <typename T> auto copyOne(const vector<int> &pos) const {
|
||||
IT_ASSERT(DataType::get<T>() == dtype);
|
||||
IT_ASSERT(DataType::get<T>() == dtype.cpuTypeInt());
|
||||
auto offset = getOffset(pos);
|
||||
auto bytes = dtype.getSize();
|
||||
T ans;
|
||||
|
@ -70,10 +72,16 @@ class TensorObj : public TensorBaseObj {
|
|||
void copyData(const TensorObj *src);
|
||||
void copyData(const Tensor &src) { copyData(src.get()); }
|
||||
|
||||
// TODO: Rename this function later, because it is confusing that it seems to
|
||||
// change the field data, but actually it generates data and may copy it to
|
||||
// device.
|
||||
// FIXME: std::function copies the generator instead of passing it by ref.
|
||||
// Thus the internal state of generator cannot be updated.
|
||||
void setData(
|
||||
std::function<void(void *, size_t, DataType)> const &generator) const;
|
||||
|
||||
void setDataBlob(const Blob &blob);
|
||||
|
||||
Tensor clone() const {
|
||||
auto obj = make_ref<TensorObj>(*this);
|
||||
obj->freeData();
|
||||
|
@ -98,8 +106,12 @@ class TensorObj : public TensorBaseObj {
|
|||
bool equalData(const Tensor &rhs, double relativeError = 1e-6) const;
|
||||
|
||||
template <typename T> bool equalData(const vector<T> &dataVector) {
|
||||
IT_ASSERT(DataType::get<T>() == dtype);
|
||||
IT_ASSERT(size() == dataVector.size());
|
||||
if (dtype == DataType::Float16) {
|
||||
return equalDataImpl_fp16(getRawDataPtr<uint16_t *>(),
|
||||
(float *)dataVector.data(), size());
|
||||
}
|
||||
IT_ASSERT(DataType::get<T>() == dtype.cpuTypeInt());
|
||||
return equalDataImpl(getRawDataPtr<T *>(), dataVector.data(), size());
|
||||
}
|
||||
|
||||
|
@ -156,6 +168,20 @@ class TensorObj : public TensorBaseObj {
|
|||
return true;
|
||||
}
|
||||
|
||||
bool equalDataImpl_fp16(const uint16_t *a, const float *b,
|
||||
size_t size) const {
|
||||
for (size_t i = 0; i < size; ++i) {
|
||||
auto a_fp32 = fp16_to_float(a[i]);
|
||||
auto b_fp32 = b[i];
|
||||
if (fabs(a_fp32 - b_fp32) / std::max(fabs(a_fp32), fabs(b_fp32)) >
|
||||
1e-6) {
|
||||
printf("Error on %lu: %f %f\n", i, a_fp32, b_fp32);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
Shape getPosByOffset(size_t offset, Shape dim) const;
|
||||
size_t getOffsetByPos(Shape pos, Shape dim) const;
|
||||
|
||||
|
@ -305,7 +331,7 @@ class TensorObj : public TensorBaseObj {
|
|||
// }
|
||||
|
||||
// void initSplittingPoints() {
|
||||
// splittingPoints.resize(getDims().size()); }
|
||||
// splittingPoints.resize(getRank()); }
|
||||
|
||||
// void printShape();
|
||||
};
|
||||
|
|
|
@ -3,29 +3,32 @@
|
|||
#include "operators/unary.h"
|
||||
|
||||
namespace infini {
|
||||
// TODO(constroy): num should be size_t.
|
||||
void softmax_kernel(float *input, float *output, int num);
|
||||
void relu_kernel(float *input, float *output, int num);
|
||||
void sigmoid_kernel(float *input, float *output, int num);
|
||||
void tanh_kernel(float *input, float *output, int num);
|
||||
void abs_kernel(float *input, float *output, int num);
|
||||
void sqrt_kernel(float *input, float *output, int num);
|
||||
|
||||
void unary_kernel(const Operator &_op) {
|
||||
auto op = as<UnaryObj>(_op);
|
||||
float *const inputData = (op->getInputs(0)->getRawDataPtr<float *>());
|
||||
float *const outputData = (op->getOutput()->getRawDataPtr<float *>());
|
||||
|
||||
auto dim = op->getInputs(0)->getDims();
|
||||
int n = dim[0], c = dim[1], h = dim[2], w = dim[3];
|
||||
size_t num = op->getOutput()->size();
|
||||
if (op->getOpType() == OpType::Softmax)
|
||||
softmax_kernel(inputData, outputData, n * c * h * w);
|
||||
softmax_kernel(inputData, outputData, num);
|
||||
else if (op->getOpType() == OpType::Relu)
|
||||
relu_kernel(inputData, outputData, n * c * h * w);
|
||||
relu_kernel(inputData, outputData, num);
|
||||
else if (op->getOpType() == OpType::Sigmoid)
|
||||
sigmoid_kernel(inputData, outputData, n * c * h * w);
|
||||
sigmoid_kernel(inputData, outputData, num);
|
||||
else if (op->getOpType() == OpType::Tanh)
|
||||
tanh_kernel(inputData, outputData, n * c * h * w);
|
||||
tanh_kernel(inputData, outputData, num);
|
||||
else if (op->getOpType() == OpType::Abs)
|
||||
abs_kernel(inputData, outputData, n * c * h * w);
|
||||
abs_kernel(inputData, outputData, num);
|
||||
else if (op->getOpType() == OpType::Sqrt)
|
||||
sqrt_kernel(inputData, outputData, num);
|
||||
else
|
||||
IT_TODO_HALT();
|
||||
}
|
||||
|
|
|
@ -65,26 +65,24 @@ DEFINE_ELEMENT_WISE_OBJ(Sub, OpType::Sub)
|
|||
DEFINE_ELEMENT_WISE_OBJ(Mul, OpType::Mul)
|
||||
DEFINE_ELEMENT_WISE_OBJ(Div, OpType::Div)
|
||||
DEFINE_ELEMENT_WISE_OBJ(Pow, OpType::Pow)
|
||||
DEFINE_ELEMENT_WISE_OBJ(Maximum, OpType::Maximum)
|
||||
DEFINE_ELEMENT_WISE_OBJ(Minimum, OpType::Minimum)
|
||||
DEFINE_ELEMENT_WISE_OBJ(Power, OpType::Power)
|
||||
DEFINE_ELEMENT_WISE_OBJ(Maximum, OpType::Max)
|
||||
DEFINE_ELEMENT_WISE_OBJ(Minimum, OpType::Min)
|
||||
DEFINE_ELEMENT_WISE_OBJ(Power, OpType::Pow)
|
||||
DEFINE_ELEMENT_WISE_OBJ(FloorDiv, OpType::FloorDiv)
|
||||
DEFINE_ELEMENT_WISE_OBJ(FloorMod, OpType::FloorMod)
|
||||
DEFINE_ELEMENT_WISE_OBJ(SquaredDifference, OpType::SquaredDifference)
|
||||
DEFINE_ELEMENT_WISE_OBJ(Equal, OpType::Equal)
|
||||
DEFINE_ELEMENT_WISE_OBJ(NotEqual, OpType::NotEqual)
|
||||
DEFINE_ELEMENT_WISE_OBJ(GreaterThan, OpType::GreaterThan)
|
||||
DEFINE_ELEMENT_WISE_OBJ(GreaterEqual, OpType::GreaterEqual)
|
||||
DEFINE_ELEMENT_WISE_OBJ(LessThan, OpType::LessThan)
|
||||
DEFINE_ELEMENT_WISE_OBJ(LessEqual, OpType::LessEqual)
|
||||
DEFINE_ELEMENT_WISE_OBJ(GreaterThan, OpType::Greater)
|
||||
DEFINE_ELEMENT_WISE_OBJ(GreaterEqual, OpType::GreaterOrEqual)
|
||||
DEFINE_ELEMENT_WISE_OBJ(LessThan, OpType::Less)
|
||||
DEFINE_ELEMENT_WISE_OBJ(LessEqual, OpType::LessOrEqual)
|
||||
DEFINE_ELEMENT_WISE_OBJ(And, OpType::And)
|
||||
DEFINE_ELEMENT_WISE_OBJ(Or, OpType::Or)
|
||||
DEFINE_ELEMENT_WISE_OBJ(Xor, OpType::Xor)
|
||||
DEFINE_ELEMENT_WISE_OBJ(Not, OpType::Not)
|
||||
DEFINE_ELEMENT_WISE_OBJ(BitAnd, OpType::BitAnd)
|
||||
DEFINE_ELEMENT_WISE_OBJ(BitOr, OpType::BitOr)
|
||||
DEFINE_ELEMENT_WISE_OBJ(BitXor, OpType::BitXor)
|
||||
DEFINE_ELEMENT_WISE_OBJ(BitNot, OpType::BitNot)
|
||||
DEFINE_ELEMENT_WISE_OBJ(BitLeftShift, OpType::BitLeftShift)
|
||||
DEFINE_ELEMENT_WISE_OBJ(BitRightShift, OpType::BitRightShift)
|
||||
DEFINE_ELEMENT_WISE_OBJ(BitAnd, OpType::BitwiseAnd)
|
||||
DEFINE_ELEMENT_WISE_OBJ(BitOr, OpType::BitwiseOr)
|
||||
DEFINE_ELEMENT_WISE_OBJ(BitXor, OpType::BitwiseXor)
|
||||
DEFINE_ELEMENT_WISE_OBJ(BitNot, OpType::BitwiseNot)
|
||||
DEFINE_ELEMENT_WISE_OBJ(BitLeftShift, OpType::BitShift)
|
||||
}; // namespace infini
|
||||
|
|
|
@ -70,7 +70,7 @@ class AvgPoolObj : public PoolingObj {
|
|||
public:
|
||||
AvgPoolObj(GraphObj *graph, Tensor input, Tensor output, int kh, int kw,
|
||||
int dh, int dw, int ph, int pw, int sh, int sw)
|
||||
: PoolingObj(graph, OpType::AvgPool, input, output, kh, kw, dh, dw, ph,
|
||||
pw, sh, sw) {}
|
||||
: PoolingObj(graph, OpType::AveragePool, input, output, kh, kw, dh, dw,
|
||||
ph, pw, sh, sw) {}
|
||||
};
|
||||
}; // namespace infini
|
||||
|
|
|
@ -60,6 +60,7 @@ class FlattenObj : public OperatorObj {
|
|||
std::string toString() const override;
|
||||
int numInputs() const override { return 1; }
|
||||
int numOutputs() const override { return 1; }
|
||||
int getAxis() const { return axis; }
|
||||
|
||||
private:
|
||||
vector<int> getWorkloadVector() const override;
|
||||
|
|
|
@ -15,7 +15,7 @@ class TransposeObj : public OperatorObj {
|
|||
std::vector<int> getPermute() const { return transposePermute; }
|
||||
|
||||
private:
|
||||
vector<int> transposePermute = {1, 1, 1, 1};
|
||||
vector<int> transposePermute;
|
||||
vector<int> getWorkloadVector() const override;
|
||||
vector<int> getOpAttrVector() const override;
|
||||
};
|
||||
|
|
|
@ -134,31 +134,35 @@ class TransformObj : public OperatorObj {
|
|||
vector<int> getOpAttrVector() const override;
|
||||
};
|
||||
|
||||
enum class CastType {
|
||||
Float2Float16 = 0,
|
||||
Float2Int64,
|
||||
Float2Int32,
|
||||
Float2Int16,
|
||||
Float2Int8,
|
||||
Float2BFloat16,
|
||||
Int322Float,
|
||||
Int322Int8,
|
||||
Int322Int16,
|
||||
Int322Int64,
|
||||
Int162Float,
|
||||
Int162Int32,
|
||||
Int82Float,
|
||||
Int82Int16,
|
||||
Int82Int32,
|
||||
Uint82Float,
|
||||
Uint82Int32,
|
||||
Uint82Int64,
|
||||
Int642Int32,
|
||||
Int642Uint32,
|
||||
Int642Float,
|
||||
Uint322Int64,
|
||||
Float162Float,
|
||||
BFloat162Float,
|
||||
};
|
||||
|
||||
class CastObj : public OperatorObj {
|
||||
public:
|
||||
enum CastType {
|
||||
Float2Half = 0,
|
||||
Float2Int64,
|
||||
Float2Int32,
|
||||
Float2Int16,
|
||||
Float2Int8,
|
||||
Int322Float,
|
||||
Int322Int8,
|
||||
Int322Int16,
|
||||
Int162Float,
|
||||
Int162Int32,
|
||||
Int82Float,
|
||||
Int82Int16,
|
||||
Int82Int32,
|
||||
Uint82Float,
|
||||
Uint82Int32,
|
||||
Uint82Int64,
|
||||
Int322Int64,
|
||||
Int642Int32,
|
||||
Int642Uint32,
|
||||
Int642Float,
|
||||
Uint322Int64,
|
||||
};
|
||||
CastObj(GraphObj *graph, Tensor input, Tensor output, CastType type);
|
||||
OP_CLONE(CastObj);
|
||||
optional<vector<Shape>> inferShape(const TensorVec &inputs) const override;
|
||||
|
@ -197,27 +201,6 @@ class CumsumObj : public OperatorObj {
|
|||
vector<int> getOpAttrVector() const override;
|
||||
};
|
||||
|
||||
class ArangeObj : public OperatorObj {
|
||||
public:
|
||||
ArangeObj(GraphObj *graph, float start, float step, int length,
|
||||
Tensor output);
|
||||
OP_CLONE(ArangeObj);
|
||||
optional<vector<Shape>> inferShape(const TensorVec &inputs) const override;
|
||||
|
||||
std::string toString() const override;
|
||||
int numInputs() const override { return 0; }
|
||||
int numOutputs() const override { return 1; }
|
||||
float getStartValue() { return startValue; }
|
||||
float getStepValue() { return stepValue; }
|
||||
int getLength() { return lengthValue; }
|
||||
|
||||
private:
|
||||
float startValue, stepValue;
|
||||
int lengthValue;
|
||||
vector<int> getWorkloadVector() const override;
|
||||
vector<int> getOpAttrVector() const override;
|
||||
};
|
||||
|
||||
class ShapeObj : public OperatorObj {
|
||||
public:
|
||||
ShapeObj(GraphObj *graph, Tensor input, Tensor output);
|
||||
|
@ -283,17 +266,16 @@ DEFINE_UNARY_OBJ(Abs, OpType::Abs)
|
|||
DEFINE_UNARY_OBJ(Sin, OpType::Sin)
|
||||
DEFINE_UNARY_OBJ(Cos, OpType::Cos)
|
||||
DEFINE_UNARY_OBJ(Tan, OpType::Tan)
|
||||
DEFINE_UNARY_OBJ(ASin, OpType::ASin)
|
||||
DEFINE_UNARY_OBJ(ACos, OpType::ACos)
|
||||
DEFINE_UNARY_OBJ(ATan, OpType::ATan)
|
||||
DEFINE_UNARY_OBJ(SinH, OpType::SinH)
|
||||
DEFINE_UNARY_OBJ(CosH, OpType::CosH)
|
||||
DEFINE_UNARY_OBJ(TanH, OpType::TanH)
|
||||
DEFINE_UNARY_OBJ(ASinH, OpType::ASinH)
|
||||
DEFINE_UNARY_OBJ(ACosH, OpType::ACosH)
|
||||
DEFINE_UNARY_OBJ(ATanH, OpType::ATanH)
|
||||
DEFINE_UNARY_OBJ(ASin, OpType::Asin)
|
||||
DEFINE_UNARY_OBJ(ACos, OpType::Acos)
|
||||
DEFINE_UNARY_OBJ(ATan, OpType::Atan)
|
||||
DEFINE_UNARY_OBJ(SinH, OpType::Sinh)
|
||||
DEFINE_UNARY_OBJ(CosH, OpType::Cosh)
|
||||
DEFINE_UNARY_OBJ(TanH, OpType::Tanh)
|
||||
DEFINE_UNARY_OBJ(ASinH, OpType::Asinh)
|
||||
DEFINE_UNARY_OBJ(ACosH, OpType::Acosh)
|
||||
DEFINE_UNARY_OBJ(ATanH, OpType::Atanh)
|
||||
|
||||
DEFINE_UNARY_OBJ(Copy, OpType::Copy)
|
||||
DEFINE_UNARY_OBJ(Ceil, OpType::Ceil)
|
||||
DEFINE_UNARY_OBJ(Floor, OpType::Floor)
|
||||
DEFINE_UNARY_OBJ(Erf, OpType::Erf)
|
||||
|
@ -301,7 +283,5 @@ DEFINE_UNARY_OBJ(Exp, OpType::Exp)
|
|||
DEFINE_UNARY_OBJ(Neg, OpType::Neg)
|
||||
DEFINE_UNARY_OBJ(Reciprocal, OpType::Reciprocal)
|
||||
DEFINE_UNARY_OBJ(Sqrt, OpType::Sqrt)
|
||||
DEFINE_UNARY_OBJ(Rsqrt, OpType::Rsqrt)
|
||||
DEFINE_UNARY_OBJ(Round, OpType::Round)
|
||||
DEFINE_UNARY_OBJ(Square, OpType::Square)
|
||||
}; // namespace infini
|
||||
|
|
|
@ -0,0 +1,13 @@
|
|||
#pragma once
|
||||
#include <iostream>
|
||||
|
||||
namespace infini {
|
||||
union Uf32 {
|
||||
float f32;
|
||||
uint32_t u32;
|
||||
};
|
||||
uint16_t float_to_fp16(const float x);
|
||||
float fp16_to_float(const uint16_t x);
|
||||
uint16_t float_to_bfp16(const float x);
|
||||
float bfp16_to_float(const uint16_t x);
|
||||
} // namespace infini
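The conversion helpers declared above are used for float16 tensors elsewhere in this change (e.g. the fp16 comparison and the float16 data generator). A minimal usage sketch, assuming the declared signatures perform the usual IEEE 754 half round-trip; the tolerance is illustrative only:

```cpp
#include "utils/data_convert.h"
#include <cassert>
#include <cmath>
#include <cstdint>

int main() {
    float x = 3.14159f;
    uint16_t h = infini::float_to_fp16(x); // encode to half-precision bits
    float y = infini::fp16_to_float(h);    // decode back to float
    // half precision keeps ~3 decimal digits, so use a loose tolerance
    assert(std::fabs(x - y) / std::fabs(x) < 1e-3f);
    return 0;
}
```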
|
|
@ -1,6 +1,7 @@
|
|||
#pragma once
|
||||
#include "core/common.h"
|
||||
#include "core/tensor_base.h"
|
||||
#include "utils/data_convert.h"
|
||||
#include <random>
|
||||
|
||||
namespace infini {
|
||||
|
@ -10,6 +11,7 @@ class DataGenerator {
|
|||
private:
|
||||
virtual void fill(uint32_t *data, size_t size) { IT_TODO_HALT(); }
|
||||
virtual void fill(float *data, size_t size) { IT_TODO_HALT(); }
|
||||
virtual void fill_fp16(uint16_t *data, size_t size) { IT_TODO_HALT(); }
|
||||
|
||||
public:
|
||||
virtual ~DataGenerator() {}
|
||||
|
@ -18,6 +20,8 @@ class DataGenerator {
|
|||
fill(reinterpret_cast<uint32_t *>(data), size);
|
||||
else if (dataType == DataType::Float32)
|
||||
fill(reinterpret_cast<float *>(data), size);
|
||||
else if (dataType == DataType::Float16)
|
||||
fill_fp16(reinterpret_cast<uint16_t *>(data), size);
|
||||
else
|
||||
IT_TODO_HALT();
|
||||
}
|
||||
|
@ -38,6 +42,13 @@ class IncrementalGenerator : public DataGenerator {
|
|||
fill<uint32_t>(data, size);
|
||||
}
|
||||
void fill(float *data, size_t size) override { fill<float>(data, size); }
|
||||
// FIXME: fix the accuracy standards when dtype is float16
|
||||
void fill_fp16(uint16_t *data, size_t size) {
|
||||
for (size_t i = 0; i < size; i++) {
|
||||
float x = 2.0f;
|
||||
data[i] = float_to_fp16(x);
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
class RandomGenerator : public DataGenerator {
|
||||
|
|
|
@ -0,0 +1,15 @@
|
|||
#pragma once
|
||||
#ifndef OPERATOR_UTIL_H
|
||||
#define OPERATOR_UTIL_H
|
||||
|
||||
#include "core/tensor.h"
|
||||
|
||||
namespace infini {
|
||||
|
||||
// Compute the broadcast output shape from the shapes of inputs A and B
|
||||
Shape infer_broadcast(const Shape &A, const Shape &B);
|
||||
// Resolve the actual (non-negative) axis from the rank and the given axis
|
||||
int get_real_axis(const int &axis, const int &rank);
|
||||
} // namespace infini
|
||||
|
||||
#endif
|
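For readers unfamiliar with these two helpers: infer_broadcast follows ONNX/NumPy multidirectional broadcasting (shapes are aligned from the trailing dimension, and a dimension of 1 stretches to match), while get_real_axis maps a possibly negative axis into [0, rank), e.g. axis -1 with rank 4 resolves to 3. Below is a minimal standalone sketch of the broadcasting rule, assuming Shape is a vector<int>; the function name and body are illustrative, not the project's implementation:

```cpp
#include <algorithm>
#include <cassert>
#include <vector>

using Shape = std::vector<int>;

// Illustrative only: ONNX-style multidirectional broadcast of two shapes.
Shape inferBroadcastSketch(const Shape &A, const Shape &B) {
    Shape out(std::max(A.size(), B.size()));
    for (size_t i = 0; i < out.size(); ++i) {
        // walk both shapes from the trailing dimension; missing dims act as 1
        int a = i < A.size() ? A[A.size() - 1 - i] : 1;
        int b = i < B.size() ? B[B.size() - 1 - i] : 1;
        assert(a == b || a == 1 || b == 1); // otherwise not broadcastable
        out[out.size() - 1 - i] = std::max(a, b);
    }
    return out;
}

int main() {
    assert((inferBroadcastSketch({2, 3, 4}, {3, 1}) == Shape{2, 3, 4}));
    assert((inferBroadcastSketch({1}, {5, 6}) == Shape{5, 6}));
    return 0;
}
```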
File diff suppressed because it is too large
|
@ -64,6 +64,36 @@ class TestStringMethods(unittest.TestCase):
|
|||
)
|
||||
make_and_import_model(make_graph([conv], "conv", [i, w], [o]))
|
||||
|
||||
def test_conv_fp16(self):
|
||||
i = make_tensor_value_info("i", TensorProto.FLOAT16, [1, 3, 4, 4])
|
||||
w = make_tensor_value_info("w", TensorProto.FLOAT16, [2, 3, 3, 3])
|
||||
o = make_tensor_value_info("o", TensorProto.FLOAT16, [1, 2, 2, 2])
|
||||
conv = make_node(
|
||||
"Conv",
|
||||
["i", "w"],
|
||||
["o"],
|
||||
"conv",
|
||||
pads=[1, 1, 1, 1],
|
||||
strides=[2, 1],
|
||||
dilations=[1, 2],
|
||||
)
|
||||
make_and_import_model(make_graph([conv], "conv_fp16", [i, w], [o]))
|
||||
|
||||
def test_conv_bfp16(self):
|
||||
i = make_tensor_value_info("i", TensorProto.BFLOAT16, [1, 3, 4, 4])
|
||||
w = make_tensor_value_info("w", TensorProto.BFLOAT16, [2, 3, 3, 3])
|
||||
o = make_tensor_value_info("o", TensorProto.BFLOAT16, [1, 2, 2, 2])
|
||||
conv = make_node(
|
||||
"Conv",
|
||||
["i", "w"],
|
||||
["o"],
|
||||
"conv",
|
||||
pads=[1, 1, 1, 1],
|
||||
strides=[2, 1],
|
||||
dilations=[1, 2],
|
||||
)
|
||||
make_and_import_model(make_graph([conv], "conv_bfp16", [i, w], [o]))
|
||||
|
||||
def test_matmul(self):
|
||||
x = make_tensor_value_info("x", TensorProto.FLOAT, [1, 2, 3])
|
||||
a = make_tensor_value_info("a", TensorProto.FLOAT, [1, 3, 4])
|
||||
|
@ -93,7 +123,7 @@ class TestStringMethods(unittest.TestCase):
|
|||
name="batchNormalization",
|
||||
)
|
||||
make_and_import_model(
|
||||
make_graph([batch_norm], "batchNorm", [x, scale, b, mean, var], [y])
|
||||
make_graph([batch_norm], "batchNormalzation", [x, scale, b, mean, var], [y])
|
||||
)
|
||||
|
||||
def test_max_pool(self):
|
||||
|
@ -211,9 +241,7 @@ class TestStringMethods(unittest.TestCase):
|
|||
x = make_tensor_value_info("x", TensorProto.FLOAT, [1, 3, 5, 7])
|
||||
y = make_tensor_value_info("y", TensorProto.FLOAT, [1 * 3, 5 * 7])
|
||||
flatten = make_node("Flatten", ["x"], ["y"], axis=2, name="flatten")
|
||||
# make_and_import_model(
|
||||
make_graph([flatten], "flatten", [x], [y])
|
||||
# )
|
||||
make_and_import_model(make_graph([flatten], "flatten", [x], [y]))
|
||||
|
||||
def test_reshape(self):
|
||||
data = make_tensor_value_info("data", TensorProto.FLOAT, [2, 3, 4, 5])
|
||||
|
@ -316,6 +344,14 @@ class TestStringMethods(unittest.TestCase):
|
|||
y = handler.tensor([3, 2, 1], 12)
|
||||
handler.reshape(x, y, [3, 2, 1])
|
||||
|
||||
def test_cast(self):
|
||||
input1 = make_tensor_value_info("input1", TensorProto.FLOAT, [1, 3, 2, 4])
|
||||
output = make_tensor_value_info("output", TensorProto.FLOAT16, [1, 3, 2, 4])
|
||||
cast = make_node(
|
||||
"Cast", ["input1"], ["output"], to=TensorProto.FLOAT16, name="cast"
|
||||
)
|
||||
make_and_import_model(make_graph([cast], "cast", [input1], [output]))
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
unittest.main()
|
||||
|
|
|
@ -0,0 +1,50 @@
|
|||
import sys
|
||||
from pathlib import Path
|
||||
from subprocess import run
|
||||
|
||||
c_style_file = [".h", ".hh", ".hpp", ".c", ".cc", ".cpp", ".cxx", ".cu", ".mlu"]
|
||||
py_file = ".py"
|
||||
proj_path = Path(sys.path[0]).parent
|
||||
|
||||
|
||||
# Formats one file under project path.
|
||||
def format_file(file):
|
||||
file = Path(proj_path.joinpath(file))
|
||||
if file.suffix in c_style_file:
|
||||
run(f"clang-format-14 -i {file}", cwd=proj_path, shell=True)
|
||||
run(f"git add {file}", cwd=proj_path, shell=True)
|
||||
elif file.suffix == py_file:
|
||||
run(f"black {file}", cwd=proj_path, shell=True)
|
||||
run(f"git add {file}", cwd=proj_path, shell=True)
|
||||
|
||||
|
||||
if len(sys.argv) == 1:
|
||||
# Last commit.
|
||||
print("Formats git added files.")
|
||||
for line in (
|
||||
run("git status", cwd=proj_path, capture_output=True, shell=True)
|
||||
.stdout.decode()
|
||||
.splitlines()
|
||||
):
|
||||
line = line.strip()
|
||||
# Only formats git added files.
|
||||
for pre in ["new file:", "modified:"]:
|
||||
if line.startswith(pre):
|
||||
format_file(line[len(pre) :].strip())
|
||||
break
|
||||
else:
|
||||
# Origin commit.
|
||||
origin = sys.argv[1]
|
||||
print(f'Formats changed files from "{origin}".')
|
||||
for line in (
|
||||
run(f"git diff {origin}", cwd=proj_path, capture_output=True, shell=True)
|
||||
.stdout.decode()
|
||||
.splitlines()
|
||||
):
|
||||
diff = "diff --git "
|
||||
if line.startswith(diff):
|
||||
files = line[len(diff) :].split(" ")
|
||||
assert len(files) == 2
|
||||
assert files[0][:2] == "a/"
|
||||
assert files[1][:2] == "b/"
|
||||
format_file(files[1][2:])
|
|
@ -14,7 +14,7 @@ void BangRuntimeObj::runWithoutSync(const Graph &graph, bool tune = false,
|
|||
for (auto &op : graph->getOperators()) {
|
||||
// HACK: set correct data type
|
||||
auto kernelAttrs =
|
||||
KernelAttrs{device, op->getOpType(), DataType::Float32};
|
||||
KernelAttrs{device, op->getOpType().underlying(), op->getDType()};
|
||||
Kernel *kernel = kernelRegistry.getKernel(kernelAttrs);
|
||||
auto perfKey = PerfEngine::Key{kernelAttrs, op->getOpPerfKey()};
|
||||
auto perfData = perfEngine.getPerfData(perfKey);
|
||||
|
|
|
@ -5,7 +5,7 @@ namespace infini {
|
|||
|
||||
BlobObj::~BlobObj() {
|
||||
// Avoid cycled inclusion
|
||||
runtime->dealloc(ptr);
|
||||
// destruction is performed in LazyAllocator
|
||||
}
|
||||
|
||||
} // namespace infini
|
||||
} // namespace infini
|
||||
|
|
|
@ -48,7 +48,7 @@ bool DummyMutator::isMultiBranchMergable(const Graph &inGraph) {
|
|||
if (inGraph->getOperators().size() != 2)
|
||||
return false;
|
||||
for (auto op : inGraph->getOperators()) {
|
||||
if (op->getOpType() != OpType::Matmul)
|
||||
if (op->getOpType() != OpType::MatMul)
|
||||
return false;
|
||||
if (op->getPredecessors().size() > 0)
|
||||
return false;
|
||||
|
|
|
@ -5,7 +5,7 @@
|
|||
namespace infini {
|
||||
|
||||
GraphObj::GraphObj(Runtime runtime, OpVec ops_in)
|
||||
: runtime(runtime), sorted(false) {
|
||||
: runtime(runtime), allocator(runtime), sorted(false) {
|
||||
map<UidBaseType, Tensor> tensorPool;
|
||||
// Clone tensors
|
||||
for (const auto &op : ops_in) {
|
||||
|
@ -116,7 +116,7 @@ bool GraphObj::topo_sort() {
|
|||
|
||||
void GraphObj::optimize() {
|
||||
for (auto &op : ops) {
|
||||
switch (op->getOpType()) {
|
||||
switch (op->getOpType().underlying()) {
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
@ -124,9 +124,58 @@ void GraphObj::optimize() {
|
|||
}
|
||||
|
||||
void GraphObj::dataMalloc() {
|
||||
// topological sorting first
|
||||
IT_ASSERT(topo_sort() == true);
|
||||
// count the number of times all tensors are used
|
||||
std::unordered_map<TensorObj *, size_t> tensorToRefCount;
|
||||
// record the memory address offsets of all tensors to be allocated
|
||||
std::unordered_map<TensorObj *, size_t> tensorToOffset;
|
||||
|
||||
// record all constant tensors, including weight tensors and input tensors
|
||||
std::unordered_set<TensorObj *> constTensor;
|
||||
for (auto &tensor : tensors) {
|
||||
tensor->dataMalloc();
|
||||
if (tensor.get()->getSource() == nullptr) {
|
||||
// allocate memory for all constant tensors first, and this memory
|
||||
// will not be reused later
|
||||
constTensor.insert(tensor.get());
|
||||
tensorToOffset[tensor.get()] = allocator.alloc(tensor->getBytes());
|
||||
} else {
|
||||
tensorToRefCount[tensor.get()] = tensor->getTargets().size();
|
||||
}
|
||||
}
|
||||
// traverse in topological order and simulate memory allocation
|
||||
for (auto &op : ops) {
|
||||
// memory should be allocated for the output first
|
||||
auto outputs = op->getOutputs();
|
||||
for (auto &tensor : outputs) {
|
||||
tensorToOffset[tensor.get()] = allocator.alloc(tensor->getBytes());
|
||||
}
|
||||
auto inputs = op->getInputs();
|
||||
for (auto &tensor : inputs) {
|
||||
if (constTensor.find(tensor.get()) == constTensor.end()) {
|
||||
auto tensorIter = tensorToRefCount.find(tensor.get());
|
||||
IT_ASSERT(tensorIter != tensorToRefCount.end());
|
||||
tensorToRefCount[tensor.get()] -= 1;
|
||||
if (tensorToRefCount[tensor.get()] == 0) {
|
||||
// indicate that this tensor will no longer be used and
|
||||
// perform memory free
|
||||
tensorToRefCount.erase(tensor.get());
|
||||
allocator.free(tensorToOffset[tensor.get()],
|
||||
tensor->getBytes());
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// perform actual memory allocation
|
||||
for (auto &tensor : tensors) {
|
||||
IT_ASSERT(tensorToOffset.find(tensor.get()) != tensorToOffset.end());
|
||||
tensor->setDataBlob(make_ref<BlobObj>(
|
||||
tensor->runtime, static_cast<uint8_t *>(allocator.getPtr()) +
|
||||
tensorToOffset[tensor.get()]));
|
||||
}
|
||||
|
||||
allocator.info();
|
||||
}
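A compact, self-contained sketch of the planning idea in dataMalloc above (toy names, not the real classes): each non-constant tensor starts with a refcount equal to its number of consumers; after an operator's outputs are placed, each input's refcount is decremented, and a tensor whose count reaches zero can return its simulated block to the allocator for reuse.

```cpp
#include <iostream>
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>

int main() {
    // Toy graph: t0 is read by op1 and op2, t1 only by op2.
    std::unordered_map<std::string, int> refCount{{"t0", 2}, {"t1", 1}};
    std::vector<std::pair<std::string, std::vector<std::string>>> ops{
        {"op1", {"t0"}}, {"op2", {"t0", "t1"}}};
    for (const auto &[op, inputs] : ops) {
        // (outputs of `op` would be allocated here)
        for (const auto &t : inputs) {
            if (--refCount[t] == 0)
                std::cout << t << " is dead after " << op
                          << ", its block can be reused\n";
        }
    }
    return 0;
}
```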
|
||||
|
||||
Tensor GraphObj::addTensor(Shape dim, DataType dtype) {
|
||||
|
@ -151,7 +200,7 @@ TensorVec GraphObj::addTensor(const TensorVec &tensors) {
|
|||
OpVec GraphObj::getComputeOps() const {
|
||||
OpVec opList;
|
||||
for (auto op : ops)
|
||||
if (op->isComputeOp())
|
||||
if (op->getOpType().isMatMulOrConv())
|
||||
opList.emplace_back(op);
|
||||
return opList;
|
||||
}
|
||||
|
|
|
@ -18,6 +18,7 @@
|
|||
namespace infini {
|
||||
|
||||
static DataType dtype_repr_convert(int);
|
||||
static CastType inferCastType(Tensor input, int to);
|
||||
|
||||
Tensor GraphHandlerObj::tensor(Shape dims, int dtype) {
|
||||
return g->addTensor(std::move(dims), dtype_repr_convert(dtype));
|
||||
|
@ -69,9 +70,11 @@ Tensor GraphHandlerObj::matmul(Tensor a, Tensor b, Tensor y, bool transA,
|
|||
}
|
||||
}
|
||||
|
||||
Tensor GraphHandlerObj::batchNorm(Tensor input, Tensor output, Tensor mean,
|
||||
Tensor var, Tensor scale, Tensor bias,
|
||||
float momentum, float eps, bool training) {
|
||||
Tensor GraphHandlerObj::batchNormalization(Tensor input, Tensor output,
|
||||
Tensor mean, Tensor var,
|
||||
Tensor scale, Tensor bias,
|
||||
float momentum, float eps,
|
||||
bool training) {
|
||||
if (output) {
|
||||
g->addOpWithOutputs<BatchNormObj>(
|
||||
std::move(input), output, std::move(mean), std::move(var),
|
||||
|
@ -148,6 +151,7 @@ DEFINE_UNARY_METHOD(relu, Relu)
|
|||
DEFINE_UNARY_METHOD(sigmoid, Sigmoid)
|
||||
DEFINE_UNARY_METHOD(tanh, Tanh)
|
||||
DEFINE_UNARY_METHOD(abs, Abs)
|
||||
DEFINE_UNARY_METHOD(sqrt, Sqrt)
|
||||
DEFINE_UNARY_METHOD(shape, Shape)
|
||||
|
||||
// see operators/reshape.h
|
||||
|
@ -291,24 +295,108 @@ Tensor GraphHandlerObj::pad(Tensor input, Tensor output,
|
|||
}
|
||||
}
|
||||
|
||||
Tensor GraphHandlerObj::cast(Tensor input, Tensor output, int to) {
|
||||
if (output) {
|
||||
g->addOpWithOutputs<CastObj>(std::move(input), output,
|
||||
inferCastType(input, to));
|
||||
return output;
|
||||
} else {
|
||||
return g
|
||||
->addOp<CastObj>(std::move(input), output, inferCastType(input, to))
|
||||
->getOutput();
|
||||
}
|
||||
}
|
||||
|
||||
static CastType inferCastType(Tensor input, int to) {
|
||||
auto iType = input->getDType();
|
||||
auto oType = DataType(to);
|
||||
if (iType == DataType::Float32 && oType == DataType::Float16) {
|
||||
return CastType::Float2Float16;
|
||||
} else if (iType == DataType::Float32 && oType == DataType::Int64) {
|
||||
return CastType::Float2Int64;
|
||||
} else if (iType == DataType::Float32 && oType == DataType::Int32) {
|
||||
return CastType::Float2Int32;
|
||||
} else if (iType == DataType::Float32 && oType == DataType::Int16) {
|
||||
return CastType::Float2Int16;
|
||||
} else if (iType == DataType::Float32 && oType == DataType::Int8) {
|
||||
return CastType::Float2Int8;
|
||||
} else if (iType == DataType::Float32 && oType == DataType::BFloat16) {
|
||||
return CastType::Float2BFloat16;
|
||||
} else if (iType == DataType::Int32 && oType == DataType::Float32) {
|
||||
return CastType::Int322Float;
|
||||
} else if (iType == DataType::Int32 && oType == DataType::Int8) {
|
||||
return CastType::Int322Int8;
|
||||
} else if (iType == DataType::Int32 && oType == DataType::Int16) {
|
||||
return CastType::Int322Int16;
|
||||
} else if (iType == DataType::Int32 && oType == DataType::Int64) {
|
||||
return CastType::Int322Int64;
|
||||
} else if (iType == DataType::Int16 && oType == DataType::Int32) {
|
||||
return CastType::Int162Int32;
|
||||
} else if (iType == DataType::Int16 && oType == DataType::Float32) {
|
||||
return CastType::Int162Float;
|
||||
} else if (iType == DataType::Int8 && oType == DataType::Float32) {
|
||||
return CastType::Int82Float;
|
||||
} else if (iType == DataType::Int8 && oType == DataType::Int16) {
|
||||
return CastType::Int82Int16;
|
||||
} else if (iType == DataType::Int8 && oType == DataType::Int32) {
|
||||
return CastType::Int82Int32;
|
||||
} else if (iType == DataType::UInt8 && oType == DataType::Int32) {
|
||||
return CastType::Uint82Int32;
|
||||
} else if (iType == DataType::UInt8 && oType == DataType::Float32) {
|
||||
return CastType::Uint82Float;
|
||||
} else if (iType == DataType::UInt8 && oType == DataType::Int64) {
|
||||
return CastType::Uint82Int64;
|
||||
} else if (iType == DataType::Int64 && oType == DataType::Float32) {
|
||||
return CastType::Int642Float;
|
||||
} else if (iType == DataType::Int64 && oType == DataType::UInt32) {
|
||||
return CastType::Int642Uint32;
|
||||
} else if (iType == DataType::Int64 && oType == DataType::Int32) {
|
||||
return CastType::Int642Int32;
|
||||
} else if (iType == DataType::UInt32 && oType == DataType::Int64) {
|
||||
return CastType::Uint322Int64;
|
||||
} else if (iType == DataType::Float16 && oType == DataType::Float32) {
|
||||
return CastType::Float162Float;
|
||||
} else if (iType == DataType::BFloat16 && oType == DataType::Float32) {
|
||||
return CastType::BFloat162Float;
|
||||
} else {
|
||||
IT_TODO_HALT_MSG("Unsupported CastType : input_type is " +
|
||||
iType.toString() + " output_type is " +
|
||||
oType.toString());
|
||||
}
|
||||
}
|
||||
|
||||
static DataType dtype_repr_convert(int dtype) {
|
||||
switch ((OnnxDType)dtype) {
|
||||
case OnnxDType::FLOAT:
|
||||
switch (dtype) {
|
||||
case 0:
|
||||
return DataType::Undefine;
|
||||
case 1:
|
||||
return DataType::Float32;
|
||||
case OnnxDType::UINT32:
|
||||
return DataType::UInt32;
|
||||
case OnnxDType::UINT8:
|
||||
case 2:
|
||||
return DataType::UInt8;
|
||||
case OnnxDType::INT8:
|
||||
case 3:
|
||||
return DataType::Int8;
|
||||
case OnnxDType::UINT16:
|
||||
case 4:
|
||||
return DataType::UInt16;
|
||||
case OnnxDType::INT16:
|
||||
case 5:
|
||||
return DataType::Int16;
|
||||
case OnnxDType::INT32:
|
||||
case 6:
|
||||
return DataType::Int32;
|
||||
case OnnxDType::INT64:
|
||||
case 7:
|
||||
return DataType::Int64;
|
||||
case 8:
|
||||
return DataType::String;
|
||||
case 9:
|
||||
return DataType::Bool;
|
||||
case 10:
|
||||
return DataType::Float16;
|
||||
case 11:
|
||||
return DataType::Double;
|
||||
case 12:
|
||||
return DataType::UInt32;
|
||||
case 13:
|
||||
return DataType::UInt64;
|
||||
case 16:
|
||||
return DataType::BFloat16;
|
||||
default:
|
||||
IT_ASSERT(false, "Unsupported data type");
|
||||
}
|
||||
|
|
|
@ -0,0 +1,143 @@
|
|||
#include "core/lazy_allocator.h"
|
||||
#include <utility>
|
||||
|
||||
namespace infini {
|
||||
|
||||
// In the CUDA C Programming Guide
|
||||
// (https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#device-memory-accesses):
|
||||
// Any address of a variable residing in global memory or returned by one of the
|
||||
// memory allocation routines from the driver or runtime API is always aligned
|
||||
// to at least 256 bytes.
|
||||
constexpr size_t alignmentInBytesForCUDA = 256;
|
||||
|
||||
LazyAllocator::LazyAllocator(Runtime runtime) : runtime(runtime) {
|
||||
used = 0;
|
||||
peak = 0;
|
||||
ptr = nullptr;
|
||||
if (runtime->isCuda()) {
|
||||
// TODO: the alignment on cuda might need further discussion
|
||||
alignment = alignmentInBytesForCUDA;
|
||||
} else {
|
||||
// 'alignment' defaults to sizeof(uint64_t), because it is the length of
|
||||
// the longest data type currently supported by the DataType field of
|
||||
// the tensor
|
||||
// TODO: the alignment on bang might need further discussion
|
||||
alignment = sizeof(uint64_t);
|
||||
}
|
||||
}
|
||||
|
||||
LazyAllocator::~LazyAllocator() {
|
||||
if (this->ptr != nullptr) {
|
||||
runtime->dealloc(this->ptr);
|
||||
}
|
||||
}
|
||||
|
||||
size_t LazyAllocator::alloc(size_t size) {
|
||||
IT_ASSERT(this->ptr == nullptr);
|
||||
// pad the size to the multiple of alignment
|
||||
size = this->getAlignedSize(size);
|
||||
auto it = this->freeBlocks.lower_bound(freeBlockInfo{(size_t)0, size});
|
||||
|
||||
size_t retAddr = this->peak;
|
||||
if (it != this->freeBlocks.end()) {
|
||||
// found an available free memory block for allocation
|
||||
size_t blockSize = it->blockSize;
|
||||
retAddr = it->addr;
|
||||
size_t tailAddr = retAddr + size;
|
||||
// update the map of head and tail address offset of memory blocks
|
||||
this->headAddrToBlockSize.erase(retAddr);
|
||||
this->tailAddrToBlockSize.erase(tailAddr);
|
||||
// memory block splitting
|
||||
if (blockSize > tailAddr - retAddr) {
|
||||
freeBlockInfo newBlock = {tailAddr,
|
||||
blockSize - (tailAddr - retAddr)};
|
||||
this->headAddrToBlockSize[tailAddr] = newBlock.blockSize;
|
||||
this->tailAddrToBlockSize[retAddr + blockSize] = newBlock.blockSize;
|
||||
this->freeBlocks.insert(newBlock);
|
||||
}
|
||||
// update the free balanced tree
|
||||
this->freeBlocks.erase(it);
|
||||
this->used += tailAddr - retAddr;
|
||||
} else {
|
||||
// the allocated memory space is not sufficient for reallocation, it
|
||||
// needs to be extended
|
||||
auto blockTailWithPeak = this->tailAddrToBlockSize.find(this->peak);
|
||||
if (blockTailWithPeak != this->tailAddrToBlockSize.end()) {
|
||||
// there is a free block located at the end of the currently
|
||||
// allocated memory, where this free block has its tail address as
|
||||
// 'peak'
|
||||
retAddr = this->peak - blockTailWithPeak->second;
|
||||
IT_ASSERT(blockTailWithPeak->second < size);
|
||||
this->peak += (size - blockTailWithPeak->second);
|
||||
// update freeBlocks, headAddrToBlockSize and tailAddrToBlockSize
|
||||
freeBlockInfo endBlock = {retAddr, blockTailWithPeak->second};
|
||||
this->freeBlocks.erase(endBlock);
|
||||
this->headAddrToBlockSize.erase(endBlock.addr);
|
||||
this->tailAddrToBlockSize.erase(endBlock.addr + endBlock.blockSize);
|
||||
} else {
|
||||
this->peak = this->peak + size;
|
||||
}
|
||||
this->used += size;
|
||||
}
|
||||
|
||||
return retAddr;
|
||||
}
|
||||
|
||||
void LazyAllocator::free(size_t addr, size_t size) {
|
||||
IT_ASSERT(this->ptr == nullptr);
|
||||
size = getAlignedSize(size);
|
||||
auto tailAddr = addr + size;
|
||||
freeBlockInfo block = {addr, tailAddr - addr};
|
||||
this->headAddrToBlockSize[addr] = block.blockSize;
|
||||
this->tailAddrToBlockSize[tailAddr] = block.blockSize;
|
||||
auto preFreeBlockIter = this->tailAddrToBlockSize.find(addr);
|
||||
auto subFreeBlockIter = this->headAddrToBlockSize.find(tailAddr);
|
||||
if (preFreeBlockIter != this->tailAddrToBlockSize.end()) {
|
||||
// the head address of the memory block to be freed matches the end of a
|
||||
// free block, merge them together
|
||||
size_t preBlockSize = preFreeBlockIter->second;
|
||||
this->headAddrToBlockSize.erase(block.addr);
|
||||
this->headAddrToBlockSize[block.addr - preBlockSize] += block.blockSize;
|
||||
this->tailAddrToBlockSize.erase(block.addr);
|
||||
this->tailAddrToBlockSize[tailAddr] += preBlockSize;
|
||||
block.addr -= preBlockSize;
|
||||
block.blockSize += preBlockSize;
|
||||
// delete the preceding adjacent free block
|
||||
this->freeBlocks.erase(freeBlockInfo{block.addr, preBlockSize});
|
||||
}
|
||||
if (subFreeBlockIter != this->headAddrToBlockSize.end()) {
|
||||
// the tail address of the memory block to be freed matches the start of
|
||||
// a free block, merge them together
|
||||
auto subBlockSize = subFreeBlockIter->second;
|
||||
this->headAddrToBlockSize.erase(tailAddr);
|
||||
this->headAddrToBlockSize[block.addr] += subBlockSize;
|
||||
this->tailAddrToBlockSize.erase(tailAddr);
|
||||
this->tailAddrToBlockSize[tailAddr + subBlockSize] += block.blockSize;
|
||||
tailAddr += subBlockSize;
|
||||
block.blockSize += subBlockSize;
|
||||
// delete the succeeding adjacent memory block
|
||||
this->freeBlocks.erase(
|
||||
freeBlockInfo{tailAddr - subBlockSize, subBlockSize});
|
||||
}
|
||||
this->freeBlocks.insert(block);
|
||||
this->used -= size;
|
||||
}
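The merging logic in free() above can be summarised with a small standalone sketch (an ordered map keyed by head address; all names are illustrative): a returned block absorbs a free neighbour that starts at its tail and/or one that ends at its head.

```cpp
#include <cassert>
#include <cstddef>
#include <map>

// Illustrative free list: head address -> block size.
std::map<size_t, size_t> freeBlocks;

void freeBlock(size_t addr, size_t size) {
    auto next = freeBlocks.find(addr + size);
    if (next != freeBlocks.end()) { // a free block starts right after ours
        size += next->second;
        freeBlocks.erase(next);
    }
    auto prev = freeBlocks.lower_bound(addr);
    if (prev != freeBlocks.begin()) {
        --prev;
        if (prev->first + prev->second == addr) { // one ends right before ours
            prev->second += size;
            return;
        }
    }
    freeBlocks[addr] = size;
}

int main() {
    freeBlock(0, 64);
    freeBlock(128, 64);
    freeBlock(64, 64); // bridges the two blocks above into one
    assert(freeBlocks.size() == 1 && freeBlocks[0] == 192);
    return 0;
}
```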
|
||||
|
||||
void *LazyAllocator::getPtr() {
|
||||
if (this->ptr == nullptr) {
|
||||
this->ptr = runtime->alloc(this->peak);
|
||||
printf("LazyAllocator really alloc: %p %lu bytes\n", this->ptr, peak);
|
||||
}
|
||||
return this->ptr;
|
||||
}
|
||||
|
||||
size_t LazyAllocator::getAlignedSize(size_t size) {
|
||||
return ((size - 1) / this->alignment + 1) * this->alignment;
|
||||
}
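A quick standalone check of the rounding rule above (the helper name is illustrative): sizes are padded up to the next multiple of the alignment, so with the 256-byte CUDA alignment even a 1-byte tensor reserves 256 bytes.

```cpp
#include <cassert>
#include <cstddef>

// Illustrative re-statement of getAlignedSize; do not call with size == 0.
static size_t alignUp(size_t size, size_t alignment) {
    return ((size - 1) / alignment + 1) * alignment;
}

int main() {
    assert(alignUp(1, 256) == 256);   // CUDA: 1 byte still reserves 256
    assert(alignUp(256, 256) == 256); // already-aligned sizes are unchanged
    assert(alignUp(257, 256) == 512);
    assert(alignUp(100, 8) == 104);   // CPU alignment of sizeof(uint64_t)
    return 0;
}
```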
|
||||
|
||||
void LazyAllocator::info() {
|
||||
std::cout << "Used memory: " << this->used
|
||||
<< ", peak memory: " << this->peak << std::endl;
|
||||
}
|
||||
|
||||
} // namespace infini
|
|
@ -0,0 +1,278 @@
|
|||
#include "core/op_type.h"
|
||||
|
||||
namespace infini {
|
||||
const char *OpType::toString() const {
|
||||
#define CASE(NAME) \
|
||||
case OpType::NAME: \
|
||||
return #NAME
|
||||
|
||||
switch (type) {
|
||||
CASE(Unknown);
|
||||
CASE(Abs);
|
||||
CASE(Acos);
|
||||
CASE(Acosh);
|
||||
CASE(Add);
|
||||
CASE(And);
|
||||
CASE(ArgMax);
|
||||
CASE(Asin);
|
||||
CASE(Asinh);
|
||||
CASE(Atan);
|
||||
CASE(Atanh);
|
||||
CASE(AveragePool);
|
||||
CASE(BatchNormalization);
|
||||
CASE(Bernoulli);
|
||||
CASE(BitShift);
|
||||
CASE(BitwiseAnd);
|
||||
CASE(BitwiseNot);
|
||||
CASE(BitwiseOr);
|
||||
CASE(BitwiseXor);
|
||||
CASE(BlackmanWindow);
|
||||
CASE(Cast);
|
||||
CASE(CastLike);
|
||||
CASE(Ceil);
|
||||
CASE(Celu);
|
||||
CASE(CenterCropPad);
|
||||
CASE(Clip);
|
||||
CASE(Col2lm);
|
||||
CASE(Compress);
|
||||
CASE(Concat);
|
||||
CASE(ConcatFromSequence);
|
||||
CASE(ConstantOfShape);
|
||||
CASE(Conv);
|
||||
CASE(ConvInteger);
|
||||
CASE(ConvTranspose);
|
||||
CASE(Cos);
|
||||
CASE(Cosh);
|
||||
CASE(CumSum);
|
||||
CASE(DFT);
|
||||
CASE(DeformConv);
|
||||
CASE(DepthToSpace);
|
||||
CASE(DequantizeLinear);
|
||||
CASE(Det);
|
||||
CASE(Div);
|
||||
CASE(Dropout);
|
||||
CASE(DynamicQuantizeLinear);
|
||||
CASE(Einsum);
|
||||
CASE(Elu);
|
||||
CASE(Equal);
|
||||
CASE(Erf);
|
||||
CASE(Exp);
|
||||
CASE(Expand);
|
||||
CASE(EyeLike);
|
||||
CASE(Flatten);
|
||||
CASE(Floor);
|
||||
CASE(GRU);
|
||||
CASE(Gather);
|
||||
CASE(GatherElements);
|
||||
CASE(GatherND);
|
||||
CASE(Gemm);
|
||||
CASE(GlobalAveragePool);
|
||||
CASE(GlobalLpPool);
|
||||
CASE(GlobalMaxPool);
|
||||
CASE(Greater);
|
||||
CASE(GreaterOrEqual);
|
||||
CASE(GridSample);
|
||||
CASE(GroupNormalization);
|
||||
CASE(HammingWindow);
|
||||
CASE(HannWindow);
|
||||
CASE(HardSigmoid);
|
||||
CASE(HardSwish);
|
||||
CASE(Hardmax);
|
||||
CASE(Identity);
|
||||
CASE(If);
|
||||
CASE(InstanceNormalization);
|
||||
CASE(IsInf);
|
||||
CASE(IsNaN);
|
||||
CASE(LRN);
|
||||
CASE(LSTM);
|
||||
CASE(LayerNormalization);
|
||||
CASE(LeakyRelu);
|
||||
CASE(Less);
|
||||
CASE(LessOrEqual);
|
||||
CASE(Log);
|
||||
CASE(LogSoftmax);
|
||||
CASE(Loop);
|
||||
CASE(LpNormalization);
|
||||
CASE(LpPool);
|
||||
CASE(MatMul);
|
||||
CASE(MatMulInteger);
|
||||
CASE(Max);
|
||||
CASE(MaxPool);
|
||||
CASE(MaxRoiPool);
|
||||
CASE(MaxUnpool);
|
||||
CASE(Mean);
|
||||
CASE(MeanVarianceNormalization);
|
||||
CASE(MelWeightMatrix);
|
||||
CASE(Min);
|
||||
CASE(Mish);
|
||||
CASE(Mod);
|
||||
CASE(Mul);
|
||||
CASE(Multinomial);
|
||||
CASE(Neg);
|
||||
CASE(NegativeLogLikelihoodLoss);
|
||||
CASE(NonMaxSuppression);
|
||||
CASE(NonZero);
|
||||
CASE(Not);
|
||||
CASE(OneHot);
|
||||
CASE(Optional);
|
||||
CASE(OptionalGetElement);
|
||||
CASE(OptionalHasElement);
|
||||
CASE(Or);
|
||||
CASE(PRelu);
|
||||
CASE(Pad);
|
||||
CASE(Pow);
|
||||
CASE(QLinearConv);
|
||||
CASE(QLinearMatMul);
|
||||
CASE(QuantizeLinear);
|
||||
CASE(RNN);
|
||||
CASE(RandomNormal);
|
||||
CASE(RandomNormalLike);
|
||||
CASE(RandomUniform);
|
||||
CASE(RandomUniformLike);
|
||||
CASE(Range);
|
||||
CASE(Reciprocal);
|
||||
CASE(ReduceL1);
|
||||
CASE(ReduceL2);
|
||||
CASE(ReduceLogSum);
|
||||
CASE(ReduceLogSumExp);
|
||||
CASE(ReduceMax);
|
||||
CASE(ReduceMean);
|
||||
CASE(ReduceMin);
|
||||
CASE(ReduceProd);
|
||||
CASE(ReduceSum);
|
||||
CASE(ReduceSumSquare);
|
||||
CASE(Relu);
|
||||
CASE(Reshape);
|
||||
CASE(Resize);
|
||||
CASE(ReverseSequence);
|
||||
CASE(RoiAlign);
|
||||
CASE(Round);
|
||||
CASE(STFT);
|
||||
CASE(Scan);
|
||||
CASE(Scatter);
|
||||
CASE(ScatterElements);
|
||||
CASE(ScatterND);
|
||||
CASE(Selu);
|
||||
CASE(SequenceAt);
|
||||
CASE(SequenceConstruct);
|
||||
CASE(SequenceEmpty);
|
||||
CASE(SequenceErase);
|
||||
CASE(SequenceInsert);
|
||||
CASE(SequenceLength);
|
||||
CASE(SequenceMap);
|
||||
CASE(Shape);
|
||||
CASE(Shrink);
|
||||
CASE(Sigmoid);
|
||||
CASE(Sign);
|
||||
CASE(Sin);
|
||||
CASE(Sinh);
|
||||
CASE(Size);
|
||||
CASE(Slice);
|
||||
CASE(Softmax);
|
||||
CASE(SoftmaxCrossEntropyLoss);
|
||||
CASE(Softplus);
|
||||
CASE(Softsign);
|
||||
CASE(SpaceToDepth);
|
||||
CASE(Split);
|
||||
CASE(SplitToSequence);
|
||||
CASE(Sqrt);
|
||||
CASE(Squeeze);
|
||||
CASE(StringNormalizer);
|
||||
CASE(Sub);
|
||||
CASE(Sum);
|
||||
CASE(Tan);
|
||||
CASE(Tanh);
|
||||
CASE(TfIdfVectorizer);
|
||||
CASE(ThresholdedRelu);
|
||||
CASE(Tile);
|
||||
CASE(TopK);
|
||||
CASE(Transpose);
|
||||
CASE(Trilu);
|
||||
CASE(Unique);
|
||||
CASE(Unsqueeze);
|
||||
CASE(Upsample);
|
||||
CASE(Where);
|
||||
CASE(Xor);
|
||||
// CUSTOM DEFINED
|
||||
CASE(G2BMM);
|
||||
CASE(GBMM);
|
||||
CASE(MemBound);
|
||||
// TODO
|
||||
CASE(ConvTransNHWC);
|
||||
CASE(ConvBackwardFilter);
|
||||
CASE(ReluBackward);
|
||||
CASE(SigmoidBackward);
|
||||
CASE(TanhBackward);
|
||||
|
||||
CASE(Fill);
|
||||
CASE(Extend);
|
||||
CASE(MSELoss);
|
||||
CASE(Hardtanh);
|
||||
CASE(L2Loss);
|
||||
CASE(Rsqrt);
|
||||
CASE(FloorDiv);
|
||||
CASE(FloorMod);
|
||||
CASE(Square);
|
||||
CASE(SquaredDifference);
|
||||
default:
|
||||
return "Unknown";
|
||||
}
|
||||
|
||||
#undef CASE
|
||||
}
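The CASE macro above is the usual shortcut for enum-to-string tables: each CASE(NAME) expands to a `case OpType::NAME: return "NAME";` pair. A small standalone demo of the same pattern, using a toy enum rather than the project's OpType:

```cpp
#include <cstdio>

enum class Color { Red, Green, Blue };

const char *toString(Color c) {
#define CASE(NAME)                                                             \
    case Color::NAME:                                                          \
        return #NAME
    switch (c) {
        CASE(Red);
        CASE(Green);
        CASE(Blue);
    default:
        return "Unknown";
    }
#undef CASE
}

int main() {
    std::printf("%s\n", toString(Color::Green)); // prints "Green"
    return 0;
}
```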
|
||||
|
||||
bool OpType::isUnary() const {
|
||||
static const std::unordered_set<decltype(type)> set{
|
||||
Abs, Acos, Acosh, Asin, Asinh, Atan, Atanh, Cast, Ceil,
|
||||
Clip, Cos, Cosh, Erf, Exp, Floor, Log, Neg, Not,
|
||||
Relu, Round, Sigmoid, Sin, Sinh, Sqrt, Tan, Tanh,
|
||||
};
|
||||
|
||||
return set.find(type) != set.end();
|
||||
}
|
||||
|
||||
bool OpType::isBinary() const {
|
||||
static const std::unordered_set<decltype(type)> set{
|
||||
Add, And, BitShift, BitwiseAnd, BitwiseNot, BitwiseOr, BitwiseXor,
|
||||
Div, Mod, Mul, Or, Pow, Sub, Xor,
|
||||
};
|
||||
|
||||
return set.find(type) != set.end() || isCompair();
|
||||
}
|
||||
|
||||
bool OpType::isElementWise() const { return isUnary() || isBinary(); }
|
||||
|
||||
bool OpType::isCompair() const {
|
||||
static const std::unordered_set<decltype(type)> set{
|
||||
Equal, Greater, GreaterOrEqual, Less, LessOrEqual,
|
||||
};
|
||||
|
||||
return set.find(type) != set.end();
|
||||
}
|
||||
|
||||
bool OpType::isPool() const {
|
||||
static const std::unordered_set<decltype(type)> set{};
|
||||
|
||||
return set.find(type) != set.end();
|
||||
}
|
||||
|
||||
bool OpType::isGlobalPool() const {
|
||||
static const std::unordered_set<decltype(type)> set{
|
||||
GlobalAveragePool,
|
||||
GlobalLpPool,
|
||||
GlobalMaxPool,
|
||||
};
|
||||
|
||||
return set.find(type) != set.end();
|
||||
}
|
||||
|
||||
bool OpType::isMatMulOrConv() const {
|
||||
static const std::unordered_set<decltype(type)> set{
|
||||
Conv, ConvInteger, ConvTranspose, DeformConv,
|
||||
QLinearConv, MatMul, MatMulInteger, QLinearMatMul,
|
||||
};
|
||||
|
||||
return set.find(type) != set.end();
|
||||
}
|
||||
|
||||
} // namespace infini
|
|
@ -10,33 +10,6 @@ OperatorObj::OperatorObj(OpType opType, TensorVec inputs, TensorVec outputs)
|
|||
IT_ASSERT(t);
|
||||
}
|
||||
|
||||
bool OperatorObj::isLinearOp() const {
|
||||
return enum_to_underlying(type) >= 100 && enum_to_underlying(type) < 200;
|
||||
}
|
||||
|
||||
bool OperatorObj::isElementWiseOp() const {
|
||||
return enum_to_underlying(type) >= 200 && enum_to_underlying(type) < 300;
|
||||
}
|
||||
|
||||
bool OperatorObj::isSplitOp() const { return type == OpType::Split; }
|
||||
|
||||
bool OperatorObj::isConcatOp() const { return type == OpType::Concat; }
|
||||
|
||||
bool OperatorObj::isComputeOp() const {
|
||||
return type == OpType::Conv || type == OpType::Matmul ||
|
||||
type == OpType::ConvTrans || type == OpType::ConvTransNHWC ||
|
||||
type == OpType::G2BMM || type == OpType::GBMM;
|
||||
}
|
||||
|
||||
bool OperatorObj::isTransposeOp() const { return type == OpType::Transpose; }
|
||||
|
||||
bool OperatorObj::isReshapeOp() const { return type == OpType::Reshape; }
|
||||
|
||||
bool OperatorObj::isMemBoundOp() const {
|
||||
return type == OpType::MemBound || type == OpType::Activation ||
|
||||
type == OpType::Transpose;
|
||||
}
|
||||
|
||||
void OperatorObj::removePredecessors(const Operator &op) {
|
||||
for (auto it = predecessors.begin(); it != predecessors.end();) {
|
||||
if (it->lock() == op)
|
||||
|
@ -69,14 +42,14 @@ OpPerfKey OperatorObj::getOpPerfKey() const {
|
|||
// Operator::hash, which hashes operator attributes and ignores tensor
|
||||
// shapes.
|
||||
HashType hash = 0;
|
||||
hash = hashAppend(hash, enum_to_underlying(type));
|
||||
hash = hashAppend(hash, type.underlying());
|
||||
hash = hashAppend(hash, hashVector(workloadVector));
|
||||
return OpPerfKey(hash, type, workloadVector);
|
||||
}
|
||||
|
||||
HashType OperatorObj::hash() const {
|
||||
HashType hash = 0;
|
||||
hash = hashAppend(hash, enum_to_underlying(type));
|
||||
hash = hashAppend(hash, type.underlying());
|
||||
hash = hashAppend(hash, hashVector(getOpAttrVector()));
|
||||
return hash;
|
||||
}
|
||||
|
@ -110,8 +83,6 @@ optional<vector<Shape>> OperatorObj::inferShape() const {
|
|||
|
||||
vector<DataType> OperatorObj::inferDataType(const TensorVec &inputs) const {
|
||||
auto dataType = inputs[0]->getDType();
|
||||
for (const auto &tensor : inputs)
|
||||
IT_ASSERT(dataType == tensor->getDType());
|
||||
return vector(numOutputs(), dataType);
|
||||
}
|
||||
|
||||
|
|
|
@ -30,9 +30,7 @@ void from_json(const json &j, OpPerfKey &p) {
|
|||
j.at("opType").get_to(p.opType);
|
||||
j.at("attrs").get_to(p.attrs);
|
||||
}
|
||||
void to_json(json &j, const DataType &p) {
|
||||
j = p.toString() == "Float32" ? 0 : 1;
|
||||
}
|
||||
void to_json(json &j, const DataType &p) { j = p.getIndex(); }
|
||||
void from_json(const json &j, DataType &p) { p = DataType(j.get<int>()); }
|
||||
void to_json(json &j, const PerfRecord &p) { p->to_json(j); }
|
||||
void from_json(const json &j, PerfRecord &p) {
|
||||
|
@ -49,4 +47,4 @@ void from_json(const json &j, PerfEngine &p) {
|
|||
p.set_data(tmp);
|
||||
}
|
||||
|
||||
} // namespace infini
|
||||
} // namespace infini
|
||||
|
|
|
@ -17,7 +17,8 @@ void CpuRuntimeObj::run(const Graph &graph, bool tune, bool profiling) const {
|
|||
std::map<OpType, int> opCnt;
|
||||
|
||||
for (auto &op : graph->getOperators()) {
|
||||
auto kernelAttrs = KernelAttrs{device, op->getOpType(), op->getDType()};
|
||||
auto kernelAttrs =
|
||||
KernelAttrs{device, op->getOpType().underlying(), op->getDType()};
|
||||
Kernel *kernel = kernelRegistry.getKernel(kernelAttrs);
|
||||
auto perfKey = PerfEngine::Key{kernelAttrs, op->getOpPerfKey()};
|
||||
auto perfData = perfEngine.getPerfData(perfKey);
|
||||
|
@ -65,7 +66,8 @@ double RuntimeObj::getPerfTime(const Graph &graph, bool profiling) const {
|
|||
std::map<OpType, int> opCnt;
|
||||
|
||||
for (auto &op : graph->getOperators()) {
|
||||
auto kernelAttrs = KernelAttrs{device, op->getOpType(), op->getDType()};
|
||||
auto kernelAttrs =
|
||||
KernelAttrs{device, op->getOpType().underlying(), op->getDType()};
|
||||
Kernel *kernel = kernelRegistry.getKernel(kernelAttrs);
|
||||
auto perfKey = PerfEngine::Key{kernelAttrs, op->getOpPerfKey()};
|
||||
auto perfData = perfEngine.getPerfData(perfKey);
|
||||
|
@ -116,9 +118,8 @@ void RuntimeObj::printProfilingData(double totalTime,
|
|||
const std::map<OpType, int> &opCnt) const {
|
||||
printf("%11s %3s %7s %7s %7s\n", "Op", "Cnt", "T_tot", "Percent", "T_mean");
|
||||
for (const auto &[type, t] : opTime) {
|
||||
printf("%11s %3d %7.3f %7.1f %7.3f\n",
|
||||
OpRegistry::getOpName(type).data(), opCnt.at(type), t,
|
||||
t / totalTime * 100, t / opCnt.at(type));
|
||||
printf("%11s %3d %7.3f %7.1f %7.3f\n", type.toString(), opCnt.at(type),
|
||||
t, t / totalTime * 100, t / opCnt.at(type));
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -127,7 +127,7 @@ SearchEngine::buildMetaGraphWithGraph(const Graph graph) {
|
|||
std::vector<Operator> ops;
|
||||
ops.emplace_back(op);
|
||||
node.graph = make_ref<GraphObj>(runtimeExec, ops);
|
||||
node.type = op->isComputeOp();
|
||||
node.type = op->getOpType().isMatMulOrConv();
|
||||
node.cnt = op->getPredecessors().size();
|
||||
opMap.emplace(op->getGuid(), i);
|
||||
metaGraph->nodes.emplace_back(node);
|
||||
|
@ -196,7 +196,7 @@ std::shared_ptr<SearchEngine::MetaGraph> SearchEngine::buildMetaGraphWithPlan(
|
|||
}
|
||||
node.graph = make_ref<GraphObj>(runtimeExec, ops);
|
||||
node.cnt = node.pre.size();
|
||||
node.type = ops[0]->isComputeOp();
|
||||
node.type = ops[0]->getOpType().isMatMulOrConv();
|
||||
resultMetaGraph->nodes.emplace_back(node);
|
||||
}
|
||||
}
|
||||
|
@ -404,7 +404,7 @@ std::vector<Graph> SearchEngine::partitionGraph(const Graph graph) {
|
|||
headOps.emplace_back(op);
|
||||
if (op->getPredecessors().size() + op->getSuccessors().size() >=
|
||||
(size_t)partitionThreshold &&
|
||||
!op->isComputeOp()) {
|
||||
!op->getOpType().isMatMulOrConv()) {
|
||||
auto preOrderI = preOrder[op->getGuid()];
|
||||
auto postOrderI = postOrder[op->getGuid()];
|
||||
for (size_t j = 0; j < i; j++) {
|
||||
|
|
|
@ -71,14 +71,21 @@ void TensorObj::printData() const {
|
|||
if (dtype == DataType(N)) \
|
||||
std::cout << dataToString<DT<N>::t>() << std::endl;
|
||||
|
||||
TRY_PRINT(0) // fmt: new line
|
||||
else TRY_PRINT(1) //
|
||||
else TRY_PRINT(2) //
|
||||
else TRY_PRINT(3) //
|
||||
else TRY_PRINT(4) //
|
||||
else TRY_PRINT(5) //
|
||||
else TRY_PRINT(6) //
|
||||
else TRY_PRINT(7) //
|
||||
TRY_PRINT(0) // fmt: new line
|
||||
else TRY_PRINT(1) //
|
||||
else TRY_PRINT(2) //
|
||||
else TRY_PRINT(3) //
|
||||
else TRY_PRINT(4) //
|
||||
else TRY_PRINT(5) //
|
||||
else TRY_PRINT(6) //
|
||||
else TRY_PRINT(7) //
|
||||
else TRY_PRINT(8) //
|
||||
else TRY_PRINT(9) //
|
||||
else TRY_PRINT(10) //
|
||||
else TRY_PRINT(11) //
|
||||
else TRY_PRINT(12) //
|
||||
else TRY_PRINT(13) //
|
||||
else TRY_PRINT(16) //
|
||||
else IT_TODO_HALT();
|
||||
|
||||
#undef TRY_PRINT
|
||||
|
@ -98,14 +105,21 @@ bool TensorObj::equalData(const Tensor &rhs, double relativeError) const {
|
|||
return equalDataImpl(getRawDataPtr<DT<N>::t *>(), \
|
||||
rhs->getRawDataPtr<DT<N>::t *>(), size());
|
||||
|
||||
TEST_EQUAL(0) // fmt: new line
|
||||
else TEST_EQUAL(1) //
|
||||
else TEST_EQUAL(2) //
|
||||
else TEST_EQUAL(3) //
|
||||
else TEST_EQUAL(4) //
|
||||
else TEST_EQUAL(5) //
|
||||
else TEST_EQUAL(6) //
|
||||
else TEST_EQUAL(7) //
|
||||
TEST_EQUAL(0) // fmt: new line
|
||||
else TEST_EQUAL(1) //
|
||||
else TEST_EQUAL(2) //
|
||||
else TEST_EQUAL(3) //
|
||||
else TEST_EQUAL(4) //
|
||||
else TEST_EQUAL(5) //
|
||||
else TEST_EQUAL(6) //
|
||||
else TEST_EQUAL(7) //
|
||||
else TEST_EQUAL(8) //
|
||||
else TEST_EQUAL(9) //
|
||||
else TEST_EQUAL(10) //
|
||||
else TEST_EQUAL(11) //
|
||||
else TEST_EQUAL(12) //
|
||||
else TEST_EQUAL(13) //
|
||||
else TEST_EQUAL(16) //
|
||||
else IT_TODO_HALT();
|
||||
|
||||
#undef TEST_EQUAL
|
||||
|
@ -138,6 +152,8 @@ void TensorObj::setData(
|
|||
}
|
||||
}
|
||||
|
||||
void TensorObj::setDataBlob(const Blob &blob) { this->data = blob; }
|
||||
|
||||
void TensorObj::load(std::string file_path) { loadTensorData(this, file_path); }
|
||||
|
||||
void TensorObj::save(std::string file_path) { saveTensorData(this, file_path); }
|
||||
|
|
|
@ -12,7 +12,7 @@ void CudaRuntimeObj::runWithoutSync(const Graph &graph) const {
|
|||
for (auto &op : graph->getOperators()) {
|
||||
// HACK: set correct data type
|
||||
auto kernelAttrs =
|
||||
KernelAttrs{device, op->getOpType(), DataType::Float32};
|
||||
KernelAttrs{device, op->getOpType().underlying(), op->getDType()};
|
||||
Kernel *kernel = kernelRegistry.getKernel(kernelAttrs);
|
||||
auto perfKey = PerfEngine::Key{kernelAttrs, op->getOpPerfKey()};
|
||||
auto perfData = perfEngine.getPerfData(perfKey);
|
||||
|
@ -33,8 +33,8 @@ void CudaRuntimeObj::tune(const Graph &graph, bool profiling = false) const {
|
|||
std::map<OpType, int> opCnt;
|
||||
for (auto &op : graph->getOperators()) {
|
||||
// HACK: set correct data type
|
||||
auto kernelAttrs =
|
||||
KernelAttrs{device, op->getOpType(), DataType::Float32};
|
||||
auto kernelAttrs = KernelAttrs{device, op->getOpType().underlying(),
|
||||
DataType::Float32};
|
||||
Kernel *kernel = kernelRegistry.getKernel(kernelAttrs);
|
||||
auto perfKey = PerfEngine::Key{kernelAttrs, op->getOpPerfKey()};
|
||||
auto perfData = perfEngine.getPerfData(perfKey);
|
||||
|
|
|
@ -51,6 +51,8 @@ void register_operator_timer(py::module &m) {
|
|||
#endif
|
||||
}
|
||||
|
||||
decltype(OpType::type) getId(OpType const *const ptr) { return ptr->type; }
|
||||
|
||||
void export_values(py::module &m) {
|
||||
#define VALUE(TYPE, NAME) value(#NAME, TYPE::NAME)
|
||||
|
||||
|
@ -61,13 +63,13 @@ void export_values(py::module &m) {
|
|||
.VALUE(ActType, Tanh)
|
||||
.export_values();
|
||||
|
||||
py::enum_<OpType>(m, "OpType")
|
||||
.VALUE(OpType, Unknown)
|
||||
py::class_<OpType>(m, "OpType")
|
||||
.def(py::init<decltype(OpType::type)>())
|
||||
.def("id", getId, policy::automatic);
|
||||
py::enum_<decltype(OpType::type)>(m, "OpTypeId")
|
||||
.VALUE(OpType, Conv)
|
||||
.VALUE(OpType, Matmul)
|
||||
.VALUE(OpType, ConvTrans)
|
||||
.VALUE(OpType, G2BMM)
|
||||
.VALUE(OpType, GBMM)
|
||||
.VALUE(OpType, MatMul)
|
||||
.VALUE(OpType, ConvTranspose)
|
||||
.VALUE(OpType, Pad)
|
||||
.VALUE(OpType, Clip)
|
||||
.VALUE(OpType, Slice)
|
||||
|
@ -76,7 +78,7 @@ void export_values(py::module &m) {
|
|||
.VALUE(OpType, Transpose)
|
||||
.VALUE(OpType, Extend)
|
||||
.VALUE(OpType, MaxPool)
|
||||
.VALUE(OpType, AvgPool)
|
||||
.VALUE(OpType, AveragePool)
|
||||
.VALUE(OpType, Add)
|
||||
.VALUE(OpType, Sub)
|
||||
.VALUE(OpType, Mul)
|
||||
|
@ -87,9 +89,8 @@ void export_values(py::module &m) {
|
|||
.VALUE(OpType, Reshape)
|
||||
.VALUE(OpType, Flatten)
|
||||
.VALUE(OpType, Identity)
|
||||
.VALUE(OpType, BatchNorm)
|
||||
.VALUE(OpType, BatchNormalization)
|
||||
.VALUE(OpType, Softmax)
|
||||
.VALUE(OpType, Activation)
|
||||
.VALUE(OpType, Relu)
|
||||
.VALUE(OpType, PRelu)
|
||||
.VALUE(OpType, Sigmoid)
|
||||
|
@ -97,28 +98,43 @@ void export_values(py::module &m) {
|
|||
.VALUE(OpType, Abs)
|
||||
.VALUE(OpType, Resize)
|
||||
.VALUE(OpType, Dropout)
|
||||
.VALUE(OpType, Cast)
|
||||
.export_values();
|
||||
|
||||
#undef VALUE
|
||||
}
|
||||
|
||||
static int tensor_dtype(Tensor t) {
|
||||
if (t->getDType() == DataType::Undefine)
|
||||
return 0;
|
||||
if (t->getDType() == DataType::Float32)
|
||||
return OnnxDType::FLOAT;
|
||||
if (t->getDType() == DataType::UInt32)
|
||||
return OnnxDType::UINT32;
|
||||
return 1;
|
||||
if (t->getDType() == DataType::UInt8)
|
||||
return OnnxDType::UINT8;
|
||||
return 2;
|
||||
if (t->getDType() == DataType::Int8)
|
||||
return OnnxDType::INT8;
|
||||
return 3;
|
||||
if (t->getDType() == DataType::UInt16)
|
||||
return OnnxDType::UINT16;
|
||||
return 4;
|
||||
if (t->getDType() == DataType::Int16)
|
||||
return OnnxDType::INT16;
|
||||
return 5;
|
||||
if (t->getDType() == DataType::Int32)
|
||||
return OnnxDType::INT32;
|
||||
return 6;
|
||||
if (t->getDType() == DataType::Int64)
|
||||
return OnnxDType::INT64;
|
||||
return 7;
|
||||
if (t->getDType() == DataType::String)
|
||||
return 8;
|
||||
if (t->getDType() == DataType::Bool)
|
||||
return 9;
|
||||
if (t->getDType() == DataType::Float16)
|
||||
return 10;
|
||||
if (t->getDType() == DataType::Double)
|
||||
return 11;
|
||||
if (t->getDType() == DataType::UInt32)
|
||||
return 12;
|
||||
if (t->getDType() == DataType::UInt64)
|
||||
return 13;
|
||||
if (t->getDType() == DataType::BFloat16)
|
||||
return 16;
|
||||
IT_ASSERT(false, "Unsupported data type");
|
||||
}
|
||||
|
||||
|
@ -147,7 +163,7 @@ static std::tuple<int, int, int, int, int, int> conv_attrs_of(Operator op) {
|
|||
|
||||
static std::tuple<int, int, int, int, int, int, int, int>
|
||||
conv_trans_attrs_of(Operator op) {
|
||||
IT_ASSERT(op->getOpType() == OpType::ConvTrans);
|
||||
IT_ASSERT(op->getOpType() == OpType::ConvTranspose);
|
||||
auto conv = dynamic_cast<const ConvTransposed2dObj *>(op.get());
|
||||
auto [oph, opw] = conv->getOutputPadding();
|
||||
return std::make_tuple(conv->getPh(), conv->getPw(), conv->getDh(),
|
||||
|
@ -156,13 +172,13 @@ conv_trans_attrs_of(Operator op) {
|
|||
}
|
||||
|
||||
static std::tuple<bool, bool> matmul_attrs_of(Operator op) {
|
||||
IT_ASSERT(op->getOpType() == OpType::Matmul);
|
||||
IT_ASSERT(op->getOpType() == OpType::MatMul);
|
||||
auto matmul = dynamic_cast<const MatmulObj *>(op.get());
|
||||
return std::make_tuple(matmul->getTransA(), matmul->getTransB());
|
||||
}
|
||||
|
||||
static std::tuple<float, float, bool> batch_norm_attrs_of(Operator op) {
|
||||
IT_ASSERT(op->getOpType() == OpType::BatchNorm);
|
||||
IT_ASSERT(op->getOpType() == OpType::BatchNormalization);
|
||||
auto batchnorm = dynamic_cast<const BatchNormObj *>(op.get());
|
||||
return std::make_tuple(batchnorm->getMomentum(), batchnorm->getEps(),
|
||||
batchnorm->getTrainingMode());
|
||||
|
@ -171,7 +187,7 @@ static std::tuple<float, float, bool> batch_norm_attrs_of(Operator op) {
|
|||
static std::tuple<int, int, int, int, int, int, int, int>
|
||||
pool_attrs_of(Operator op) {
|
||||
IT_ASSERT(op->getOpType() == OpType::MaxPool ||
|
||||
op->getOpType() == OpType::AvgPool);
|
||||
op->getOpType() == OpType::AveragePool);
|
||||
auto pool = dynamic_cast<const PoolingObj *>(op.get());
|
||||
return std::make_tuple(pool->getKh(), pool->getKw(), pool->getDh(),
|
||||
pool->getDw(), pool->getPh(), pool->getPw(),
|
||||
|
@ -231,6 +247,18 @@ static vector<int> transpose_permute_of(Operator op) {
|
|||
return dynamic_cast<const TransposeObj *>(op.get())->getPermute();
|
||||
}
|
||||
|
||||
static int flatten_axis_of(Operator op) {
|
||||
IT_ASSERT(op->getOpType() == OpType::Flatten);
|
||||
return dynamic_cast<const FlattenObj *>(op.get())->getAxis();
|
||||
}
|
||||
|
||||
static int cast_to_of(Operator op) {
|
||||
IT_ASSERT(op->getOpType() == OpType::Cast);
|
||||
auto castOutputDtype =
|
||||
dynamic_cast<const CastObj *>(op.get())->getOutputDataType();
|
||||
return castOutputDtype.getIndex();
|
||||
}
|
||||
|
||||
void export_functions(py::module &m) {
|
||||
#define FUNCTION(NAME) def(#NAME, &NAME)
|
||||
m.def("cpu_runtime", &NativeCpuRuntimeObj::getInstance)
|
||||
|
@ -263,7 +291,9 @@ void export_functions(py::module &m) {
|
|||
.FUNCTION(transpose_permute_of)
|
||||
.FUNCTION(concat_axis_of)
|
||||
.FUNCTION(split_axis_of)
|
||||
.FUNCTION(gather_axis_of);
|
||||
.FUNCTION(gather_axis_of)
|
||||
.FUNCTION(flatten_axis_of)
|
||||
.FUNCTION(cast_to_of);
|
||||
#undef FUNCTION
|
||||
}
|
||||
|
||||
|
@ -291,9 +321,15 @@ void init_graph_builder(py::module &m) {
|
|||
.def("copyin_float", &TensorObj::copyin<float>, policy::move)
|
||||
.def("copyin_int32", &TensorObj::copyin<int32_t>, policy::move)
|
||||
.def("copyin_int64", &TensorObj::copyin<int64_t>, policy::move)
|
||||
.def("copyin_int8", &TensorObj::copyin<int8_t>, policy::move)
|
||||
.def("copyin_uint8", &TensorObj::copyin<uint8_t>, policy::move)
|
||||
.def("copyin_float16", &TensorObj::copyin<uint16_t>, policy::move)
|
||||
.def("copyout_float", &TensorObj::copyout<float>, policy::move)
|
||||
.def("copyout_int32", &TensorObj::copyout<int32_t>, policy::move)
|
||||
.def("copyout_int64", &TensorObj::copyout<int64_t>, policy::move)
|
||||
.def("copyout_int8", &TensorObj::copyout<int8_t>, policy::move)
|
||||
.def("copyout_uint8", &TensorObj::copyout<uint8_t>, policy::move)
|
||||
.def("copyout_float16", &TensorObj::copyout<uint16_t>, policy::move)
|
||||
.def("has_target", &TensorObj::hasTarget, policy::automatic)
|
||||
.def("src", &TensorObj::getSource, policy::move)
|
||||
.def("printData", &TensorObj::printData, policy::automatic);
|
||||
|
@ -310,7 +346,7 @@ void init_graph_builder(py::module &m) {
|
|||
.def("conv", &Handler::conv, policy::move)
|
||||
.def("convTransposed2d", &Handler::convTransposed2d, policy::move)
|
||||
.def("matmul", &Handler::matmul, policy::move)
|
||||
.def("batchNorm", &Handler::batchNorm, policy::move)
|
||||
.def("batchNormalization", &Handler::batchNormalization, policy::move)
|
||||
.def("maxPool", &Handler::maxPool, policy::move)
|
||||
.def("avgPool", &Handler::avgPool, policy::move)
|
||||
.def("add", &Handler::add, policy::move)
|
||||
|
@ -323,6 +359,7 @@ void init_graph_builder(py::module &m) {
|
|||
.def("tanh", &Handler::tanh, policy::move)
|
||||
.def("softmax", &Handler::softmax, policy::move)
|
||||
.def("abs", &Handler::abs, policy::move)
|
||||
.def("sqrt", &Handler::sqrt, policy::move)
|
||||
.def("shape", &Handler::shape, policy::move)
|
||||
.def("identity", &Handler::identity, policy::move)
|
||||
.def("flatten", &Handler::flatten, policy::move)
|
||||
|
@@ -336,11 +373,15 @@ void init_graph_builder(py::module &m) {
        .def("reduce_mean", &Handler::reduceMean, policy::move)
        .def("slice", &Handler::slice, policy::move)
        .def("pad", &Handler::pad, policy::move)
        .def("cast", &Handler::cast, policy::move)
        .def("topo_sort", &Handler::topo_sort, policy::automatic)
        .def("optimize", &Handler::optimize, policy::automatic)
        .def("operators", &Handler::operators, policy::move)
        .def("data_malloc", &Handler::data_malloc, policy::automatic)
        .def("run", &Handler::run, policy::automatic);
        .def("get_perf_time", &Handler::get_perf_time, policy::automatic)
        .def("tune", &Handler::tune, policy::automatic)
        .def("run", &Handler::run, policy::automatic)
        .def("get_perf_time", &Handler::get_perf_time, policy::automatic);
}

} // namespace infini
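The hunk above exposes `tune` and `get_perf_time` alongside `run`. A minimal sketch of the intended call order on the handler bound here; the `GraphHandlerObj` type name, argument lists, and the return type of `get_perf_time` are assumptions, only the method names come from the bindings above:

```cpp
// Editor's sketch, not part of this commit.
#include "core/graph_handler.h"

void profileOnce(infini::GraphHandlerObj &handler) {
    handler.topo_sort();   // order operators before allocation
    handler.data_malloc(); // allocate tensor memory
    handler.tune();        // newly exposed: pick the best kernel per operator
    handler.run();         // execute the graph once
    auto ms = handler.get_perf_time(); // newly exposed: timed execution
    (void)ms;
}
```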
@ -92,43 +92,6 @@ class RoundCnnl : public BangKernelWithoutConfig {
|
|||
}
|
||||
};
|
||||
|
||||
class SquareCnnl : public BangKernelWithoutConfig {
|
||||
void compute(const Operator &_op,
|
||||
const RuntimeObj *_context) const override {
|
||||
auto op = as<UnaryObj>(_op);
|
||||
auto context = dynamic_cast<const BangRuntimeObj *>(_context);
|
||||
|
||||
void *const aData = (op->getInputs(0)->getRawDataPtr<void *>());
|
||||
void *const cData = (op->getOutput()->getRawDataPtr<void *>());
|
||||
|
||||
cnnlTensorDescriptor_t aDesc, cDesc;
|
||||
auto dim = op->getInputs(0)->getDims();
|
||||
if (dim.size() != 4)
|
||||
IT_TODO_HALT();
|
||||
|
||||
int dim_array[4] = {dim[0], dim[1], dim[2], dim[3]};
|
||||
// get inputs
|
||||
checkCnnlError(cnnlCreateTensorDescriptor(&aDesc));
|
||||
checkCnnlError(cnnlSetTensorDescriptor(aDesc, CNNL_LAYOUT_NCHW,
|
||||
CNNL_DTYPE_FLOAT, 4, dim_array));
|
||||
|
||||
// get outputs
|
||||
checkCnnlError(cnnlCreateTensorDescriptor(&cDesc));
|
||||
checkCnnlError(cnnlSetTensorDescriptor(cDesc, CNNL_LAYOUT_NCHW,
|
||||
CNNL_DTYPE_FLOAT, 4, dim_array));
|
||||
|
||||
cnnlStatus_t stat =
|
||||
cnnlSquare(context->cnnlHandle(), aDesc, aData, cDesc, cData);
|
||||
if (stat != CNNL_STATUS_SUCCESS)
|
||||
return;
|
||||
|
||||
        // Destroying descriptors in BANG does not require a sync, but cnnl
        // does not state whether a sync is required before destruction.
|
||||
checkCnnlError(cnnlDestroyTensorDescriptor(aDesc));
|
||||
checkCnnlError(cnnlDestroyTensorDescriptor(cDesc));
|
||||
}
|
||||
};
|
||||
|
||||
class PReluCnnl : public BangKernelWithoutConfig {
|
||||
void compute(const Operator &_op,
|
||||
const RuntimeObj *_context) const override {
|
||||
|
@@ -185,24 +148,13 @@ class SigmoidCnnl : public UnaryCnnl {
    float getCoef() const override { return 0.0; }
};

class TanhCnnl : public UnaryCnnl {
    cnnlActivationMode_t getOpType() const override {
        return CNNL_ACTIVATION_TANH;
    }
    float getCoef() const override { return 0.0; }
};

REGISTER_KERNEL(Device::BANG, OpType::Relu, DataType::Float32, ReluCnnl,
                "Relu_cnnl_BANG_Float32");
REGISTER_KERNEL(Device::BANG, OpType::PRelu, DataType::Float32, PReluCnnl,
                "PRelu_cnnl_BANG_Float32");
REGISTER_KERNEL(Device::BANG, OpType::Sigmoid, DataType::Float32, SigmoidCnnl,
                "Sigmoid_cnnl_BANG_Float32");
REGISTER_KERNEL(Device::BANG, OpType::Tanh, DataType::Float32, TanhCnnl,
                "Tanh_cnnl_BANG_Float32");
REGISTER_KERNEL(Device::BANG, OpType::Round, DataType::Float32, RoundCnnl,
                "Round_cnnl_BANG_Float32");
REGISTER_KERNEL(Device::BANG, OpType::Square, DataType::Float32, SquareCnnl,
                "Square_cnnl_BANG_Float32");

}; // namespace infini
@ -0,0 +1,71 @@
|
|||
#include "bang/bang_kernel_without_config.h"
|
||||
#include "bang/bang_runtime.h"
|
||||
#include "operators/batch_norm.h"
|
||||
|
||||
namespace infini {
|
||||
class BatchNormCnnl : public BangKernelWithoutConfig {
|
||||
void compute(const Operator &_op,
|
||||
const RuntimeObj *_context) const override {
|
||||
auto op = as<BatchNormObj>(_op);
|
||||
auto context = dynamic_cast<const BangRuntimeObj *>(_context);
|
||||
|
||||
void *const input = (op->getInputs(0)->getRawDataPtr<void *>());
|
||||
void *const mean = (op->getInputs(1)->getRawDataPtr<void *>());
|
||||
void *const var = (op->getInputs(2)->getRawDataPtr<void *>());
|
||||
void *const scale = (op->getInputs(3)->getRawDataPtr<void *>());
|
||||
void *const bias = (op->getInputs(4)->getRawDataPtr<void *>());
|
||||
void *const output = (op->getOutput()->getRawDataPtr<void *>());
|
||||
|
||||
auto dims = op->getInputs(0)->getDims();
|
||||
|
||||
if (dims.size() != 4)
|
||||
IT_TODO_HALT();
|
||||
|
||||
int dimArray[4], strideArray[4], dimPArray[1], stridePArray[1];
|
||||
|
||||
for (size_t i = 0; i < dims.size(); ++i) {
|
||||
dimArray[i] = dims[i];
|
||||
strideArray[i] = op->getInputs(0)->getStride()[i];
|
||||
}
|
||||
int w = dimArray[3];
|
||||
dimArray[3] = dimArray[1];
|
||||
int h = dimArray[2];
|
||||
dimArray[1] = h;
|
||||
dimArray[2] = w;
|
||||
|
||||
dimPArray[0] = op->getInputs(1)->getDims()[0];
|
||||
stridePArray[0] = op->getInputs(1)->getDims()[0];
|
||||
// get inputs
|
||||
cnnlTensorDescriptor_t inDesc;
|
||||
checkCnnlError(cnnlCreateTensorDescriptor(&inDesc));
|
||||
checkCnnlError(cnnlSetTensorDescriptorEx(inDesc, CNNL_LAYOUT_NHWC,
|
||||
CNNL_DTYPE_FLOAT, dims.size(),
|
||||
dimArray, strideArray));
|
||||
|
||||
// get bnScaleBiasMeanVarDesc
|
||||
cnnlTensorDescriptor_t paraDesc;
|
||||
checkCnnlError(cnnlCreateTensorDescriptor(¶Desc));
|
||||
checkCnnlError(cnnlSetTensorDescriptorEx(paraDesc, CNNL_LAYOUT_ARRAY,
|
||||
CNNL_DTYPE_FLOAT, 1, dimPArray,
|
||||
stridePArray));
|
||||
|
||||
float alpha = 1.f, beta = 0.f;
|
||||
// This mode is intended for use after convolutional layers
|
||||
cnnlStatus_t stat = cnnlBatchNormForwardInference(
|
||||
context->cnnlHandle(), &alpha, &beta, inDesc, input, paraDesc,
|
||||
scale, bias, mean, var, op->getEps(), inDesc, output);
|
||||
|
||||
if (stat != CNNL_STATUS_SUCCESS)
|
||||
return;
|
||||
|
||||
        // Destroying descriptors in BANG does not require a sync, but cnnl
        // does not state whether a sync is required before destruction.
|
||||
checkCnnlError(cnnlDestroyTensorDescriptor(inDesc));
|
||||
checkCnnlError(cnnlDestroyTensorDescriptor(paraDesc));
|
||||
}
|
||||
};
|
||||
|
||||
REGISTER_KERNEL(Device::BANG, OpType::BatchNormalization, DataType::Float32,
|
||||
BatchNormCnnl, "BatchNorm_cnnl_BANG_Float32");
|
||||
|
||||
}; // namespace infini
|
|
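The loop in `BatchNormCnnl` above copies the NCHW dims and then swaps positions 1-3 so the input descriptor can be declared with `CNNL_LAYOUT_NHWC`. As a standalone illustration of that shuffle (not project code):

```cpp
// Illustration only: the dimension shuffle performed before building the
// NHWC descriptor, i.e. {n, c, h, w} -> {n, h, w, c}.
#include <array>
#include <cassert>

std::array<int, 4> nchwToNhwc(const std::array<int, 4> &d) {
    return {d[0], d[2], d[3], d[1]};
}

int main() {
    assert((nchwToNhwc({1, 3, 224, 224}) == std::array<int, 4>{1, 224, 224, 3}));
    return 0;
}
```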
@@ -83,6 +83,6 @@ class ConvTransCnnl : public BangKernelWithoutConfig {
    }
};

REGISTER_KERNEL(Device::BANG, OpType::ConvTrans, DataType::Float32,
REGISTER_KERNEL(Device::BANG, OpType::ConvTranspose, DataType::Float32,
                ConvTransCnnl, "ConvTrans_cnnl_BANG_Float32");
}; // namespace infini
@ -1,46 +0,0 @@
|
|||
#include "bang/bang_kernel_without_config.h"
|
||||
#include "bang/bang_runtime.h"
|
||||
#include "operators/unary.h"
|
||||
|
||||
namespace infini {
|
||||
class CopyCnnl : public BangKernelWithoutConfig {
|
||||
void compute(const Operator &_op,
|
||||
const RuntimeObj *_context) const override {
|
||||
auto op = as<UnaryObj>(_op);
|
||||
auto context = dynamic_cast<const BangRuntimeObj *>(_context);
|
||||
|
||||
void *const aData = (op->getInputs(0)->getRawDataPtr<void *>());
|
||||
void *const cData = (op->getOutput()->getRawDataPtr<void *>());
|
||||
|
||||
cnnlTensorDescriptor_t aDesc, cDesc;
|
||||
auto dim = op->getInputs(0)->getDims();
|
||||
if (dim.size() != 4)
|
||||
IT_TODO_HALT();
|
||||
|
||||
int dim_array[4] = {dim[0], dim[1], dim[2], dim[3]};
|
||||
// get inputs
|
||||
checkCnnlError(cnnlCreateTensorDescriptor(&aDesc));
|
||||
checkCnnlError(cnnlSetTensorDescriptor(aDesc, CNNL_LAYOUT_NCHW,
|
||||
CNNL_DTYPE_FLOAT, 4, dim_array));
|
||||
|
||||
// get outputs
|
||||
checkCnnlError(cnnlCreateTensorDescriptor(&cDesc));
|
||||
checkCnnlError(cnnlSetTensorDescriptor(cDesc, CNNL_LAYOUT_NCHW,
|
||||
CNNL_DTYPE_FLOAT, 4, dim_array));
|
||||
|
||||
cnnlStatus_t stat =
|
||||
cnnlCopy(context->cnnlHandle(), aDesc, aData, cDesc, cData);
|
||||
if (stat != CNNL_STATUS_SUCCESS)
|
||||
return;
|
||||
|
||||
        // Destroying descriptors in BANG does not require a sync, but cnnl
        // does not state whether a sync is required before destruction.
|
||||
checkCnnlError(cnnlDestroyTensorDescriptor(aDesc));
|
||||
checkCnnlError(cnnlDestroyTensorDescriptor(cDesc));
|
||||
}
|
||||
};
|
||||
|
||||
REGISTER_KERNEL(Device::BANG, OpType::Copy, DataType::Float32, CopyCnnl,
|
||||
"Copy_cnnl_BANG_Float32");
|
||||
|
||||
}; // namespace infini
|
|
@ -18,24 +18,26 @@ class ElementWiseCnnl : public BangKernelWithoutConfig {
|
|||
void *const cData = (op->getOutput()->getRawDataPtr<void *>());
|
||||
|
||||
cnnlTensorDescriptor_t aDesc, bDesc, cDesc;
|
||||
auto dim = op->getInputs(0)->getDims();
|
||||
if (dim.size() != 4)
|
||||
auto a_dim = op->getInputs(0)->getDims();
|
||||
auto b_dim = op->getInputs(1)->getDims();
|
||||
auto c_dim = op->getOutput()->getDims();
|
||||
|
||||
if (a_dim.size() > 4 || b_dim.size() > 4 || c_dim.size() > 4)
|
||||
IT_TODO_HALT();
|
||||
|
||||
int dim_array[4] = {dim[0], dim[1], dim[2], dim[3]};
|
||||
// get inputs
|
||||
checkCnnlError(cnnlCreateTensorDescriptor(&aDesc));
|
||||
checkCnnlError(cnnlSetTensorDescriptor(aDesc, CNNL_LAYOUT_NCHW,
|
||||
CNNL_DTYPE_FLOAT, 4, dim_array));
|
||||
checkCnnlError(cnnlSetTensorDescriptor(
|
||||
aDesc, CNNL_LAYOUT_NCHW, CNNL_DTYPE_FLOAT, 4, a_dim.data()));
|
||||
|
||||
checkCnnlError(cnnlCreateTensorDescriptor(&bDesc));
|
||||
checkCnnlError(cnnlSetTensorDescriptor(bDesc, CNNL_LAYOUT_NCHW,
|
||||
CNNL_DTYPE_FLOAT, 4, dim_array));
|
||||
checkCnnlError(cnnlSetTensorDescriptor(
|
||||
bDesc, CNNL_LAYOUT_NCHW, CNNL_DTYPE_FLOAT, 4, b_dim.data()));
|
||||
|
||||
// get outputs
|
||||
checkCnnlError(cnnlCreateTensorDescriptor(&cDesc));
|
||||
checkCnnlError(cnnlSetTensorDescriptor(cDesc, CNNL_LAYOUT_NCHW,
|
||||
CNNL_DTYPE_FLOAT, 4, dim_array));
|
||||
checkCnnlError(cnnlSetTensorDescriptor(
|
||||
cDesc, CNNL_LAYOUT_NCHW, CNNL_DTYPE_FLOAT, 4, c_dim.data()));
|
||||
|
||||
// get op descriptor
|
||||
cnnlOpTensorDescriptor_t opDesc;
|
||||
|
@ -591,9 +593,6 @@ class MulCnnl : public ElementWiseCnnl {
|
|||
class EqualCnnl : public LogicOpCnnl {
|
||||
cnnlLogicOp_t getOpType() const override { return CNNL_LOGIC_OP_EQ; }
|
||||
};
|
||||
class NotEqualCnnl : public LogicOpCnnl {
|
||||
cnnlLogicOp_t getOpType() const override { return CNNL_LOGIC_OP_NE; }
|
||||
};
|
||||
class GreaterThanCnnl : public LogicOpCnnl {
|
||||
cnnlLogicOp_t getOpType() const override { return CNNL_LOGIC_OP_GT; }
|
||||
};
|
||||
|
@ -649,13 +648,13 @@ REGISTER_KERNEL(Device::BANG, OpType::Mul, DataType::Float32, MulCnnl,
|
|||
|
||||
REGISTER_KERNEL(Device::BANG, OpType::Div, DataType::Float32, DivCnnl,
|
||||
"Div_cnnl_Float32");
|
||||
REGISTER_KERNEL(Device::BANG, OpType::Maximum, DataType::Float32, MaximumCnnl,
|
||||
REGISTER_KERNEL(Device::BANG, OpType::Max, DataType::Float32, MaximumCnnl,
|
||||
"Maximum_cnnl_BANG_Float32");
|
||||
REGISTER_KERNEL(Device::BANG, OpType::Minimum, DataType::Float32, MinimumCnnl,
|
||||
REGISTER_KERNEL(Device::BANG, OpType::Min, DataType::Float32, MinimumCnnl,
|
||||
"Minimum_cnnl_BANG_Float32");
|
||||
REGISTER_KERNEL(Device::BANG, OpType::MSELoss, DataType::Float32, MSELossCnnl,
|
||||
"MSELoss_cnnl_BANG_Float32");
|
||||
REGISTER_KERNEL(Device::BANG, OpType::Power, DataType::Float32, PowerCnnl,
|
||||
REGISTER_KERNEL(Device::BANG, OpType::Pow, DataType::Float32, PowerCnnl,
|
||||
"Power_cnnl_BANG_Float32");
|
||||
REGISTER_KERNEL(Device::BANG, OpType::FloorDiv, DataType::Float32, FloorDivCnnl,
|
||||
"FloorDiv_cnnl_BANG_Float32");
|
||||
|
@ -665,15 +664,13 @@ REGISTER_KERNEL(Device::BANG, OpType::SquaredDifference, DataType::Float32,
|
|||
SquaredDifferenceCnnl, "SquaredDifference_cnnl_BANG_Float32");
|
||||
REGISTER_KERNEL(Device::BANG, OpType::Equal, DataType::Float32, EqualCnnl,
|
||||
"Equal_cnnl_BANG_Float32");
|
||||
REGISTER_KERNEL(Device::BANG, OpType::NotEqual, DataType::Float32, NotEqualCnnl,
|
||||
"NotEqual_cnnl_BANG_Float32");
|
||||
REGISTER_KERNEL(Device::BANG, OpType::GreaterThan, DataType::Float32,
|
||||
REGISTER_KERNEL(Device::BANG, OpType::Greater, DataType::Float32,
|
||||
GreaterThanCnnl, "GreaterThan_cnnl_BANG_Float32");
|
||||
REGISTER_KERNEL(Device::BANG, OpType::GreaterEqual, DataType::Float32,
|
||||
REGISTER_KERNEL(Device::BANG, OpType::GreaterOrEqual, DataType::Float32,
|
||||
GreaterEqualCnnl, "GreaterEqual_cnnl_BANG_Float32");
|
||||
REGISTER_KERNEL(Device::BANG, OpType::LessThan, DataType::Float32, LessThanCnnl,
|
||||
REGISTER_KERNEL(Device::BANG, OpType::Less, DataType::Float32, LessThanCnnl,
|
||||
"LessThan_cnnl_BANG_Float32");
|
||||
REGISTER_KERNEL(Device::BANG, OpType::LessEqual, DataType::Float32,
|
||||
REGISTER_KERNEL(Device::BANG, OpType::LessOrEqual, DataType::Float32,
|
||||
LessEqualCnnl, "LessEqual_cnnl_BANG_Float32");
|
||||
REGISTER_KERNEL(Device::BANG, OpType::And, DataType::Float32, AndCnnl,
|
||||
"And_cnnl_BANG_Float32");
|
||||
|
@ -683,13 +680,13 @@ REGISTER_KERNEL(Device::BANG, OpType::Xor, DataType::Float32, XorCnnl,
|
|||
"Xor_cnnl_BANG_Float32");
|
||||
REGISTER_KERNEL(Device::BANG, OpType::Not, DataType::Float32, NotCnnl,
|
||||
"Not_cnnl_BANG_Float32");
|
||||
REGISTER_KERNEL(Device::BANG, OpType::BitAnd, DataType::Float32, BitAndCnnl,
|
||||
REGISTER_KERNEL(Device::BANG, OpType::BitwiseAnd, DataType::Float32, BitAndCnnl,
|
||||
"BitAnd_cnnl_BANG_Float32");
|
||||
REGISTER_KERNEL(Device::BANG, OpType::BitOr, DataType::Float32, BitOrCnnl,
|
||||
REGISTER_KERNEL(Device::BANG, OpType::BitwiseOr, DataType::Float32, BitOrCnnl,
|
||||
"BitOr_cnnl_BANG_Float32");
|
||||
REGISTER_KERNEL(Device::BANG, OpType::BitXor, DataType::Float32, BitXorCnnl,
|
||||
REGISTER_KERNEL(Device::BANG, OpType::BitwiseXor, DataType::Float32, BitXorCnnl,
|
||||
"BitXor_cnnl_BANG_Float32");
|
||||
REGISTER_KERNEL(Device::BANG, OpType::BitNot, DataType::Float32, BitNotCnnl,
|
||||
REGISTER_KERNEL(Device::BANG, OpType::BitwiseNot, DataType::Float32, BitNotCnnl,
|
||||
"BitNot_cnnl_BANG_Float32");
|
||||
// REGISTER_KERNEL(Device::BANG, OpType::BitLeftShift, DataType::Float32,
|
||||
// BitLeftShiftCnnl,
|
||||
|
|
|
@ -18,45 +18,53 @@ class MatmulCnnl : public BangKernelWithoutConfig {
|
|||
auto dimInputs0 = op->getInputs(0)->getDims();
|
||||
auto dimInputs1 = op->getInputs(1)->getDims();
|
||||
auto dimOutput = op->getOutput()->getDims();
|
||||
int input0_batch_size = 1;
|
||||
int input1_batch_size = 1;
|
||||
int output_batch_size = 1;
|
||||
for (size_t i = 0; i < dimInputs0.size() - 2; ++i) {
|
||||
input0_batch_size *= dimInputs0[i];
|
||||
input1_batch_size *= dimInputs1[i];
|
||||
output_batch_size *= dimOutput[i];
|
||||
}
|
||||
|
||||
bool transA = op->getTransA();
|
||||
bool transB = op->getTransB();
|
||||
|
||||
int inputs0Array[3] = {input0_batch_size,
|
||||
dimInputs0[dimInputs0.size() - 2],
|
||||
dimInputs0[dimInputs0.size() - 1]};
|
||||
int inputs1Array[3] = {input1_batch_size,
|
||||
dimInputs1[dimInputs1.size() - 2],
|
||||
dimInputs1[dimInputs1.size() - 1]};
|
||||
int outputArray[3] = {output_batch_size,
|
||||
dimOutput[dimOutput.size() - 2],
|
||||
dimOutput[dimOutput.size() - 1]};
|
||||
int32_t transA = op->getTransA();
|
||||
int32_t transB = op->getTransB();
|
||||
|
||||
// get inputs
|
||||
checkCnnlError(cnnlCreateTensorDescriptor(&aDesc));
|
||||
checkCnnlError(cnnlSetTensorDescriptor(
|
||||
aDesc, CNNL_LAYOUT_ARRAY, CNNL_DTYPE_FLOAT, 3, inputs0Array));
|
||||
checkCnnlError(
|
||||
cnnlSetTensorDescriptor(aDesc, CNNL_LAYOUT_ARRAY, CNNL_DTYPE_FLOAT,
|
||||
dimInputs0.size(), dimInputs0.data()));
|
||||
|
||||
checkCnnlError(cnnlCreateTensorDescriptor(&bDesc));
|
||||
checkCnnlError(cnnlSetTensorDescriptor(
|
||||
bDesc, CNNL_LAYOUT_ARRAY, CNNL_DTYPE_FLOAT, 3, inputs1Array));
|
||||
checkCnnlError(
|
||||
cnnlSetTensorDescriptor(bDesc, CNNL_LAYOUT_ARRAY, CNNL_DTYPE_FLOAT,
|
||||
dimInputs1.size(), dimInputs1.data()));
|
||||
|
||||
// get outputs
|
||||
checkCnnlError(cnnlCreateTensorDescriptor(&cDesc));
|
||||
checkCnnlError(cnnlSetTensorDescriptor(
|
||||
cDesc, CNNL_LAYOUT_ARRAY, CNNL_DTYPE_FLOAT, 3, outputArray));
|
||||
checkCnnlError(
|
||||
cnnlSetTensorDescriptor(cDesc, CNNL_LAYOUT_ARRAY, CNNL_DTYPE_FLOAT,
|
||||
dimOutput.size(), dimOutput.data()));
|
||||
|
||||
cnnlStatus_t stat =
|
||||
cnnlBatchMatMul(context->cnnlHandle(), transA, transB, aDesc, aData,
|
||||
bDesc, bData, cDesc, cData);
|
||||
cnnlMatMulDescriptor_t bmm_desc;
|
||||
cnnlMatMulDescCreate(&bmm_desc);
|
||||
cnnlSetMatMulDescAttr(bmm_desc, CNNL_MATMUL_DESC_TRANSA, &transA,
|
||||
sizeof(int32_t));
|
||||
cnnlSetMatMulDescAttr(bmm_desc, CNNL_MATMUL_DESC_TRANSB, &transB,
|
||||
sizeof(int32_t));
|
||||
|
||||
cnnlMatMulAlgo_t bmm_algo;
|
||||
cnnlMatMulAlgoCreate(&bmm_algo);
|
||||
|
||||
float alpha = 1.0;
|
||||
float beta = 0.0;
|
||||
int count = 0;
|
||||
|
||||
cnnlMatMulHeuristicResult_t desc;
|
||||
cnnlCreateMatMulHeuristicResult(&desc);
|
||||
|
||||
cnnlGetBatchMatMulAlgoHeuristic(context->cnnlHandle(), bmm_desc, aDesc,
|
||||
bDesc, cDesc, NULL, 1, &desc, &count);
|
||||
size_t wsSize;
|
||||
cnnlGetBatchMatMulHeuristicResult(desc, bmm_algo, &wsSize);
|
||||
BangPtr wsData = context->getWorkspace(wsSize);
|
||||
|
||||
cnnlStatus_t stat = cnnlBatchMatMulBCast_v2(
|
||||
context->cnnlHandle(), bmm_desc, bmm_algo, &alpha, aDesc, aData,
|
||||
bDesc, bData, &beta, cDesc, cData, wsData, wsSize);
|
||||
if (stat != CNNL_STATUS_SUCCESS)
|
||||
return;
|
||||
|
||||
|
@ -65,9 +73,12 @@ class MatmulCnnl : public BangKernelWithoutConfig {
|
|||
checkCnnlError(cnnlDestroyTensorDescriptor(aDesc));
|
||||
checkCnnlError(cnnlDestroyTensorDescriptor(bDesc));
|
||||
checkCnnlError(cnnlDestroyTensorDescriptor(cDesc));
|
||||
checkCnnlError(cnnlMatMulDescDestroy(bmm_desc));
|
||||
checkCnnlError(cnnlMatMulAlgoDestroy(bmm_algo));
|
||||
checkCnnlError(cnnlDestroyMatMulHeuristicResult(desc));
|
||||
}
|
||||
};
|
||||
|
||||
REGISTER_KERNEL(Device::BANG, OpType::Matmul, DataType::Float32, MatmulCnnl,
|
||||
REGISTER_KERNEL(Device::BANG, OpType::MatMul, DataType::Float32, MatmulCnnl,
|
||||
"Matmul_cnnl_BANG_Float32");
|
||||
}; // namespace infini
|
||||
|
|
|
@@ -68,6 +68,6 @@ class avgPoolCnnl : public PoolingCnnl {

REGISTER_KERNEL(Device::BANG, OpType::MaxPool, DataType::Float32, maxPoolCnnl,
                "MaxPool_cnnl_BANG_Float32");
REGISTER_KERNEL(Device::BANG, OpType::AvgPool, DataType::Float32, avgPoolCnnl,
                "AvgPool_cnnl_BANG_Float32");
REGISTER_KERNEL(Device::BANG, OpType::AveragePool, DataType::Float32,
                avgPoolCnnl, "AvgPool_cnnl_BANG_Float32");
}; // namespace infini
@ -4,9 +4,8 @@
|
|||
|
||||
namespace infini {
|
||||
class CopyBang : public BangKernelWithoutConfig {
|
||||
void compute(const Operator &_op,
|
||||
void compute(const Operator &op,
|
||||
const RuntimeObj *_context) const override {
|
||||
auto op = as<ReshapeObj>(_op);
|
||||
auto context = dynamic_cast<const BangRuntimeObj *>(_context);
|
||||
auto inData = op->getInputs(0)->getRawDataPtr<void *>();
|
||||
auto outData = op->getOutputs()[0]->getRawDataPtr<void *>();
|
||||
|
|
|
@ -162,23 +162,23 @@ REGISTER_KERNEL(Device::BANG, OpType::Cos, DataType::Float32, CosCnnl,
|
|||
"Cos_cnnl_BANG_Float32");
|
||||
REGISTER_KERNEL(Device::BANG, OpType::Tan, DataType::Float32, TanCnnl,
|
||||
"Tan_cnnl_BANG_Float32");
|
||||
REGISTER_KERNEL(Device::BANG, OpType::ASin, DataType::Float32, ASinCnnl,
|
||||
REGISTER_KERNEL(Device::BANG, OpType::Asin, DataType::Float32, ASinCnnl,
|
||||
"ASin_cnnl_BANG_Float32");
|
||||
REGISTER_KERNEL(Device::BANG, OpType::ACos, DataType::Float32, ACosCnnl,
|
||||
REGISTER_KERNEL(Device::BANG, OpType::Acos, DataType::Float32, ACosCnnl,
|
||||
"ACos_cnnl_BANG_Float32");
|
||||
REGISTER_KERNEL(Device::BANG, OpType::ATan, DataType::Float32, ATanCnnl,
|
||||
REGISTER_KERNEL(Device::BANG, OpType::Atan, DataType::Float32, ATanCnnl,
|
||||
"ATan_cnnl_BANG_Float32");
|
||||
REGISTER_KERNEL(Device::BANG, OpType::SinH, DataType::Float32, SinHCnnl,
|
||||
REGISTER_KERNEL(Device::BANG, OpType::Sinh, DataType::Float32, SinHCnnl,
|
||||
"SinH_cnnl_BANG_Float32");
|
||||
REGISTER_KERNEL(Device::BANG, OpType::CosH, DataType::Float32, CosHCnnl,
|
||||
REGISTER_KERNEL(Device::BANG, OpType::Cosh, DataType::Float32, CosHCnnl,
|
||||
"CosH_cnnl_BANG_Float32");
|
||||
REGISTER_KERNEL(Device::BANG, OpType::TanH, DataType::Float32, TanHCnnl,
|
||||
REGISTER_KERNEL(Device::BANG, OpType::Tanh, DataType::Float32, TanHCnnl,
|
||||
"TanH_cnnl_BANG_Float32");
|
||||
REGISTER_KERNEL(Device::BANG, OpType::ASinH, DataType::Float32, ASinHCnnl,
|
||||
REGISTER_KERNEL(Device::BANG, OpType::Asinh, DataType::Float32, ASinHCnnl,
|
||||
"ASinH_cnnl_BANG_Float32");
|
||||
REGISTER_KERNEL(Device::BANG, OpType::ACosH, DataType::Float32, ACosHCnnl,
|
||||
REGISTER_KERNEL(Device::BANG, OpType::Acosh, DataType::Float32, ACosHCnnl,
|
||||
"ACosH_cnnl_BANG_Float32");
|
||||
REGISTER_KERNEL(Device::BANG, OpType::ATanH, DataType::Float32, ATanHCnnl,
|
||||
REGISTER_KERNEL(Device::BANG, OpType::Atanh, DataType::Float32, ATanHCnnl,
|
||||
"ATanH_cnnl_BANG_Float32");
|
||||
|
||||
}; // namespace infini
|
||||
|
|
|
@ -13,7 +13,6 @@ template <typename T> class NaiveMatmul : public CpuKernelWithoutConfig {
|
|||
T *C = op->getOutput()->getRawDataPtr<T *>();
|
||||
IT_ASSERT(op->getTransA() == false && op->getTransB() == false);
|
||||
IT_ASSERT(op->getAct() == ActType::None);
|
||||
IT_ASSERT(op->getB() == 1);
|
||||
const int M = op->getM(), N = op->getN(), K = op->getK();
|
||||
for (int i = 0; i < M; i++) {
|
||||
for (int j = 0; j < N; j++) {
|
||||
|
@ -26,9 +25,9 @@ template <typename T> class NaiveMatmul : public CpuKernelWithoutConfig {
|
|||
}
|
||||
};
|
||||
|
||||
REGISTER_KERNEL(Device::CPU, OpType::Matmul, DataType::UInt32,
|
||||
REGISTER_KERNEL(Device::CPU, OpType::MatMul, DataType::UInt32,
|
||||
NaiveMatmul<uint32_t>, "MatmulNaive_CPU_uint32");
|
||||
REGISTER_KERNEL(Device::CPU, OpType::Matmul, DataType::Float32,
|
||||
REGISTER_KERNEL(Device::CPU, OpType::MatMul, DataType::Float32,
|
||||
NaiveMatmul<float>, "MatmulNaive_CPU_float32");
|
||||
|
||||
} // namespace infini
|
||||
} // namespace infini
|
||||
|
|
|
@ -76,6 +76,6 @@ REGISTER_KERNEL(Device::CPU, OpType::MaxPool, DataType::UInt32,
|
|||
NaiveMaxPool<uint32_t>, "maxPoolNaive_CPU_uint32");
|
||||
REGISTER_KERNEL(Device::CPU, OpType::MaxPool, DataType::Float32,
|
||||
NaiveMaxPool<float>, "maxPoolNaive_CPU_float32");
|
||||
REGISTER_KERNEL(Device::CPU, OpType::AvgPool, DataType::Float32,
|
||||
REGISTER_KERNEL(Device::CPU, OpType::AveragePool, DataType::Float32,
|
||||
NaiveAvgPool<float>, "AvgPoolNaive_CPU_float32");
|
||||
} // namespace infini
|
||||
} // namespace infini
|
||||
|
|
|
@ -56,6 +56,10 @@ template <typename T> class NaiveAbs : public NativeUnary<T> {
|
|||
T doCompute(T val) const override { return val < 0 ? -val : val; }
|
||||
};
|
||||
|
||||
template <typename T> class NaiveSqrt : public NativeUnary<T> {
|
||||
T doCompute(T val) const override { return std::sqrt(val); }
|
||||
};
|
||||
|
||||
template <typename T> class Clip : public CpuKernelWithoutConfig {
|
||||
void compute(const Operator &_op,
|
||||
const RuntimeObj *context) const override {
|
||||
|
@ -91,6 +95,8 @@ REGISTER_KERNEL(Device::CPU, OpType::Abs, DataType::UInt32, NaiveAbs<uint32_t>,
|
|||
"absNaive_CPU_uint32");
|
||||
REGISTER_KERNEL(Device::CPU, OpType::Abs, DataType::Float32, NaiveAbs<float>,
|
||||
"absNaive_CPU_float32");
|
||||
REGISTER_KERNEL(Device::CPU, OpType::Sqrt, DataType::Float32, NaiveSqrt<float>,
|
||||
"sqrtNaive_CPU_float32");
|
||||
REGISTER_KERNEL(Device::CPU, OpType::Softmax, DataType::UInt32,
|
||||
NaiveSoftmax<uint32_t>, "softmaxNaive_CPU_uint32");
|
||||
REGISTER_KERNEL(Device::CPU, OpType::Softmax, DataType::Float32,
|
||||
|
|
|
@@ -59,6 +59,6 @@ class BatchNormCudnn : public CudaKernelWithoutConfig {
    }
};

REGISTER_KERNEL(Device::CUDA, OpType::BatchNorm, DataType::Float32,
REGISTER_KERNEL(Device::CUDA, OpType::BatchNormalization, DataType::Float32,
                BatchNormCudnn, "BatchNorm_cuDNN_CUDA_Float32");
} // namespace infini
@ -0,0 +1,261 @@
|
|||
#include "core/kernel.h"
|
||||
#include "cuda/cuda_runtime.h"
|
||||
#include "operators/conv.h"
|
||||
#include <chrono>
|
||||
#include <functional>
|
||||
#include <limits>
|
||||
#include <tuple>
|
||||
|
||||
namespace infini {
|
||||
|
||||
struct ConvCuDnnPerfRecordObj : public PerfRecordObj {
|
||||
int algo = 0; // cudnnConvolutionFwdAlgo_t
|
||||
int mode = 1;
|
||||
size_t workspaceSize = 100000;
|
||||
bool fuseAct = false;
|
||||
void to_json(json &j) override {
|
||||
j["type"] = 1;
|
||||
j["data"] = std::make_tuple(algo, mode, fuseAct, time, workspaceSize);
|
||||
}
|
||||
static PerfRecord from_json(const json &j) {
|
||||
ConvCuDnnPerfRecordObj tmp;
|
||||
auto [Algo, Mode, FuseAct, Time, WorkspaceSize] =
|
||||
j["data"].get<tuple<int, int, bool, double, size_t>>();
|
||||
tmp.algo = Algo;
|
||||
tmp.mode = Mode;
|
||||
tmp.fuseAct = FuseAct;
|
||||
tmp.time = Time;
|
||||
tmp.workspaceSize = WorkspaceSize;
|
||||
return make_ref<ConvCuDnnPerfRecordObj>(tmp);
|
||||
}
|
||||
};
|
||||
|
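The perf record above serializes its fields with `std::make_tuple`, which nlohmann-style JSON stores as a plain array. A small standalone round-trip sketch; the concrete `json` alias used by `core/kernel.h` is an assumption here:

```cpp
// Editor's sketch of the to_json / from_json round trip, assuming nlohmann::json.
#include <cstddef>
#include <nlohmann/json.hpp>
#include <tuple>
using json = nlohmann::json;

int main() {
    json j;
    j["type"] = 1;
    j["data"] = std::make_tuple(0, 1, false, 0.25, size_t(100000));
    // "data" is stored as an array: [0, 1, false, 0.25, 100000]
    auto [algo, mode, fuseAct, time, ws] =
        j["data"].get<std::tuple<int, int, bool, double, size_t>>();
    return (algo == 0 && ws == 100000) ? 0 : 1;
}
```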
||||
using ConvCuDnnPerfRecord = Ref<ConvCuDnnPerfRecordObj>;
|
||||
|
||||
class convCudnnFP16 : public Kernel {
|
||||
|
||||
static constexpr int N_ALGO = 8;
|
||||
static constexpr int N_MODE = 2;
|
||||
static constexpr cudnnConvolutionFwdAlgo_t ALGOS[8] = {
|
||||
CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM,
|
||||
CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_PRECOMP_GEMM,
|
||||
CUDNN_CONVOLUTION_FWD_ALGO_GEMM,
|
||||
CUDNN_CONVOLUTION_FWD_ALGO_DIRECT,
|
||||
CUDNN_CONVOLUTION_FWD_ALGO_FFT,
|
||||
CUDNN_CONVOLUTION_FWD_ALGO_FFT_TILING,
|
||||
CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD,
|
||||
CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD_NONFUSED};
|
||||
|
||||
static constexpr cudnnConvolutionMode_t MODES[2] = {
|
||||
CUDNN_CONVOLUTION, CUDNN_CROSS_CORRELATION};
|
||||
|
||||
std::tuple<void *, void *, void *, cudnnTensorDescriptor_t,
|
||||
cudnnFilterDescriptor_t, cudnnTensorDescriptor_t,
|
||||
cudnnConvolutionDescriptor_t, cudnnActivationDescriptor_t,
|
||||
cudnnTensorDescriptor_t>
|
||||
createCuDNNDescriptor(const Ref<ConvObj> &op,
|
||||
const ConvCuDnnPerfRecord &record) const {
|
||||
void *const inData = (op->getInputs(0)->getRawDataPtr<void *>());
|
||||
void *const knData = (op->getInputs(1)->getRawDataPtr<void *>());
|
||||
// Bias is not supported yet
|
||||
if (op->getInputs().size() > 2) {
|
||||
IT_TODO_HALT();
|
||||
}
|
||||
// void *const biasData = (op->getInputs(2)->getRawDataPtr<void *>());
|
||||
void *const outData = (op->getOutput()->getRawDataPtr<void *>());
|
||||
|
||||
const auto [n, c, h, w, f, r, s] = op->getNCHWFRS();
|
||||
const int cpg = op->getChannelPerGroup();
|
||||
const int g = c / cpg;
|
||||
const auto [ph, pw, sh, sw, dh, dw] = op->getPadStrideDilation();
|
||||
|
||||
int channelsPerGrp = cpg, channels = c;
|
||||
|
||||
// get inputs
|
||||
cudnnTensorDescriptor_t inDesc;
|
||||
checkCudnnError(cudnnCreateTensorDescriptor(&inDesc));
|
||||
checkCudnnError(cudnnSetTensor4dDescriptor(inDesc, CUDNN_TENSOR_NCHW,
|
||||
CUDNN_DATA_HALF, n, channels,
|
||||
h, w)); /*fp16 type*/
|
||||
|
||||
// get kernels
|
||||
cudnnFilterDescriptor_t knDesc;
|
||||
checkCudnnError(cudnnCreateFilterDescriptor(&knDesc));
|
||||
checkCudnnError(cudnnSetFilter4dDescriptor(
|
||||
knDesc, CUDNN_DATA_HALF, /*fp16 type*/
|
||||
CUDNN_TENSOR_NCHW, f, channelsPerGrp, r, s));
|
||||
// get bias
|
||||
cudnnTensorDescriptor_t biasDesc;
|
||||
checkCudnnError(cudnnCreateTensorDescriptor(&biasDesc));
|
||||
checkCudnnError(cudnnSetTensor4dDescriptor(biasDesc, CUDNN_TENSOR_NCHW,
|
||||
CUDNN_DATA_HALF, 1, f, 1,
|
||||
1)); /*fp16 type*/
|
||||
|
||||
// get convolution descriptor
|
||||
cudnnConvolutionDescriptor_t convDesc;
|
||||
checkCudnnError(cudnnCreateConvolutionDescriptor(&convDesc));
|
||||
// TODO: CUDNN_CONVOLUTION is a tunable argument
|
||||
checkCudnnError(cudnnSetConvolution2dDescriptor(
|
||||
convDesc, ph, pw, sh, sw, dh, dw, MODES[record->mode],
|
||||
CUDNN_DATA_HALF)); /*fp16 type*/
|
||||
if (g > 1) {
|
||||
checkCudnnError(cudnnSetConvolutionGroupCount(convDesc, g));
|
||||
}
|
||||
|
||||
// get activation descriptor
|
||||
cudnnActivationDescriptor_t actDesc;
|
||||
checkCudnnError(cudnnCreateActivationDescriptor(&actDesc));
|
||||
        // NOT_PROPAGATE_NAN is required by
        // cudnnConvolutionBiasActivationForward
|
||||
switch (op->getAct()) {
|
||||
case ActType::Relu:
|
||||
checkCudnnError(cudnnSetActivationDescriptor(
|
||||
actDesc, CUDNN_ACTIVATION_RELU, CUDNN_NOT_PROPAGATE_NAN, 0));
|
||||
break;
|
||||
case ActType::Sigmoid:
|
||||
checkCudnnError(cudnnSetActivationDescriptor(
|
||||
actDesc, CUDNN_ACTIVATION_SIGMOID, CUDNN_NOT_PROPAGATE_NAN, 0));
|
||||
break;
|
||||
case ActType::None:
|
||||
checkCudnnError(
|
||||
cudnnSetActivationDescriptor(actDesc, CUDNN_ACTIVATION_IDENTITY,
|
||||
CUDNN_NOT_PROPAGATE_NAN, 0));
|
||||
break;
|
||||
default:
|
||||
assert(false);
|
||||
}
|
||||
|
||||
// get output descriptor
|
||||
int outn, outc, outh, outw;
|
||||
checkCudnnError(cudnnGetConvolution2dForwardOutputDim(
|
||||
convDesc, inDesc, knDesc, &outn, &outc, &outh, &outw));
|
||||
cudnnTensorDescriptor_t outDesc;
|
||||
checkCudnnError(cudnnCreateTensorDescriptor(&outDesc));
|
||||
checkCudnnError(cudnnSetTensor4dDescriptor(outDesc, CUDNN_TENSOR_NCHW,
|
||||
CUDNN_DATA_HALF, outn, outc,
|
||||
outh, outw));
|
||||
IT_ASSERT((vector{outn, outc, outh, outw}) ==
|
||||
op->getOutput()->getDims(),
|
||||
"cuDNN output shape mismatches with OP output shape");
|
||||
|
||||
return tuple(inData, knData, outData, inDesc, knDesc, biasDesc,
|
||||
convDesc, actDesc, outDesc);
|
||||
}
|
||||
|
||||
bool cuDNNUnfused(const Ref<ConvObj> &op, const ConvCuDnnPerfRecord &record,
|
||||
const CudaRuntimeObj *context) const {
|
||||
cudnnStatus_t stat;
|
||||
|
||||
const auto &[inData, knData, outData, inDesc, knDesc, biasDesc,
|
||||
convDesc, actDesc, outDesc] =
|
||||
createCuDNNDescriptor(op, record);
|
||||
size_t wsSize = record->workspaceSize;
|
||||
CudaPtr wsData = context->getWorkspace(wsSize);
|
||||
float alpha = 1.f, beta = 0.f;
|
||||
|
||||
stat = cudnnConvolutionForward(context->cudnnHandle(), &alpha, inDesc,
|
||||
inData, knDesc, knData, convDesc,
|
||||
ALGOS[record->algo], wsData, wsSize,
|
||||
&beta, outDesc, outData);
|
||||
if (stat != CUDNN_STATUS_SUCCESS) {
|
||||
return false;
|
||||
}
|
||||
checkCudnnError(cudnnDestroyTensorDescriptor(outDesc));
|
||||
checkCudnnError(cudnnDestroyActivationDescriptor(actDesc));
|
||||
checkCudnnError(cudnnDestroyConvolutionDescriptor(convDesc));
|
||||
checkCudnnError(cudnnDestroyTensorDescriptor(biasDesc));
|
||||
checkCudnnError(cudnnDestroyFilterDescriptor(knDesc));
|
||||
checkCudnnError(cudnnDestroyTensorDescriptor(inDesc));
|
||||
return true;
|
||||
}
|
||||
|
||||
void compute(const Operator &op, const RuntimeObj *context) const override {
|
||||
        auto record = make_ref<ConvCuDnnPerfRecordObj>(); // with parameters in
                                                          // default ctor
|
||||
compute(op, record, context);
|
||||
}
|
||||
|
||||
PerfRecord tune(const Operator &_op,
|
||||
const RuntimeObj *_context) const override {
|
||||
ConvCuDnnPerfRecordObj ret;
|
||||
ret.time = std::numeric_limits<double>::max();
|
||||
auto context = dynamic_cast<const CudaRuntimeObj *>(_context);
|
||||
auto op = as<ConvObj>(_op);
|
||||
// Both modes have the same performance. Only run cross-correlation.
|
||||
for (int mode = 1; mode < 2; mode++) {
|
||||
// Try every possible algorithm of convolution
|
||||
for (int algo = 0; algo < N_ALGO; algo++) {
|
||||
auto recordRef = make_ref<ConvCuDnnPerfRecordObj>();
|
||||
auto &record = *recordRef;
|
||||
record.mode = mode;
|
||||
record.algo = algo;
|
||||
cudnnStatus_t stat;
|
||||
const auto &[inData, knData, outData, inDesc, knDesc, biasDesc,
|
||||
convDesc, actDesc, outDesc] =
|
||||
createCuDNNDescriptor(op, recordRef);
|
||||
|
||||
// get workspace
|
||||
stat = cudnnGetConvolutionForwardWorkspaceSize(
|
||||
context->cudnnHandle(), inDesc, knDesc, convDesc, outDesc,
|
||||
ALGOS[record.algo], &record.workspaceSize);
|
||||
if (stat != CUDNN_STATUS_SUCCESS) {
|
||||
continue;
|
||||
}
|
||||
if (record.workspaceSize > context->getWorkspaceSize()) {
|
||||
continue;
|
||||
}
|
||||
CudaPtr wsData = context->getWorkspace(record.workspaceSize);
|
||||
float alpha = 1.f, beta = 0.f;
|
||||
|
||||
stat = cudnnConvolutionForward(
|
||||
context->cudnnHandle(), &alpha, inDesc, inData, knDesc,
|
||||
knData, convDesc, ALGOS[record.algo], wsData,
|
||||
record.workspaceSize, &beta, outDesc, outData);
|
||||
if (stat != CUDNN_STATUS_SUCCESS) {
|
||||
continue;
|
||||
}
|
||||
record.time = timeit(
|
||||
[&]() {
|
||||
cudnnConvolutionForward(context->cudnnHandle(), &alpha,
|
||||
inDesc, inData, knDesc, knData,
|
||||
convDesc, ALGOS[record.algo],
|
||||
wsData, record.workspaceSize,
|
||||
&beta, outDesc, outData);
|
||||
},
|
||||
[&]() { context->sync(); });
|
||||
// printf("mode:%d algo:%d :%.8lf\n", mode, algo, record.time);
|
||||
|
||||
// Update the tune result
|
||||
if (ret.time > record.time) {
|
||||
ret = record;
|
||||
}
|
||||
checkCudnnError(cudnnDestroyTensorDescriptor(outDesc));
|
||||
checkCudnnError(cudnnDestroyActivationDescriptor(actDesc));
|
||||
checkCudnnError(cudnnDestroyConvolutionDescriptor(convDesc));
|
||||
checkCudnnError(cudnnDestroyTensorDescriptor(biasDesc));
|
||||
checkCudnnError(cudnnDestroyFilterDescriptor(knDesc));
|
||||
checkCudnnError(cudnnDestroyTensorDescriptor(inDesc));
|
||||
}
|
||||
}
|
||||
// printf("the best algo is %d, the best conv mode is %d\n", ret.algo,
|
||||
// ret.mode);
|
||||
IT_ASSERT(ret.time < std::numeric_limits<double>::max(), "No valid "
|
||||
"algorithm "
|
||||
"found");
|
||||
return make_ref<ConvCuDnnPerfRecordObj>(ret);
|
||||
}
|
||||
|
||||
void compute(const Operator &_op, const PerfRecord &_record,
|
||||
const RuntimeObj *_context) const override {
|
||||
auto op = as<ConvObj>(_op);
|
||||
auto record = as<ConvCuDnnPerfRecordObj>(_record);
|
||||
auto context = dynamic_cast<const CudaRuntimeObj *>(_context);
|
||||
bool success = cuDNNUnfused(op, record, context);
|
||||
IT_ASSERT(success);
|
||||
}
|
||||
};
|
||||
|
||||
REGISTER_KERNEL(Device::CUDA, OpType::Conv, DataType::Float16, convCudnnFP16,
|
||||
"Conv_cuDNN_CUDA_Float16");
|
||||
|
||||
} // namespace infini
|
|
@ -300,7 +300,7 @@ class convBackwardDataCudnn : public Kernel {
|
|||
}
|
||||
};
|
||||
|
||||
REGISTER_KERNEL(Device::CUDA, OpType::ConvTrans, DataType::Float32,
|
||||
REGISTER_KERNEL(Device::CUDA, OpType::ConvTranspose, DataType::Float32,
|
||||
convBackwardDataCudnn, "ConvTranposed_cuDNN_CUDA_Float32");
|
||||
REGISTER_KERNEL(Device::CUDA, OpType::ConvTransNHWC, DataType::Float32,
|
||||
convBackwardDataCudnn, "ConvTranposedNHWC_cuDNN_CUDA_Float32");
|
||||
|
|
|
@ -14,14 +14,25 @@ class ElementWiseCudnn : public CudaKernelWithoutConfig {
|
|||
auto op = as<ElementWiseObj>(_op);
|
||||
auto context = dynamic_cast<const CudaRuntimeObj *>(_context);
|
||||
|
||||
void *const aData = (op->getInputs(0)->getRawDataPtr<void *>());
|
||||
void *const bData = (op->getInputs(1)->getRawDataPtr<void *>());
|
||||
void *const cData = (op->getOutput()->getRawDataPtr<void *>());
|
||||
auto aTensor = op->getInputs(0);
|
||||
auto bTensor = op->getInputs(1);
|
||||
auto cTensor = op->getOutput();
|
||||
|
||||
// cudnnOpTensor only allows B to be broadcasted.
|
||||
if (aTensor->getDims() != cTensor->getDims()) {
|
||||
swap(aTensor, bTensor);
|
||||
}
|
||||
IT_ASSERT(aTensor->getDims() == cTensor->getDims(),
|
||||
"Shape does not match.");
|
||||
|
||||
void *const aData = (aTensor->getRawDataPtr<void *>());
|
||||
void *const bData = (bTensor->getRawDataPtr<void *>());
|
||||
void *const cData = (cTensor->getRawDataPtr<void *>());
|
||||
|
||||
cudnnTensorDescriptor_t aDesc, bDesc, cDesc;
|
||||
auto a_dim = op->getInputs(0)->getDims();
|
||||
auto b_dim = op->getInputs(1)->getDims();
|
||||
auto c_dim = op->getOutput()->getDims();
|
||||
auto a_dim = aTensor->getDims();
|
||||
auto b_dim = bTensor->getDims();
|
||||
auto c_dim = cTensor->getDims();
|
||||
|
||||
if (a_dim.size() > 4 || b_dim.size() > 4 || c_dim.size() > 4)
|
||||
IT_TODO_HALT();
|
||||
|
|
|
@ -14,9 +14,9 @@ class GatherCuda : public CudaKernelWithoutConfig {
|
|||
auto out = op->getOutput();
|
||||
metaData.indexValue = index->getRawDataPtr<int *>();
|
||||
metaData.axis = op->getAxis();
|
||||
metaData.inNDim = in->getDims().size();
|
||||
metaData.outNDim = out->getDims().size();
|
||||
metaData.idxNDim = index->getDims().size();
|
||||
metaData.inNDim = in->getRank();
|
||||
metaData.outNDim = out->getRank();
|
||||
metaData.idxNDim = index->getRank();
|
||||
for (int i = 0; i < metaData.outNDim; ++i)
|
||||
metaData.outDim[i] = out->getDims()[i];
|
||||
for (int i = 0; i < metaData.idxNDim; ++i) {
|
||||
|
|
|
@ -51,8 +51,8 @@ class matmulCublas : public Kernel {
|
|||
cublasStatus_t stat;
|
||||
if (b > 1) {
|
||||
// Support batch broadcast with zero stride
|
||||
int dimA = op->getInputs(0)->getDims().size();
|
||||
int dimB = op->getInputs(1)->getDims().size();
|
||||
int dimA = op->getInputs(0)->getRank();
|
||||
int dimB = op->getInputs(1)->getRank();
|
||||
long long strideA =
|
||||
(dimA == 2 ||
|
||||
(dimA == 3 && op->getInputs(0)->getDims()[0] == 1))
|
||||
|
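The hunk above keeps the zero-stride batch-broadcast rule while switching from `getDims().size()` to `getRank()`: an operand that is 2-D, or 3-D with a leading 1, is shared across every batch. A standalone sketch of that rule; the "one full matrix" stride in the non-broadcast case is an assumption about the surrounding code:

```cpp
#include <vector>

// Batch stride for a strided-batched GEMM operand: 0 means "reuse the same
// matrix for every batch" (broadcast), otherwise advance by one matrix.
long long batchStride(int rank, const std::vector<int> &dims, long long matrixSize) {
    bool broadcast = (rank == 2) || (rank == 3 && dims[0] == 1);
    return broadcast ? 0 : matrixSize;
}
```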
@@ -114,7 +114,7 @@ class matmulCublas : public Kernel {
    }
};

REGISTER_KERNEL(Device::CUDA, OpType::Matmul, DataType::Float32, matmulCublas,
REGISTER_KERNEL(Device::CUDA, OpType::MatMul, DataType::Float32, matmulCublas,
                "Matmul_cuBLAS_CUDA_Float32");

REGISTER_CONSTRUCTOR(2, MatmulCublasPerfRecordObj::from_json);
@ -7,7 +7,7 @@ class PadSliceCudaCompute {
|
|||
public:
|
||||
void do_compute(Tensor partTensor, Tensor wholeTensor, const Shape &begNos,
|
||||
bool isPad) const {
|
||||
int nDims = partTensor->getDims().size();
|
||||
int nDims = partTensor->getRank();
|
||||
IT_ASSERT(MAX_DIM >= nDims);
|
||||
TransMetaData metadata;
|
||||
for (int i = 0; i < nDims; i++) {
|
||||
|
|
|
@@ -68,6 +68,6 @@ class avgPoolCudnn : public poolingCudnn {

REGISTER_KERNEL(Device::CUDA, OpType::MaxPool, DataType::Float32, maxPoolCudnn,
                "MaxPool_cuDNN_CUDA_Float32");
REGISTER_KERNEL(Device::CUDA, OpType::AvgPool, DataType::Float32, avgPoolCudnn,
                "AvgPool_cuDNN_CUDA_Float32");
REGISTER_KERNEL(Device::CUDA, OpType::AveragePool, DataType::Float32,
                avgPoolCudnn, "AvgPool_cuDNN_CUDA_Float32");
}; // namespace infini
@ -14,7 +14,7 @@ class ReduceMeanCudnn : public CudaKernelWithoutConfig {
|
|||
// Each dimension of the output tensor C must match the corresponding
|
||||
// dimension of the input tensor A or must be equal to 1. The dimensions
|
||||
// equal to 1 indicate the dimensions of A to be reduced.
|
||||
int nInDims = input->getDims().size();
|
||||
int nInDims = input->getRank();
|
||||
IT_ASSERT(CUDNN_DIM_MAX >= nInDims);
|
||||
int inDimArray[CUDNN_DIM_MAX], outDimArray[CUDNN_DIM_MAX],
|
||||
inStrideArray[CUDNN_DIM_MAX], outStrideArray[CUDNN_DIM_MAX];
|
||||
|
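The comment in `ReduceMeanCudnn` above quotes the cuDNN reduction rule: each output dimension equals the corresponding input dimension or 1, and the 1s mark the reduced axes. A worked example (illustration only): reducing axis 1 of a {2, 3, 4, 5} input yields an output descriptor of {2, 1, 4, 5}.

```cpp
#include <vector>

// Build the cuDNN-style output dims for a reduction: reduced axes keep
// extent 1, all other axes keep the input extent.
std::vector<int> reduceOutDims(std::vector<int> dims, const std::vector<int> &axes) {
    for (int a : axes)
        dims[a] = 1;
    return dims; // {2, 3, 4, 5} with axes {1} -> {2, 1, 4, 5}
}
```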
|
|
@ -9,7 +9,7 @@ class ResizeCuda : public CudaKernelWithoutConfig {
|
|||
auto in = op->getInputs(0);
|
||||
auto out = op->getOutputs()[0];
|
||||
|
||||
int nDims = in->getDims().size();
|
||||
int nDims = in->getRank();
|
||||
if (nDims > 4)
|
||||
IT_TODO_HALT();
|
||||
|
||||
|
|
|
@ -9,7 +9,7 @@ namespace infini {
|
|||
class CudaCompute {
|
||||
void initComposedTensorMetadata(ComposedTensorMetadata &metadata,
|
||||
Tensor tensor) const {
|
||||
int nDims = tensor->getDims().size();
|
||||
int nDims = tensor->getRank();
|
||||
auto strides = tensor->getStride();
|
||||
IT_ASSERT(strides.size() == (size_t)nDims);
|
||||
for (int i = 0; i < nDims; ++i) {
|
||||
|
@ -60,8 +60,8 @@ class ConcatCuda : private CudaCompute, public CudaKernelWithoutConfig {
|
|||
void compute(const Operator &_op,
|
||||
const RuntimeObj *_context) const override {
|
||||
do_compute(_op->getOutput(), _op->getInputs(),
|
||||
as<ConcatObj>(_op)->getDim(),
|
||||
_op->getOutput()->getDims().size(), false);
|
||||
as<ConcatObj>(_op)->getDim(), _op->getOutput()->getRank(),
|
||||
false);
|
||||
}
|
||||
};
|
||||
|
||||
|
@ -69,8 +69,8 @@ class SplitCuda : private CudaCompute, public CudaKernelWithoutConfig {
|
|||
void compute(const Operator &_op,
|
||||
const RuntimeObj *_context) const override {
|
||||
do_compute(_op->getInputs(0), _op->getOutputs(),
|
||||
as<SplitObj>(_op)->getDim(),
|
||||
_op->getInputs(0)->getDims().size(), true);
|
||||
as<SplitObj>(_op)->getDim(), _op->getInputs(0)->getRank(),
|
||||
true);
|
||||
}
|
||||
};
|
||||
|
||||
|
|
|
@ -132,6 +132,8 @@ REGISTER_KERNEL(Device::CUDA, OpType::Tanh, DataType::Float32, TanhCudnn,
|
|||
"Tanh_CUDA_Float32");
|
||||
REGISTER_KERNEL(Device::CUDA, OpType::Abs, DataType::Float32, UnaryCuda,
|
||||
"Abs_CUDA_Float32");
|
||||
REGISTER_KERNEL(Device::CUDA, OpType::Sqrt, DataType::Float32, UnaryCuda,
|
||||
"Sqrt_CUDA_Float32");
|
||||
|
||||
// REGISTER_KERNEL(Device::CUDA, OpType::Softmax, DataType::Float32, UnaryCuda,
|
||||
// "Softmax_CUDA_Float32");
|
||||
|
|
|
@ -58,6 +58,14 @@ __global__ void _abs_kernel(float *input, float *output, int n) {
|
|||
}
|
||||
}
|
||||
|
||||
__global__ void _sqrt_kernel(float *input, float *output, int n) {
|
||||
int index = threadIdx.x + blockIdx.x * blockDim.x;
|
||||
int stride = blockDim.x * gridDim.x;
|
||||
for (int i = index; i < n; i += stride) {
|
||||
output[i] = sqrt(input[i]);
|
||||
}
|
||||
}
|
||||
|
||||
namespace infini {
|
||||
void softmax_kernel(float *input, float *output, int num) {
|
||||
|
||||
|
@ -90,5 +98,10 @@ void abs_kernel(float *input, float *output, int num) {
|
|||
int gridsize = (num + block_work_size() - 1) / block_work_size();
|
||||
_abs_kernel<<<blocksize, gridsize>>>(input, output, num);
|
||||
}
|
||||
void sqrt_kernel(float *input, float *output, int num) {
|
||||
|
||||
int blocksize = block_work_size();
|
||||
int gridsize = (num + block_work_size() - 1) / block_work_size();
|
||||
_sqrt_kernel<<<blocksize, gridsize>>>(input, output, num);
|
||||
}
|
||||
}; // namespace infini
|
||||
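The new `sqrt_kernel` launcher reuses the same grid-size arithmetic as the other unary launchers: the number of blocks is the ceiling of `num / block_work_size()`. A standalone sketch of that arithmetic; the value 256 is assumed purely for illustration:

```cpp
// Ceiling division used to size the launch: e.g. num = 1000 with a block
// work size of 256 gives 4 blocks.
int gridSizeFor(int num, int blockWorkSize = 256) {
    return (num + blockWorkSize - 1) / blockWorkSize;
}
```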
@ -14,7 +14,7 @@ class MklBatchNorm : public MklKernelWithoutConfig {
|
|||
|
||||
// create user memory that describes data layout in the buffers
|
||||
std::vector<dnnl_dim_t> dims;
|
||||
for (size_t i = 0; i < op->getInputs(0)->getDims().size(); ++i)
|
||||
for (size_t i = 0; i < op->getInputs(0)->getRank(); ++i)
|
||||
dims.push_back(op->getInputs(0)->getDims()[i]);
|
||||
|
||||
auto srcMd = dnnl::memory::desc(dims, dnnl::memory::data_type::f32,
|
||||
|
@ -25,7 +25,7 @@ class MklBatchNorm : public MklKernelWithoutConfig {
|
|||
getUserFormatTag(dims.size()));
|
||||
auto output = dnnl::memory(dstMd, context->getEngine(), dstData);
|
||||
|
||||
std::vector<dnnl_dim_t> meanDims(op->getInputs(0)->getDims().size(), 1);
|
||||
std::vector<dnnl_dim_t> meanDims(op->getInputs(0)->getRank(), 1);
|
||||
meanDims[1] = op->getInputs(0)->getDims()[1];
|
||||
auto meanMd = dnnl::memory::desc(meanDims, dnnl::memory::data_type::f32,
|
||||
getUserFormatTag(meanDims.size()));
|
||||
|
@ -63,6 +63,6 @@ class MklBatchNorm : public MklKernelWithoutConfig {
|
|||
{DNNL_ARG_SHIFT, baisMemory}});
|
||||
}
|
||||
};
|
||||
REGISTER_KERNEL(Device::INTELCPU, OpType::BatchNorm, DataType::Float32,
|
||||
REGISTER_KERNEL(Device::INTELCPU, OpType::BatchNormalization, DataType::Float32,
|
||||
MklBatchNorm, "BatchNorm_Mkl_Float32");
|
||||
}; // namespace infini
|
||||
|
|
|
@@ -244,7 +244,7 @@ class MklConvTranspose : public Kernel {
        return make_ref<ConvTransposeMklPerfRecordObj>(ret);
    }
};
REGISTER_KERNEL(Device::INTELCPU, OpType::ConvTrans, DataType::Float32,
REGISTER_KERNEL(Device::INTELCPU, OpType::ConvTranspose, DataType::Float32,
                MklConvTranspose, "MklConvTrans_CPU_float32");

} // namespace infini
@ -6,7 +6,7 @@
|
|||
namespace infini {
|
||||
class MklBinary : public MklKernelWithoutConfig {
|
||||
dnnl::algorithm getAlgorithem(const Ref<ElementWiseObj> &op) const {
|
||||
switch (op->getOpType()) {
|
||||
switch (op->getOpType().underlying()) {
|
||||
case OpType::Add:
|
||||
return dnnl::algorithm::binary_add;
|
||||
case OpType::Sub:
|
||||
|
@ -34,7 +34,7 @@ class MklBinary : public MklKernelWithoutConfig {
|
|||
|
||||
// create user memory that describes data layout in the buffers
|
||||
std::vector<dnnl_dim_t> dims;
|
||||
for (size_t i = 0; i < op->getInputs(0)->getDims().size(); ++i)
|
||||
for (size_t i = 0; i < op->getInputs(0)->getRank(); ++i)
|
||||
dims.push_back(op->getInputs(0)->getDims()[i]);
|
||||
|
||||
auto srcMd1 = dnnl::memory::desc(dims, dnnl::memory::data_type::f32,
|
||||
|
@ -64,7 +64,7 @@ class MklBinary : public MklKernelWithoutConfig {
|
|||
|
||||
class MklUnary : public MklKernelWithoutConfig {
|
||||
dnnl::algorithm getAlgorithem(const Ref<UnaryObj> &op) const {
|
||||
switch (op->getOpType()) {
|
||||
switch (op->getOpType().underlying()) {
|
||||
case OpType::Relu:
|
||||
return dnnl::algorithm::eltwise_relu;
|
||||
case OpType::Tanh:
|
||||
|
@ -89,7 +89,7 @@ class MklUnary : public MklKernelWithoutConfig {
|
|||
|
||||
// create user memory that describes data layout in the buffers
|
||||
std::vector<dnnl_dim_t> dims;
|
||||
for (size_t i = 0; i < op->getInputs(0)->getDims().size(); ++i)
|
||||
for (size_t i = 0; i < op->getInputs(0)->getRank(); ++i)
|
||||
dims.push_back(op->getInputs(0)->getDims()[i]);
|
||||
|
||||
auto srcMd = dnnl::memory::desc(dims, dnnl::memory::data_type::f32,
|
||||
|
|
|
@ -17,9 +17,9 @@ class MklGather : public MklKernelWithoutConfig {
|
|||
int oSize = out->size();
|
||||
int idxSize = index->size();
|
||||
|
||||
int inNDim = in->getDims().size();
|
||||
int oNDim = out->getDims().size();
|
||||
int idxNDim = index->getDims().size();
|
||||
int inNDim = in->getRank();
|
||||
int oNDim = out->getRank();
|
||||
int idxNDim = index->getRank();
|
||||
int axis = op->getAxis();
|
||||
|
||||
int outDim[4] = {0};
|
||||
|
|
|
@@ -69,7 +69,7 @@ template <typename T> class MklDpcppMatmul : public CpuKernelWithoutConfig {
    }
};

REGISTER_KERNEL(Device::INTELCPU, OpType::Matmul, DataType::Float32,
REGISTER_KERNEL(Device::INTELCPU, OpType::MatMul, DataType::Float32,
                MklDpcppMatmul<float>, "MklDpcppMatmul_CPU_float32");

} // namespace infini
@ -10,7 +10,7 @@ class MklPad : public MklKernelWithoutConfig {
|
|||
auto context = dynamic_cast<const MklRuntimeObj *>(_context);
|
||||
|
||||
std::vector<dnnl_dim_t> dims;
|
||||
for (size_t i = 0; i < op->getInputs(0)->getDims().size(); ++i) {
|
||||
for (size_t i = 0; i < op->getInputs(0)->getRank(); ++i) {
|
||||
dims.push_back(op->getInputs(0)->getDims()[i]);
|
||||
}
|
||||
auto paddedMd = dnnl::memory::desc(dims, dnnl::memory::data_type::f32,
|
||||
|
|
|
@ -17,7 +17,7 @@ class MklPooling : public MklKernelWithoutConfig {
|
|||
// create user memory that describes data layout in the buffers
|
||||
auto [n, c, h, w, r, s] = op->getNCHWRS();
|
||||
auto [ph, pw, sh, sw, dh, dw] = op->getPadStrideDilation();
|
||||
auto nDim = op->getOutput()->getDims().size();
|
||||
auto nDim = op->getOutput()->getRank();
|
||||
auto oh = op->getOutput()->getDims()[nDim - 2];
|
||||
auto ow = op->getOutput()->getDims()[nDim - 1];
|
||||
|
||||
|
@ -77,7 +77,7 @@ class MklMaxPool : public MklPooling {
|
|||
}
|
||||
};
|
||||
|
||||
REGISTER_KERNEL(Device::INTELCPU, OpType::AvgPool, DataType::Float32,
|
||||
REGISTER_KERNEL(Device::INTELCPU, OpType::AveragePool, DataType::Float32,
|
||||
MklAvgPool, "AvgPool_Mkl_Float32");
|
||||
REGISTER_KERNEL(Device::INTELCPU, OpType::MaxPool, DataType::Float32,
|
||||
MklMaxPool, "MaxPool_Mkl_Float32");
|
||||
|
|
|
@ -18,16 +18,16 @@ class MklReduce : public MklKernelWithoutConfig {
|
|||
|
||||
// create user memory that describes data layout in the buffers
|
||||
std::vector<dnnl_dim_t> inDims, inStrides;
|
||||
for (size_t i = 0; i < op->getInputs(0)->getDims().size(); ++i) {
|
||||
for (size_t i = 0; i < op->getInputs(0)->getRank(); ++i) {
|
||||
inDims.push_back(op->getInputs(0)->getDims()[i]);
|
||||
inStrides.push_back(op->getInputs(0)->getStride()[i]);
|
||||
}
|
||||
|
||||
std::vector<dnnl_dim_t> oDims(op->getInputs(0)->getDims().size(), 0),
|
||||
oStrides(op->getInputs(0)->getDims().size(), 1);
|
||||
std::vector<dnnl_dim_t> oDims(op->getInputs(0)->getRank(), 0),
|
||||
oStrides(op->getInputs(0)->getRank(), 1);
|
||||
if (!op->getKeepDims()) {
|
||||
oDims = inDims;
|
||||
for (size_t i = 0; i < op->getInputs(0)->getDims().size(); ++i) {
|
||||
for (size_t i = 0; i < op->getInputs(0)->getRank(); ++i) {
|
||||
if (op->isReduced(i)) {
|
||||
oDims[i] = 1;
|
||||
}
|
||||
|
@ -38,7 +38,7 @@ class MklReduce : public MklKernelWithoutConfig {
|
|||
stride *= oDims[i];
|
||||
}
|
||||
} else {
|
||||
for (size_t i = 0; i < op->getOutput(0)->getDims().size(); ++i) {
|
||||
for (size_t i = 0; i < op->getOutput(0)->getRank(); ++i) {
|
||||
oDims[i] = op->getOutput(0)->getDims()[i];
|
||||
oStrides[i] = op->getOutput(0)->getStride()[i];
|
||||
}
|
||||
|
|
|
@ -10,7 +10,7 @@ class MklReshape : public MklKernelWithoutConfig {
|
|||
auto context = dynamic_cast<const MklRuntimeObj *>(_context);
|
||||
|
||||
std::vector<dnnl_dim_t> dims;
|
||||
for (size_t i = 0; i < op->getInputs(0)->getDims().size(); ++i)
|
||||
for (size_t i = 0; i < op->getInputs(0)->getRank(); ++i)
|
||||
dims.push_back(op->getInputs(0)->getDims()[i]);
|
||||
|
||||
// create src md and src memory
|
||||
|
|
|
@ -30,7 +30,7 @@ class MklResize : public MklKernelWithoutConfig {
|
|||
enum_to_underlying(ResizeObj::ECoordinateTransMode::halfPixel))
|
||||
IT_TODO_HALT();
|
||||
|
||||
int nDim = op->getInputs(0)->getDims().size();
|
||||
int nDim = op->getInputs(0)->getRank();
|
||||
IT_ASSERT(nDim == 3 || nDim == 4 ||
|
||||
nDim == 5 &&
|
||||
(op->getInputs(0)->getDims()[0] == 1 &&
|
||||
|
@ -44,7 +44,7 @@ class MklResize : public MklKernelWithoutConfig {
|
|||
|
||||
// create user memory that describes data layout in the buffers
|
||||
std::vector<dnnl_dim_t> idims, odims;
|
||||
for (size_t i = 0; i < op->getInputs(0)->getDims().size(); ++i) {
|
||||
for (size_t i = 0; i < op->getInputs(0)->getRank(); ++i) {
|
||||
idims.push_back(op->getInputs(0)->getDims()[i]);
|
||||
odims.push_back(op->getOutput(0)->getDims()[i]);
|
||||
}
|
||||
|
|
|
@ -10,7 +10,7 @@ class MklSlice : public MklKernelWithoutConfig {
|
|||
auto context = dynamic_cast<const MklRuntimeObj *>(_context);
|
||||
|
||||
std::vector<dnnl_dim_t> dims;
|
||||
for (size_t i = 0; i < op->getInputs(0)->getDims().size(); ++i)
|
||||
for (size_t i = 0; i < op->getInputs(0)->getRank(); ++i)
|
||||
dims.push_back(op->getInputs(0)->getDims()[i]);
|
||||
|
||||
// create src md
|
||||
|
|
|
@ -14,7 +14,7 @@ class MklSoftmax : public MklKernelWithoutConfig {
|
|||
|
||||
// create user memory that describes data layout in the buffers
|
||||
std::vector<dnnl_dim_t> dims;
|
||||
for (size_t i = 0; i < op->getInputs(0)->getDims().size(); ++i)
|
||||
for (size_t i = 0; i < op->getInputs(0)->getRank(); ++i)
|
||||
dims.push_back(op->getInputs(0)->getDims()[i]);
|
||||
|
||||
auto srcMd = dnnl::memory::desc(dims, dnnl::memory::data_type::f32,
|
||||
|
|
|
@ -10,7 +10,7 @@ class MklSplit : public MklKernelWithoutConfig {
|
|||
auto context = dynamic_cast<const MklRuntimeObj *>(_context);
|
||||
|
||||
std::vector<dnnl_dim_t> dims;
|
||||
for (size_t i = 0; i < op->getInputs(0)->getDims().size(); ++i)
|
||||
for (size_t i = 0; i < op->getInputs(0)->getRank(); ++i)
|
||||
dims.push_back(op->getInputs(0)->getDims()[i]);
|
||||
|
||||
// create src md
|
||||
|
|
|
@ -23,27 +23,22 @@ string G2BMMObj::toString() const {
|
|||
optional<vector<Shape>> G2BMMObj::inferShape(const TensorVec &inputs) const {
|
||||
auto A = inputs[0], B = inputs[1];
|
||||
|
||||
if (!(A->getDims().size() == 3 && B->getDims().size() == 3))
|
||||
return {};
|
||||
if (!(A->getDims()[0] == B->getDims()[0]))
|
||||
return {};
|
||||
if (!(A->getDims()[1] == B->getDims()[1]))
|
||||
return {};
|
||||
if (!(A->getDims()[2] == B->getDims()[2]))
|
||||
return {};
|
||||
if (width < 0)
|
||||
return {};
|
||||
IT_ASSERT(A->getRank() == 3 && B->getRank() == 3);
|
||||
IT_ASSERT(A->getDims()[0] == B->getDims()[0]);
|
||||
IT_ASSERT(A->getDims()[1] == B->getDims()[1]);
|
||||
IT_ASSERT(A->getDims()[2] == B->getDims()[2]);
|
||||
IT_ASSERT(width >= 0);
|
||||
int b(A->getDims()[0]), m(A->getDims()[1]), n(2 * width + 1);
|
||||
return {{{b, m, n}}};
|
||||
}
|
||||
|
||||
vector<int> G2BMMObj::getWorkloadVector() const {
|
||||
return {enum_to_underlying(type), b, m, k, width, dilation,
|
||||
return {type.underlying(), b, m, k, width, dilation,
|
||||
enum_to_underlying(act)};
|
||||
}
|
||||
|
||||
vector<int> G2BMMObj::getOpAttrVector() const {
|
||||
return {enum_to_underlying(type), width, dilation, enum_to_underlying(act)};
|
||||
return {type.underlying(), width, dilation, enum_to_underlying(act)};
|
||||
}
|
||||
|
||||
} // namespace infini
|
||||
|
|
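The refactored `G2BMMObj::inferShape` now asserts its preconditions instead of returning an empty shape list, and still produces an output of shape {b, m, 2 * width + 1}. A worked example (illustration only): A and B of shape {8, 512, 64} with width = 16 give an output of {8, 512, 33}.

```cpp
#include <array>

// Output shape of G2BMM as computed above: batch, rows, and a window of
// 2 * width + 1 columns.
std::array<int, 3> g2bmmOutShape(int b, int m, int width) {
    return {b, m, 2 * width + 1};
}
```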
|
@ -24,24 +24,19 @@ string GBMMObj::toString() const {
|
|||
optional<vector<Shape>> GBMMObj::inferShape(const TensorVec &inputs) const {
|
||||
auto A = inputs[0], B = inputs[1];
|
||||
|
||||
if (!(A->getDims().size() == 3 && B->getDims().size() == 3))
|
||||
return {};
|
||||
if (!(A->getDims()[0] == B->getDims()[0]))
|
||||
return {};
|
||||
if (!(A->getDims()[1] == B->getDims()[1]))
|
||||
return {};
|
||||
if (A->getDims()[2] % 2 == 0)
|
||||
return {};
|
||||
IT_ASSERT(A->getRank() == 3 && B->getRank() == 3);
|
||||
IT_ASSERT(A->getDims()[0] == B->getDims()[0]);
|
||||
IT_ASSERT(A->getDims()[1] == B->getDims()[1]);
|
||||
IT_ASSERT(A->getDims()[2] % 2 != 0);
|
||||
int b(A->getDims()[0]), m(A->getDims()[1]), k(B->getDims()[2]);
|
||||
return {{{b, m, k}}};
|
||||
}
|
||||
|
||||
vector<int> GBMMObj::getWorkloadVector() const {
|
||||
return {enum_to_underlying(type), b, m, w, n, dilation,
|
||||
enum_to_underlying(act)};
|
||||
return {type.underlying(), b, m, w, n, dilation, enum_to_underlying(act)};
|
||||
}
|
||||
|
||||
vector<int> GBMMObj::getOpAttrVector() const {
|
||||
return {enum_to_underlying(type), dilation, enum_to_underlying(act)};
|
||||
return {type.underlying(), dilation, enum_to_underlying(act)};
|
||||
}
|
||||
} // namespace infini
|
||||
@ -15,7 +15,7 @@ ActivationBackwardObj::inferShape(const TensorVec &inputs) const {
|
|||
|
||||
std::string ActivationBackwardObj::toString() const {
|
||||
std::ostringstream os;
|
||||
os << OpRegistry::getOpName(type) << "[" << getGuid() << "]";
|
||||
os << type.toString() << "[" << getGuid() << "]";
|
||||
os << "(";
|
||||
os << vecToString(inputs[0]->getDims()) << ",";
|
||||
os << "input=" << inputs[0]->getGuid() << ",";
|
||||
|
@ -24,14 +24,14 @@ std::string ActivationBackwardObj::toString() const {
|
|||
}
|
||||
|
||||
vector<int> ActivationBackwardObj::getWorkloadVector() const {
|
||||
vector<int> ret{enum_to_underlying(type)};
|
||||
vector<int> ret{type.underlying()};
|
||||
const Shape shape = outputs[0]->getDims();
|
||||
ret.insert(ret.end(), shape.begin(), shape.end());
|
||||
return ret;
|
||||
}
|
||||
|
||||
vector<int> ActivationBackwardObj::getOpAttrVector() const {
|
||||
return {enum_to_underlying(type)};
|
||||
return {type.underlying()};
|
||||
}
|
||||
|
||||
}; // namespace infini
|
||||
|
|
|
@ -4,7 +4,8 @@ namespace infini {
|
|||
BatchNormObj::BatchNormObj(GraphObj *graph, Tensor input, Tensor output,
|
||||
Tensor mean, Tensor var, Tensor scale, Tensor bias,
|
||||
float momentum, float eps, bool trainingMode)
|
||||
: OperatorObj(OpType::BatchNorm, {input, mean, var, scale, bias}, {output}),
|
||||
: OperatorObj(OpType::BatchNormalization, {input, mean, var, scale, bias},
|
||||
{output}),
|
||||
momentum(momentum), eps(eps), trainingMode(trainingMode) {
|
||||
if (trainingMode)
|
||||
IT_TODO_HALT();
|
||||
|
@ -20,9 +21,10 @@ BatchNormObj::inferShape(const TensorVec &inputs) const {
|
|||
auto scale = inputs[3];
|
||||
auto bias = inputs[4];
|
||||
auto c = std::vector<int>{input->getDims()[1]};
|
||||
if (mean->getDims() != c || var->getDims() != c || scale->getDims() != c ||
|
||||
bias->getDims() != c)
|
||||
return {};
|
||||
IT_ASSERT(mean->getRank() == 1 && mean->getDims() == c);
|
||||
IT_ASSERT(var->getRank() == 1 && var->getDims() == c);
|
||||
IT_ASSERT(scale->getRank() == 1 && scale->getDims() == c);
|
||||
IT_ASSERT(bias->getRank() == 1 && bias->getDims() == c);
|
||||
return {{input->getDims()}};
|
||||
}
|
||||
|
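The new asserts in `BatchNormObj::inferShape` require every statistic tensor to be a rank-1 vector whose length equals the channel dimension of the input: for an NCHW input of {1, 3, 224, 224}, `mean`, `var`, `scale`, and `bias` must all have shape {3}. A standalone sketch of that shape rule:

```cpp
#include <vector>

// Expected shape of BatchNormalization's per-channel parameters for an
// NCHW input: one value per channel.
std::vector<int> batchNormParamShape(const std::vector<int> &inputDims) {
    return {inputDims[1]};
}
```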
||||
|
@ -38,7 +40,7 @@ vector<DataType> BatchNormObj::inferDataType(const TensorVec &inputs) const {
|
|||
|
||||
std::string BatchNormObj::toString() const {
|
||||
std::ostringstream os;
|
||||
os << "BatchNorm[" << getGuid() << "]";
|
||||
os << "batchNormalization[" << getGuid() << "]";
|
||||
os << "(";
|
||||
os << vecToString(inputs[0]->getDims()) << ",";
|
||||
os << "momentum=" << momentum << ",";
|
||||
|
@ -57,13 +59,13 @@ std::string BatchNormObj::toString() const {
|
|||
// need eps and momentum?
|
||||
vector<int> BatchNormObj::getWorkloadVector() const {
|
||||
vector<int> ret = inputs[0]->getDims();
|
||||
ret.emplace(ret.begin(), enum_to_underlying(type));
|
||||
ret.emplace(ret.begin(), type.underlying());
|
||||
return ret;
|
||||
}
|
||||
|
||||
// need eps and momentum?
|
||||
vector<int> BatchNormObj::getOpAttrVector() const {
|
||||
return {enum_to_underlying(type)};
|
||||
return {type.underlying()};
|
||||
}
|
||||
|
||||
} // namespace infini
|
||||
|
|
|
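For reference, the reworked `inferShape` above asserts the usual ONNX BatchNormalization contract: `mean`, `var`, `scale` and `bias` must each be rank-1 tensors of length C, the channel dimension of the NCHW input, and the output keeps the input shape. A standalone restatement of that check (plain `std::vector` shapes and a hypothetical helper name, purely illustrative):

```cpp
#include <cassert>
#include <vector>

// Mirrors the assertions in BatchNormObj::inferShape: every per-channel
// parameter must be a 1-D tensor of length C = input[1] (NCHW layout).
std::vector<int> inferBatchNormShape(const std::vector<int> &input,
                                     const std::vector<int> &mean,
                                     const std::vector<int> &var,
                                     const std::vector<int> &scale,
                                     const std::vector<int> &bias) {
    const std::vector<int> c{input[1]};
    assert(mean == c && var == c && scale == c && bias == c);
    return input; // output shape equals input shape
}

int main() {
    auto out = inferBatchNormShape({2, 3, 4, 4}, {3}, {3}, {3}, {3});
    assert((out == std::vector<int>{2, 3, 4, 4}));
}
```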
@@ -1,28 +1,29 @@
#include "operators/concat.h"
#include "utils/operator_utils.h"

namespace infini {
ConcatObj::ConcatObj(GraphObj *graph, TensorVec inputs, Tensor output, int dim)
    : OperatorObj(OpType::Concat, inputs, {output}), dim(dim) {
    int rank = inputs[0]->getRank();
    dim = get_real_axis(dim, rank);
    IT_ASSERT(checkValid(graph));
}

optional<vector<Shape>> ConcatObj::inferShape(const TensorVec &inputs) const {
    IT_ASSERT(inputs.size() > 1);
    Shape dims = inputs[0]->getDims();
    auto rank = inputs[0]->getRank();
    ShapeElem n = dims.at(dim);
    for (auto itr = inputs.begin() + 1; itr != inputs.end(); ++itr) {
        auto input = *itr;
        auto iDims = input->getDims();
        if (dims.size() != iDims.size())
            return {};
        int nDims = dims.size();
        for (auto i = 0; i < nDims; i++) {
        IT_ASSERT(rank == input->getRank());
        for (auto i = 0; i < (int)rank; i++) {
            if (i == dim) {
                n += iDims.at(i);
                continue;
            }
            if (iDims.at(i) != dims.at(i))
                return {};
            IT_ASSERT(iDims.at(i) == dims.at(i));
        }
    }
    dims[dim] = n;

@@ -47,12 +48,12 @@ vector<int> ConcatObj::getWorkloadVector() const {
    vector<int> ret = getOutput()->getDims();
    ret.emplace(ret.begin(), (int)inputs.size());
    ret.emplace(ret.begin(), dim);
    ret.emplace(ret.begin(), enum_to_underlying(type));
    ret.emplace(ret.begin(), type.underlying());
    return ret;
}

vector<int> ConcatObj::getOpAttrVector() const {
    return {enum_to_underlying(type), dim};
    return {type.underlying(), dim};
}

} // namespace infini
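`ConcatObj` (and, further down, `ExtendObj` and `GatherObj`) now route the user-supplied axis through `get_real_axis` from `utils/operator_utils.h` before validating the graph. The utility itself is not part of this diff; assuming it follows the usual ONNX convention of mapping a negative axis into `[0, rank)`, a minimal stand-in would be:

```cpp
#include <cassert>

// Assumed behaviour of get_real_axis: accept axis in [-rank, rank) and
// normalise negative values by adding rank, so -1 means the last dimension.
int get_real_axis(int axis, int rank) {
    assert(axis >= -rank && axis < rank);
    return axis < 0 ? axis + rank : axis;
}

int main() {
    assert(get_real_axis(-1, 4) == 3); // last axis of a rank-4 tensor
    assert(get_real_axis(1, 4) == 1);  // non-negative axes pass through
}
```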
@@ -19,7 +19,7 @@ ConvBaseObj::ConvBaseObj(OpType opType, TensorVec inputs, Tensor &output,

string ConvBaseObj::toString() const {
    std::ostringstream os;
    os << OpRegistry::getOpName(getOpType()) << "[" << getGuid() << "]";
    os << type.toString() << "[" << getGuid() << "]";
    os << "(";
    if (inputs.size() == 2) {
        os << vecToString(inputs[0]->getDims()) << ",";

@@ -36,13 +36,12 @@ string ConvBaseObj::toString() const {
}

vector<int> ConvBaseObj::getWorkloadVector() const {
    return {
        enum_to_underlying(type), n, c, h, w, f, r, s, ph, pw, sh, sw, dh, dw};
    return {type.underlying(), n, c, h, w, f, r, s, ph, pw, sh, sw, dh, dw};
}

vector<int> ConvBaseObj::getOpAttrVector() const {
    // IT_TODO_HALT(); // should padding mode / ph+pw be in attrs?
    return {enum_to_underlying(type), c, f, r, s, ph, pw, sh, sw, dh, dw};
    return {type.underlying(), c, f, r, s, ph, pw, sh, sw, dh, dw};
}

void ConvObj::setAuxilaryAttributes(PaddingMode mode) {

@@ -94,8 +93,7 @@ optional<vector<Shape>> ConvObj::inferShape(const TensorVec &inputs) const {
    int on = n, oc = f;
    int oh = 0, ow = 0;
    // For NCHW+FCRS layout, C of input is divisable by C of weight
    if (input->getDims()[1] % weight->getDims()[1] != 0)
        return {};
    IT_ASSERT(input->getDims()[1] % weight->getDims()[1] == 0);
    // Set padding size
    if (padding == PaddingMode::Other) {
        oh = (h - (r - sh) * dh + ph * 2) / sh;

@@ -119,8 +117,8 @@ ConvTransposed2dObj::ConvTransposed2dObj(GraphObj *graph, Tensor input,
                                         int pw, int sh, int sw, int dh, int dw,
                                         int oph, int opw, int group,
                                         Tensor bias, ActType act)
    : ConvBaseObj(OpType::ConvTrans, {input, weight}, output, ph, pw, sh, sw,
                  dh, dw, output, weight, act),
    : ConvBaseObj(OpType::ConvTranspose, {input, weight}, output, ph, pw, sh,
                  sw, dh, dw, output, weight, act),
      oph(oph), opw(opw), group(group) {
    if (bias)
        IT_TODO_HALT();

@@ -133,8 +131,8 @@ ConvTransposed2dObj::ConvTransposed2dObj(GraphObj *graph, Tensor input,
                                         PaddingMode mode, int sh, int sw,
                                         int dh, int dw, int oph, int opw,
                                         int group, Tensor bias, ActType act)
    : ConvBaseObj(OpType::ConvTrans, {input, weight}, output, mode, sh, sw, dh,
                  dw, output, weight, act),
    : ConvBaseObj(OpType::ConvTranspose, {input, weight}, output, mode, sh, sw,
                  dh, dw, output, weight, act),
      oph(oph), opw(opw), group(group) {
    if (bias)
        IT_TODO_HALT();

@@ -152,8 +150,7 @@ ConvTransposed2dObj::inferShape(const TensorVec &inputs) const {
    auto c = weight->getDims()[1];
    auto r = weight->getDims()[2];
    auto s = weight->getDims()[3];
    if (f != weight->getDims()[0])
        return {};
    IT_ASSERT(f == weight->getDims()[0]);

    int on = n, oc = c * group;
    int oh = 0, ow = 0;

@@ -233,8 +230,7 @@ ConvBackwardFilterObj::inferShape(const TensorVec &inputs) const {
    int on = n, oc = f;
    int oh = 0, ow = 0;
    // For NCHW+FCRS layout, C of input is divisable by C of weight
    if (inputX->getDims()[1] % diffY->getDims()[1] != 0)
        return {};
    IT_ASSERT(inputX->getDims()[1] % diffY->getDims()[1] == 0);
    // Set padding size
    if (padding == PaddingMode::Other) {
        oh = (h - (r - sh) * dh + ph * 2) / sh;

@@ -274,8 +270,8 @@ ConvTransposed2dNHWCObj::ConvTransposed2dNHWCObj(GraphObj *graph, Tensor input,
                                                 int sw, int dh, int dw,
                                                 int oph, int opw, int group,
                                                 Tensor bias, ActType act)
    : ConvBaseObj(OpType::ConvTrans, {input, weight}, output, mode, sh, sw, dh,
                  dw, output, weight, act),
    : ConvBaseObj(OpType::ConvTranspose, {input, weight}, output, mode, sh, sw,
                  dh, dw, output, weight, act),
      oph(oph), opw(opw), group(group) {
    if (bias)
        IT_TODO_HALT();
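The convolution hunks keep the existing output-size arithmetic, `oh = (h - (r - sh) * dh + ph * 2) / sh`, and only swap the soft `return {}` paths for hard `IT_ASSERT` checks. For a quick numeric sanity check of that expression (with the diff's naming: h input height, r kernel height, dh dilation, ph padding, sh stride), a tiny standalone version:

```cpp
#include <cstdio>

// Output-height expression used in ConvObj::inferShape for the
// PaddingMode::Other case, reproduced verbatim for a numeric check.
int convOutputDim(int h, int r, int ph, int sh, int dh) {
    return (h - (r - sh) * dh + ph * 2) / sh;
}

int main() {
    // 224x224 input, 3x3 kernel, stride 2, padding 1, dilation 1 -> 112.
    std::printf("oh = %d\n", convOutputDim(224, 3, 1, 2, 1));
}
```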
@@ -9,8 +9,8 @@ DetObj::DetObj(GraphObj *graph, Tensor input, Tensor output, Mode mode)
optional<vector<Shape>> DetObj::inferShape(const TensorVec &inputs) const {
    const auto A = inputs[0];
    auto input = A->getDims();
    int length = input.size();
    if (length == 2) {
    int rank = A->getRank();
    if (rank == 2) {
        std::vector<int> output = {1};
        return {{output}};
    } else {

@@ -21,7 +21,7 @@ optional<vector<Shape>> DetObj::inferShape(const TensorVec &inputs) const {

std::string DetObj::toString() const {
    std::ostringstream os;
    os << OpRegistry::getOpName(type) << "[" << getGuid() << "]";
    os << type.toString() << "[" << getGuid() << "]";
    os << "(";
    os << vecToString(inputs[0]->getDims()) << ",";
    os << "input=" << inputs[0]->getGuid() << ",";

@@ -30,14 +30,12 @@ std::string DetObj::toString() const {
}

vector<int> DetObj::getWorkloadVector() const {
    vector<int> ret{enum_to_underlying(type)};
    vector<int> ret{type.underlying()};
    const Shape shape = outputs[0]->getDims();
    ret.insert(ret.end(), shape.begin(), shape.end());
    return ret;
}

vector<int> DetObj::getOpAttrVector() const {
    return {enum_to_underlying(type)};
}
vector<int> DetObj::getOpAttrVector() const { return {type.underlying()}; }

}; // namespace infini
@@ -29,12 +29,12 @@ std::string DropoutObj::toString() const {
vector<int> DropoutObj::getWorkloadVector() const {
    vector<int> ret = inputs[0]->getDims();
    ret.emplace_back(static_cast<int>(ratio));
    ret.emplace(ret.begin(), enum_to_underlying(type));
    ret.emplace(ret.begin(), type.underlying());
    return ret;
}

vector<int> DropoutObj::getOpAttrVector() const {
    return {enum_to_underlying(type), static_cast<int>(ratio), false};
    return {type.underlying(), static_cast<int>(ratio), false};
}

} // namespace infini
@@ -1,4 +1,5 @@
#include "operators/element_wise.h"
#include "utils/operator_utils.h"

namespace infini {
ElementWiseObj::ElementWiseObj(OpType type, GraphObj *graph, Tensor input0,

@@ -9,37 +10,14 @@ ElementWiseObj::ElementWiseObj(OpType type, GraphObj *graph, Tensor input0,

optional<vector<Shape>>
ElementWiseObj::inferShape(const TensorVec &inputs) const {
    // For now,we only process the same dims here, broardcast will be considered
    // in the opt layer.
    const auto A = inputs[0], B = inputs[1];
    int max_len = std::max(A->getDims().size(), B->getDims().size());
    std::vector<int> A_(max_len, 1);
    std::vector<int> B_(max_len, 1);
    std::vector<int> res(max_len, 1);
    memcpy(A_.data() + max_len - A->getDims().size(), A->getDims().data(),
           A->getDims().size() * sizeof(int));
    memcpy(B_.data() + max_len - B->getDims().size(), B->getDims().data(),
           B->getDims().size() * sizeof(int));
    // std::copy(A->getDims().begin(), A->getDims().end(), A_.begin() + (max_len
    // - A->getDims().size())); std::copy(B->getDims().begin(),
    // B->getDims().end(), B_.begin() + (max_len - B->getDims().size()));
    // std::copy(A->getDims().rbegin(), A->getDims().rend(), A_.rbegin());
    // std::copy(B->getDims().rbegin(), B->getDims().rend(), B_.rbegin());

    for (int i = 0; i < max_len; ++i) {
        if (A_[i] == B_[i] || (A_[i] == 1 || B_[i] == 1)) {
            res[i] = std::max(A_[i], B_[i]);
        } else {
            return {};
        }
    }

    auto res = infer_broadcast(A->getDims(), B->getDims());
    return {{res}};
}

std::string ElementWiseObj::toString() const {
    std::ostringstream os;
    os << OpRegistry::getOpName(type) << "[" << getGuid() << "]";
    os << type.toString() << "[" << getGuid() << "]";
    os << "(";
    os << vecToString(inputs[0]->getDims()) << ",";
    os << vecToString(inputs[1]->getDims()) << ",";

@@ -52,12 +30,12 @@ std::string ElementWiseObj::toString() const {
// use output dim or inputs dim?
vector<int> ElementWiseObj::getWorkloadVector() const {
    vector<int> ret = outputs[0]->getDims();
    ret.emplace(ret.begin(), enum_to_underlying(type));
    ret.emplace(ret.begin(), type.underlying());
    return ret;
}

vector<int> ElementWiseObj::getOpAttrVector() const {
    return {enum_to_underlying(type)};
    return {type.underlying()};
}

MSELossObj::MSELossObj(GraphObj *graph, Tensor input0, Tensor input1,

@@ -69,9 +47,8 @@ MSELossObj::MSELossObj(GraphObj *graph, Tensor input0, Tensor input1,

optional<vector<Shape>> MSELossObj::inferShape(const TensorVec &inputs) const {
    const auto A = inputs[0], B = inputs[1];
    if (A->getDims().size() != B->getDims().size() ||
        A->getDims() != B->getDims())
        return {};
    IT_ASSERT(A->getRank() == B->getRank());
    IT_ASSERT(A->getDims() == B->getDims());

    if (reductionMode == None) {
        return {{A->getDims()}};

@@ -83,7 +60,7 @@ optional<vector<Shape>> MSELossObj::inferShape(const TensorVec &inputs) const {

std::string MSELossObj::toString() const {
    std::ostringstream os;
    os << OpRegistry::getOpName(type) << "[" << getGuid() << "]";
    os << type.toString() << "[" << getGuid() << "]";
    os << "(";
    os << vecToString(inputs[0]->getDims()) << ",";
    os << vecToString(inputs[1]->getDims()) << ",";

@@ -96,12 +73,10 @@ std::string MSELossObj::toString() const {
// use output dim or inputs dim?
vector<int> MSELossObj::getWorkloadVector() const {
    vector<int> ret = outputs[0]->getDims();
    ret.emplace(ret.begin(), enum_to_underlying(type));
    ret.emplace(ret.begin(), type.underlying());
    return ret;
}

vector<int> MSELossObj::getOpAttrVector() const {
    return {enum_to_underlying(type)};
}
vector<int> MSELossObj::getOpAttrVector() const { return {type.underlying()}; }

}; // namespace infini
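`ElementWiseObj::inferShape` now delegates to `infer_broadcast` from `utils/operator_utils.h` instead of the hand-rolled right-aligned loop it replaces. The removed code documents the intended semantics (numpy/ONNX multidirectional broadcasting); assuming `infer_broadcast` implements exactly that, an equivalent freestanding version is sketched below.

```cpp
#include <algorithm>
#include <cassert>
#include <vector>

using Shape = std::vector<int>;

// Assumed semantics of infer_broadcast: right-align the two shapes, pad the
// shorter one with 1s, and take the larger extent where one side is 1.
// Mismatched non-1 extents are a hard error, matching the IT_ASSERT style.
Shape infer_broadcast_sketch(const Shape &a, const Shape &b) {
    const size_t rank = std::max(a.size(), b.size());
    Shape pa(rank, 1), pb(rank, 1), out(rank, 1);
    std::copy(a.rbegin(), a.rend(), pa.rbegin());
    std::copy(b.rbegin(), b.rend(), pb.rbegin());
    for (size_t i = 0; i < rank; ++i) {
        assert(pa[i] == pb[i] || pa[i] == 1 || pb[i] == 1);
        out[i] = std::max(pa[i], pb[i]);
    }
    return out;
}

int main() {
    // {2, 3, 4} + {3, 1} broadcasts to {2, 3, 4}.
    assert((infer_broadcast_sketch({2, 3, 4}, {3, 1}) == Shape{2, 3, 4}));
}
```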
@@ -1,16 +1,18 @@
#include "operators/extend.h"
#include "utils/operator_utils.h"

namespace infini {

ExtendObj::ExtendObj(GraphObj *graph, Tensor input, Tensor output, int dim,
                     int num)
    : OperatorObj(OpType::Extend, {input}, {output}), dim(dim), num(num) {
    int rank = input->getRank();
    dim = get_real_axis(dim, rank);
    IT_ASSERT(checkValid(graph));
}

optional<vector<Shape>> ExtendObj::inferShape(const TensorVec &inputs) const {
    auto ret = inputs[0]->getDims();
    IT_ASSERT((size_t)dim < ret.size());
    ret[dim] = ret[dim] * (num + 1);
    return {{ret}};
}

@@ -30,12 +32,12 @@ vector<int> ExtendObj::getWorkloadVector() const {
    vector<int> ret = inputs[0]->getDims();
    ret.emplace_back(dim);
    ret.emplace_back(num);
    ret.emplace(ret.begin(), enum_to_underlying(type));
    ret.emplace(ret.begin(), type.underlying());
    return ret;
}

vector<int> ExtendObj::getOpAttrVector() const {
    return {enum_to_underlying(type), dim, num};
    return {type.underlying(), dim, num};
}

} // namespace infini
@@ -1,9 +1,12 @@
#include "operators/gather.h"
#include "utils/operator_utils.h"

namespace infini {
GatherObj::GatherObj(GraphObj *graph, Tensor input, Tensor indices,
                     Tensor output, int axis)
    : OperatorObj(OpType::Gather, {input, indices}, {output}), axis(axis) {
    int rank = input->getRank();
    axis = get_real_axis(axis, rank);
    IT_ASSERT(checkValid(graph));
}

@@ -11,12 +14,6 @@ optional<vector<Shape>> GatherObj::inferShape(const TensorVec &inputs) const {
    auto dims0 = inputs[0]->getDims();
    auto dims1 = inputs[1]->getDims();

    if (axis < 0)
        IT_TODO_HALT();

    if ((size_t)axis >= dims0.size())
        return {};

    IT_ASSERT(CheckIndexValid());

    Shape dim = dims0;

@@ -28,7 +25,7 @@ optional<vector<Shape>> GatherObj::inferShape(const TensorVec &inputs) const {
vector<DataType> GatherObj::inferDataType(const TensorVec &inputs) const {
    IT_ASSERT(inputs.size() == 2);
    auto index = inputs[1];
    IT_ASSERT(index->getDType() == DataType::UInt32);
    IT_ASSERT(index->getDType() == DataType::Int32);
    return {inputs[0]->getDType()};
}

@@ -72,7 +69,7 @@ std::string GatherObj::toString() const {

vector<int> GatherObj::getWorkloadVector() const {
    vector<int> ret = inputs[0]->getDims();
    ret.emplace(ret.begin(), enum_to_underlying(type));
    ret.emplace(ret.begin(), type.underlying());
    for (auto it : inputs[1]->getDims())
        ret.emplace_back(it);
    ret.emplace_back(axis);

@@ -80,7 +77,7 @@ vector<int> GatherObj::getWorkloadVector() const {
}

vector<int> GatherObj::getOpAttrVector() const {
    return {enum_to_underlying(type), axis};
    return {type.underlying(), axis};
}

} // namespace infini
@@ -1,33 +1,33 @@
#include "operators/matmul.h"
#include "utils/operator_utils.h"
#include <numeric>

namespace infini {

MatmulObj::MatmulObj(GraphObj *graph, Tensor A, Tensor B, Tensor C, bool transA,
                     bool transB, [[maybe_unused]] Tensor bias, ActType act)
    : OperatorObj(OpType::Matmul,
    : OperatorObj(OpType::MatMul,
                  bias ? TensorVec{A, B, bias} : TensorVec{A, B}, {C}),
      transA(transA), transB(transB), act(act), b(1) {
    auto shape_a = A->getDims();
    auto shape_b = B->getDims();
    int dimA = shape_a.size(), dimB = shape_b.size();
    IT_ASSERT(dimA >= 2 && dimB >= 2);

    b = 1;
    if (dimA <= 3 && dimB <= 3) {
        int b1 = dimA == 2 ? 1 : A->getDims()[0];
        int b2 = dimB == 2 ? 1 : B->getDims()[0];

        b = std::max(b1, b2);
    int rankA = A->getRank();
    int rankB = B->getRank();
    IT_ASSERT(rankA >= 2 && rankB >= 2);
    Shape shape_a1(shape_a.begin(), shape_a.begin() + (rankA - 2));
    Shape shape_b1(shape_b.begin(), shape_b.begin() + (rankB - 2));
    auto ret = infer_broadcast(shape_a1, shape_b1);
    if (ret.empty()) {
        b = 1;
    } else {
        IT_ASSERT_TODO(dimA == dimB);
        for (size_t i = 0; i < shape_a.size() - 2; ++i) {
            IT_ASSERT_TODO(shape_a[i] == shape_b[i]);
            b *= shape_a[i];
        }
        b = std::accumulate(ret.begin(), ret.end(), 1);
    }
    auto kA = *(transA ? shape_a.rbegin() + 1 : shape_a.rbegin());
    auto kB = *(transB ? shape_b.rbegin() : shape_b.rbegin() + 1);
    IT_ASSERT(kA == kB);
    m = *(transA ? shape_a.rbegin() : shape_a.rbegin() + 1);
    n = *(transB ? shape_b.rbegin() + 1 : shape_b.rbegin());
    k = *(transA ? shape_a.rbegin() + 1 : shape_a.rbegin());
    k = kA;
    IT_ASSERT(checkValid(graph));
}

@@ -42,52 +42,25 @@ string MatmulObj::toString() const {

optional<vector<Shape>> MatmulObj::inferShape(const TensorVec &inputs) const {
    auto A = inputs[0], B = inputs[1];
    int dimA = A->getDims().size(), dimB = B->getDims().size();

    if (dimA > 3 || dimB > 3) {
        // no broadcast
        auto shape_a = inputs[0]->getDims();
        auto it = shape_a.rbegin();
        *it++ = n;
        *it++ = m;
        return {{std::move(shape_a)}};
    }

    int b1 = dimA == 2 ? 1 : A->getDims()[0];
    int b2 = dimB == 2 ? 1 : B->getDims()[0];

    int b = std::max(b1, b2);
    int m = transA ? A->getDims()[dimA - 1] : A->getDims()[dimA - 2];
    int n = transB ? B->getDims()[dimB - 2] : B->getDims()[dimB - 1];
    int kA = transA ? A->getDims()[dimA - 2] : A->getDims()[dimA - 1];
    int kB = transB ? B->getDims()[dimB - 1] : B->getDims()[dimB - 2];

    if ((dimA != 2 && dimA != 3) || (dimB != 2 && dimB != 3)) {
        printf("Bad input dim: dimA = %d, dimB = %d\n", dimA, dimB);
        return {};
    }
    if (b1 != 1 && b2 != 1 && b1 != b2) {
        printf("Bad batch size b1 = %d, b2 = %d\n", b1, b2);
        return {};
    }
    if (kA != kB) {
        printf("Bad K: kA = %d, kB = %d\n", kA, kB);
        return {};
    }
    if (dimA == 2 && dimB == 2) {
        return {{{m, n}}};
    } else {
        return {{{b, m, n}}};
    }
    auto shapeA = A->getDims();
    auto shapeB = B->getDims();
    int rankA = A->getRank();
    int rankB = B->getRank();
    Shape shapeA1(shapeA.begin(), shapeA.begin() + (rankA - 2));
    Shape shapeB1(shapeB.begin(), shapeB.begin() + (rankB - 2));
    Shape ret = infer_broadcast(shapeA1, shapeB1);
    ret.emplace_back(m);
    ret.emplace_back(n);
    return {{ret}};
}

vector<int> MatmulObj::getWorkloadVector() const {
    return {enum_to_underlying(type), b, m, n, k, transA, transB,
    return {type.underlying(), b, m, n, k, transA, transB,
            enum_to_underlying(act)};
}

vector<int> MatmulObj::getOpAttrVector() const {
    return {enum_to_underlying(type), transA, transB, enum_to_underlying(act)};
    return {type.underlying(), transA, transB, enum_to_underlying(act)};
}

} // namespace infini
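With the old 2-D/3-D restriction gone, `MatmulObj::inferShape` now broadcasts all leading (batch) dimensions of A and B and appends m and n. A small self-contained worked example of that output-shape rule (helper and function names here are illustrative, not the project's API; the broadcast step follows the same assumed semantics as the sketch above):

```cpp
#include <algorithm>
#include <cassert>
#include <vector>

using Shape = std::vector<int>;

// Worked example of the reworked MatmulObj::inferShape rule: broadcast the
// leading (batch) dims of A and B, then append m and n.
Shape matmulOutputShape(const Shape &a, const Shape &b, bool transA,
                        bool transB) {
    assert(a.size() >= 2 && b.size() >= 2);
    const int m = transA ? a.back() : a[a.size() - 2];
    const int n = transB ? b[b.size() - 2] : b.back();
    Shape batchA(a.begin(), a.end() - 2), batchB(b.begin(), b.end() - 2);
    const size_t rank = std::max(batchA.size(), batchB.size());
    Shape pa(rank, 1), pb(rank, 1), out(rank, 1);
    std::copy(batchA.rbegin(), batchA.rend(), pa.rbegin());
    std::copy(batchB.rbegin(), batchB.rend(), pb.rbegin());
    for (size_t i = 0; i < rank; ++i) {
        assert(pa[i] == pb[i] || pa[i] == 1 || pb[i] == 1);
        out[i] = std::max(pa[i], pb[i]);
    }
    out.push_back(m);
    out.push_back(n);
    return out;
}

int main() {
    // A: [8, 1, 64, 32], B: [4, 32, 16] -> output [8, 4, 64, 16].
    assert((matmulOutputShape({8, 1, 64, 32}, {4, 32, 16}, false, false) ==
            Shape{8, 4, 64, 16}));
}
```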
@@ -69,7 +69,7 @@ optional<vector<Shape>> MemBoundObj::inferShape(const TensorVec &inputs) const {
}

vector<int> MemBoundObj::getWorkloadVector() const {
    return {enum_to_underlying(type), (int)simplifiedHash};
    return {type.underlying(), (int)simplifiedHash};
}

vector<int> MemBoundObj::getOpAttrVector() const { return getWorkloadVector(); }
@@ -9,7 +9,7 @@ PadObj::PadObj(GraphObj *graph, Tensor input, Tensor output,
    else {
        auto nAxis = (*axes).size();
        IT_ASSERT(_pads.size() == nAxis * 2);
        auto nDims = input->getDims().size();
        auto nDims = input->getRank();
        pads = vector<int>(nDims * 2, 0);

        for (size_t i = 0; i < nAxis; ++i) {

@@ -24,13 +24,11 @@ PadObj::PadObj(GraphObj *graph, Tensor input, Tensor output,

optional<vector<Shape>> PadObj::inferShape(const TensorVec &inputs) const {
    auto dims = inputs[0]->getDims();
    int nDims = dims.size();
    if (nDims * 2 != (int)pads.size())
        return {};
    for (int i = 0; i < nDims; ++i) {
        if (pads[i] < 0 || pads[i + nDims] < 0)
            return {};
        dims[i] += pads[i] + pads[i + nDims];
    int rank = inputs[0]->getRank();
    IT_ASSERT(rank * 2 == (int)pads.size());
    for (int i = 0; i < rank; ++i) {
        IT_ASSERT(pads[i] >= 0 && pads[i + rank] >= 0);
        dims[i] += pads[i] + pads[i + rank];
    }

    return {{dims}};

@@ -50,13 +48,13 @@ std::string PadObj::toString() const {
vector<int> PadObj::getWorkloadVector() const {
    vector<int> ret = inputs[0]->getDims();
    ret.insert(ret.end(), pads.begin(), pads.end());
    ret.emplace(ret.begin(), enum_to_underlying(type));
    ret.emplace(ret.begin(), type.underlying());
    return ret;
}

vector<int> PadObj::getOpAttrVector() const {
    vector<int> ret = pads;
    ret.emplace(ret.begin(), enum_to_underlying(type));
    ret.emplace(ret.begin(), type.underlying());
    return ret;
}
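The pad hunks replace the defensive `return {}` paths with asserts but keep the shape rule: `pads` stores `rank` leading amounts followed by `rank` trailing amounts, and each output dim grows by `pads[i] + pads[i + rank]`. A quick standalone restatement of that rule, with illustrative names:

```cpp
#include <cassert>
#include <vector>

// Shape rule from PadObj::inferShape: pads = [begin_0..begin_{r-1},
// end_0..end_{r-1}], output[i] = input[i] + pads[i] + pads[i + r].
std::vector<int> padOutputShape(std::vector<int> dims,
                                const std::vector<int> &pads) {
    const int rank = static_cast<int>(dims.size());
    assert(static_cast<int>(pads.size()) == rank * 2);
    for (int i = 0; i < rank; ++i) {
        assert(pads[i] >= 0 && pads[i + rank] >= 0);
        dims[i] += pads[i] + pads[i + rank];
    }
    return dims;
}

int main() {
    // Pad H and W of an NCHW tensor by 1 on each side: 4x4 -> 6x6.
    auto out = padOutputShape({1, 3, 4, 4}, {0, 0, 1, 1, 0, 0, 1, 1});
    assert((out == std::vector<int>{1, 3, 6, 6}));
}
```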
@@ -16,19 +16,19 @@ PoolingObj::PoolingObj(GraphObj *graph, OpType optype, Tensor input,

optional<vector<Shape>> PoolingObj::inferShape(const TensorVec &inputs) const {
    const auto &input = inputs[0];
    auto h = input->getDims()[input->getDims().size() - 2],
         w = input->getDims()[input->getDims().size() - 1];
    auto h = input->getDims()[input->getRank() - 2],
         w = input->getDims()[input->getRank() - 1];
    int oh = (h - (kh - sh) + ph * 2) / sh;
    int ow = (w - (kw - sw) + pw * 2) / sw;
    auto ret = input->getDims();
    ret[input->getDims().size() - 2] = oh;
    ret[input->getDims().size() - 1] = ow;
    ret[input->getRank() - 2] = oh;
    ret[input->getRank() - 1] = ow;
    return {{ret}};
}

std::string PoolingObj::toString() const {
    std::ostringstream os;
    os << OpRegistry::getOpName(type) << "[" << getGuid() << "]";
    os << type.toString() << "[" << getGuid() << "]";
    os << "(";
    os << "k=[" << kh << "," << kw << "],";
    os << "p=[" << ph << "," << pw << "],";

@@ -40,12 +40,11 @@ std::string PoolingObj::toString() const {
}

vector<int> PoolingObj::getWorkloadVector() const {
    return {
        enum_to_underlying(type), n, c, h, w, kh, kw, ph, pw, sh, sw, dh, dw};
    return {type.underlying(), n, c, h, w, kh, kw, ph, pw, sh, sw, dh, dw};
}

vector<int> PoolingObj::getOpAttrVector() const {
    return {enum_to_underlying(type), kh, kw, ph, pw, sh, sw, dh, dw};
    return {type.underlying(), kh, kw, ph, pw, sh, sw, dh, dw};
}

}; // namespace infini
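The pooling change is purely cosmetic (`getRank()` instead of `getDims().size()`), so the output-size arithmetic `oh = (h - (kh - sh) + ph * 2) / sh` is unchanged. A quick numeric check of that expression with common settings:

```cpp
#include <cstdio>

// Pooling output extent as computed in PoolingObj::inferShape, reproduced
// verbatim for a numeric sanity check.
int poolOutputDim(int h, int kh, int ph, int sh) {
    return (h - (kh - sh) + ph * 2) / sh;
}

int main() {
    std::printf("3x3/s2/p1 on 112: %d\n", poolOutputDim(112, 3, 1, 2)); // 56
    std::printf("2x2/s2/p0 on 112: %d\n", poolOutputDim(112, 2, 0, 2)); // 56
}
```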