New upstream version 1.2.1

szm-min 2022-08-02 09:08:48 +08:00
parent 277191b0b0
commit 2335ec9d1e
89 changed files with 9302 additions and 97 deletions

.gitignore vendored Normal file (26 lines added)

@ -0,0 +1,26 @@
*.o
.vscode
build/
bin/
obj-x86_64-linux-gnu/
debian/tmp/
debian/files/
debian/libkysdk-disk/
debian/libkysdk-disk-dev/
debian/libkysdk-filesystem/
debian/libkysdk-filesystem-dev/
debian/libkysdk-hardware/
debian/libkysdk-hardware-dev/
debian/libkysdk-package/
debian/libkysdk-package-dev/
debian/libkysdk-proc/
debian/libkysdk-proc-dev/
debian/libkysdk-sysinfo/
debian/libkysdk-sysinfo-dev/
debian/libkysdk-system/
debian/libkysdk-system-dev/
debian/libkysdk-systime/
debian/libkysdk-systime-dev/
debian/libkysdk*.substvars
debian/libkysdk*.debhelper
debian/.debhelper

debian/changelog vendored Normal file (121 lines added)

@ -0,0 +1,121 @@
libkysdk-system (1.2.1) v101; urgency=medium
* Bug IDs:
* Requirement IDs:
* Other changes: remove the pinned version-number dependencies
-- hanpinlong <hanpinlong@kylinos.cn> Fri, 15 Jul 2022 16:22:40 +0800
libkysdk-system (1.2.1.0build2) v101; urgency=medium
* Bug IDs: 126838, 126836, 126834, 126922
* Requirement IDs: none
* Other changes: add the kdk_system_get_projectSubName interface for retrieving the OS project sub-number name
-- hanpinlong <hanpinlong@kylinos.cn> Fri, 15 Jul 2022 16:22:40 +0800
libkysdk-system (1.2.1.0build1) v101; urgency=medium
* Bug IDs: 126838, 126836, 126834, 126922
* Requirement ID: 15924, provide tablet-feature interfaces
* Other changes: add the kdk_system_get_projectSubName interface for retrieving the OS project sub-number name
-- hanpinlong <hanpinlong@kylinos.cn> Fri, 15 Jul 2022 14:14:10 +0800
libkysdk-system (1.2.1.0) v101; urgency=medium
* Bug IDs: 126838, 126836, 126834, 126922
* Requirement IDs:
* Other changes: add the kdk_system_get_projectSubName interface for retrieving the OS project sub-number name
-- hanpinlong <hanpinlong@kylinos.cn> Thu, 14 Jul 2022 17:03:53 +0800
libkysdk-system (1.2.0.5) v101; urgency=medium
* Bug ID: 124264
* Requirement IDs: none
* Other changes:
  fix self-test issue: shlibs file problem
  fix self-test issue: add the libkysdk-systemcommon common data package, used to specify the runtime link library path
-- chenzhikai <chenzhikai@kylinos.cn> Wed, 29 Jun 2022 09:51:29 +0800
libkysdk-system (1.2.0.4) v101; urgency=medium
* Bug IDs:
* Requirement ID: 13708, add sw64 support for the image viewer
* Other changes: none
-- wangweiran <wangweiran@kylinos.cn> Wed, 22 Jun 2022 10:34:13 +0800
libkysdk-system (1.2.0.3) v101; urgency=medium
* Bug ID: 122537
* Requirement IDs: none
* Other changes: none
-- wangweiran <wangweiran@kylinos.cn> Mon, 13 Jun 2022 14:38:58 +0800
libkysdk-system (1.2.0.2) v101; urgency=medium
* Bug IDs: 120511, 120498, 120580
* Requirement IDs: none
* Other changes: none
-- shaozhimin <shaozhimin@kylinos.cn> Mon, 13 Jun 2022 10:48:54 +0800
libkysdk-system (1.2.0.1) v101; urgency=medium
* Bug ID: 120474
* Requirement ID: 13708
* Other changes:
-- wangweiran <wangweiran@kylinos.cn> Wed, 18 May 2022 10:30:14 +0800
libkysdk-system (1.2.0kylin1) v101; urgency=medium
* Bug IDs:
* Requirement ID: 13735
* Other changes:
-- wangweiran <wangweiran@kylinos.cn> Wed, 18 May 2022 10:30:14 +0800
libkysdk-system (1.1.1kylin1) ultron; urgency=medium
* Bug ID: 119833
* Requirement ID: 14071
* Other changes:
-- liuyunhe <liuyunhe@kylinos.cn> Tue, 17 May 2022 16:01:54 +0800
libkysdk-system (1.0.0kylin5) v101; urgency=medium
* Bug ID: 90497
* Requirement IDs: none
* Other changes: fix failure to retrieve NVMe disk information
-- chenzhikai <chenzhikai@kylinos.cn> Thu, 02 Dec 2021 17:01:49 +0800
libkysdk-system (1.0.0kylin4) v101; urgency=medium
* Bug ID: 92439
* Requirement IDs: none
* Other changes: fix the error reported when debugging the service with d-feet
-- chenzhikai <chenzhikai@kylinos.cn> Fri, 19 Nov 2021 14:17:56 +0800
libkysdk-system (1.0.0kylin3) v101; urgency=medium
* Bug ID: 90619
* Requirement IDs: none
* Other changes: none
-- chenzhikai <chenzhikai@kylinos.cn> Fri, 12 Nov 2021 17:12:42 +0800
libkysdk-system (1.0.0kylin2) v101; urgency=medium
* Bug IDs:
* Requirement ID: 8996
* Other changes: KYSDK 1.0.0 release
-- chenzhikai <chenzhikai@kylinos.cn> Fri, 05 Nov 2021 15:35:58 +0800
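The kdk_system_get_projectSubName interface added in 1.2.1.0 is not otherwise shown in this diff. A minimal usage sketch follows, assuming it is declared in libkysysinfo.h (the header installed by libkysdk-sysinfo-dev below), takes no arguments, and returns a heap-allocated string the caller must free; all of these are assumptions, not confirmed by the changelog itself:

#include <stdio.h>
#include <stdlib.h>
#include <kysdk/kysdk-system/libkysysinfo.h>   /* assumed header location, matching the -dev install list below */

int main(void)
{
    /* Assumption: returns a malloc'd string with the OS project sub-number name, or NULL on failure. */
    char *subname = kdk_system_get_projectSubName();
    if (subname) {
        printf("project sub-number name: %s\n", subname);
        free(subname);
    }
    return 0;
}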

debian/control vendored Normal file (271 lines added)

@ -0,0 +1,271 @@
Source: libkysdk-system
Section: utils
Priority: optional
Maintainer: kylin <chenzhikai@kylinos.cn>
Build-Depends: debhelper-compat (= 12),
cmake,
libc6-dev,
libsystemd-dev,
libdbus-1-dev,
libdbus-glib-1-dev,
libglib2.0-dev,
libkysdk-log-dev,
libkysdk-config-dev,
libkysdk-utils-dev,
libkysdk-timer-dev,
libqt5core5a,
qtbase5-dev,
libudev-dev,
libopencv-dev,
libarchive-dev,
libtesseract-dev
Standards-Version: 4.4.1
Homepage: http://gitlab2.kylin.com/kysdk/kysdk-system
Package: libkysdk-system
Architecture: any
Section: utils
Depends: ${shlibs:Depends},
${misc:Depends},
libkysdk-disk,
libkysdk-sysinfo,
libkysdk-systime,
libkysdk-filesystem,
libkysdk-proc,
libkysdk-hardware,
libkysdk-package,
libkysdk-powermanagement,
systemd,
libglib2.0-0,
libkysdk-systemcommon,
Multi-Arch: same
Description: Kylin Developer Kit - system layer suite, providing APIs and services such as system information, disk information, and system time
Package: libkysdk-system-dev
Architecture: any
Section: utils
Depends: ${shlibs:Depends},
${misc:Depends},
libkysdk-system,
libkysdk-disk-dev,
libkysdk-sysinfo-dev,
libkysdk-systime-dev,
libkysdk-filesystem-dev,
libkysdk-proc-dev,
libkysdk-hardware-dev,
libkysdk-package-dev,
libkysdk-powermanagement-dev
Multi-Arch: same
Description: Kylin Developer Kit - system layer suite - development files, providing APIs and services such as system information, disk information, and system time
Package: libkysdk-disk
Architecture: any
Section: utils
Depends: ${shlibs:Depends},
${misc:Depends},
libkysdk-log,
libkysdk-systemcommon
Multi-Arch: same
Description: System disk information retrieval library
Package: libkysdk-disk-dev
Architecture: any
Section: utils
Depends: ${shlibs:Depends},
${misc:Depends},
libkysdk-disk,
libkysdk-log-dev,
libkysdk-utils-dev,
libblkid-dev,
libudev-dev
Multi-Arch: same
Description: System disk information retrieval library - development files
Package: libkysdk-systime
Architecture: any
Section: utils
Depends: ${shlibs:Depends},
${misc:Depends},
libkysdk-log,
dbus,
systemd,
libglib2.0-0,
libkysdk-timer,
libkysdk-systemcommon
Multi-Arch: same
Description: System time operations library
Package: libkysdk-systime-dev
Architecture: any
Section: utils
Depends: ${shlibs:Depends},
${misc:Depends},
libkysdk-systime,
libglib2.0-dev,
libkysdk-timer-dev
Multi-Arch: same
Description: System time operations library - development files
Package: libkysdk-sysinfo
Architecture: any
Section: utils
Depends: ${shlibs:Depends},
${misc:Depends},
libkysdk-log,
dbus,
libglib2.0-0,
systemd,
libkysdk-systemcommon
Multi-Arch: same
Description: System information retrieval library
Package: libkysdk-sysinfo-dev
Architecture: any
Section: utils
Depends: ${shlibs:Depends},
${misc:Depends},
libkysdk-sysinfo,
libkysdk-log-dev,
libkysdk-utils-dev,
libsystemd-dev,
libdbus-1-dev,
libdbus-glib-1-dev,
libglib2.0-dev
Multi-Arch: same
Description: System information retrieval library - development files
Package: libkysdk-filesystem
Architecture: any
Section: utils
Depends: ${shlibs:Depends},
${misc:Depends},
libkysdk-log,
libqt5core5a,
libkysdk-config,
systemd,
libkysdk-systemcommon
Multi-Arch: same
Description: Filesystem library
Package: libkysdk-filesystem-dev
Architecture: any
Section: utils
Depends: ${shlibs:Depends},
${misc:Depends},
libkysdk-filesystem,
libkysdk-log-dev,
libkysdk-config-dev,
libsystemd-dev,
qtbase5-dev
Multi-Arch: same
Description: Filesystem library - development files
Package: libkysdk-hardware
Architecture: any
Section: utils
Depends: ${shlibs:Depends},
${misc:Depends},
libkysdk-log,
libkysdk-config,
systemd,
libkysdk-systemcommon
Multi-Arch: same
Description: Hardware information retrieval library
Package: libkysdk-hardware-dev
Architecture: any
Section: utils
Depends: ${shlibs:Depends},
${misc:Depends},
libkysdk-hardware,
libkysdk-log-dev,
libkysdk-config-dev,
libsystemd-dev
Multi-Arch: same
Description: Hardware information retrieval library - development files
Package: libkysdk-package
Architecture: any
Section: utils
Depends: ${shlibs:Depends},
${misc:Depends},
libkysdk-systemcommon
Multi-Arch: same
Description: Package management library
Package: libkysdk-package-dev
Architecture: any
Section: utils
Depends: ${shlibs:Depends},
${misc:Depends},
libkysdk-package
Multi-Arch: same
Description: Package management library - development files
Package: libkysdk-proc
Architecture: any
Section: utils
Depends: ${shlibs:Depends},
${misc:Depends},
libkysdk-systemcommon
Multi-Arch: same
Description: Runtime information retrieval library
Package: libkysdk-proc-dev
Architecture: any
Section: utils
Depends: ${shlibs:Depends},
${misc:Depends},
libkysdk-proc
Multi-Arch: same
Description: Runtime information retrieval library - development files
Package: libkysdk-powermanagement
Architecture: any
Section: utils
Depends: ${shlibs:Depends},
${misc:Depends},
libkysdk-log,
libkysdk-systemcommon
Multi-Arch: same
Description: Power management library
Package: libkysdk-powermanagement-dev
Architecture: any
Section: utils
Depends: ${shlibs:Depends},
${misc:Depends},
libkysdk-log-dev,
libkysdk-powermanagement
Multi-Arch: same
Description: Power management library - development files
Package: libkysdk-ocr
Architecture: amd64 arm64 mips64el loongarch64 sw64
Multi-Arch: same
Depends: ${shlibs:Depends},
${misc:Depends},
libopencv-core4.2,
libopencv-highgui4.2,
libopencv-imgproc4.2,
libopencv-imgcodecs4.2,
liblept5,
libarchive13,
libtesseract4,
libkysdk-systemcommon
Description: AI text recognition (OCR)
Package: libkysdk-ocr-dev
Architecture: amd64 arm64 mips64el loongarch64 sw64
Multi-Arch: same
Depends: ${shlibs:Depends}, ${misc:Depends}, libkysdk-ocr, libleptonica-dev, libopencv-dev, libarchive-dev, libtesseract-dev
Description: AI text recognition (OCR) - development files
Package: libkysdk-systemcommon
Architecture: any
Section: utils
Depends: ${shlibs:Depends},
${misc:Depends}
Multi-Arch: same
Description: Common data package for the kysdk-system layer

debian/copyright vendored Normal file (43 lines added)

@ -0,0 +1,43 @@
Format: https://www.debian.org/doc/packaging-manuals/copyright-format/1.0/
Upstream-Name: kysdk-system
Upstream-Contact: <preferred name and address to reach the upstream project>
Source: <url://example.com>
Files: *
Copyright: <years> <put author's name and email here>
<years> <likewise for another author>
License: <special license>
<Put the license of the package here indented by 1 space>
<This follows the format of Description: lines in control file>
.
<Including paragraphs>
# If you want to use GPL v2 or later for the /debian/* files use
# the following clauses, or change it to suit. Delete these two lines
Files: debian/*
Copyright: 2021 kylin <chenzhikai@kylinos.cn>
License: GPL-2+
This package is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
.
This package is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
.
You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>
.
On Debian systems, the complete text of the GNU General
Public License version 2 can be found in "/usr/share/common-licenses/GPL-2".
# Please also look if there are files or directories which have a
# different copyright/license attached and list them here.
# Please avoid picking licenses with terms that are more restrictive than the
# packaged work, as it may make Debian's contributions unacceptable upstream.
#
# If you need, there are some extra license texts available in two places:
# /usr/share/debhelper/dh_make/licenses/
# /usr/share/common-licenses/

debian/libkysdk-disk-dev.install vendored Normal file (2 lines added)

@ -0,0 +1,2 @@
usr/include/kysdk/kysdk-system/libkydiskinfo.h
development-files/kysdk-disk.pc usr/share/pkgconfig/

debian/libkysdk-disk.install vendored Normal file (1 line added)

@ -0,0 +1 @@
usr/lib/kysdk/kysdk-system/libkydiskinfo.so*


@ -0,0 +1,3 @@
usr/include/kysdk/kysdk-system/libkyfilewatcher.hpp
usr/include/kysdk/kysdk-system/libkyfilewatcher_global.hpp
development-files/kysdk-filesystem.pc usr/share/pkgconfig/

debian/libkysdk-filesystem.install vendored Normal file (1 line added)

@ -0,0 +1 @@
usr/lib/kysdk/kysdk-system/libkyfilewatcher.so*

debian/libkysdk-hardware-dev.install vendored Normal file (3 lines added)

@ -0,0 +1,3 @@
usr/include/kysdk/kysdk-system/libkync.h
usr/include/kysdk/kysdk-system/libkycpu.h
development-files/kysdk-hardware.pc usr/share/pkgconfig/

debian/libkysdk-hardware.install vendored Normal file (1 line added)

@ -0,0 +1 @@
usr/lib/kysdk/kysdk-system/libkyhw.so*

debian/libkysdk-ocr-dev.install vendored Normal file (2 lines added)

@ -0,0 +1,2 @@
usr/include/kysdk/kysdk-system/libkyocr.hpp
development-files/kysdk-ocr.pc usr/share/pkgconfig/

debian/libkysdk-ocr.install vendored Normal file (3 lines added)

@ -0,0 +1,3 @@
usr/lib/kysdk/kysdk-system/libkyocr.so*
usr/lib/libpaddle_inference.so
src/kdkocr/models/* /etc/kdkocr/

debian/libkysdk-ocr.postinst vendored Normal file (8 lines added)

@ -0,0 +1,8 @@
#!/bin/sh
#DEBHELPER#
if [ -f "/usr/lib/libpaddle_inference.so/libpaddle_inference" ]
then
rm -rf /usr/lib/libpaddle_inference.so/
fi

debian/libkysdk-package-dev.install vendored Normal file (2 lines added)

@ -0,0 +1,2 @@
usr/include/kysdk/kysdk-system/libkypackages.h
development-files/kysdk-package.pc usr/share/pkgconfig/

debian/libkysdk-package.install vendored Normal file (1 line added)

@ -0,0 +1 @@
usr/lib/kysdk/kysdk-system/libkypackage.so*


@ -0,0 +1,2 @@
src/powermanagement/libkylockscreen.h usr/include/kysdk/kysdk-system/
development-files/kysdk-powermanagement.pc usr/share/pkgconfig/


@ -0,0 +1 @@
usr/lib/kysdk/kysdk-system/libkypowermanagement.so*

debian/libkysdk-proc-dev.install vendored Normal file (2 lines added)

@ -0,0 +1,2 @@
usr/include/kysdk/kysdk-system/libkyrtinfo.h
development-files/kysdk-proc.pc usr/share/pkgconfig/

debian/libkysdk-proc.install vendored Normal file (1 line added)

@ -0,0 +1 @@
usr/lib/kysdk/kysdk-system/libkyrtinfo.so*

debian/libkysdk-sysinfo-dev.install vendored Normal file (3 lines added)

@ -0,0 +1,3 @@
usr/include/kysdk/kysdk-system/libkysysinfo.hpp
usr/include/kysdk/kysdk-system/libkysysinfo.h
development-files/kysdk-sysinfo.pc usr/share/pkgconfig/

debian/libkysdk-sysinfo.install vendored Normal file (1 line added)

@ -0,0 +1 @@
usr/lib/kysdk/kysdk-system/libkysysinfo.so*

debian/libkysdk-system-dev.install vendored Normal file (1 line added)

@ -0,0 +1 @@
development-files/kysdk-system.pc usr/share/pkgconfig/

debian/libkysdk-systemcommon.install vendored Normal file (1 line added)

@ -0,0 +1 @@
development-files/kysdk-system.conf etc/ld.so.conf.d

debian/libkysdk-systime.install vendored Normal file (4 lines added)

@ -0,0 +1,4 @@
bin/* usr/bin/
src/systemtime/com.kylin.kysdk.TimeServer.conf etc/dbus-1/system.d/
src/systemtime/kysdk-systime.service lib/systemd/system/

debian/postinst vendored Normal file (3 lines added)

@ -0,0 +1,3 @@
systemctl daemon-reload
systemctl enable kysdk-systime.service
systemctl restart kysdk-systime.service

debian/rules vendored Executable file (25 lines added)

@ -0,0 +1,25 @@
#!/usr/bin/make -f
# See debhelper(7) (uncomment to enable)
# output every command that modifies files on the build system.
#export DH_VERBOSE = 1
# see FEATURE AREAS in dpkg-buildflags(1)
#export DEB_BUILD_MAINT_OPTIONS = hardening=+all
# see ENVIRONMENT in dpkg-buildflags(1)
# package maintainers to append CFLAGS
#export DEB_CFLAGS_MAINT_APPEND = -Wall -pedantic
# package maintainers to append LDFLAGS
#export DEB_LDFLAGS_MAINT_APPEND = -Wl,--as-needed
%:
dh $@
# dh_make generated override targets
# This is example for Cmake (See https://bugs.debian.org/641051 )
#override_dh_auto_configure:
# dh_auto_configure -- # -DCMAKE_LIBRARY_PATH=$(DEB_HOST_MULTIARCH)

debian/source/format vendored Normal file (1 line added)

@ -0,0 +1 @@
3.0 (native)


@ -0,0 +1 @@
/usr/lib/kysdk/kysdk-system


@ -15,6 +15,8 @@ elseif(CMAKE_HOST_SYSTEM_PROCESSOR MATCHES "mips64")
add_subdirectory(kdkocr)
elseif(CMAKE_HOST_SYSTEM_PROCESSOR MATCHES "loongarch64")
add_subdirectory(kdkocr)
elseif(CMAKE_HOST_SYSTEM_PROCESSOR MATCHES "sw_64")
add_subdirectory(kdkocr)
else()
message(STATUS "host processor architecture is not supported for ocr")
endif()


@ -1,5 +1,6 @@
aux_source_directory(. SOURCESCODE)
add_library(kydiskinfo SHARED ${SOURCESCODE})
set_target_properties(kydiskinfo PROPERTIES VERSION 1.2.0 SOVERSION 1)
add_executable(test-getdiskinfo test/getdiskinfo.c)
add_executable(test-getdisklist test/getdisklist.c)
find_library(UDEV_LIB udev)


@ -5,6 +5,7 @@ aux_source_directory(. SOURCECODE)
find_package(Qt5Core)
include_directories(${Qt5Core_INCLUDE_DIRS})
add_library(kyfilewatcher SHARED ${SOURCECODE})
set_target_properties(kyfilewatcher PROPERTIES VERSION 1.2.0 SOVERSION 1)
add_executable(kyfilewatcher-test test/kyfilewatcher-test.cpp)
target_link_libraries(kyfilewatcher kylog kyconf systemd pthread ${Qt5Core_LIBRARIES})
target_link_libraries(kyfilewatcher-test kyfilewatcher)


@ -1,6 +1,7 @@
aux_source_directory(. SOURCESCODE)
include_directories(.)
add_library(kyhw SHARED ${SOURCESCODE})
set_target_properties(kyhw PROPERTIES VERSION 1.2.0 SOVERSION 1)
add_executable(kync-test test/kync-test.c)
add_executable(kycpu-test test/kycpu-test.c)
target_link_libraries(kyhw kylog kyconf pthread systemd)


@ -6,6 +6,8 @@
#include <string.h>
#include <stdlib.h>
#include <stdio.h>
#include "sys/sysinfo.h"
#include "unistd.h"
#ifdef __linux__
#include <sys/utsname.h>
#endif
@ -29,6 +31,223 @@ struct _cpuInfo{
struct _cpuInfo *cpuinf;
struct id_part {
const int id;
const char* name;
};
static const struct id_part arm_part[] = {
{ 0x810, "ARM810" },
{ 0x920, "ARM920" },
{ 0x922, "ARM922" },
{ 0x926, "ARM926" },
{ 0x940, "ARM940" },
{ 0x946, "ARM946" },
{ 0x966, "ARM966" },
{ 0xa20, "ARM1020" },
{ 0xa22, "ARM1022" },
{ 0xa26, "ARM1026" },
{ 0xb02, "ARM11 MPCore" },
{ 0xb36, "ARM1136" },
{ 0xb56, "ARM1156" },
{ 0xb76, "ARM1176" },
{ 0xc05, "Cortex-A5" },
{ 0xc07, "Cortex-A7" },
{ 0xc08, "Cortex-A8" },
{ 0xc09, "Cortex-A9" },
{ 0xc0d, "Cortex-A17" }, /* Originally A12 */
{ 0xc0f, "Cortex-A15" },
{ 0xc0e, "Cortex-A17" },
{ 0xc14, "Cortex-R4" },
{ 0xc15, "Cortex-R5" },
{ 0xc17, "Cortex-R7" },
{ 0xc18, "Cortex-R8" },
{ 0xc20, "Cortex-M0" },
{ 0xc21, "Cortex-M1" },
{ 0xc23, "Cortex-M3" },
{ 0xc24, "Cortex-M4" },
{ 0xc27, "Cortex-M7" },
{ 0xc60, "Cortex-M0+" },
{ 0xd01, "Cortex-A32" },
{ 0xd03, "Cortex-A53" },
{ 0xd04, "Cortex-A35" },
{ 0xd05, "Cortex-A55" },
{ 0xd06, "Cortex-A65" },
{ 0xd07, "Cortex-A57" },
{ 0xd08, "Cortex-A72" },
{ 0xd09, "Cortex-A73" },
{ 0xd0a, "Cortex-A75" },
{ 0xd0b, "Cortex-A76" },
{ 0xd0c, "Neoverse-N1" },
{ 0xd0d, "Cortex-A77" },
{ 0xd0e, "Cortex-A76AE" },
{ 0xd13, "Cortex-R52" },
{ 0xd20, "Cortex-M23" },
{ 0xd21, "Cortex-M33" },
{ 0xd40, "Neoverse-V1" },
{ 0xd41, "Cortex-A78" },
{ 0xd42, "Cortex-A78AE" },
{ 0xd44, "Cortex-X1" },
{ 0xd46, "Cortex-510" },
{ 0xd47, "Cortex-710" },
{ 0xd48, "Cortex-X2" },
{ 0xd49, "Neoverse-N2" },
{ 0xd4a, "Neoverse-E1" },
{ 0xd4b, "Cortex-A78C" },
{ -1, "unknown" },
};
static const struct id_part brcm_part[] = {
{ 0x0f, "Brahma B15" },
{ 0x100, "Brahma B53" },
{ 0x516, "ThunderX2" },
{ -1, "unknown" },
};
static const struct id_part dec_part[] = {
{ 0xa10, "SA110" },
{ 0xa11, "SA1100" },
{ -1, "unknown" },
};
static const struct id_part cavium_part[] = {
{ 0x0a0, "ThunderX" },
{ 0x0a1, "ThunderX 88XX" },
{ 0x0a2, "ThunderX 81XX" },
{ 0x0a3, "ThunderX 83XX" },
{ 0x0af, "ThunderX2 99xx" },
{ -1, "unknown" },
};
static const struct id_part apm_part[] = {
{ 0x000, "X-Gene" },
{ -1, "unknown" },
};
static const struct id_part qcom_part[] = {
{ 0x00f, "Scorpion" },
{ 0x02d, "Scorpion" },
{ 0x04d, "Krait" },
{ 0x06f, "Krait" },
{ 0x201, "Kryo" },
{ 0x205, "Kryo" },
{ 0x211, "Kryo" },
{ 0x800, "Falkor V1/Kryo" },
{ 0x801, "Kryo V2" },
{ 0x803, "Kryo 3XX Silver" },
{ 0x804, "Kryo 4XX Gold" },
{ 0x805, "Kryo 4XX Silver" },
{ 0xc00, "Falkor" },
{ 0xc01, "Saphira" },
{ -1, "unknown" },
};
static const struct id_part samsung_part[] = {
{ 0x001, "exynos-m1" },
{ -1, "unknown" },
};
static const struct id_part nvidia_part[] = {
{ 0x000, "Denver" },
{ 0x003, "Denver 2" },
{ 0x004, "Carmel" },
{ -1, "unknown" },
};
static const struct id_part marvell_part[] = {
{ 0x131, "Feroceon 88FR131" },
{ 0x581, "PJ4/PJ4b" },
{ 0x584, "PJ4B-MP" },
{ -1, "unknown" },
};
static const struct id_part apple_part[] = {
{ 0x022, "Icestorm" },
{ 0x023, "Firestorm" },
{ -1, "unknown" },
};
static const struct id_part faraday_part[] = {
{ 0x526, "FA526" },
{ 0x626, "FA626" },
{ -1, "unknown" },
};
static const struct id_part intel_part[] = {
{ 0x200, "i80200" },
{ 0x210, "PXA250A" },
{ 0x212, "PXA210A" },
{ 0x242, "i80321-400" },
{ 0x243, "i80321-600" },
{ 0x290, "PXA250B/PXA26x" },
{ 0x292, "PXA210B" },
{ 0x2c2, "i80321-400-B0" },
{ 0x2c3, "i80321-600-B0" },
{ 0x2d0, "PXA250C/PXA255/PXA26x" },
{ 0x2d2, "PXA210C" },
{ 0x411, "PXA27x" },
{ 0x41c, "IPX425-533" },
{ 0x41d, "IPX425-400" },
{ 0x41f, "IPX425-266" },
{ 0x682, "PXA32x" },
{ 0x683, "PXA930/PXA935" },
{ 0x688, "PXA30x" },
{ 0x689, "PXA31x" },
{ 0xb11, "SA1110" },
{ 0xc12, "IPX1200" },
{ -1, "unknown" },
};
static const struct id_part fujitsu_part[] = {
{ 0x001, "A64FX" },
{ -1, "unknown" },
};
static const struct id_part hisi_part[] = {
{ 0xd01, "Kunpeng-920" }, /* aka tsv110 */
{ -1, "unknown" },
};
static const struct id_part ft_part[] = {
{ 0x660, "FTC660" },
{ 0x661, "FTC661" },
{ 0x662, "FTC662" },
{ 0x663, "FTC663" },
{ -1, "unknown" },
};
static const struct id_part unknown_part[] = {
{ -1, "unknown" },
};
struct hw_impl {
const int id;
const struct id_part *parts;
const char *name;
};
static const struct hw_impl hw_implementer[] = {
{ 0x41, arm_part, "ARM" },
{ 0x42, brcm_part, "Broadcom" },
{ 0x43, cavium_part, "Cavium" },
{ 0x44, dec_part, "DEC" },
{ 0x46, fujitsu_part, "FUJITSU" },
{ 0x48, hisi_part, "HiSilicon" },
{ 0x49, unknown_part, "Infineon" },
{ 0x4d, unknown_part, "Motorola/Freescale" },
{ 0x4e, nvidia_part, "NVIDIA" },
{ 0x50, apm_part, "APM" },
{ 0x51, qcom_part, "Qualcomm" },
{ 0x53, samsung_part, "Samsung" },
{ 0x56, marvell_part, "Marvell" },
{ 0x61, apple_part, "Apple" },
{ 0x66, faraday_part, "Faraday" },
{ 0x69, intel_part, "Intel" },
{ 0x70, ft_part, "Phytium" },
{ 0xc0, unknown_part, "Ampere" },
{ -1, unknown_part, "unknown" },
};
static void _free_cpuinfo()
{
if (cpuinf)
@ -83,23 +302,6 @@ static int lookup(char *line, char *pattern, char **value)
return 1;
}
static int do_shell(char *comm, char *buf)
{
FILE *stream;
stream = popen(comm, "r");
if (stream == NULL) {
return 0;
}
fread(buf, sizeof(char), MIDSIZE, stream);
pclose(stream);
buf[strlen(buf) - 1] = '\0'; // strip the trailing newline
if (strlen(buf) == 0) {
return 0;
}
return 1;
}
static void _get_cpu_info()
{
// I know there is a race condition here, but I don't want to pull in pthread for it, so let it be
@ -163,6 +365,11 @@ static void _get_cpu_info()
}
fclose(fp);
if(strstr(cpuinf->model, "Loongson"))
{
cpuinf->vendor = strdup("loongson");
}
if (cpuinf->flags)
{
if (strstr(cpuinf->flags, " svm "))
@ -174,42 +381,51 @@ static void _get_cpu_info()
if(cpuinf->vendor == NULL)
{
char ret[MIDSIZE];
do_shell("lscpu | grep \"厂商 ID\"", ret);
if(strcmp(ret, ""))
int num = 0;
int part, j;
const struct id_part *parts = NULL;
fp = fopen("/sys/devices/system/cpu/cpu0/regs/identification/midr_el1", "rt");
if (!fp)
{
printf("test\n");
char *substr = "";
char *date = strstr(ret, substr);
strcpy(ret, date + 27);
cpuinf->vendor = strdup(ret);
klog_err("midr_el1 读取失败:%s\n", strerror(errno));
SAFE_FREE(cpuinf);
return ;
}
fgets(buffer, CPUINF_BUFSIZE, fp);
char *substr = "x";
char *date = strstr(buffer, substr);
strcpy(buffer, date + 9);
sscanf(buffer,"%2x",&num) ;
for (j = 0; hw_implementer[j].id != -1; j++) {
if (hw_implementer[j].id == num) {
parts = hw_implementer[j].parts;
cpuinf->vendor = strdup(hw_implementer[j].name);
break;
}
}
fclose(fp);
}
if(cpuinf->model == NULL)
{
char ret[MIDSIZE];
do_shell("lscpu | grep \"型号名称\"", ret);
if(strcmp(ret, ""))
fp = fopen("/proc/cpuinfo", "rt");
if (!fp)
{
char *substr = "";
char *date = strstr(ret, substr);
strcpy(ret, date + 26);
cpuinf->model = strdup(ret);
klog_err("/proc/cpuinfo 读取失败:%s\n", strerror(errno));
SAFE_FREE(cpuinf);
return ;
}
while(fgets(buffer, CPUINF_BUFSIZE, fp))
{
if (lookup(buffer, "Hardware", &cpuinf->model));//huawei 9A0
}
fclose(fp);
}
if(cpuinf->corenums == 0)
{
char ret[MIDSIZE];
do_shell("lscpu | grep \"每个座的核数\"", ret);
if(strcmp(ret, ""))
{
char *substr = "";
char *date = strstr(ret, substr);
strcpy(ret, date + 20);
cpuinf->corenums = atoi(ret);
}
cpuinf->corenums = sysconf(_SC_NPROCESSORS_ONLN);
}
#endif
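For context on the midr_el1 fallback introduced above: MIDR_EL1 encodes the implementer ID in bits [31:24] and the part number in bits [15:4], which is what the hw_implementer and id_part tables key on. A stand-alone decoding sketch (a hypothetical helper, not code from this commit):

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
    FILE *fp = fopen("/sys/devices/system/cpu/cpu0/regs/identification/midr_el1", "rt");
    if (!fp) {
        perror("midr_el1");
        return 1;
    }
    char buf[64] = {0};
    if (!fgets(buf, sizeof(buf), fp)) {
        fclose(fp);
        return 1;
    }
    fclose(fp);

    unsigned long midr = strtoul(buf, NULL, 16);   /* e.g. "0x00000000481fd010" */
    unsigned implementer = (midr >> 24) & 0xff;    /* 0x41 ARM, 0x48 HiSilicon, 0x70 Phytium, ... */
    unsigned part = (midr >> 4) & 0xfff;           /* looked up in the per-implementer part table */
    printf("implementer=0x%02x part=0x%03x\n", implementer, part);
    return 0;
}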


@ -1,3 +1,4 @@
#define _GNU_SOURCE // required for NI_NUMERICHOST
#include "libkync.h"
#include <cstring-extension.h>
#include <errno.h>
@ -12,6 +13,8 @@
#include <unistd.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <netdb.h>
#include <ifaddrs.h>
enum cardspec{
NCSPEC_ALL,
@ -201,9 +204,51 @@ char **_get_nc_cfg(const char *nc, enum cardcfg cfg)
memcpy(&sin, &stIf.ifr_ifru.ifru_addr, sizeof(sin));
snprintf(res[0], NC_IPv4_SIZE, "%s", inet_ntoa(sin.sin_addr));
}break;
case NCCFG_IPv6:{
struct ifaddrs *ifap, *ifa;
struct sockaddr_in6 *sa;
char addr[INET6_ADDRSTRLEN] = {0};
getifaddrs(&ifap);
for (ifa = ifap; ifa; ifa = ifa->ifa_next)
{
if (ifa->ifa_addr && ifa->ifa_addr->sa_family == AF_INET6 && !strcmp(ifa->ifa_name, nc)) /* ifa_addr can be NULL for some interfaces */
{
sa = (struct sockaddr_in6 *)ifa->ifa_addr;
getnameinfo(ifa->ifa_addr, sizeof(struct sockaddr_in6), addr,
sizeof(addr), NULL, 0, NI_NUMERICHOST);
}
}
res = malloc(sizeof(char*));
if (!res)
{
klog_err("内存申请失败:%s\n", strerror(errno));
close(sfd);
freeifaddrs(ifap);
return NULL;
}
res[0] = malloc(sizeof(char) * INET6_ADDRSTRLEN);
if (!res[0])
{
klog_err("内存申请失败:%s\n", strerror(errno));
close(sfd);
freeifaddrs(ifap);
return NULL;
}
int i = 0;
while (addr[i] != '%' && addr[i] != '\0')
i++;
addr[i] = '\0';
sprintf(res[0], "%s",addr);
freeifaddrs(ifap);
}break;
default:
break;
}
close(sfd);
return res;
}
@ -255,6 +300,19 @@ char* kdk_nc_get_private_ipv4(const char *nc)
return ipv4;
}
char* kdk_nc_get_private_ipv6(const char *nc)
{
if (!nc)
return NULL;
char **ipv6list = _get_nc_cfg(nc, NCCFG_IPv6);
if (!ipv6list)
return NULL;
char *ipv6 = ipv6list[0];
free(ipv6list);
return ipv6;
}
inline void kdk_nc_freeall(char **list)
{
if (! list)


@ -11,9 +11,11 @@ int main()
{
char *mac = kdk_nc_get_phymac(cards[index]);
char *ipv4 = kdk_nc_get_private_ipv4(cards[index]);
printf("Card %zd: %s\tStatus: %s\tMac: %s\tIPv4: %s\n", index + 1, cards[index], kdk_nc_is_up(cards[index]) == 1 ? "Up" : "Down", mac, ipv4);
char *ipv6 = kdk_nc_get_private_ipv6(cards[index]);
printf("Card %zd: %s\tStatus: %s\tMac: %s\tIPv4: %s\tIPv6: %s\n", index + 1, cards[index], kdk_nc_is_up(cards[index]) == 1 ? "Up" : "Down", mac, ipv4,ipv6);
free(mac);
free(ipv4);
free(ipv6);
index ++;
}
kdk_nc_freeall(cards);
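A note on the ownership contract shown above: kdk_nc_get_private_ipv6() frees its internal list and hands the single address string to the caller, which must free() it, as the updated test does. A minimal stand-alone sketch under that reading (the libkync.h install path appears earlier in this diff; the interface name is an illustrative assumption):

#include <stdio.h>
#include <stdlib.h>
#include <kysdk/kysdk-system/libkync.h>

int main(void)
{
    /* "enp3s0" is only an example interface name, not something this commit prescribes. */
    char *ipv6 = kdk_nc_get_private_ipv6("enp3s0");
    if (ipv6) {
        printf("IPv6: %s\n", ipv6);
        free(ipv6);   /* the getter already freed its internal list; only the string is owned here */
    }
    return 0;
}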


@ -64,7 +64,11 @@ list(APPEND kyocr_libraries ${TESSERACT_PKG_LIBRARIES})
list(APPEND kyocr_libraries -llept)
message("kyocr_cflags is ${kyocr_libraries}")
if(CMAKE_HOST_SYSTEM_PROCESSOR MATCHES "loongarch64")
if(NOT EXISTS "${CMAKE_CURRENT_SOURCE_DIR}/libs/amd64/paddle_inference/paddle/lib/libpaddle_inference.so")
add_definitions(-DLOONGARCH64)
elseif(CMAKE_HOST_SYSTEM_PROCESSOR MATCHES "loongarch64")
add_definitions(-DLOONGARCH64)
elseif(CMAKE_HOST_SYSTEM_PROCESSOR MATCHES "sw_64")
add_definitions(-DLOONGARCH64)
else()
if(CMAKE_HOST_SYSTEM_PROCESSOR MATCHES "i386")
@ -162,22 +166,36 @@ include_directories(${FETCHCONTENT_BASE_DIR}/extern_autolog-src)
endif()
if(CMAKE_HOST_SYSTEM_PROCESSOR MATCHES "loongarch64")
if(NOT EXISTS "${CMAKE_CURRENT_SOURCE_DIR}/libs/amd64/paddle_inference/paddle/lib/libpaddle_inference.so")
message("no paddle lib")
add_library(kyocr SHARED libkyocr.cpp)
target_link_libraries(kyocr ${kyocr_libraries})
target_compile_options(kyocr PUBLIC ${kyocr_cflags})
elseif(CMAKE_HOST_SYSTEM_PROCESSOR MATCHES "loongarch64")
message(loongarch64)
add_library(kyocr SHARED libkyocr.cpp)
target_link_libraries(kyocr ${kyocr_libraries})
target_compile_options(kyocr PUBLIC ${kyocr_cflags})
elseif(CMAKE_HOST_SYSTEM_PROCESSOR MATCHES "sw_64")
message(loongarch64)
add_library(kyocr SHARED libkyocr.cpp)
target_link_libraries(kyocr ${kyocr_libraries})
target_compile_options(kyocr PUBLIC ${kyocr_cflags})
else()
add_library(kyocr SHARED libkyocr.cpp ocr_main.cpp utility.cpp preprocess_op.cpp postprocess_op.cpp)
target_link_libraries(kyocr ${OpenCV_LIBS} ${DEPS})
endif()
set_target_properties(kyocr PROPERTIES VERSION 1.2.0 SOVERSION 1)
#target_link_libraries(kdkOCR -lleptonica)
target_link_libraries(kyocr ${OpenCV_LIBS} ${DEPS})
target_link_libraries(kyocr ${kyocr_libraries})
target_compile_options(kyocr PUBLIC ${kyocr_cflags})
install(TARGETS kyocr LIBRARY DESTINATION lib/kysdk/kysdk-system)
install(FILES ${PROJECT_SOURCE_DIR}/libkyocr.hpp DESTINATION include/kysdk/kysdk-system)
if(CMAKE_HOST_SYSTEM_PROCESSOR MATCHES "x86_64")
if(NOT EXISTS "${CMAKE_CURRENT_SOURCE_DIR}/libs/amd64/paddle_inference/paddle/lib/libpaddle_inference.so")
install(FILES ${PROJECT_SOURCE_DIR}/libs/libpaddle_inference DESTINATION lib/libpaddle_inference.so)
elseif(CMAKE_HOST_SYSTEM_PROCESSOR MATCHES "x86_64")
install(FILES ${PROJECT_SOURCE_DIR}/libs/amd64/paddle_inference/paddle/lib/libpaddle_inference.so DESTINATION lib/)
elseif(CMAKE_HOST_SYSTEM_PROCESSOR MATCHES "AMD64")
install(FILES ${PROJECT_SOURCE_DIR}/libs/amd64/paddle_inference/paddle/lib/libpaddle_inference.so DESTINATION lib/)
@ -186,6 +204,10 @@ elseif(CMAKE_HOST_SYSTEM_PROCESSOR MATCHES "aarch64")
elseif(CMAKE_HOST_SYSTEM_PROCESSOR MATCHES "mips64")
install(FILES ${PROJECT_SOURCE_DIR}/libs/mips64el/paddle_inference/paddle/lib/libpaddle_inference.so DESTINATION lib/)
elseif(CMAKE_HOST_SYSTEM_PROCESSOR MATCHES "loongarch64")
install(FILES ${PROJECT_SOURCE_DIR}/libs/loongarch64/paddle_inference/paddle/lib/libpaddle_inference.so DESTINATION lib/)
install(FILES ${PROJECT_SOURCE_DIR}/libs/libpaddle_inference DESTINATION lib/libpaddle_inference.so)
# install(FILES ${PROJECT_SOURCE_DIR}/libs/loongarch64/paddle_inference/paddle/lib/libpaddle_inference.so DESTINATION lib/)
elseif(CMAKE_HOST_SYSTEM_PROCESSOR MATCHES "sw_64")
install(FILES ${PROJECT_SOURCE_DIR}/libs/libpaddle_inference DESTINATION lib/libpaddle_inference.so)
# install(FILES ${PROJECT_SOURCE_DIR}/libs/sw64/paddle_inference/paddle/lib/libpaddle_inference.so DESTINATION lib/)
endif()
#target_link_libraries(test libkdkOCR.so)



@ -0,0 +1,50 @@
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <memory>
#include <string>
#include <unordered_map>
namespace paddle {
namespace framework {
class Cipher {
public:
Cipher() = default;
virtual ~Cipher() {}
// encrypt string
virtual std::string Encrypt(const std::string& plaintext,
const std::string& key) = 0;
// decrypt string
virtual std::string Decrypt(const std::string& ciphertext,
const std::string& key) = 0;
// encrypt strings and read them to file,
virtual void EncryptToFile(const std::string& plaintext,
const std::string& key,
const std::string& filename) = 0;
// read from file and decrypt them
virtual std::string DecryptFromFile(const std::string& key,
const std::string& filename) = 0;
};
class CipherFactory {
public:
CipherFactory() = default;
static std::shared_ptr<Cipher> CreateCipher(const std::string& config_file);
};
} // namespace framework
} // namespace paddle


@ -0,0 +1,32 @@
/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#if !defined(_MSC_VER) && __cplusplus < 199711L
#error C++11 or later compatible compiler is required to use Paddle.
#endif
#ifdef _WIN32
#ifndef NOMINMAX
#define NOMINMAX // msvc max/min macro conflict with std::min/max
#endif
#endif
#include "ext_dispatch.h" // NOLINT
#include "ext_dtype.h" // NOLINT
#include "ext_exception.h" // NOLINT
#include "ext_op_meta_info.h" // NOLINT
#include "ext_place.h" // NOLINT
#include "ext_tensor.h" // NOLINT


@ -0,0 +1,98 @@
/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include "ext_dtype.h" // NOLINT
#include "ext_exception.h" // NOLINT
namespace paddle {
///////// Basic Macro ///////////
#define PD_PRIVATE_CASE_TYPE_USING_HINT(NAME, enum_type, type, HINT, ...) \
case enum_type: { \
using HINT = type; \
__VA_ARGS__(); \
break; \
}
#define PD_PRIVATE_CASE_TYPE(NAME, enum_type, type, ...) \
PD_PRIVATE_CASE_TYPE_USING_HINT(NAME, enum_type, type, data_t, __VA_ARGS__)
///////// Floating Dispatch Macro ///////////
#define PD_DISPATCH_FLOATING_TYPES(TYPE, NAME, ...) \
[&] { \
const auto& __dtype__ = TYPE; \
switch (__dtype__) { \
PD_PRIVATE_CASE_TYPE(NAME, ::paddle::DataType::FLOAT32, float, \
__VA_ARGS__) \
PD_PRIVATE_CASE_TYPE(NAME, ::paddle::DataType::FLOAT64, double, \
__VA_ARGS__) \
default: \
PD_THROW("function " #NAME " is not implemented for data type `", \
::paddle::ToString(__dtype__), "`"); \
} \
}()
///////// Integral Dispatch Macro ///////////
#define PD_DISPATCH_INTEGRAL_TYPES(TYPE, NAME, ...) \
[&] { \
const auto& __dtype__ = TYPE; \
switch (__dtype__) { \
PD_PRIVATE_CASE_TYPE(NAME, ::paddle::DataType::INT32, int, __VA_ARGS__) \
PD_PRIVATE_CASE_TYPE(NAME, ::paddle::DataType::INT64, int64_t, \
__VA_ARGS__) \
PD_PRIVATE_CASE_TYPE(NAME, ::paddle::DataType::INT8, int8_t, \
__VA_ARGS__) \
PD_PRIVATE_CASE_TYPE(NAME, ::paddle::DataType::UINT8, uint8_t, \
__VA_ARGS__) \
PD_PRIVATE_CASE_TYPE(NAME, ::paddle::DataType::INT16, int16_t, \
__VA_ARGS__) \
default: \
PD_THROW("function " #NAME " is not implemented for data type `" + \
::paddle::ToString(__dtype__) + "`"); \
} \
}()
///////// Floating and Integral Dispatch Macro ///////////
#define PD_DISPATCH_FLOATING_AND_INTEGRAL_TYPES(TYPE, NAME, ...) \
[&] { \
const auto& __dtype__ = TYPE; \
switch (__dtype__) { \
PD_PRIVATE_CASE_TYPE(NAME, ::paddle::DataType::FLOAT32, float, \
__VA_ARGS__) \
PD_PRIVATE_CASE_TYPE(NAME, ::paddle::DataType::FLOAT64, double, \
__VA_ARGS__) \
PD_PRIVATE_CASE_TYPE(NAME, ::paddle::DataType::INT32, int, __VA_ARGS__) \
PD_PRIVATE_CASE_TYPE(NAME, ::paddle::DataType::INT64, int64_t, \
__VA_ARGS__) \
PD_PRIVATE_CASE_TYPE(NAME, ::paddle::DataType::INT8, int8_t, \
__VA_ARGS__) \
PD_PRIVATE_CASE_TYPE(NAME, ::paddle::DataType::UINT8, uint8_t, \
__VA_ARGS__) \
PD_PRIVATE_CASE_TYPE(NAME, ::paddle::DataType::INT16, int16_t, \
__VA_ARGS__) \
default: \
PD_THROW("function " #NAME " is not implemented for data type `" + \
::paddle::ToString(__dtype__) + "`"); \
} \
}()
// TODO(chenweihang): Add more Macros in the future if needed
} // namespace paddle


@ -0,0 +1,27 @@
// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#if defined(_WIN32)
#ifndef PD_DLL_DECL
#ifdef PADDLE_DLL_EXPORT
#define PD_DLL_DECL __declspec(dllexport)
#else
#define PD_DLL_DECL __declspec(dllimport)
#endif // PADDLE_DLL_EXPORT
#endif // PD_DLL_DECL
#else
#define PD_DLL_DECL
#endif // _WIN32


@ -0,0 +1,81 @@
/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include <cstdint>
#include <string>
#include "ext_exception.h" // NOLINT
namespace paddle {
enum class DataType {
BOOL,
INT8,
UINT8,
INT16,
INT32,
INT64,
FLOAT32,
FLOAT64,
// TODO(JiabinYang) support more data types if needed.
};
inline std::string ToString(DataType dtype) {
switch (dtype) {
case DataType::BOOL:
return "bool";
case DataType::INT8:
return "int8_t";
case DataType::UINT8:
return "uint8_t";
case DataType::INT16:
return "int16_t";
case DataType::INT32:
return "int32_t";
case DataType::INT64:
return "int64_t";
case DataType::FLOAT32:
return "float";
case DataType::FLOAT64:
return "double";
default:
PD_THROW("Unsupported paddle enum data type.");
}
}
#define PD_FOR_EACH_DATA_TYPE(_) \
_(bool, DataType::BOOL) \
_(int8_t, DataType::INT8) \
_(uint8_t, DataType::UINT8) \
_(int16_t, DataType::INT16) \
_(int, DataType::INT32) \
_(int64_t, DataType::INT64) \
_(float, DataType::FLOAT32) \
_(double, DataType::FLOAT64)
template <paddle::DataType T>
struct DataTypeToCPPType;
#define PD_SPECIALIZE_DataTypeToCPPType(cpp_type, data_type) \
template <> \
struct DataTypeToCPPType<data_type> { \
using type = cpp_type; \
};
PD_FOR_EACH_DATA_TYPE(PD_SPECIALIZE_DataTypeToCPPType)
#undef PD_SPECIALIZE_DataTypeToCPPType
} // namespace paddle


@ -0,0 +1,108 @@
/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include <iostream>
#include <sstream>
#include <string>
namespace paddle {
//////////////// Exception handling and Error Message /////////////////
#if !defined(_WIN32)
#define PD_UNLIKELY(expr) (__builtin_expect(static_cast<bool>(expr), 0))
#define PD_LIKELY(expr) (__builtin_expect(static_cast<bool>(expr), 1))
#else
#define PD_UNLIKELY(expr) (expr)
#define PD_LIKELY(expr) (expr)
#endif
struct PD_Exception : public std::exception {
public:
template <typename... Args>
explicit PD_Exception(const std::string& msg, const char* file, int line,
const char* default_msg) {
std::ostringstream sout;
if (msg.empty()) {
sout << default_msg << "\n [" << file << ":" << line << "]";
} else {
sout << msg << "\n [" << file << ":" << line << "]";
}
err_msg_ = sout.str();
}
const char* what() const noexcept override { return err_msg_.c_str(); }
private:
std::string err_msg_;
};
class ErrorMessage {
public:
template <typename... Args>
explicit ErrorMessage(const Args&... args) {
build_string(args...);
}
void build_string() { oss << ""; }
template <typename T>
void build_string(const T& t) {
oss << t;
}
template <typename T, typename... Args>
void build_string(const T& t, const Args&... args) {
build_string(t);
build_string(args...);
}
std::string to_string() { return oss.str(); }
private:
std::ostringstream oss;
};
#if defined _WIN32
#define HANDLE_THE_ERROR try {
#define END_HANDLE_THE_ERROR \
} \
catch (const std::exception& e) { \
std::cerr << e.what() << std::endl; \
throw e; \
}
#else
#define HANDLE_THE_ERROR
#define END_HANDLE_THE_ERROR
#endif
#define PD_CHECK(COND, ...) \
do { \
if (PD_UNLIKELY(!(COND))) { \
auto __message__ = ::paddle::ErrorMessage(__VA_ARGS__).to_string(); \
throw ::paddle::PD_Exception(__message__, __FILE__, __LINE__, \
"Expected " #COND \
", but it's not satisfied."); \
} \
} while (0)
#define PD_THROW(...) \
do { \
auto __message__ = ::paddle::ErrorMessage(__VA_ARGS__).to_string(); \
throw ::paddle::PD_Exception(__message__, __FILE__, __LINE__, \
"An error occured."); \
} while (0)
} // namespace paddle


@ -0,0 +1,381 @@
/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include <iostream>
#include <string>
#include <unordered_map>
#include <vector>
#include <boost/any.hpp>
#include "ext_dll_decl.h" // NOLINT
#include "ext_exception.h" // NOLINT
#include "ext_tensor.h" // NOLINT
/**
* Op Meta Info Related Define.
*
* Used to maintain operator core information.
*
*/
namespace paddle {
namespace framework {
class PD_DLL_DECL OpMetaInfoHelper;
} // namespace framework
using Tensor = paddle::Tensor;
///////////////// Util Marco Define ////////////////
#define PD_DISABLE_COPY_AND_ASSIGN(classname) \
private: \
classname(const classname&) = delete; \
classname(classname&&) = delete; \
classname& operator=(const classname&) = delete; \
classname& operator=(classname&&) = delete
#define STATIC_ASSERT_GLOBAL_NAMESPACE(uniq_name, msg) \
struct __test_global_namespace_##uniq_name##__ {}; \
static_assert(std::is_same<::__test_global_namespace_##uniq_name##__, \
__test_global_namespace_##uniq_name##__>::value, \
msg)
///////////////// Util Define and Function ////////////////
inline std::string Grad(const std::string& var_name) {
std::string result;
result.reserve(var_name.size() + 5U);
result += var_name;
result += "@GRAD";
return result;
}
////////////////////// Kernel Function (PD_KERNEL) ////////////////////////
// Record Op kernel core function
using KernelFunc = std::vector<Tensor> (*)(std::vector<Tensor> inputs,
std::vector<boost::any> attrs);
#define PD_SPECIALIZE_ComputeCallHelper(attr_type) \
template <typename... Tail> \
struct ComputeCallHelper<attr_type, Tail...> { \
template <int in_idx, int attr_idx, typename... PreviousArgs> \
static Return Compute(std::vector<Tensor> inputs, \
std::vector<boost::any> attrs, \
const PreviousArgs&... pargs) { \
try { \
attr_type arg = boost::any_cast<attr_type>(attrs[attr_idx]); \
return ComputeCallHelper<Tail...>::template Compute<in_idx, \
attr_idx + 1>( \
inputs, attrs, pargs..., arg); \
} catch (boost::bad_any_cast&) { \
PD_THROW( \
"Attribute cast error in custom operator. Expected " #attr_type \
" value."); \
} \
} \
}
template <typename T>
struct TypeTag {};
template <typename F, F f>
struct KernelFuncImpl;
template <typename Return, typename... Args, Return (*impl_fn)(Args...)>
struct KernelFuncImpl<Return (*)(Args...), impl_fn> {
static Return Compute(std::vector<Tensor> inputs,
std::vector<boost::any> attrs) {
return ComputeCallHelper<Args..., TypeTag<int>>::template Compute<0, 0>(
inputs, attrs);
}
private:
template <typename... RemainingArgs>
struct ComputeCallHelper;
// for Tensor input
template <typename... Tail>
struct ComputeCallHelper<const Tensor&, Tail...> {
template <int in_idx, int attr_idx, typename... PreviousArgs>
static Return Compute(std::vector<Tensor> inputs,
std::vector<boost::any> attrs,
const PreviousArgs&... pargs) {
static_assert(attr_idx == 0,
"Input tensor should appear before attributes.");
const Tensor& arg = inputs[in_idx];
return ComputeCallHelper<Tail...>::template Compute<in_idx + 1, attr_idx>(
inputs, attrs, pargs..., arg);
}
};
PD_SPECIALIZE_ComputeCallHelper(bool);
PD_SPECIALIZE_ComputeCallHelper(int);
PD_SPECIALIZE_ComputeCallHelper(float);
PD_SPECIALIZE_ComputeCallHelper(int64_t);
PD_SPECIALIZE_ComputeCallHelper(std::string);
PD_SPECIALIZE_ComputeCallHelper(std::vector<int>);
PD_SPECIALIZE_ComputeCallHelper(std::vector<float>);
PD_SPECIALIZE_ComputeCallHelper(std::vector<int64_t>);
PD_SPECIALIZE_ComputeCallHelper(std::vector<std::string>);
// TODO(chenweihang): support other attribute type if needed.
// Why not support other attribute type here?
// - boost::blank, std::vector<bool> and std::vector<double>
// are not used in op
// - BlockDesc* and std::vector<BlockDesc*> are used in framework
// end: base template
template <typename T>
struct ComputeCallHelper<TypeTag<T>> {
template <int in_idx, int attr_idx>
static Return Compute(std::vector<Tensor> inputs,
std::vector<boost::any> attrs, const Args&... args) {
return impl_fn(args...);
}
};
};
#define PD_KERNEL(...) \
::paddle::KernelFuncImpl<decltype(&__VA_ARGS__), &__VA_ARGS__>::Compute
/////////////// InferShape Function (PD_INFER_SHAPE) ///////////////
// Record Op infershape core function
using InferShapeFunc = std::vector<std::vector<int64_t>> (*)(
std::vector<std::vector<int64_t>> input_shapes);
template <typename F, F f>
struct InferShapeFuncImpl;
template <typename Return, typename... Args, Return (*impl_fn)(Args...)>
struct InferShapeFuncImpl<Return (*)(Args...), impl_fn> {
static Return InferShape(std::vector<std::vector<int64_t>> input_shapes) {
return InferShapeCallHelper<Args..., TypeTag<int>>::template InferShape<0>(
input_shapes);
}
private:
template <typename... RemainingArgs>
struct InferShapeCallHelper;
// only one type input: std::vector<int64_t>
template <typename... Tail>
struct InferShapeCallHelper<std::vector<int64_t>, Tail...> {
template <int in_idx, typename... PreviousArgs>
static Return InferShape(std::vector<std::vector<int64_t>> input_shapes,
const PreviousArgs&... pargs) {
std::vector<int64_t> arg = input_shapes[in_idx];
return InferShapeCallHelper<Tail...>::template InferShape<in_idx + 1>(
input_shapes, pargs..., arg);
}
};
// end: base template
template <typename T>
struct InferShapeCallHelper<TypeTag<T>> {
template <int in_idx>
static Return InferShape(std::vector<std::vector<int64_t>> input_shapes,
const Args&... args) {
return impl_fn(args...);
}
};
};
#define PD_INFER_SHAPE(...) \
::paddle::InferShapeFuncImpl<decltype(&__VA_ARGS__), &__VA_ARGS__>::InferShape
/////////////// InferDataType Function (PD_INFER_DTYPE) ///////////////
// Record Op Infer dtype core function
using InferDtypeFunc =
std::vector<DataType> (*)(std::vector<DataType> input_dtypes);
template <typename F, F f>
struct InferDtypeFuncImpl;
template <typename Return, typename... Args, Return (*impl_fn)(Args...)>
struct InferDtypeFuncImpl<Return (*)(Args...), impl_fn> {
static Return InferDtype(std::vector<DataType> input_dtypes) {
return InferDtypeCallHelper<Args..., TypeTag<int>>::template InferDtype<0>(
input_dtypes);
}
private:
template <typename... RemainingArgs>
struct InferDtypeCallHelper;
// Only one type input now: DataType
template <typename... Tail>
struct InferDtypeCallHelper<DataType, Tail...> {
template <int in_idx, typename... PreviousArgs>
static Return InferDtype(std::vector<DataType> input_dtypes,
const PreviousArgs&... pargs) {
DataType arg = input_dtypes[in_idx];
return InferDtypeCallHelper<Tail...>::template InferDtype<in_idx + 1>(
input_dtypes, pargs..., arg);
}
};
// end: base template
template <typename T>
struct InferDtypeCallHelper<TypeTag<T>> {
template <int in_idx>
static Return InferDtype(std::vector<DataType> input_dtypes,
const Args&... args) {
return impl_fn(args...);
}
};
};
#define PD_INFER_DTYPE(...) \
::paddle::InferDtypeFuncImpl<decltype(&__VA_ARGS__), &__VA_ARGS__>::InferDtype
////////////////////// Op Meta Info //////////////////////
class PD_DLL_DECL OpMetaInfo {
public:
explicit OpMetaInfo(const std::string& op_name) : name_(op_name) {}
// format: {"<name1>", "<name2>", ...}
OpMetaInfo& Inputs(std::vector<std::string>&& inputs);
// format: {"<name1>", "<name2>", ...}
OpMetaInfo& Outputs(std::vector<std::string>&& outputs);
// format: {"<name1>:<type1>", "<name1>:<type1>", ...}
OpMetaInfo& Attrs(std::vector<std::string>&& attrs);
// format: PD_KERNEL(...)
OpMetaInfo& SetKernelFn(KernelFunc&& func);
// format: PD_INFER_SHAPE(...)
OpMetaInfo& SetInferShapeFn(InferShapeFunc&& func);
// format: PD_INFER_DTYPE(...)
OpMetaInfo& SetInferDtypeFn(InferDtypeFunc&& func);
private:
friend class framework::OpMetaInfoHelper;
// 1. desc info
std::string name_;
std::vector<std::string> inputs_;
std::vector<std::string> outputs_;
std::vector<std::string> attrs_;
// 2. func info
KernelFunc kernel_fn_{nullptr};
InferShapeFunc infer_shape_fn_{nullptr};
InferDtypeFunc infer_dtype_fn_{nullptr};
};
//////////////// Op Meta Info Map /////////////////
class PD_DLL_DECL OpMetaInfoMap {
public:
// this function's impl should keep in header file.
// if move to cc file, meta info can not be added
// into map
static OpMetaInfoMap& Instance() {
static OpMetaInfoMap g_custom_op_meta_info_map;
return g_custom_op_meta_info_map;
}
std::vector<OpMetaInfo>& operator[](const std::string& name);
const std::unordered_map<std::string, std::vector<OpMetaInfo>>& GetMap()
const;
private:
OpMetaInfoMap() = default;
std::unordered_map<std::string, std::vector<OpMetaInfo>> map_;
PD_DISABLE_COPY_AND_ASSIGN(OpMetaInfoMap);
};
//////////////// Op Meta Info Builder /////////////////
class PD_DLL_DECL OpMetaInfoBuilder {
public:
explicit OpMetaInfoBuilder(std::string&& name, size_t index);
OpMetaInfoBuilder& Inputs(std::vector<std::string>&& inputs);
OpMetaInfoBuilder& Outputs(std::vector<std::string>&& outputs);
OpMetaInfoBuilder& Attrs(std::vector<std::string>&& attrs);
OpMetaInfoBuilder& SetKernelFn(KernelFunc func);
OpMetaInfoBuilder& SetInferShapeFn(InferShapeFunc func);
OpMetaInfoBuilder& SetInferDtypeFn(InferDtypeFunc func);
private:
// Forward Op name
std::string name_;
// ref current info ptr
OpMetaInfo* info_ptr_;
// The current op meta info index in vector
// - 0: op, 1: grad_op, 2: grad_grad_op
size_t index_;
};
/////////////////////// Op register API /////////////////////////
// For inference: compile directly with framework
// Call after PD_BUILD_OP(...)
void RegisterAllCustomOperator();
// Using this api to load compiled custom operator's dynamic library and
// register Custom
// Operator into it
void LoadCustomOperatorLib(const std::string& dso_name);
/////////////////////// Op register Macro /////////////////////////
#define PD_BUILD_OP(op_name) \
STATIC_ASSERT_GLOBAL_NAMESPACE( \
__reg_op__##op_name, "PD_BUILD_OP must be called in global namespace."); \
static ::paddle::OpMetaInfoBuilder __op_meta_info_##op_name##__ = \
::paddle::OpMetaInfoBuilder(#op_name, 0)
#define PD_BUILD_GRAD_OP(op_name) \
STATIC_ASSERT_GLOBAL_NAMESPACE( \
__reg_grad_op__##op_name, \
"PD_BUILD_GRAD_OP must be called in global namespace."); \
static ::paddle::OpMetaInfoBuilder __grad_op_meta_info_##op_name##__ = \
::paddle::OpMetaInfoBuilder(#op_name, 1)
#define PD_BUILD_DOUBLE_GRAD_OP(op_name) \
STATIC_ASSERT_GLOBAL_NAMESPACE( \
__reg_grad_grad_op__##op_name, \
"PD_BUILD_DOUBLE_GRAD_OP must be called in global namespace."); \
static ::paddle::OpMetaInfoBuilder __grad_grad_op_meta_info_##op_name##__ = \
::paddle::OpMetaInfoBuilder(#op_name, 2)
} // namespace paddle
///////////////////// C API ///////////////////
#ifdef __cplusplus
extern "C" {
#endif
#if defined(_WIN32)
// C-API to get global OpMetaInfoMap.
__declspec(dllexport) inline paddle::OpMetaInfoMap& PD_GetOpMetaInfoMap() {
return paddle::OpMetaInfoMap::Instance();
}
#endif // _WIN32
#ifdef __cplusplus
}
#endif


@ -0,0 +1,22 @@
/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
namespace paddle {
// TODO(yangjiabin): Add other place support in next PR
enum class PlaceType { kUNK = -1, kCPU, kGPU };
} // namespace paddle


@ -0,0 +1,125 @@
/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#pragma once
#include <memory>
#include <vector>
#ifdef PADDLE_WITH_CUDA
#include <cuda_runtime.h>
#endif
#include "ext_dll_decl.h" // NOLINT
#include "ext_dtype.h" // NOLINT
#include "ext_place.h" // NOLINT
namespace paddle {
namespace framework {
class CustomTensorUtils;
} // namespace framework
class StreamWrapper {
public:
StreamWrapper() : stream_(nullptr), is_stream_set_(false) {}
void SetStream(void* stream) {
stream_ = stream;
is_stream_set_ = true;
}
void* GetStream() const { return stream_; }
bool IsStreamSet() const { return is_stream_set_; }
private:
// cudaStream_t stream_;
void* stream_;
bool is_stream_set_;
};
class PD_DLL_DECL Tensor {
public:
/// \brief Construct a Tensor on target Place for CustomOp.
/// Generally it's only used for user to create Tensor.
explicit Tensor(const PlaceType& place);
/// \brief Reset the shape of the tensor.
/// Generally it's only used for the input tensor.
/// Reshape must be called before calling
/// mutable_data() or copy_to(const PlaceType& place)
/// \param shape The shape to set.
void reshape(const std::vector<int64_t>& shape);
/// \brief Get the memory pointer in CPU or GPU with
/// specific data type.
/// Please Reshape the tensor first before call this.
/// It's usually used to get input data pointer.
/// \param place The place of the tensor this will
/// override the original place of current tensor.
template <typename T>
T* mutable_data(const PlaceType& place);
/// \brief Get the memory pointer in CPU or GPU with
/// specific data type. Please Reshape the tensor
/// first before call this.It's usually used to get
/// input data pointer.
template <typename T>
T* mutable_data();
/// \brief Get the memory pointer directly.
/// It's usually used to get the output data pointer.
/// \return The tensor data buffer pointer.
template <typename T>
T* data() const;
/// \brief Copy the host memory to tensor data.
/// It's usually used to set the input tensor data.
/// \param PlaceType of target place, of which
/// the tensor will copy to.
template <typename T>
Tensor copy_to(const PlaceType& place) const;
/// \brief Return the shape of the Tensor.
std::vector<int64_t> shape() const;
/// \brief Return the data type of the tensor.
/// It's usually used to get the output tensor data type.
/// \return The data type of the tensor.
DataType type() const;
/// \brief Get the size of current tensor.
/// Use this method to get the size of tensor
/// \return int64_t.
int64_t size() const;
/// \brief Get the place of current tensor.
/// Use this method to get the place of tensor
/// \return Place.
const PlaceType& place() const;
/// \brief Cast datatype from one to another
Tensor cast(const DataType& target_type) const;
#ifdef PADDLE_WITH_CUDA
/// \bref Get current stream of Tensor
cudaStream_t stream() const;
#endif
private:
friend class framework::CustomTensorUtils;
mutable std::shared_ptr<void> tensor_;
mutable PlaceType place_;
StreamWrapper stream_;
};
} // namespace paddle

File diff suppressed because it is too large.


@ -0,0 +1,680 @@
// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
///
/// \file paddle_analysis_config.h
///
/// \brief Paddle Analysis Config API information
///
/// \author paddle-infer@baidu.com
/// \date 2020-03-20
/// \since 1.7
///
#pragma once
#include <cassert>
#include <map>
#include <memory>
#include <string>
#include <unordered_set>
#include <utility>
#include <vector>
#include "paddle_infer_declare.h" // NOLINT
/*! \file */
// Here we include some header files with relative paths, for that in deploy,
// the abstract path of this header file will be changed.
#include "paddle_api.h" // NOLINT
#include "paddle_pass_builder.h" // NOLINT
#ifdef PADDLE_WITH_MKLDNN
#include "paddle_mkldnn_quantizer_config.h" // NOLINT
#endif
namespace paddle {
class AnalysisPredictor;
struct MkldnnQuantizerConfig;
///
/// \brief configuration manager for AnalysisPredictor.
/// \since 1.7.0
///
/// AnalysisConfig manages configurations of AnalysisPredictor.
/// During inference procedure, there are many parameters(model/params path,
/// place of inference, etc.)
/// to be specified, and various optimizations(subgraph fusion, memory
/// optimization, TensorRT engine, etc.)
/// to be done. Users can manage these settings by creating and modifying an
/// AnalysisConfig,
/// and loading it into AnalysisPredictor.
///
struct PD_INFER_DECL AnalysisConfig {
AnalysisConfig() = default;
///
/// \brief Construct a new AnalysisConfig from another
/// AnalysisConfig.
///
/// \param[in] other another AnalysisConfig
///
explicit AnalysisConfig(const AnalysisConfig& other);
///
/// \brief Construct a new AnalysisConfig from a no-combined model.
///
/// \param[in] model_dir model directory of the no-combined model.
///
explicit AnalysisConfig(const std::string& model_dir);
///
/// \brief Construct a new AnalysisConfig from a combined model.
///
/// \param[in] prog_file model file path of the combined model.
/// \param[in] params_file params file path of the combined model.
///
explicit AnalysisConfig(const std::string& prog_file,
const std::string& params_file);
///
/// \brief Precision of inference in TensorRT.
///
enum class Precision {
kFloat32 = 0, ///< fp32
kInt8, ///< int8
kHalf, ///< fp16
};
///
/// \brief Set the no-combined model dir path.
///
/// \param model_dir model dir path.
///
void SetModel(const std::string& model_dir) { model_dir_ = model_dir; }
///
/// \brief Set the combined model with two specific paths for program and
/// parameters.
///
/// \param prog_file_path model file path of the combined model.
/// \param params_file_path params file path of the combined model.
///
void SetModel(const std::string& prog_file_path,
const std::string& params_file_path);
///
/// \brief Set the model file path of a combined model.
///
/// \param x model file path.
///
void SetProgFile(const std::string& x) { prog_file_ = x; }
///
/// \brief Set the params file path of a combined model.
///
/// \param x params file path.
///
void SetParamsFile(const std::string& x) { params_file_ = x; }
///
/// \brief Set the path of optimization cache directory.
///
/// \param opt_cache_dir the path of optimization cache directory.
///
void SetOptimCacheDir(const std::string& opt_cache_dir) {
opt_cache_dir_ = opt_cache_dir;
}
///
/// \brief Get the model directory path.
///
/// \return const std::string& The model directory path.
///
const std::string& model_dir() const { return model_dir_; }
///
/// \brief Get the program file path.
///
/// \return const std::string& The program file path.
///
const std::string& prog_file() const { return prog_file_; }
///
/// \brief Get the combined parameters file.
///
/// \return const std::string& The combined parameters file.
///
const std::string& params_file() const { return params_file_; }
// Padding related.
///
/// \brief Turn off FC Padding.
///
///
void DisableFCPadding();
///
/// \brief A boolean state telling whether fc padding is used.
///
/// \return bool Whether fc padding is used.
///
bool use_fc_padding() const { return use_fc_padding_; }
// GPU related.
///
/// \brief Turn on GPU.
///
/// \param memory_pool_init_size_mb initial size of the GPU memory pool in MB.
/// \param device_id device_id the GPU card to use (default is 0).
///
void EnableUseGpu(uint64_t memory_pool_init_size_mb, int device_id = 0);
///
/// \brief Turn off GPU.
///
///
void DisableGpu();
void EnableXpu(int l3_workspace_size = 0xfffc00);
///
/// \brief A boolean state telling whether the GPU is turned on.
///
/// \return bool Whether the GPU is turned on.
///
bool use_gpu() const { return use_gpu_; }
///
/// \brief A boolean state telling whether the XPU is turned on.
///
/// \return bool Whether the XPU is turned on.
///
bool use_xpu() const { return use_xpu_; }
///
/// \brief Get the GPU device id.
///
/// \return int The GPU device id.
///
int gpu_device_id() const { return gpu_device_id_; }
///
/// \brief Get the XPU device id.
///
/// \return int The XPU device id.
///
int xpu_device_id() const { return xpu_device_id_; }
///
/// \brief Get the initial size in MB of the GPU memory pool.
///
/// \return int The initial size in MB of the GPU memory pool.
///
int memory_pool_init_size_mb() const { return memory_pool_init_size_mb_; }
///
/// \brief Get the proportion of the initial memory pool size compared to the
/// device.
///
/// \return float The proportion of the initial memory pool size.
///
float fraction_of_gpu_memory_for_pool() const;
// CUDNN related.
///
/// \brief Turn on CUDNN.
///
///
void EnableCUDNN();
///
/// \brief A boolean state telling whether to use CUDNN.
///
/// \return bool Whether to use CUDNN.
///
bool cudnn_enabled() const { return use_cudnn_; }
///
/// \brief Control whether to perform IR graph optimization.
/// If turned off, the AnalysisConfig will act just like a NativeConfig.
///
/// \param x Whether the ir graph optimization is activated.
///
void SwitchIrOptim(int x = true) { enable_ir_optim_ = x; }
///
/// \brief A boolean state telling whether the ir graph optimization is
/// activated.
///
/// \return bool Whether to use ir graph optimization.
///
bool ir_optim() const { return enable_ir_optim_; }
///
/// \brief INTERNAL Determine whether to use the feed and fetch operators.
/// Just for internal development, not stable yet.
/// When ZeroCopyTensor is used, this should be turned off.
///
/// \param x Whether to use the feed and fetch operators.
///
void SwitchUseFeedFetchOps(int x = true) { use_feed_fetch_ops_ = x; }
///
/// \brief A boolean state telling whether to use the feed and fetch
/// operators.
///
/// \return bool Whether to use the feed and fetch operators.
///
bool use_feed_fetch_ops_enabled() const { return use_feed_fetch_ops_; }
///
/// \brief Control whether to specify the inputs' names.
/// The ZeroCopyTensor type has a name member, assign it with the
/// corresponding
/// variable name. This is used only when the input ZeroCopyTensors passed to
/// the
/// AnalysisPredictor.ZeroCopyRun() cannot follow the order in the training
/// phase.
///
/// \param x Whether to specify the inputs' names.
///
void SwitchSpecifyInputNames(bool x = true) { specify_input_name_ = x; }
///
/// \brief A boolean state telling whether the specified input ZeroCopyTensor
/// names should
/// be used to reorder the inputs in AnalysisPredictor.ZeroCopyRun().
///
/// \return bool Whether to specify the inputs' names.
///
bool specify_input_name() const { return specify_input_name_; }
///
/// \brief Turn on the TensorRT engine.
/// The TensorRT engine will accelerate some subgraphs in the original Fluid
/// computation graph. In some models such as ResNet50 and GoogLeNet,
/// it yields significant performance acceleration.
///
/// \param workspace_size The memory size(in byte) used for TensorRT
/// workspace.
/// \param max_batch_size The maximum batch size of this prediction task,
/// best set as small as possible to reduce performance loss.
/// \param min_subgraph_size The minimum TensorRT subgraph size needed; if a
/// subgraph is smaller than this, it will not be transferred to TensorRT
/// engine.
/// \param precision The precision used in TensorRT.
/// \param use_static Serialize optimization information to disk for reusing.
/// \param use_calib_mode Use TRT int8 calibration(post training
/// quantization).
///
///
void EnableTensorRtEngine(int workspace_size = 1 << 20,
int max_batch_size = 1, int min_subgraph_size = 3,
Precision precision = Precision::kFloat32,
bool use_static = false,
bool use_calib_mode = true);
///
/// \brief A boolean state telling whether the TensorRT engine is used.
///
/// \return bool Whether the TensorRT engine is used.
///
bool tensorrt_engine_enabled() const { return use_tensorrt_; }
///
/// \brief Set min, max, opt shape for TensorRT Dynamic shape mode.
/// \param min_input_shape The min input shape of the subgraph input.
/// \param max_input_shape The max input shape of the subgraph input.
/// \param opt_input_shape The opt input shape of the subgraph input.
/// \param disable_trt_plugin_fp16 Setting this parameter to true means that
/// TRT plugin will not run fp16.
///
void SetTRTDynamicShapeInfo(
std::map<std::string, std::vector<int>> min_input_shape,
std::map<std::string, std::vector<int>> max_input_shape,
std::map<std::string, std::vector<int>> optim_input_shape,
bool disable_trt_plugin_fp16 = false);
///
/// \brief Prevent ops running in Paddle-TRT
/// NOTE: just experimental, not an official stable API, and easily broken.
///
void Exp_DisableTensorRtOPs(const std::vector<std::string>& ops);
///
/// \brief Replace some TensorRT plugins with TensorRT OSS (
/// https://github.com/NVIDIA/TensorRT), with which some models' inference
/// may achieve higher performance. libnvinfer_plugin.so newer than
/// v7.2.1 is required.
///
void EnableTensorRtOSS();
///
/// \brief A boolean state telling whether to use the TensorRT OSS.
///
/// \return bool Whether to use the TensorRT OSS.
///
bool tensorrt_oss_enabled() { return trt_use_oss_; }
///
/// \brief Enable TensorRT DLA
/// \param dla_core ID of DLACore, which should be 0, 1,
/// ..., IBuilder.getNbDLACores() - 1
///
void EnableTensorRtDLA(int dla_core = 0);
///
/// \brief A boolean state telling whether to use the TensorRT DLA.
///
/// \return bool Whether to use the TensorRT DLA.
///
bool tensorrt_dla_enabled() { return trt_use_dla_; }
///
/// \brief Turn on the usage of Lite sub-graph engine.
///
/// \param precision_mode Precision used in the Lite sub-graph engine.
/// \param passes_filter Set the passes used in Lite sub-graph engine.
/// \param ops_filter Operators not supported by Lite.
///
void EnableLiteEngine(
AnalysisConfig::Precision precision_mode = Precision::kFloat32,
bool zero_copy = false,
const std::vector<std::string>& passes_filter = {},
const std::vector<std::string>& ops_filter = {});
///
/// \brief A boolean state indicating whether the Lite sub-graph engine is
/// used.
///
/// \return bool whether the Lite sub-graph engine is used.
///
bool lite_engine_enabled() const { return use_lite_; }
///
/// \brief Control whether to debug IR graph analysis phase.
/// This will generate DOT files for visualizing the computation graph after
/// each analysis pass is applied.
///
/// \param x whether to debug IR graph analysis phase.
///
void SwitchIrDebug(int x = true);
///
/// \brief Turn on MKLDNN.
///
///
void EnableMKLDNN();
///
/// \brief Set the cache capacity of different input shapes for MKLDNN.
/// Default value 0 means not caching any shape.
/// Please see MKL-DNN Data Caching Design Document:
/// https://github.com/PaddlePaddle/FluidDoc/blob/develop/doc/fluid/design/mkldnn/caching/caching.md
///
/// \param capacity The cache capacity.
///
void SetMkldnnCacheCapacity(int capacity);
///
/// \brief A boolean state telling whether to use the MKLDNN.
///
/// \return bool Whether to use the MKLDNN.
///
bool mkldnn_enabled() const { return use_mkldnn_; }
///
/// \brief Set the number of cpu math library threads.
///
/// \param cpu_math_library_num_threads The number of cpu math library
/// threads.
///
void SetCpuMathLibraryNumThreads(int cpu_math_library_num_threads);
///
/// \brief An int state telling how many threads are used in the CPU math
/// library.
///
/// \return int The number of threads used in the CPU math library.
///
int cpu_math_library_num_threads() const {
return cpu_math_library_num_threads_;
}
///
/// \brief Transform the AnalysisConfig to NativeConfig.
///
/// \return NativeConfig The NativeConfig transformed.
///
NativeConfig ToNativeConfig() const;
///
/// \brief Specify the operator type list to use MKLDNN acceleration.
///
/// \param op_list The operator type list.
///
void SetMKLDNNOp(std::unordered_set<std::string> op_list) {
mkldnn_enabled_op_types_ = op_list;
}
///
/// \brief Turn on MKLDNN quantization.
///
///
void EnableMkldnnQuantizer();
///
/// \brief Turn on MKLDNN bfloat16.
///
///
void EnableMkldnnBfloat16();
///
/// \brief A boolean state telling whether to use the MKLDNN Bfloat16.
///
/// \return bool Whether to use the MKLDNN Bfloat16.
///
bool mkldnn_bfloat16_enabled() const { return use_mkldnn_bfloat16_; }
/// \brief Specify the operator type list to use Bfloat16 acceleration.
///
/// \param op_list The operator type list.
///
void SetBfloat16Op(std::unordered_set<std::string> op_list) {
bfloat16_enabled_op_types_ = op_list;
}
///
/// \brief A boolean state telling whether the thread local CUDA stream is
/// enabled.
///
/// \return bool Whether the thread local CUDA stream is enabled.
///
bool thread_local_stream_enabled() const { return thread_local_stream_; }
///
/// \brief A boolean state telling whether the MKLDNN quantization is enabled.
///
/// \return bool Whether the MKLDNN quantization is enabled.
///
bool mkldnn_quantizer_enabled() const { return use_mkldnn_quantizer_; }
///
/// \brief Get MKLDNN quantizer config.
///
/// \return MkldnnQuantizerConfig* MKLDNN quantizer config.
///
MkldnnQuantizerConfig* mkldnn_quantizer_config() const;
///
/// \brief Specify the memory buffer of program and parameter.
/// Used when model and params are loaded directly from memory.
///
/// \param prog_buffer The memory buffer of program.
/// \param prog_buffer_size The size of the model data.
/// \param params_buffer The memory buffer of the combined parameters file.
/// \param params_buffer_size The size of the combined parameters data.
///
void SetModelBuffer(const char* prog_buffer, size_t prog_buffer_size,
const char* params_buffer, size_t params_buffer_size);
///
/// \brief A boolean state telling whether the model is set from the CPU
/// memory.
///
/// \return bool Whether model and params are loaded directly from memory.
///
bool model_from_memory() const { return model_from_memory_; }
///
/// \brief Turn on memory optimize
/// NOTE still in development.
///
void EnableMemoryOptim();
///
/// \brief A boolean state telling whether the memory optimization is
/// activated.
///
/// \return bool Whether the memory optimization is activated.
///
bool enable_memory_optim() const;
///
/// \brief Turn on profiling report.
/// If not turned on, no profiling report will be generated.
///
void EnableProfile();
///
/// \brief A boolean state telling whether the profiler is activated.
///
/// \return bool Whether the profiler is activated.
///
bool profile_enabled() const { return with_profile_; }
///
/// \brief Mute all logs in Paddle inference.
///
void DisableGlogInfo();
///
/// \brief A boolean state telling whether logs in Paddle inference are muted.
///
/// \return bool Whether logs in Paddle inference are muted.
///
bool glog_info_disabled() const { return !with_glog_info_; }
///
/// \brief Set the AnalysisConfig to be invalid.
/// This is to ensure that an AnalysisConfig can only be used in one
/// AnalysisPredictor.
///
void SetInValid() const { is_valid_ = false; }
///
/// \brief A boolean state telling whether the AnalysisConfig is valid.
///
/// \return bool Whether the AnalysisConfig is valid.
///
bool is_valid() const { return is_valid_; }
friend class ::paddle::AnalysisPredictor;
///
/// \brief Get a pass builder for customize the passes in IR analysis phase.
/// NOTE: Just for developers, not an official API, and easily broken.
///
///
PassStrategy* pass_builder() const;
///
/// \brief Enable the GPU multi-computing stream feature.
/// NOTE: The current behavior of this interface is to bind the computation
/// stream to the thread, and this behavior may be changed in the future.
///
void EnableGpuMultiStream();
void PartiallyRelease();
protected:
// Update the config.
void Update();
std::string SerializeInfoCache();
protected:
// Model paths.
std::string model_dir_;
mutable std::string prog_file_;
mutable std::string params_file_;
// GPU related.
bool use_gpu_{false};
int gpu_device_id_{0};
int xpu_device_id_{0};
uint64_t memory_pool_init_size_mb_{100}; // initial size is 100MB.
bool use_cudnn_{false};
// Padding related
bool use_fc_padding_{true};
// TensorRT related.
bool use_tensorrt_{false};
// For workspace_size, refer it from here:
// https://docs.nvidia.com/deeplearning/sdk/tensorrt-developer-guide/index.html#troubleshooting
int tensorrt_workspace_size_{1 << 30};
// While TensorRT allows an engine optimized for a given max batch size
// to run at any smaller size, the performance for those smaller
// sizes may not be as well-optimized. Therefore, the max batch size is
// best set equal to the runtime batch size.
int tensorrt_max_batchsize_{1};
// We transform the Ops that can be converted into TRT layer in the model,
// and aggregate these Ops into subgraphs for TRT execution.
// We set this variable to control the minimum number of nodes in the
// subgraph, 3 as default value.
int tensorrt_min_subgraph_size_{3};
Precision tensorrt_precision_mode_{Precision::kFloat32};
bool trt_use_static_engine_{false};
bool trt_use_calib_mode_{true};
bool trt_use_oss_{false};
bool trt_use_dla_{false};
int trt_dla_core_{0};
std::map<std::string, std::vector<int>> min_input_shape_{};
std::map<std::string, std::vector<int>> max_input_shape_{};
std::map<std::string, std::vector<int>> optim_input_shape_{};
std::vector<std::string> trt_disabled_ops_{};
bool disable_trt_plugin_fp16_{false};
// memory reuse related.
bool enable_memory_optim_{false};
bool use_mkldnn_{false};
std::unordered_set<std::string> mkldnn_enabled_op_types_;
bool model_from_memory_{false};
bool enable_ir_optim_{true};
bool use_feed_fetch_ops_{true};
bool ir_debug_{false};
bool specify_input_name_{false};
int cpu_math_library_num_threads_{1};
bool with_profile_{false};
bool with_glog_info_{true};
// A runtime cache, shouldn't be transferred to others.
std::string serialized_info_cache_;
mutable std::unique_ptr<PassStrategy> pass_builder_;
bool use_lite_{false};
std::vector<std::string> lite_passes_filter_;
std::vector<std::string> lite_ops_filter_;
Precision lite_precision_mode_;
bool lite_zero_copy_;
bool thread_local_stream_{false};
bool use_xpu_{false};
int xpu_l3_workspace_size_;
// mkldnn related.
int mkldnn_cache_capacity_{0};
bool use_mkldnn_quantizer_{false};
std::shared_ptr<MkldnnQuantizerConfig> mkldnn_quantizer_config_;
bool use_mkldnn_bfloat16_{false};
std::unordered_set<std::string> bfloat16_enabled_op_types_;
// If the config is already used on a predictor, it becomes invalid.
// Any config can only be used with one predictor.
// Variables held by config can take up a lot of memory in some cases.
// So we release the memory when the predictor is set up.
mutable bool is_valid_{true};
std::string opt_cache_dir_;
};
} // namespace paddle
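// --------------------------------------------------------------------------
// Illustrative usage sketch (editor's addition, not part of the upstream
// header): a minimal CPU-oriented setup using the interfaces declared above.
// The model path and thread count are placeholders.
inline void ConfigureForCpu(paddle::AnalysisConfig* config) {
  config->SetModel("/path/to/model_dir");  // hypothetical no-combined model dir
  config->DisableGpu();                    // run inference on CPU
  config->SwitchIrOptim(true);             // enable IR graph optimization
  config->EnableMKLDNN();                  // use MKLDNN kernels where available
  config->SetCpuMathLibraryNumThreads(4);  // CPU math library threads
}
// --------------------------------------------------------------------------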

View File

@ -0,0 +1,457 @@
// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
/*! \file paddle_api.h
*/
/*! \mainpage Paddle Inference APIs
* \section intro_sec Introduction
 * The Paddle inference library aims to offer a high-performance inference SDK
* for Paddle users.
*/
#include <cassert>
#include <map>
#include <memory>
#include <string>
#include <vector>
#include "crypto/cipher.h"
#include "paddle_infer_declare.h" // NOLINT
/*! \namespace paddle
*/
namespace paddle {
/// \brief Paddle data type.
enum PaddleDType {
FLOAT32,
INT64,
INT32,
UINT8,
// TODO(Superjomn) support more data types if needed.
};
/// \brief Memory manager for PaddleTensor.
///
/// The PaddleBuf holds a buffer for data input or output. The memory can be
/// allocated by user or by PaddleBuf itself, but in any case, the PaddleBuf
/// should be reused for better performance.
///
/// For user allocated memory, the following API can be used:
/// - PaddleBuf(void* data, size_t length) to set an external memory by
/// specifying the memory address and length.
/// - Reset(void* data, size_t length) to reset the PaddleBuf with an external
/// memory.
/// ATTENTION, for user allocated memory, deallocation should be done by users
/// externally after the program finishes. The PaddleBuf won't do any allocation
/// or deallocation.
///
/// To have the PaddleBuf allocate and manage the memory:
/// - PaddleBuf(size_t length) will allocate a memory of size `length`.
/// - Resize(size_t length) resizes the memory to no less than `length`.
/// ATTENTION:
/// if the allocated memory is larger than `length`, nothing will be done.
///
/// Usage:
///
/// Let PaddleBuf manage the memory internally.
/// \code{cpp}
/// const int num_elements = 128;
/// PaddleBuf buf(num_elements * sizeof(float));
/// \endcode
///
/// Or
/// \code{cpp}
/// PaddleBuf buf;
/// buf.Resize(num_elements * sizeof(float));
/// \endcode
/// Works exactly the same.
///
/// One can also make the `PaddleBuf` use the external memory.
/// \code{cpp}
/// PaddleBuf buf;
/// void* external_memory = new float[num_elements];
/// buf.Reset(external_memory, num_elements*sizeof(float));
/// ...
/// delete[] external_memory; // manage the memory lifetime outside.
/// \endcode
///
class PD_INFER_DECL PaddleBuf {
public:
///
/// \brief PaddleBuf allocate memory internally, and manage it.
///
/// \param[in] length The length of data.
///
explicit PaddleBuf(size_t length)
: data_(new char[length]), length_(length), memory_owned_(true) {}
///
/// \brief Set external memory, the PaddleBuf won't manage it.
///
/// \param[in] data The start address of the external memory.
/// \param[in] length The length of data.
///
PaddleBuf(void* data, size_t length)
: data_(data), length_(length), memory_owned_{false} {}
///
/// \brief Copy only available when memory is managed externally.
///
/// \param[in] other another `PaddleBuf`
///
explicit PaddleBuf(const PaddleBuf& other);
///
/// \brief Resize the memory.
///
/// \param[in] length The length of data.
///
void Resize(size_t length);
///
/// \brief Reset to external memory, with address and length set.
///
/// \param[in] data The start address of the external memory.
/// \param[in] length The length of data.
///
void Reset(void* data, size_t length);
///
/// \brief Tell whether the buffer is empty.
///
bool empty() const { return length_ == 0; }
///
/// \brief Get the data's memory address.
///
void* data() const { return data_; }
///
/// \brief Get the memory length.
///
size_t length() const { return length_; }
~PaddleBuf() { Free(); }
PaddleBuf& operator=(const PaddleBuf&);
PaddleBuf& operator=(PaddleBuf&&);
PaddleBuf() = default;
PaddleBuf(PaddleBuf&& other);
private:
void Free();
void* data_{nullptr}; ///< pointer to the data memory.
size_t length_{0}; ///< number of memory bytes.
bool memory_owned_{true};
};
///
/// \brief Basic input and output data structure for PaddlePredictor.
///
struct PD_INFER_DECL PaddleTensor {
PaddleTensor() = default;
std::string name; ///< variable name.
std::vector<int> shape;
PaddleBuf data; ///< blob of data.
PaddleDType dtype;
std::vector<std::vector<size_t>> lod; ///< Tensor+LoD equals LoDTensor
};
enum class PaddlePlace { kUNK = -1, kCPU, kGPU, kXPU };
/// \brief Represents an n-dimensional array of values.
/// The ZeroCopyTensor is used to store the input or output of the network.
/// Zero copy means that the tensor supports direct copy of host or device data
/// to device,
/// eliminating additional CPU copy. ZeroCopyTensor is only used in the
/// AnalysisPredictor.
/// It is obtained through PaddlePredictor::GetInputTensor()
/// and PaddlePredictor::GetOutputTensor() interface.
class PD_INFER_DECL ZeroCopyTensor {
public:
/// \brief Reset the shape of the tensor.
/// Generally it's only used for the input tensor.
/// Reshape must be called before calling mutable_data() or copy_from_cpu()
/// \param shape The shape to set.
void Reshape(const std::vector<int>& shape);
/// \brief Get the memory pointer in CPU or GPU with specific data type.
/// Please Reshape the tensor before calling this.
/// It's usually used to get input data pointer.
/// \param place The place of the tensor.
template <typename T>
T* mutable_data(PaddlePlace place);
/// \brief Get the memory pointer directly.
/// It's usually used to get the output data pointer.
/// \param[out] place To get the device type of the tensor.
/// \param[out] size To get the data size of the tensor.
/// \return The tensor data buffer pointer.
template <typename T>
T* data(PaddlePlace* place, int* size) const;
/// \brief Copy the host memory to tensor data.
/// It's usually used to set the input tensor data.
/// \param data The pointer of the data, from which the tensor will copy.
template <typename T>
void copy_from_cpu(const T* data);
/// \brief Copy the tensor data to the host memory.
/// It's usually used to get the output tensor data.
/// \param[out] data The tensor will copy the data to the address.
template <typename T>
void copy_to_cpu(T* data);
/// \brief Return the shape of the Tensor.
std::vector<int> shape() const;
/// \brief Set lod info of the tensor.
/// More about LOD can be seen here:
/// https://www.paddlepaddle.org.cn/documentation/docs/zh/beginners_guide/basic_concept/lod_tensor.html#lodtensor
/// \param x the lod info.
void SetLoD(const std::vector<std::vector<size_t>>& x);
/// \brief Return the lod info of the tensor.
std::vector<std::vector<size_t>> lod() const;
/// \brief Return the name of the tensor.
const std::string& name() const { return name_; }
void SetPlace(PaddlePlace place, int device = -1) {
place_ = place;
device_ = device;
}
/// \brief Return the data type of the tensor.
/// It's usually used to get the output tensor data type.
/// \return The data type of the tensor.
PaddleDType type() const;
protected:
explicit ZeroCopyTensor(void* scope) : scope_{scope} {}
void SetName(const std::string& name) { name_ = name; }
void* FindTensor() const;
private:
std::string name_;
bool input_or_output_;
friend class AnalysisPredictor;
void* scope_{nullptr};
// The corresponding tensor pointer inside Paddle workspace is cached for
// performance.
mutable void* tensor_{nullptr};
PaddlePlace place_;
PaddleDType dtype_;
int device_;
};
/// \brief A Predictor for executing inference on a model.
/// Base class for AnalysisPredictor and NativePaddlePredictor.
class PD_INFER_DECL PaddlePredictor {
public:
struct Config;
PaddlePredictor() = default;
PaddlePredictor(const PaddlePredictor&) = delete;
PaddlePredictor& operator=(const PaddlePredictor&) = delete;
/// \brief This interface takes input and runs the network.
/// There are redundant copies of data between hosts in this operation,
/// so it is recommended to use the ZeroCopyRun interface instead.
/// \param[in] inputs A list of PaddleTensor as the input to the network.
/// \param[out] output_data Pointer to the tensor list, which holds the output
/// PaddleTensor
/// \param[in] batch_size This setting has been discarded and can be ignored.
/// \return Whether the run is successful
virtual bool Run(const std::vector<PaddleTensor>& inputs,
std::vector<PaddleTensor>* output_data,
int batch_size = -1) = 0;
/// \brief Used to get the name of the network input.
/// Inherited by AnalysisPredictor; only used in ZeroCopy scenarios.
/// \return Input tensor names.
virtual std::vector<std::string> GetInputNames() { return {}; }
/// \brief Get the input shape of the model.
/// \return A map contains all the input names and shape defined in the model.
virtual std::map<std::string, std::vector<int64_t>> GetInputTensorShape() {
return {};
}
/// \brief Used to get the name of the network output.
/// Inherited by AnalysisPredictor; only used in ZeroCopy scenarios.
/// \return Output tensor names.
virtual std::vector<std::string> GetOutputNames() { return {}; }
/// \brief Get the input ZeroCopyTensor by name.
/// Inherited by AnalysisPredictor; only used in ZeroCopy scenarios.
/// The name is obtained from the GetInputNames() interface.
/// \param name The input tensor name.
/// \return Return the corresponding input ZeroCopyTensor.
virtual std::unique_ptr<ZeroCopyTensor> GetInputTensor(
const std::string& name) {
return nullptr;
}
/// \brief Get the output ZeroCopyTensor by name.
/// Inherited by AnalysisPredictor; only used in ZeroCopy scenarios.
/// The name is obtained from the GetOutputNames() interface.
/// \param name The output tensor name.
/// \return Return the corresponding output ZeroCopyTensor.
virtual std::unique_ptr<ZeroCopyTensor> GetOutputTensor(
const std::string& name) {
return nullptr;
}
/// \brief Run the network with zero-copied inputs and outputs.
/// Inherited by AnalysisPredictor and only used in ZeroCopy scenarios.
/// This saves the IO copy for transferring inputs and outputs to the predictor
/// workspace
/// and gives some performance improvement.
/// To use it, one should call the AnalysisConfig.SwitchUseFeedFetchOp(false)
/// and then use the `GetInputTensor` and `GetOutputTensor`
/// to directly write or read the input/output tensors.
/// \return Whether the run is successful
virtual bool ZeroCopyRun() { return false; }
///
/// \brief Clear the intermediate tensors of the predictor
///
///
virtual void ClearIntermediateTensor() {}
///
/// \brief Release all tmp tensor to compress the size of the memory pool.
/// The memory pool is considered to be composed of a list of chunks, if
/// the chunk is not occupied, it can be released.
///
/// \return Number of bytes released. It may be smaller than the actual
/// released memory, because part of the memory is not managed by the
/// MemoryPool.
///
virtual uint64_t TryShrinkMemory() { return 0; }
/// \brief Clone an existing predictor
/// When using clone, the same network will be created,
/// and the parameters between them are shared.
/// \return unique_ptr which contains the pointer of predictor
virtual std::unique_ptr<PaddlePredictor> Clone() = 0;
/// \brief Destroy the Predictor.
virtual ~PaddlePredictor() = default;
virtual std::string GetSerializedProgram() const {
assert(false); // Force raise error.
return "NotImplemented";
}
/// \brief Base class for NativeConfig and AnalysisConfig.
struct Config {
std::string model_dir; /*!< path to the model directory. */
};
};
///
/// \brief configuration manager for `NativePredictor`.
///
/// `NativeConfig` manages configurations of `NativePredictor`.
/// During inference procedure, there are many parameters(model/params path,
/// place of inference, etc.)
///
struct PD_INFER_DECL NativeConfig : public PaddlePredictor::Config {
NativeConfig();
/// GPU related fields.
bool use_xpu{false};
bool use_gpu{false};
int device{0};
float fraction_of_gpu_memory{
-1.f}; ///< Change to a float in (0,1] if needed.
std::string prog_file;
std::string
param_file; ///< Specify the exact path of program and parameter files.
bool specify_input_name{false}; ///< Specify the variable's name of each
///< input if input tensors don't follow the
///< `feeds` and `fetches` of the phase
///< `save_inference_model`.
/// Set and get the number of cpu math library threads.
void SetCpuMathLibraryNumThreads(int cpu_math_library_num_threads) {
cpu_math_library_num_threads_ = cpu_math_library_num_threads;
}
int cpu_math_library_num_threads() const {
return cpu_math_library_num_threads_;
}
protected:
int cpu_math_library_num_threads_{1}; ///< number of cpu math library (such
///< as MKL, OpenBlas) threads for each
///< instance.
};
///
/// \brief A factory to help create different predictors.
///
/// Usage:
///
/// \code{.cpp}
/// NativeConfig config;
/// ... // change the configs.
/// auto native_predictor = CreatePaddlePredictor(config);
/// \endcode
///
/// FOR EXTENSION DEVELOPER:
/// Different predictors are designated by config type. Similar configs can be
/// merged, but there shouldn't be a huge config containing different fields for
/// more than one kind of predictors.
////
template <typename ConfigT>
std::unique_ptr<PaddlePredictor> CreatePaddlePredictor(const ConfigT& config);
struct AnalysisConfig;
struct NativeConfig;
struct DemoConfig;
template <>
PD_INFER_DECL std::unique_ptr<PaddlePredictor>
CreatePaddlePredictor<AnalysisConfig>(const AnalysisConfig& config);
template <>
PD_INFER_DECL std::unique_ptr<PaddlePredictor>
CreatePaddlePredictor<NativeConfig>(const NativeConfig& config);
template <>
PD_INFER_DECL std::unique_ptr<PaddlePredictor>
CreatePaddlePredictor<DemoConfig>(const DemoConfig& config);
/// NOTE The following APIs are too trivial; we will discard them in future
/// versions.
///
enum class PaddleEngineKind {
kNative = 0, ///< Use the native Fluid facility.
kAutoMixedTensorRT, ///< Automatically mix Fluid with TensorRT.
kAnalysis, ///< More optimization.
};
template <typename ConfigT, PaddleEngineKind engine>
PD_INFER_DECL std::unique_ptr<PaddlePredictor> CreatePaddlePredictor(
const ConfigT& config);
template <>
PD_INFER_DECL std::unique_ptr<PaddlePredictor> CreatePaddlePredictor<
NativeConfig, PaddleEngineKind::kNative>(const NativeConfig& config);
template <>
PD_INFER_DECL std::unique_ptr<PaddlePredictor> CreatePaddlePredictor<
AnalysisConfig, PaddleEngineKind::kAnalysis>(const AnalysisConfig& config);
PD_INFER_DECL int PaddleDtypeSize(PaddleDType dtype);
PD_INFER_DECL std::string get_version();
PD_INFER_DECL std::string UpdateDllFlag(const char* name, const char* value);
PD_INFER_DECL std::shared_ptr<framework::Cipher> MakeCipher(
const std::string& config_file);
} // namespace paddle
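// --------------------------------------------------------------------------
// Illustrative usage sketch (editor's addition, not part of the upstream
// header): feeding a PaddleTensor backed by externally owned memory to a
// NativeConfig-based predictor, following the PaddleBuf comments above.
// Paths, the input name "x", and shapes are placeholders.
inline bool RunNative(float* external, int n) {
  paddle::NativeConfig config;
  config.model_dir = "/path/to/model_dir";  // hypothetical model directory
  config.use_gpu = false;

  std::vector<paddle::PaddleTensor> inputs(1);
  inputs[0].name = "x";
  inputs[0].shape = {1, n};
  inputs[0].dtype = paddle::PaddleDType::FLOAT32;
  inputs[0].data.Reset(external, n * sizeof(float));  // PaddleBuf does not own it

  auto predictor = paddle::CreatePaddlePredictor(config);
  std::vector<paddle::PaddleTensor> outputs;
  return predictor->Run(inputs, &outputs);  // batch_size argument is discarded
}
// --------------------------------------------------------------------------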

View File

@ -0,0 +1,27 @@
// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#if defined(_WIN32)
#ifndef PD_INFER_DECL
#ifdef PADDLE_DLL_INFERENCE
#define PD_INFER_DECL __declspec(dllexport)
#else
#define PD_INFER_DECL __declspec(dllimport)
#endif // PADDLE_DLL_INFERENCE
#endif // PD_INFER_DECL
#else
#define PD_INFER_DECL __attribute__((visibility("default")))
#endif // _WIN32
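// --------------------------------------------------------------------------
// Illustrative sketch (editor's addition, not part of the upstream header):
// how the macro above is typically applied. Exported symbols are annotated
// with PD_INFER_DECL so they get dllexport/dllimport on Windows and default
// visibility elsewhere. The class name is hypothetical.
class PD_INFER_DECL ExportedExample {
 public:
  int Value() const { return 42; }
};
// --------------------------------------------------------------------------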

View File

@ -0,0 +1,311 @@
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
/*
* This file contains the definition of a simple Inference API for Paddle.
*
 * ATTENTION: It requires some C++11 features; for lower C++ versions or C, we
* might release another API.
*/
#pragma once
#include <cassert>
#include <map>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "paddle_analysis_config.h" // NOLINT
#include "paddle_api.h" // NOLINT
///
/// \file paddle_inference_api.h
///
/// \brief Paddle Inference API
///
/// \author paddle-infer@baidu.com
/// \date 2020-09-01
/// \since 2.0.0-beta
///
namespace paddle_infer {
using DataType = paddle::PaddleDType;
using PlaceType = paddle::PaddlePlace;
using PrecisionType = paddle::AnalysisConfig::Precision;
using Config = paddle::AnalysisConfig;
///
/// \class Tensor
///
/// \brief Represents an n-dimensional array of values.
/// The Tensor is used to store the input or output of the network.
/// It is obtained through Predictor::GetInputHandle()
/// and Predictor::GetOutputHandle() interface.
///
class PD_INFER_DECL Tensor {
public:
  // Can only be created by predictor->GetInputHandle(const std::string& name)
  // or predictor->GetOutputHandle(const std::string& name)
Tensor() = delete;
explicit Tensor(std::unique_ptr<paddle::ZeroCopyTensor>&& tensor)
: tensor_(std::move(tensor)) {}
///
/// \brief Reset the shape of the tensor.
/// Generally it's only used for the input tensor.
/// Reshape must be called before calling mutable_data() or CopyFromCpu()
/// \param shape The shape to set.
///
void Reshape(const std::vector<int>& shape);
///
/// \brief Copy the host memory to tensor data.
/// It's usually used to set the input tensor data.
/// \param data The pointer of the data, from which the tensor will copy.
///
template <typename T>
void CopyFromCpu(const T* data);
///
/// \brief Get the memory pointer in CPU or GPU with specific data type.
/// Please Reshape the tensor before calling this.
/// It's usually used to get input data pointer.
/// \param place The place of the tensor.
/// \return The tensor data buffer pointer.
///
template <typename T>
T* mutable_data(PlaceType place);
///
/// \brief Copy the tensor data to the host memory.
/// It's usually used to get the output tensor data.
/// \param[out] data The tensor will copy the data to the address.
///
template <typename T>
void CopyToCpu(T* data);
///
/// \brief Get the memory pointer directly.
/// It's usually used to get the output data pointer.
/// \param[out] place To get the device type of the tensor.
/// \param[out] size To get the data size of the tensor.
/// \return The tensor data buffer pointer.
///
template <typename T>
T* data(PlaceType* place, int* size) const;
///
/// \brief Set lod info of the tensor.
/// More about LOD can be seen here:
/// https://www.paddlepaddle.org.cn/documentation/docs/zh/beginners_guide/basic_concept/lod_tensor.html#lodtensor
/// \param x the lod info.
///
void SetLoD(const std::vector<std::vector<size_t>>& x);
/// \brief Return the lod info of the tensor.
std::vector<std::vector<size_t>> lod() const;
/// \brief Return the data type of the tensor.
/// It's usually used to get the output tensor data type.
/// \return The data type of the tensor.
DataType type() const;
/// \brief Return the shape of the Tensor.
std::vector<int> shape() const;
/// \brief Return the name of the tensor.
const std::string& name() const;
private:
std::unique_ptr<paddle::ZeroCopyTensor> tensor_;
};
///
/// \class Predictor
///
/// \brief Predictor is the interface for model prediction.
///
/// The predictor has the following typical uses:
///
/// Get predictor
/// \code{cpp}
/// auto predictor = CreatePredictor(config);
/// \endcode
///
/// Get input or output names
/// \code{cpp}
/// auto input_names = predictor->GetInputNames();
/// auto output_names = predictor->GetOutputNames();
/// \endcode
///
/// Get input or output handle
/// \code{cpp}
/// auto input_t = predictor->GetInputHandle(input_names[0]);
/// auto output_t = predictor->GetOutputHandle(output_names[0]);
/// \endcode
///
/// Run predictor
/// \code{cpp}
/// predictor->Run();
/// \endcode
///
class PD_INFER_DECL Predictor {
public:
Predictor() = delete;
~Predictor() {}
// Use for clone
explicit Predictor(std::unique_ptr<paddle::PaddlePredictor>&& pred)
: predictor_(std::move(pred)) {}
///
/// \brief Construct a new Predictor object
///
/// \param[in] Config config
///
explicit Predictor(const Config& config);
///
/// \brief Get the input names
///
/// \return input names
///
std::vector<std::string> GetInputNames();
///
/// \brief Get the Input Tensor object
///
/// \param[in] name input name
/// \return input tensor
///
std::unique_ptr<Tensor> GetInputHandle(const std::string& name);
///
/// \brief Run the prediction engine
///
/// \return Whether the function executed successfully
///
bool Run();
///
/// \brief Get the output names
///
/// \return output names
///
std::vector<std::string> GetOutputNames();
///
/// \brief Get the Output Tensor object
///
/// \param[in] name output name
/// \return output tensor
///
std::unique_ptr<Tensor> GetOutputHandle(const std::string& name);
///
/// \brief Clone to get a new predictor. Thread safe.
///
/// \return get a new predictor
///
std::unique_ptr<Predictor> Clone();
/// \brief Clear the intermediate tensors of the predictor
void ClearIntermediateTensor();
///
/// \brief Release all tmp tensor to compress the size of the memory pool.
/// The memory pool is considered to be composed of a list of chunks, if
/// the chunk is not occupied, it can be released.
///
/// \return Number of bytes released. It may be smaller than the actual
/// released memory, because part of the memory is not managed by the
/// MemoryPool.
///
uint64_t TryShrinkMemory();
private:
std::unique_ptr<paddle::PaddlePredictor> predictor_;
};
///
/// \brief A factory to help create predictors.
///
/// Usage:
///
/// \code{.cpp}
/// Config config;
/// ... // change the configs.
/// auto predictor = CreatePredictor(config);
/// \endcode
///
PD_INFER_DECL std::shared_ptr<Predictor> CreatePredictor(
const Config& config); // NOLINT
PD_INFER_DECL int GetNumBytesOfDataType(DataType dtype);
PD_INFER_DECL std::string GetVersion();
PD_INFER_DECL std::string UpdateDllFlag(const char* name, const char* value);
template <typename T>
void Tensor::CopyFromCpu(const T* data) {
tensor_->copy_from_cpu<T>(data);
}
template <typename T>
void Tensor::CopyToCpu(T* data) {
return tensor_->copy_to_cpu<T>(data);
}
template <typename T>
T* Tensor::mutable_data(PlaceType place) {
return tensor_->mutable_data<T>(place);
}
template <typename T>
T* Tensor::data(PlaceType* place, int* size) const {
return tensor_->data<T>(place, size);
}
} // namespace paddle_infer
namespace paddle_infer {
namespace services {
///
/// \class PredictorPool
///
/// \brief PredictorPool is a simple encapsulation of Predictor, suitable for
/// use in multi-threaded situations. According to the thread id, the
/// corresponding Predictor is taken out from PredictorPool to complete the
/// prediction.
///
class PD_INFER_DECL PredictorPool {
public:
PredictorPool() = delete;
PredictorPool(const PredictorPool&) = delete;
PredictorPool& operator=(const PredictorPool&) = delete;
/// \brief Construct the predictor pool with \param size predictor instances.
explicit PredictorPool(const Config& config, size_t size = 1);
/// \brief Get \param id-th predictor.
Predictor* Retrive(size_t idx);
private:
std::shared_ptr<Predictor> main_pred_;
std::vector<std::unique_ptr<Predictor>> preds_;
};
} // namespace services
} // namespace paddle_infer
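// --------------------------------------------------------------------------
// Illustrative usage sketch (editor's addition, not part of the upstream
// header): an end-to-end run with the paddle_infer API declared above,
// assuming a combined model at placeholder paths and a single float input.
inline std::vector<float> RunOnce(const std::vector<float>& input) {
  paddle_infer::Config config;
  config.SetModel("/path/to/model.pdmodel",     // hypothetical program file
                  "/path/to/model.pdiparams");  // hypothetical params file
  auto predictor = paddle_infer::CreatePredictor(config);

  auto input_t = predictor->GetInputHandle(predictor->GetInputNames()[0]);
  input_t->Reshape({1, static_cast<int>(input.size())});
  input_t->CopyFromCpu(input.data());           // host -> tensor

  predictor->Run();

  auto output_t = predictor->GetOutputHandle(predictor->GetOutputNames()[0]);
  int numel = 1;
  for (int d : output_t->shape()) numel *= d;   // product of output dims
  std::vector<float> out(numel);
  output_t->CopyToCpu(out.data());              // tensor -> host
  return out;
}
// --------------------------------------------------------------------------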

View File

@ -0,0 +1,199 @@
// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
///
/// \file paddle_mkldnn_quantizer_config.h
///
/// \brief Mkldnn quantizer config.
///
/// \author paddle-infer@baidu.com
/// \date 2020-01-01
/// \since 1.7.0
///
#pragma once
#include <cassert>
#include <map>
#include <memory>
#include <string>
#include <unordered_set>
#include <vector>
#include "paddle_api.h" // NOLINT
#include "paddle_infer_declare.h" // NOLINT
namespace paddle {
///
/// \brief Algorithms for finding scale of quantized Tensors.
///
enum class ScaleAlgo {
NONE, ///< Do not compute scale
MAX, ///< Find scale based on the max absolute value
MAX_CH, ///< Find scale based on the max absolute value per output channel
MAX_CH_T, ///< Find scale based on the max absolute value per output channel
///< of a transposed tensor
KL, ///< Find scale based on KL Divergence
};
///
/// \class MkldnnQuantizerConfig
///
/// \brief Config for mkldnn quantize.
///
/// The MkldnnQuantizerConfig is used to configure Mkldnn's quantization
/// parameters, including scale algorithm, warmup data, warmup batch size,
/// quantized op list, etc.
///
/// It is not recommended to use this config directly, please refer to
/// AnalysisConfig::mkldnn_quantizer_config()
///
struct PD_INFER_DECL MkldnnQuantizerConfig {
///
/// \brief Construct a new Mkldnn Quantizer Config object
///
MkldnnQuantizerConfig();
///
/// \brief Set the scale algo
///
/// Specify a quantization algorithm for a connection (input/output) of the
/// operator type.
/// \param[in] op_type_name the operator's name.
/// \param[in] conn_name name of the connection (input/output) of the
/// operator.
/// \param[in] algo the algorithm for computing scale.
///
void SetScaleAlgo(std::string op_type_name, std::string conn_name,
ScaleAlgo algo) {
rules_[op_type_name][conn_name] = algo;
}
///
/// \brief Get the scale algo
///
/// Get the quantization algorithm for a connection (input/output) of the
/// operator type.
///
/// \param[in] op_type_name the operator's name.
/// \param[in] conn_name name of the connection (input/output) of the
/// operator.
/// \return the scale algo.
///
ScaleAlgo scale_algo(const std::string& op_type_name,
const std::string& conn_name) const;
///
/// \brief Set the warmup data
///
/// Set the batch of data to be used for warm-up iteration.
///
/// \param[in] data batch of data.
///
void SetWarmupData(std::shared_ptr<std::vector<PaddleTensor>> data) {
warmup_data_ = data;
}
///
/// \brief Get the warmup data
///
/// Get the batch of data used for warm-up iteration.
///
/// \return the warm up data
///
std::shared_ptr<std::vector<PaddleTensor>> warmup_data() const {
return warmup_data_;
}
///
/// \brief Set the warmup batch size
///
/// Set the batch size for warm-up iteration.
///
/// \param[in] batch_size warm-up batch size
///
void SetWarmupBatchSize(int batch_size) { warmup_bs_ = batch_size; }
///
/// \brief Get the warmup batch size
///
/// Get the batch size for warm-up iteration.
///
/// \return the warm up batch size
int warmup_batch_size() const { return warmup_bs_; }
///
/// \brief Set quantized op list
///
/// In the quantization process, set the op list that supports quantization
///
/// \param[in] op_list List of quantized ops
///
void SetEnabledOpTypes(std::unordered_set<std::string> op_list) {
enabled_op_types_ = op_list;
}
///
/// \brief Get quantized op list
///
/// \return list of quantized ops
///
const std::unordered_set<std::string>& enabled_op_types() const {
return enabled_op_types_;
}
///
/// \brief Set the excluded op ids
///
/// \param[in] op_ids_list excluded op ids
///
void SetExcludedOpIds(std::unordered_set<int> op_ids_list) {
excluded_op_ids_ = op_ids_list;
}
///
/// \brief Get the excluded op ids
///
/// \return exclude op ids
///
const std::unordered_set<int>& excluded_op_ids() const {
return excluded_op_ids_;
}
///
/// \brief Set default scale algorithm
///
/// \param[in] algo Method for calculating scale in quantization process
///
void SetDefaultScaleAlgo(ScaleAlgo algo) { default_scale_algo_ = algo; }
///
/// \brief Get default scale algorithm
///
/// \return Method for calculating scale in quantization
/// process
///
ScaleAlgo default_scale_algo() const { return default_scale_algo_; }
protected:
std::map<std::string, std::map<std::string, ScaleAlgo>> rules_;
std::unordered_set<std::string> enabled_op_types_;
std::unordered_set<int> excluded_op_ids_;
std::shared_ptr<std::vector<PaddleTensor>> warmup_data_;
int warmup_bs_{1};
ScaleAlgo default_scale_algo_{ScaleAlgo::MAX};
};
} // namespace paddle
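// --------------------------------------------------------------------------
// Illustrative usage sketch (editor's addition, not part of the upstream
// header): tuning a quantizer config obtained via
// AnalysisConfig::mkldnn_quantizer_config(), as suggested in the comment
// above. The op names, connection name, and warmup batch are placeholders.
inline void TuneQuantizer(
    paddle::MkldnnQuantizerConfig* q,
    std::shared_ptr<std::vector<paddle::PaddleTensor>> warmup) {
  q->SetWarmupData(warmup);                       // calibration batch
  q->SetWarmupBatchSize(1);
  q->SetEnabledOpTypes({"conv2d", "fc"});         // hypothetical quantized ops
  q->SetDefaultScaleAlgo(paddle::ScaleAlgo::KL);  // KL-divergence based scales
  q->SetScaleAlgo("conv2d", "Input", paddle::ScaleAlgo::MAX_CH);  // per-channel
}
// --------------------------------------------------------------------------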

View File

@ -0,0 +1,248 @@
// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include <sstream>
#include <string>
#include <vector>
#include "paddle_infer_declare.h" // NOLINT
///
/// \file paddle_pass_builder.h
///
/// \brief Class Paddle Pass Builder and its subclasses (pass strategies).
/// \section sec_intro Introduction
/// This class aims to build passes for paddle and define passes' strategies.
///
/// \author paddle-infer@baidu.com
/// \date 2020-3-23
/// \since 1.7
/// \namespace paddle
namespace paddle {
/// \class PaddlePassBuilder
/// \brief This class builds passes based on a vector<string> input. It is part of
/// the inference API. Users can build passes, insert new passes, and delete passes
/// using this class and its functions.
///
/// Example Usage:
/// Build a new pass.
/// \code{cpp}
/// const vector<string> passes(1, "conv_relu_mkldnn_fuse_pass");
/// PaddlePassBuilder builder(passes);
/// \endcode
class PD_INFER_DECL PaddlePassBuilder {
public:
/// \brief Constructor of the class. It stores the input passes.
/// \param[in] passes passes' types.
explicit PaddlePassBuilder(const std::vector<std::string> &passes)
: passes_(passes) {}
/// \brief Stores the input passes.
/// \param[in] passes passes' types.
void SetPasses(std::initializer_list<std::string> passes) {
passes_ = passes;
}
/// \brief Append a pass to the end of the passes.
/// \param[in] pass_type the type of the new pass.
void AppendPass(const std::string &pass_type);
/// \brief Insert a pass to a specific position.
/// \param[in] idx the position to insert.
/// \param[in] pass_type the type of insert pass.
void InsertPass(size_t idx, const std::string &pass_type);
/// \brief Delete the pass at certain position 'idx'.
/// \param[in] idx the position to delete.
void DeletePass(size_t idx);
/// \brief Delete all passes that have a certain type 'pass_type'.
/// \param[in] pass_type the certain pass type to be deleted.
void DeletePass(const std::string &pass_type);
/// \brief Delete all the passes.
void ClearPasses();
/// \brief Append an analysis pass.
/// \param[in] pass the type of the new analysis pass.
void AppendAnalysisPass(const std::string &pass);
/// \brief Visualize the computation graph after each pass by generating a DOT
/// language file; the files can be rendered with the Graphviz toolkit.
void TurnOnDebug();
/// \brief Human-readable information of the passes.
std::string DebugString();
/// \brief Get information of passes.
/// \return Return list of the passes.
const std::vector<std::string> &AllPasses() const { return passes_; }
/// \brief Get information of analysis passes.
/// \return Return list of analysis passes.
std::vector<std::string> AnalysisPasses() const {
auto passes = analysis_passes_;
// Make sure ir_graph_to_program is the last pass so any
// modification of the IR persists to the program.
passes.push_back("ir_graph_to_program_pass");
return passes;
}
protected:
/// \cond Protected
std::vector<std::string> analysis_passes_{
{"ir_graph_build_pass", "ir_graph_clean_pass", "ir_analysis_pass",
"ir_params_sync_among_devices_pass", "adjust_cudnn_workspace_size_pass",
"inference_op_replace_pass"}};
std::vector<std::string> passes_;
/// \endcond
};
/// \class PassStrategy
/// \brief This class defines the pass strategies like whether to use gpu/cuDNN
/// kernel/MKLDNN.
class PD_INFER_DECL PassStrategy : public PaddlePassBuilder {
public:
/// \brief Constructor of PassStrategy class. It works the same as
/// PaddlePassBuilder class. \param[in] passes passes' types.
explicit PassStrategy(const std::vector<std::string> &passes)
: PaddlePassBuilder(passes) {}
/// \brief Enable the use of cuDNN kernel.
virtual void EnableCUDNN() {}
/// \brief Enable the use of MKLDNN.
/// The MKLDNN control exists in both CPU and GPU mode, because there can
/// still be some CPU kernels running in GPU mode.
virtual void EnableMKLDNN() {}
/// \brief Enable MKLDNN quantize optimization.
virtual void EnableMkldnnQuantizer() {}
/// \brief Enable MKLDNN bfloat16.
virtual void EnableMkldnnBfloat16() {}
/// \brief Check if we are using gpu.
/// \return A bool variable implying whether we are in gpu mode.
bool use_gpu() const { return use_gpu_; }
/// \brief Check if we are using xpu.
/// \return A bool variable implying whether we are in xpu mode.
bool use_xpu() const { return use_xpu_; }
/// \brief Default destructor.
virtual ~PassStrategy() = default;
protected:
/// \cond Protected
bool use_xpu_{false};
bool use_gpu_{false};
bool use_mkldnn_{false};
/// \endcond
};
/// \class CpuPassStrategy
/// \brief The CPU passes controller, it is used in AnalysisPredictor with CPU
/// mode.
class PD_INFER_DECL CpuPassStrategy : public PassStrategy {
public:
/// \brief Default constructor of CpuPassStrategy.
CpuPassStrategy();
/// \brief Construct by copying another CpuPassStrategy object.
/// \param[in] other The CpuPassStrategy object we want to copy.
explicit CpuPassStrategy(const CpuPassStrategy &other)
: PassStrategy(other.AllPasses()) {
use_gpu_ = other.use_gpu_;
use_mkldnn_ = other.use_mkldnn_;
use_mkldnn_quantizer_ = other.use_mkldnn_quantizer_;
use_mkldnn_bfloat16_ = other.use_mkldnn_bfloat16_;
}
/// \brief Default destructor.
virtual ~CpuPassStrategy() = default;
/// \brief Enable the use of cuDNN kernel.
void EnableCUDNN() override;
/// \brief Enable the use of MKLDNN.
void EnableMKLDNN() override;
/// \brief Enable MKLDNN quantize optimization.
void EnableMkldnnQuantizer() override;
/// \brief Enable MKLDNN bfloat16.
void EnableMkldnnBfloat16() override;
protected:
/// \cond Protected
bool use_mkldnn_quantizer_{false};
bool use_mkldnn_bfloat16_{false};
/// \endcond
};
/// \class GpuPassStrategy
/// \brief The GPU passes controller, it is used in AnalysisPredictor with GPU
/// mode.
class PD_INFER_DECL GpuPassStrategy : public PassStrategy {
public:
/// \brief Default constructor of GpuPassStrategy.
GpuPassStrategy();
/// \brief Construct by copying another GpuPassStrategy object.
/// \param[in] other The GpuPassStrategy object we want to copy.
explicit GpuPassStrategy(const GpuPassStrategy &other)
: PassStrategy(other.AllPasses()) {
use_gpu_ = true;
use_cudnn_ = other.use_cudnn_;
}
/// \brief Enable the use of cuDNN kernel.
void EnableCUDNN() override;
/// \brief Not supported in GPU mode yet.
void EnableMKLDNN() override;
/// \brief Not supported in GPU mode yet.
void EnableMkldnnQuantizer() override;
/// \brief Not supported in GPU mode yet.
void EnableMkldnnBfloat16() override;
/// \brief Default destructor.
virtual ~GpuPassStrategy() = default;
protected:
/// \cond Protected
bool use_cudnn_{false};
/// \endcond
};
/// \class XpuPassStrategy
/// \brief The XPU passes controller, it is used in AnalysisPredictor with XPU
/// mode.
class PD_INFER_DECL XpuPassStrategy final : public PassStrategy {
public:
XpuPassStrategy() : PassStrategy({}) {}
};
/// \brief List of tensorRT subgraph passes.
PD_INFER_DECL extern const std::vector<std::string> kTRTSubgraphPasses;
/// \brief List of lite subgraph passes.
PD_INFER_DECL extern const std::vector<std::string> kLiteSubgraphPasses;
} // namespace paddle
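// --------------------------------------------------------------------------
// Illustrative usage sketch (editor's addition, not part of the upstream
// header): building and adjusting a pass list, in the spirit of the class
// comment above. Pass names are examples and depend on the Paddle build.
inline paddle::PaddlePassBuilder MakeExampleBuilder() {
  paddle::PaddlePassBuilder builder({"conv_relu_mkldnn_fuse_pass"});
  builder.AppendPass("fc_fuse_pass");                // append to the end
  builder.InsertPass(0, "simplify_with_basic_ops_pass");
  builder.DeletePass("conv_relu_mkldnn_fuse_pass");  // drop by type
  builder.TurnOnDebug();                             // dump DOT files per pass
  return builder;
}
// --------------------------------------------------------------------------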

View File

@ -377,7 +377,7 @@ namespace kdkocr_infer {
boxes = post_processor_.FilterTagDetRes(boxes, ratio_h, ratio_w, srcimg);
auto postprocess_end = std::chrono::steady_clock::now();
std::cout << "Detected boxes num: " << boxes.size() << endl;
//std::cout << "Detected boxes num: " << boxes.size() << endl;
std::chrono::duration<float> preprocess_diff = preprocess_end - preprocess_start;
times->push_back(double(preprocess_diff.count() * 1000));

View File

@ -1,6 +1,7 @@
aux_source_directory(. SOURCESCODE)
include_directories(.)
add_library(kypackage SHARED ${SOURCESCODE})
set_target_properties(kypackage PROPERTIES VERSION 1.2.0 SOVERSION 1)
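# Note on the added property: VERSION is the full library version (libkypackage.so.1.2.0),
# while SOVERSION sets the soname/ABI version (libkypackage.so.1) that dependent packages link against.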
add_executable(kypackage-test test/kypackage-test.c)
target_link_libraries(kypackage-test kypackage)

View File

@ -5,6 +5,7 @@ project(kypowermanagement LANGUAGES CXX)
set(POWERMANAGEMENT_TOP_DIR ${CMAKE_CURRENT_LIST_DIR})
add_library(${PROJECT_NAME} SHARED)
set_target_properties(kypowermanagement PROPERTIES VERSION 1.2.0 SOVERSION 1)
target_compile_options(${PROJECT_NAME} PRIVATE -Wall -g)
target_compile_features(${PROJECT_NAME} PRIVATE cxx_std_11)

View File

@ -129,7 +129,7 @@ bool LockScreen::unInhibitLockScreen(uint32_t flag)
DBusPendingCall *sendMsgPending = NULL;
sendMsg = dbus_message_new_method_call(dbusServiceName , dbusObjectPath , dbusInterfaceName , dbusUnInhibitLockScreenMethod);
if (!dbus_message_append_args(sendMsg , DBUS_TYPE_UINT32 , &flag)) {
if (!dbus_message_append_args(sendMsg , DBUS_TYPE_UINT32 , &flag , DBUS_TYPE_INVALID)) {
klog_err("kdk : d-bus append args fail !\n");
return false;
}
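    /* Editor's note: dbus_message_append_args() is variadic, so the argument list
     * must be terminated with the DBUS_TYPE_INVALID sentinel; without it libdbus
     * cannot tell where the arguments end. The corrected call is:
     *   dbus_message_append_args(sendMsg, DBUS_TYPE_UINT32, &flag, DBUS_TYPE_INVALID);
     */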

View File

@ -1,6 +1,7 @@
aux_source_directory(. SOURCESCODE)
include_directories(.)
add_library(kyrtinfo SHARED ${SOURCESCODE})
set_target_properties(kyrtinfo PROPERTIES VERSION 1.2.0 SOVERSION 1)
add_executable(kyrtinfo-test test/kyrtinfo-test.c)
target_link_libraries(kyrtinfo-test kyrtinfo)

View File

@ -4,8 +4,9 @@ find_library(GLIBC_LIB glib-2.0)
find_library(DBUS_LIB dbus-1)
find_library(DBUS_GLIB_LIB dbus-glib-1)
add_library(kysysinfo SHARED ${SOURCESCODE})
set_target_properties(kysysinfo PROPERTIES VERSION 1.2.0 SOVERSION 1)
add_executable(kysysinfo-test test/kysysinfo_test.c)
target_link_libraries(kysysinfo kylog systemd kyconf ${GLIBC_LIB} ${DBUS_LIB} ${DBUS_GLIB_LIB})
target_link_libraries(kysysinfo dl kylog systemd kyconf ${GLIBC_LIB} ${DBUS_LIB} ${DBUS_GLIB_LIB})
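# dl is linked because kysysinfo now resolves libkylin-activation.so at run time
# via dlopen()/dlsym() instead of linking against it directly.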
target_link_libraries(kysysinfo-test kysysinfo)
# target_link_libraries(kysysinfo-test kysysinfo kylin-activation kylog systemd kyconf ${GLIBC_LIB} ${DBUS_LIB} ${DBUS_GLIB_LIB})

View File

@ -4,7 +4,9 @@
#include <kysdk/kysdk-base/libkylog.h>
#include <pwd.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <dlfcn.h>
#define KYLIN_ACTIVATION_DBUS_ADDRESS "org.freedesktop.activation"
@ -77,7 +79,10 @@ char* kdk_system_get_version(bool verbose)
sysversion = get_val_from_file(fp, "milestone");
if (!sysversion)
{
fclose(fp);
return NULL;
}
fclose(fp);
}
else
@ -88,7 +93,10 @@ char* kdk_system_get_version(bool verbose)
sysversion = get_val_from_file(fp, "VERSION");
if (!sysversion)
{
fclose(fp);
return NULL;
}
strstrip(sysversion, '\"');
fclose(fp);
}
@ -107,33 +115,63 @@ int kdk_system_get_activationStatus(int *status_error_num,int *date_error_num)
}
int res = 0;
// #define _KYLIN_ACTIVATION_H_
#ifdef __linux__
#ifdef _KYLIN_ACTIVATION_H_
int err;
res = kylin_activation_activate_status(&err);
if (err != NO_ERROR)
int ret = -1;
typedef int (*kylin_activation_activate_status)(int *);
typedef int (*kylin_activation_trial_status)(int *);
void *hwnd = dlopen("/usr/lib/libkylin-activation.so", RTLD_LAZY);
if (!hwnd)
{
klog_err("激活状态获取失败:%d\n", err);
return 0;
}
if (res)
{
return 1;
klog_err("加载libkylin-activation.so失败\n");
return ret;
}
res = kylin_activation_trial_status(&err);
if (err != NO_ERROR)
do
{
klog_err("试用状态获取失败:%d\n", err);
return 0;
}
kylin_activation_activate_status pkylin_activation_activate_status = (kylin_activation_activate_status)dlsym(hwnd, "kylin_activation_activate_status");
kylin_activation_trial_status pkylin_activation_trial_status = (kylin_activation_trial_status)dlsym(hwnd, "kylin_activation_trial_status");
if (res == 1)
{
return 0;
}
if (!pkylin_activation_activate_status || !pkylin_activation_trial_status)
{
klog_err("获取接口地址失败\n");
break;
}
res = pkylin_activation_activate_status(&err);
if (err != 0)
{
klog_err("激活状态获取失败:%d\n", err);
ret = 0;
break;
}
if (res)
{
ret = 1;
break;
}
return -1;
res = pkylin_activation_trial_status(&err);
if (err != 0)
{
klog_err("试用状态获取失败:%d\n", err);
ret = 0;
break;
}
if (res == 1)
{
ret = 0;
break;
}
} while (false);
dlclose(hwnd);
return ret;
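    /* Editor's note: resolving libkylin-activation.so with dlopen()/dlsym() keeps the
     * activation library an optional runtime dependency, and the do { ... } while (false)
     * block funnels every outcome through a single exit path so dlclose(hwnd) always
     * runs before the final return. */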
#else // fall back to D-Bus communication
DBusConnection *conn;
DBusError err;
@ -166,6 +204,12 @@ int kdk_system_get_activationStatus(int *status_error_num,int *date_error_num)
"org.freedesktop.activation.interface", // interface to call on
"status"); // method name
if (!status_msg)
{ // -1 is default timeout
klog_err("status_msgdbus_message_new_method_call调用失败\n");
return -1;
}
if (!dbus_connection_send_with_reply (conn, status_msg, &status_pending, -1)) { // -1 is default timeout
klog_err("status_msgdbus_connection_send_with_reply调用失败\n");
return -1;
@ -269,8 +313,8 @@ int kdk_system_get_activationStatus(int *status_error_num,int *date_error_num)
return 1;
}
#endif // _KYLIN_ACTIVATION_H_
#endif // __linux__
// #endif // _KYLIN_ACTIVATION_H_
// #endif // __linux__
if (status_msg)
{
dbus_message_unref(status_msg);
@ -296,6 +340,8 @@ int kdk_system_get_activationStatus(int *status_error_num,int *date_error_num)
dbus_pending_call_unref(date_pending);
}
return 0;
#endif // _KYLIN_ACTIVATION_H_
#endif // __linux__
}
char* kdk_system_get_serialNumber()
@ -313,7 +359,22 @@ char* kdk_system_get_serialNumber()
strskipspace(serial);
#else
int err;
serial = kylin_activation_get_serial_number(&err);
typedef char *(*kylin_activation_get_serial_number)(int *);
void *hwnd = dlopen("/usr/lib/libkylin-activation.so", RTLD_LAZY);
if (!hwnd)
{
klog_err("加载libkylin-activation.so失败\n");
return serial;
}
kylin_activation_get_serial_number pkylin_activation_get_serial_number = (kylin_activation_get_serial_number)dlsym(hwnd,"kylin_activation_get_serial_number");
if(!pkylin_activation_get_serial_number)
{
klog_err("加载接口kylin_activation_get_serial_number失败\n");
return serial;
}
serial = pkylin_activation_get_serial_number(&err);
if (!serial)
{
klog_err("序列号获取失败:%d\n", err);
@ -322,6 +383,7 @@ char* kdk_system_get_serialNumber()
{
strskipspace(serial);
}
dlclose(hwnd);
return serial;
#endif // _KYLIN_ACTIVATION_H_
#endif // __linux__
@ -389,6 +451,59 @@ char* kdk_system_get_projectName()
return project_codename;
}
char* kdk_system_get_projectSubName()
{
char *project_subcodename = NULL;
#ifdef __linux__
FILE *fp = fopen("/etc/lsb-release", "rt");
if (fp)
{
project_subcodename = get_val_from_file(fp, "SUB_PROJECT_CODENAME");
fclose(fp);
}
if (!project_subcodename)
{
fp = fopen("/etc/os-release", "rt");
ASSERT_NOT_NULL(fp, NULL);
project_subcodename = get_val_from_file(fp, "SUB_PROJECT_CODENAME");
fclose(fp);
}
if (project_subcodename)
strstripspace(project_subcodename);
#endif
return project_subcodename;
}
unsigned int kdk_system_get_productFeatures()
{
char *product_features = NULL;
unsigned int res = 0;
#ifdef __linux__
FILE *fp = fopen("/etc/lsb-release", "rt");
if (fp)
{
product_features = get_val_from_file(fp, "PRODUCT_FEATURES");
fclose(fp);
}
if (!product_features)
{
fp = fopen("/etc/os-release", "rt");
ASSERT_NOT_NULL(fp, 0);
product_features = get_val_from_file(fp, "PRODUCT_FEATURES");
fclose(fp);
}
if (product_features)
{
strstripspace(product_features);
res = atoi(product_features);
}
#endif
return res;
}
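/* Editor's sketch (hypothetical usage, not part of this patch): a minimal caller of the
 * two interfaces added in 1.2.1, assuming the declarations shown in the sysinfo header
 * below and linking against the kysysinfo library. The returned string is heap-allocated
 * and must be freed by the caller. */
#include <stdio.h>
#include <stdlib.h>

extern char *kdk_system_get_projectSubName(void);
extern unsigned int kdk_system_get_productFeatures(void);

int main(void)
{
    char *sub = kdk_system_get_projectSubName();          /* NULL on failure */
    unsigned int features = kdk_system_get_productFeatures();

    printf("project sub-codename: %s\n", sub ? sub : "(unknown)");
    printf("product feature flags: 0x%x\n", features);

    free(sub);                                            /* free(NULL) is harmless */
    return 0;
}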
char* kdk_system_get_hostVirtType()
{
char *virtType = (char*)malloc(sizeof(char) * 65);

View File

@ -82,6 +82,24 @@ extern char* kdk_system_get_eUser();
*/
extern char* kdk_system_get_projectName();
/**
* @brief Get the operating system project sub-codename.
*
* @return char* The sub-codename string, or NULL on failure; the caller must free() it.
*/
extern char* kdk_system_get_projectSubName();
/**
* @brief Get the product feature flags of the system.
*
* @return unsigned int Feature bitmask:
* 0000 no special product features
* 0001 PC特性 (PC features)
* 0010 平板特性 (tablet features)
* 0011 PC特性 + 平板特性 (both PC and tablet features)
*/
extern unsigned int kdk_system_get_productFeatures();
/**
* @brief Get the virtualization type of the host machine (宿主机).
*

View File

@ -13,15 +13,17 @@
#include <semaphore.h>
#include <sys/timerfd.h>
#include <errno.h>
// 20220721: added time-zone change monitoring
#include <sys/inotify.h>
pthread_mutex_t lock;
u_int8_t g_Flag;
u_int8_t g_Flag; // selects the persistent (periodic) timer vs. the one-shot timer
u_int8_t g_Quit;
u_int8_t g_Quit; // quit signal flag
sem_t g_Wait;
u_int8_t g_TimeChanged;
u_int8_t g_TimeSync;
u_int8_t g_TimeChanged; // the system time has changed
u_int8_t g_TimeSync; // a time re-synchronisation is needed
void sig_Handler(int sig)
{
@ -39,15 +41,16 @@ static void *printClock(void *ptr)
time_t current;
time(&current);
now = localtime(&current);
// printf("%04d/%02d/%02d %02d:%02d:%02d\n", now->tm_year + 1900, now->tm_mon, now->tm_mday, now->tm_hour, now->tm_min, now->tm_sec);
// printf("g_TimeChanged is %d\n", g_TimeChanged);
// printf("%d",now->tm_sec);
// if the time has changed, emit the TimeVhangeSignal signal
// struct timeval tx;
// struct timezone tz;
// gettimeofday(&tx,&tz);
// zone = tz.tz_minuteswest/60;
// printf("时差:%d\n",zone);
// if the time has changed, emit the TimeChangeSignal signal
if (g_TimeChanged == 1)
{
char *buf = calloc(1, 128);
sprintf(buf, "%04d/%02d/%02d %02d:%02d:%02d", now->tm_year + 1900, now->tm_mon, now->tm_mday, now->tm_hour, now->tm_min, now->tm_sec);
sprintf(buf, "%04d/%02d/%02d %02d:%02d:%02d", now->tm_year + 1900, now->tm_mon+1, now->tm_mday, now->tm_hour, now->tm_min, now->tm_sec);
printf("%s\n", buf);
msg = dbus_message_new_signal("/com/kylin/kysdk/Timer",
@ -63,9 +66,10 @@ static void *printClock(void *ptr)
dbus_connection_send(conn, msg, &serial);
dbus_connection_flush(conn);
dbus_message_unref(msg);
free(buf);
}
// not on a whole-minute boundary
// not on a whole-minute boundary
if (now->tm_sec != 0)
{
pthread_mutex_lock(&lock);
@ -85,7 +89,7 @@ static void *printClock(void *ptr)
else
{
char *buf = calloc(1, 128);
sprintf(buf, "%04d/%02d/%02d %02d:%02d:%02d", now->tm_year + 1900, now->tm_mon, now->tm_mday, now->tm_hour, now->tm_min, now->tm_sec);
sprintf(buf, "%04d/%02d/%02d %02d:%02d:%02d", now->tm_year + 1900, now->tm_mon+1, now->tm_mday, now->tm_hour, now->tm_min, now->tm_sec);
// printf("%s\n", buf);
msg = dbus_message_new_signal("/com/kylin/kysdk/Timer",
@ -108,6 +112,7 @@ static void *printClock(void *ptr)
pthread_mutex_unlock(&lock);
sem_post(&g_Wait);
}
free(buf);
}
return NULL;
@ -117,26 +122,29 @@ static void *printClock(void *ptr)
void *startBroadcastSystemTimePerMin(void *tmp)
{
DBusConnection *conn = tmp;
size_t timerID = -1;
size_t periodicTimerID = 0;
while (!g_Quit)
{
sem_wait(&g_Wait);
if (g_TimeChanged || g_TimeSync)
{
printf("Get Time Changed signal or mis-synced. stop timerID %zd\n", timerID);
kdk_timer_stop(timerID);
// if the one-shot timer is already running, nothing needs to be done
// the clock changed or a re-sync is needed: stop the persistent timer and start a one-shot timer
printf("Get Time Changed signal or mis-synced. stop timerID %zd\n", periodicTimerID);
kdk_timer_stop(periodicTimerID);
g_TimeChanged = 0;
g_TimeSync = 0;
timerID = -1;
periodicTimerID = 0;
}
if (!g_Flag)
kdk_timer_start(200, printClock, KTIMER_SINGLESHOT, KTIMER_RELATIVE, conn, 0);
else
{
timerID = kdk_timer_start(1000 * 60, printClock, KTIMER_PERIODIC, KTIMER_RELATIVE, conn, 0);
printf("start periodic timer with ID %zd\n", timerID);
// once the persistent timer is started, the one-shot timer is no longer needed
periodicTimerID = kdk_timer_start(1000 * 60, printClock, KTIMER_PERIODIC, KTIMER_RELATIVE, conn, 0);
printf("start periodic timer with ID %zd\n", periodicTimerID);
}
}
@ -146,7 +154,7 @@ void *startBroadcastSystemTimePerMin(void *tmp)
int monitorSystemTimeChange()
{
#define TIME_T_MAX (time_t)((1UL << ((sizeof(time_t) << 3) - 1)) - 1)
// printf("monitorSystemTimeChange\n");
struct itimerspec its = {.it_value.tv_sec = TIME_T_MAX};
int fd = timerfd_create(CLOCK_REALTIME, TFD_CLOEXEC);
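    /* Editor's note: the timer is armed with an effectively infinite absolute expiry;
     * presumably timerfd_settime() is called in the elided lines with
     * TFD_TIMER_ABSTIME | TFD_TIMER_CANCEL_ON_SET, so the blocking read() below fails
     * with ECANCELED whenever the realtime clock is set discontinuously. That failure
     * is what signals "system time changed". */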
if (fd < 0)
@ -162,6 +170,7 @@ int monitorSystemTimeChange()
u_int64_t dep;
ssize_t ret = read(fd, &dep, sizeof(u_int64_t));
close(fd);
if (ret == -1 && errno == ECANCELED)
return 1;
@ -176,9 +185,63 @@ void *actionTimeChanged(void *ptr)
if (monitorSystemTimeChange() == 1)
{
printf("System Time Changed.\n");
g_TimeChanged = 1;
g_Flag = 0;
printClock(conn);
if (g_Flag)
{
g_TimeChanged = 1;
g_Flag = 0;
printClock(conn);
}
}
}
return NULL;
}
// 20220721: added time-zone change monitoring
int monitorSystemTimeZoneChange(){
char buf[BUFSIZ];
int fd = inotify_init();
buf[sizeof(buf) - 1] = 0;
struct inotify_event *event;
if (fd < 0)
{
return -1;
}
int ftimezone = inotify_add_watch(fd,"/etc/timezone",IN_DELETE_SELF);
if (ftimezone < 0)
{
close(fd);
return -1;
}
int ret = read(fd, buf, sizeof(buf) - 1);
close(fd);
event = (struct inotify_event *)&buf[0];
if (ret)
{
fprintf(stdout, "%s --- %s\n", event->name, "IN_DELETE_SELF");
return 1;
}
return 0;
}
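/* Editor's note: this assumes a time-zone change replaces /etc/timezone (delete or
 * rename over the old file), which raises IN_DELETE_SELF on the watched inode. Since
 * that inode is gone after the event, the inotify fd and watch are recreated on every
 * call. */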
// 20220721: added time-zone change monitoring
void *actionTimeZoneChanged(void *ptr)
{
DBusConnection *conn = ptr;
while (!g_Quit)
{
if (monitorSystemTimeZoneChange() == 1)
{
printf("System Time Changed.\n");
if (g_Flag)
{
g_TimeChanged = 1;
g_Flag = 0;
printClock(conn);
}
}
}
@ -444,13 +507,20 @@ int main(void)
sem_init(&g_Wait, 0, 1);
pthread_mutex_init(&lock, NULL);
pthread_attr_t attr;
pthread_t tid;
pthread_attr_init(&attr);
pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
pthread_create(&tid, &attr, actionTimeChanged, conn);
pthread_mutex_init(&lock, NULL);
// 20220721: added time-zone change monitoring
pthread_attr_t timezone_attr;
pthread_t timezone_id;
pthread_attr_init(&timezone_attr);
pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
pthread_create(&timezone_id, &timezone_attr, actionTimeZoneChanged, conn);
/* connect to the daemon bus */