diff --git a/debian/changelog b/debian/changelog
index 4560b6d..108fee0 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,3 +1,9 @@
+libkysdk-system (1.2.1) yangtze; urgency=medium
+
+  * Bump version number
+
+ -- szm-min Tue, 02 Aug 2022 09:33:58 +0800
+
 libkysdk-system (1.2.0kylin4-ok4~0720) yangtze; urgency=medium
 
   * Merge mainline v1.2.1 into openkylin
diff --git a/debian/patches/0001-Update.patch b/debian/patches/0001-Update.patch
new file mode 100644
index 0000000..41f64fa
Binary files /dev/null and b/debian/patches/0001-Update.patch differ
diff --git a/debian/patches/0002-fix-bug-125453.patch b/debian/patches/0002-fix-bug-125453.patch
new file mode 100644
index 0000000..fdfb9f5
--- /dev/null
+++ b/debian/patches/0002-fix-bug-125453.patch
@@ -0,0 +1,111 @@
+From: cckylin-cibot
+Date: Mon, 27 Jun 2022 03:50:55 +0000
+Subject: fix bug#125453, the control panel account module is missing the
+ "Account Info" settings item
+
+---
+ src/disk/CMakeLists.txt                   | 1 +
+ src/filesystem/filewatcher/CMakeLists.txt | 1 +
+ src/hardware/CMakeLists.txt               | 1 +
+ src/kdkocr/CMakeLists.txt                 | 1 +
+ src/packages/CMakeLists.txt               | 1 +
+ src/powermanagement/CMakeLists.txt        | 1 +
+ src/proc/CMakeLists.txt                   | 1 +
+ src/systeminfo/CMakeLists.txt             | 1 +
+ 8 files changed, 8 insertions(+)
+
+diff --git a/src/disk/CMakeLists.txt b/src/disk/CMakeLists.txt
+index 38661f7..2663b7e 100644
+--- a/src/disk/CMakeLists.txt
++++ b/src/disk/CMakeLists.txt
+@@ -1,5 +1,6 @@
+ aux_source_directory(. SOURCESCODE)
+ add_library(kydiskinfo SHARED ${SOURCESCODE})
++set_target_properties(kydiskinfo PROPERTIES VERSION 1.2.0 SOVERSION 1)
+ add_executable(test-getdiskinfo test/getdiskinfo.c)
+ add_executable(test-getdisklist test/getdisklist.c)
+ find_library(UDEV_LIB udev)
+diff --git a/src/filesystem/filewatcher/CMakeLists.txt b/src/filesystem/filewatcher/CMakeLists.txt
+index d5bc184..4f23d76 100644
+--- a/src/filesystem/filewatcher/CMakeLists.txt
++++ b/src/filesystem/filewatcher/CMakeLists.txt
+@@ -5,6 +5,7 @@ aux_source_directory(. SOURCECODE)
+ find_package(Qt5Core)
+ include_directories(${Qt5Core_INCLUDE_DIRS})
+ add_library(kyfilewatcher SHARED ${SOURCECODE})
++set_target_properties(kyfilewatcher PROPERTIES VERSION 1.2.0 SOVERSION 1)
+ add_executable(kyfilewatcher-test test/kyfilewatcher-test.cpp)
+ target_link_libraries(kyfilewatcher kylog kyconf systemd pthread ${Qt5Core_LIBRARIES})
+ target_link_libraries(kyfilewatcher-test kyfilewatcher)
+diff --git a/src/hardware/CMakeLists.txt b/src/hardware/CMakeLists.txt
+index ccb9831..916423f 100644
+--- a/src/hardware/CMakeLists.txt
++++ b/src/hardware/CMakeLists.txt
+@@ -1,6 +1,7 @@
+ aux_source_directory(. SOURCESCODE)
+ include_directories(.)
+ add_library(kyhw SHARED ${SOURCESCODE})
++set_target_properties(kyhw PROPERTIES VERSION 1.2.0 SOVERSION 1)
+ add_executable(kync-test test/kync-test.c)
+ add_executable(kycpu-test test/kycpu-test.c)
+ target_link_libraries(kyhw kylog kyconf pthread systemd)
+diff --git a/src/kdkocr/CMakeLists.txt b/src/kdkocr/CMakeLists.txt
+index 5a2c516..a3ee095 100644
+--- a/src/kdkocr/CMakeLists.txt
++++ b/src/kdkocr/CMakeLists.txt
+@@ -168,6 +168,7 @@ if(CMAKE_HOST_SYSTEM_PROCESSOR MATCHES "loongarch64")
+ else()
+     add_library(kyocr SHARED libkyocr.cpp ocr_main.cpp utility.cpp preprocess_op.cpp postprocess_op.cpp)
+ endif()
++set_target_properties(kyocr PROPERTIES VERSION 1.2.0 SOVERSION 1)
+ #target_link_libraries(kdkOCR -lleptonica)
+ target_link_libraries(kyocr ${OpenCV_LIBS} ${DEPS})
+ target_link_libraries(kyocr ${kyocr_libraries})
+diff --git a/src/packages/CMakeLists.txt b/src/packages/CMakeLists.txt
+index 0ab21e2..1225379 100644
+--- a/src/packages/CMakeLists.txt
++++ b/src/packages/CMakeLists.txt
+@@ -1,6 +1,7 @@
+ aux_source_directory(. SOURCESCODE)
+ include_directories(.)
+ add_library(kypackage SHARED ${SOURCESCODE})
++set_target_properties(kypackage PROPERTIES VERSION 1.2.0 SOVERSION 1)
+ add_executable(kypackage-test test/kypackage-test.c)
+ target_link_libraries(kypackage-test kypackage)
+ 
+diff --git a/src/powermanagement/CMakeLists.txt b/src/powermanagement/CMakeLists.txt
+index 282681d..7a495fc 100644
+--- a/src/powermanagement/CMakeLists.txt
++++ b/src/powermanagement/CMakeLists.txt
+@@ -5,6 +5,7 @@ project(kypowermanagement LANGUAGES CXX)
+ set(POWERMANAGEMENT_TOP_DIR ${CMAKE_CURRENT_LIST_DIR})
+ 
+ add_library(${PROJECT_NAME} SHARED)
++set_target_properties(kypowermanagement PROPERTIES VERSION 1.2.0 SOVERSION 1)
+ 
+ target_compile_options(${PROJECT_NAME} PRIVATE -Wall -g)
+ target_compile_features(${PROJECT_NAME} PRIVATE cxx_std_11)
+diff --git a/src/proc/CMakeLists.txt b/src/proc/CMakeLists.txt
+index 322fcf6..5833e23 100644
+--- a/src/proc/CMakeLists.txt
++++ b/src/proc/CMakeLists.txt
+@@ -1,6 +1,7 @@
+ aux_source_directory(. SOURCESCODE)
+ include_directories(.)
+ add_library(kyrtinfo SHARED ${SOURCESCODE})
++set_target_properties(kyrtinfo PROPERTIES VERSION 1.2.0 SOVERSION 1)
+ add_executable(kyrtinfo-test test/kyrtinfo-test.c)
+ target_link_libraries(kyrtinfo-test kyrtinfo)
+ 
+diff --git a/src/systeminfo/CMakeLists.txt b/src/systeminfo/CMakeLists.txt
+index cd6378f..c1392eb 100644
+--- a/src/systeminfo/CMakeLists.txt
++++ b/src/systeminfo/CMakeLists.txt
+@@ -4,6 +4,7 @@ find_library(GLIBC_LIB glib-2.0)
+ find_library(DBUS_LIB dbus-1)
+ find_library(DBUS_GLIB_LIB dbus-glib-1)
+ add_library(kysysinfo SHARED ${SOURCESCODE})
++set_target_properties(kysysinfo PROPERTIES VERSION 1.2.0 SOVERSION 1)
+ add_executable(kysysinfo-test test/kysysinfo_test.c)
+ target_link_libraries(kysysinfo kylog systemd kyconf ${GLIBC_LIB} ${DBUS_LIB} ${DBUS_GLIB_LIB})
+ target_link_libraries(kysysinfo-test kysysinfo)
diff --git a/debian/patches/0003-update-changelog.patch b/debian/patches/0003-update-changelog.patch
new file mode 100644
index 0000000..79862b8
--- /dev/null
+++ b/debian/patches/0003-update-changelog.patch
@@ -0,0 +1,9344 @@
+From: szm-min
+Date: Wed, 20 Jul 2022 19:35:21 +0800
+Subject: update changelog
+
+---
+ .gitignore                                         |   26 +
+ development-files/kysdk-system.conf                |    1 +
+ src/CMakeLists.txt                                 |    2 +
+ src/hardware/libkycpu.c                            |  298 +-
+ src/hardware/libkync.c                             |   58 +
+ src/hardware/test/kync-test.c                      |    4 +-
+ src/kdkocr/CMakeLists.txt                          |   35 +-
+ src/kdkocr/libs/libpaddle_inference                |    0
+ .../paddle/include/crypto/cipher.h                 |   50 +
+ .../paddle/include/experimental/ext_all.h          |   32 +
+ .../paddle/include/experimental/ext_dispatch.h     |   98 +
+ .../paddle/include/experimental/ext_dll_decl.h     |   27 +
+ .../paddle/include/experimental/ext_dtype.h        |   81 +
+ .../paddle/include/experimental/ext_exception.h    |  108 +
+ .../paddle/include/experimental/ext_op_meta_info.h |  381 ++
+ .../paddle/include/experimental/ext_place.h        |   22 +
+ .../paddle/include/experimental/ext_tensor.h       |  125 +
+ .../paddle/include/internal/framework.pb.h         | 5315 ++++++++++++++++++++
+ .../paddle/include/paddle_analysis_config.h        |  680 +++
+ .../paddle_inference/paddle/include/paddle_api.h   |  457 ++
+ .../paddle/include/paddle_infer_declare.h          |   27 +
+ .../paddle/include/paddle_inference_api.h          |  311 ++
+ .../include/paddle_mkldnn_quantizer_config.h       |  199 +
+ .../paddle/include/paddle_pass_builder.h           |  248 +
+ .../paddle/lib/libpaddle_inference.so              |    0
+ src/kdkocr/ocr_main.cpp                            |    2 +-
+ src/powermanagement/lockscreen.cpp                 |    2 +-
+ src/systeminfo/CMakeLists.txt                      |    2 +-
+ src/systeminfo/libkysysinfo.c                      |  157 +-
+ src/systeminfo/libkysysinfo.h                      |   18 +
+ src/systemtime/m_systime.c                         |   52 +-
+ 31 files changed, 8718 insertions(+), 100 deletions(-)
+ create mode 100644 .gitignore
+ create mode 100644 development-files/kysdk-system.conf
+ create mode 100644 src/kdkocr/libs/libpaddle_inference
+ create mode 100755 src/kdkocr/libs/sw64/paddle_inference/paddle/include/crypto/cipher.h
+ create mode 100755 src/kdkocr/libs/sw64/paddle_inference/paddle/include/experimental/ext_all.h
+ create mode 100755 src/kdkocr/libs/sw64/paddle_inference/paddle/include/experimental/ext_dispatch.h
+ create mode 100755 src/kdkocr/libs/sw64/paddle_inference/paddle/include/experimental/ext_dll_decl.h
+ create mode 100755 src/kdkocr/libs/sw64/paddle_inference/paddle/include/experimental/ext_dtype.h
+ create mode 100755 src/kdkocr/libs/sw64/paddle_inference/paddle/include/experimental/ext_exception.h
+ create mode 100755 src/kdkocr/libs/sw64/paddle_inference/paddle/include/experimental/ext_op_meta_info.h
+ create mode 100755 src/kdkocr/libs/sw64/paddle_inference/paddle/include/experimental/ext_place.h
+ create mode 100755 src/kdkocr/libs/sw64/paddle_inference/paddle/include/experimental/ext_tensor.h
+ create mode 100755 src/kdkocr/libs/sw64/paddle_inference/paddle/include/internal/framework.pb.h
+ create mode 100755 src/kdkocr/libs/sw64/paddle_inference/paddle/include/paddle_analysis_config.h
+ create mode 100755 src/kdkocr/libs/sw64/paddle_inference/paddle/include/paddle_api.h
+ create mode 100755 src/kdkocr/libs/sw64/paddle_inference/paddle/include/paddle_infer_declare.h
+ create mode 100755 src/kdkocr/libs/sw64/paddle_inference/paddle/include/paddle_inference_api.h
+ create mode 100755 src/kdkocr/libs/sw64/paddle_inference/paddle/include/paddle_mkldnn_quantizer_config.h
+ create mode 100755 src/kdkocr/libs/sw64/paddle_inference/paddle/include/paddle_pass_builder.h
+ create mode 100644 src/kdkocr/libs/sw64/paddle_inference/paddle/lib/libpaddle_inference.so
+
+diff --git a/.gitignore b/.gitignore
+new file mode 100644
+index 0000000..16a6fbc
+--- /dev/null
++++ b/.gitignore
+@@ -0,0 +1,26 @@
++*.o
++.vscode
++build/
++bin/
++obj-x86_64-linux-gnu/
++debian/tmp/
++debian/files/
++debian/libkysdk-disk/
++debian/libkysdk-disk-dev/
++debian/libkysdk-filesystem/
++debian/libkysdk-filesystem-dev/
++debian/libkysdk-hardware/
++debian/libkysdk-hardware-dev/
++debian/libkysdk-package/
++debian/libkysdk-package-dev/
++debian/libkysdk-proc/
++debian/libkysdk-proc-dev/
++debian/libkysdk-sysinfo/
++debian/libkysdk-sysinfo-dev/
++debian/libkysdk-system/
++debian/libkysdk-system-dev/
++debian/libkysdk-systime/
++debian/libkysdk-systime-dev/
++debian/libkysdk*.substvars
++debian/libkysdk*.debhelper
++debian/.debhelper
+diff --git a/development-files/kysdk-system.conf b/development-files/kysdk-system.conf
+new file mode 100644
+index 0000000..e8dc3be
+--- /dev/null
++++ b/development-files/kysdk-system.conf
+@@ -0,0 +1 @@
++/usr/lib/kysdk/kysdk-system
+\ No newline at end of file
+diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt
+index 821798e..a6485e7 100644
+--- a/src/CMakeLists.txt
++++ b/src/CMakeLists.txt
+@@ -15,6 +15,8 @@ elseif(CMAKE_HOST_SYSTEM_PROCESSOR MATCHES "mips64")
+     add_subdirectory(kdkocr)
+ elseif(CMAKE_HOST_SYSTEM_PROCESSOR MATCHES "loongarch64")
+     add_subdirectory(kdkocr)
++elseif(CMAKE_HOST_SYSTEM_PROCESSOR MATCHES "sw_64")
++    add_subdirectory(kdkocr)
+ else()
+     message(STATUS "host processor architecture is not supported for ocr")
+ endif()
+\ No newline at end of file
+diff --git a/src/hardware/libkycpu.c b/src/hardware/libkycpu.c
+index e146c19..f69b4da 100644
+--- a/src/hardware/libkycpu.c
++++ b/src/hardware/libkycpu.c
+@@ -6,6 +6,8 @@
+ #include
+ #include
+ #include
++#include "sys/sysinfo.h"
++#include "unistd.h"
+ #ifdef __linux__
+ #include
+ #endif
+@@ -29,6 +31,223 @@ struct _cpuInfo{
+ 
+ struct _cpuInfo *cpuinf;
+ 
++struct id_part {
++    const int id;
++    const char* name;
++};
++
++static const struct id_part arm_part[] = {
++    { 0x810, "ARM810" },
++    { 0x920, "ARM920" },
++    { 0x922, "ARM922" },
++    { 0x926, "ARM926" },
++    { 0x940, "ARM940" },
++    { 0x946, "ARM946" },
++    { 0x966, "ARM966" },
++    { 0xa20, "ARM1020" },
++    { 0xa22, "ARM1022" },
++    { 0xa26, "ARM1026" },
++    { 0xb02, "ARM11 MPCore" },
++    { 0xb36, "ARM1136" },
++    { 0xb56, "ARM1156" },
++    { 0xb76, "ARM1176" },
++    { 0xc05, "Cortex-A5" },
++    { 0xc07, "Cortex-A7" },
++    { 0xc08, "Cortex-A8" },
++    { 0xc09, "Cortex-A9" },
++    { 0xc0d, "Cortex-A17" }, /* Originally A12 */
++    { 0xc0f, "Cortex-A15" },
++    { 0xc0e, "Cortex-A17" },
++    { 0xc14, "Cortex-R4" },
++    { 0xc15, "Cortex-R5" },
++    { 0xc17, "Cortex-R7" },
++    { 0xc18, "Cortex-R8" },
++    { 0xc20, "Cortex-M0" },
++    { 0xc21, "Cortex-M1" },
++    { 0xc23, "Cortex-M3" },
++    { 0xc24, "Cortex-M4" },
++    { 0xc27, "Cortex-M7" },
++    { 0xc60, "Cortex-M0+" },
++    { 0xd01, "Cortex-A32" },
++    { 0xd03, "Cortex-A53" },
++    { 0xd04, "Cortex-A35" },
++    { 0xd05, "Cortex-A55" },
++    { 0xd06, "Cortex-A65" },
++    { 0xd07, "Cortex-A57" },
++    { 0xd08, "Cortex-A72" },
++    { 0xd09, "Cortex-A73" },
++    { 0xd0a, "Cortex-A75" },
++    { 0xd0b, "Cortex-A76" },
++    { 0xd0c, "Neoverse-N1" },
++    { 0xd0d, "Cortex-A77" },
++    { 0xd0e, "Cortex-A76AE" },
++    { 0xd13, "Cortex-R52" },
++    { 0xd20, "Cortex-M23" },
++    { 0xd21, "Cortex-M33" },
++    { 0xd40, "Neoverse-V1" },
++    { 0xd41, "Cortex-A78" },
++    { 0xd42, "Cortex-A78AE" },
++    { 0xd44, "Cortex-X1" },
++    { 0xd46, "Cortex-510" },
++    { 0xd47, "Cortex-710" },
++    { 0xd48, "Cortex-X2" },
++    { 0xd49, "Neoverse-N2" },
++    { 0xd4a, "Neoverse-E1" },
++    { 0xd4b, "Cortex-A78C" },
++    { -1, "unknown" },
++};
++
++static const struct id_part brcm_part[] = {
++    { 0x0f, "Brahma B15" },
++    { 0x100, "Brahma B53" },
++    { 0x516, "ThunderX2" },
++    { -1, "unknown" },
++};
++
++static const struct id_part dec_part[] = {
++    { 0xa10, "SA110" },
++    { 0xa11, "SA1100" },
++    { -1, "unknown" },
++};
++
++static const struct id_part cavium_part[] = {
++    { 0x0a0, "ThunderX" },
++    { 0x0a1, "ThunderX 88XX" },
++    { 0x0a2, "ThunderX 81XX" },
++    { 0x0a3, "ThunderX 83XX" },
++    { 0x0af, "ThunderX2 99xx" },
++    { -1, "unknown" },
++};
++
++static const struct id_part apm_part[] = {
++    { 0x000, "X-Gene" },
++    { -1, "unknown" },
++};
++
++static const struct id_part qcom_part[] = {
++    { 0x00f, "Scorpion" },
++    { 0x02d, "Scorpion" },
++    { 0x04d, "Krait" },
++    { 0x06f, "Krait" },
++    { 0x201, "Kryo" },
++    { 0x205, "Kryo" },
++    { 0x211, "Kryo" },
++    { 0x800, "Falkor V1/Kryo" },
++    { 0x801, "Kryo V2" },
++    { 0x803, "Kryo 3XX Silver" },
++    { 0x804, "Kryo 4XX Gold" },
++    { 0x805, "Kryo 4XX Silver" },
++    { 0xc00, "Falkor" },
++    { 0xc01, "Saphira" },
++    { -1, "unknown" },
++};
++
++static const struct id_part samsung_part[] = {
++    { 0x001, "exynos-m1" },
++    { -1, "unknown" },
++};
++
++static const struct id_part nvidia_part[] = {
++    { 0x000, "Denver" },
++    { 0x003, "Denver 2" },
++    { 0x004, "Carmel" },
++    { -1, "unknown" },
++};
++
++static const struct id_part marvell_part[] = {
++    { 0x131, "Feroceon 88FR131" },
++    { 0x581, "PJ4/PJ4b" },
++    { 0x584, "PJ4B-MP" },
++    { -1, "unknown" },
++};
++
++static const struct id_part apple_part[] = {
++    { 0x022, "Icestorm" },
++    { 0x023, "Firestorm" },
++    { -1, "unknown" },
++};
++
++static const struct id_part faraday_part[] = {
++    { 0x526, "FA526" },
++    { 0x626, "FA626" },
++    { -1, "unknown" },
++};
++
++static const struct id_part intel_part[] = {
++    { 0x200, "i80200" },
++    { 0x210, "PXA250A" },
++    { 0x212, "PXA210A" },
++    { 0x242, "i80321-400" },
++    { 0x243, "i80321-600" },
++    { 0x290, "PXA250B/PXA26x" },
++    { 0x292, "PXA210B" },
++    { 0x2c2, "i80321-400-B0" },
++    { 0x2c3, "i80321-600-B0" },
++    { 0x2d0, "PXA250C/PXA255/PXA26x" },
++    { 0x2d2, "PXA210C" },
++    { 0x411, "PXA27x" },
++    { 0x41c, "IPX425-533" },
++    { 0x41d, "IPX425-400" },
++    { 0x41f, "IPX425-266" },
++    { 0x682, "PXA32x" },
++    { 0x683, "PXA930/PXA935" },
++    { 0x688, "PXA30x" },
++    { 0x689, "PXA31x" },
++    { 0xb11, "SA1110" },
++    { 0xc12, "IPX1200" },
++    { -1, "unknown" },
++};
++
++static const struct id_part fujitsu_part[] = {
++    { 0x001, "A64FX" },
++    { -1, "unknown" },
++};
++
++static const struct id_part hisi_part[] = {
0xd01, "Kunpeng-920" }, /* aka tsv110 */ ++ { -1, "unknown" }, ++}; ++ ++static const struct id_part ft_part[] = { ++ { 0x660, "FTC660" }, ++ { 0x661, "FTC661" }, ++ { 0x662, "FTC662" }, ++ { 0x663, "FTC663" }, ++ { -1, "unknown" }, ++}; ++ ++static const struct id_part unknown_part[] = { ++ { -1, "unknown" }, ++}; ++ ++struct hw_impl { ++ const int id; ++ const struct id_part *parts; ++ const char *name; ++}; ++ ++static const struct hw_impl hw_implementer[] = { ++ { 0x41, arm_part, "ARM" }, ++ { 0x42, brcm_part, "Broadcom" }, ++ { 0x43, cavium_part, "Cavium" }, ++ { 0x44, dec_part, "DEC" }, ++ { 0x46, fujitsu_part, "FUJITSU" }, ++ { 0x48, hisi_part, "HiSilicon" }, ++ { 0x49, unknown_part, "Infineon" }, ++ { 0x4d, unknown_part, "Motorola/Freescale" }, ++ { 0x4e, nvidia_part, "NVIDIA" }, ++ { 0x50, apm_part, "APM" }, ++ { 0x51, qcom_part, "Qualcomm" }, ++ { 0x53, samsung_part, "Samsung" }, ++ { 0x56, marvell_part, "Marvell" }, ++ { 0x61, apple_part, "Apple" }, ++ { 0x66, faraday_part, "Faraday" }, ++ { 0x69, intel_part, "Intel" }, ++ { 0x70, ft_part, "Phytium" }, ++ { 0xc0, unknown_part, "Ampere" }, ++ { -1, unknown_part, "unknown" }, ++}; ++ + static void _free_cpuinfo() + { + if (cpuinf) +@@ -83,23 +302,6 @@ static int lookup(char *line, char *pattern, char **value) + return 1; + } + +-static int do_shell(char *comm, char *buf) +-{ +- FILE *stream; +- stream = popen(comm, "r"); +- if (stream == NULL) { +- return 0; +- } +- fread(buf, sizeof(char), MIDSIZE, stream); +- pclose(stream); +- buf[strlen(buf) - 1] = '\0';//去掉结尾转行符 +- if (strlen(buf) == 0) { +- return 0; +- } +- +- return 1; +-} +- + static void _get_cpu_info() + { + // 我知道这里有竞态问题,但是不想引入pthread,所以算了 +@@ -163,6 +365,11 @@ static void _get_cpu_info() + } + fclose(fp); + ++ if(strstr(cpuinf->model, "Loongson")) ++ { ++ cpuinf->vendor = strdup("loongson"); ++ } ++ + if (cpuinf->flags) + { + if (strstr(cpuinf->flags, " svm ")) +@@ -174,42 +381,51 @@ static void _get_cpu_info() + if(cpuinf->vendor == NULL) + { + char ret[MIDSIZE]; +- do_shell("lscpu | grep \"厂商 ID\"", ret); +- if(strcmp(ret, "")) ++ int num = 0; ++ int part, j; ++ const struct id_part *parts = NULL; ++ fp = fopen("/sys/devices/system/cpu/cpu0/regs/identification/midr_el1", "rt"); ++ if (!fp) + { +- printf("test\n"); +- char *substr = ":"; +- char *date = strstr(ret, substr); +- strcpy(ret, date + 27); +- cpuinf->vendor = strdup(ret); ++ klog_err("midr_el1 读取失败:%s\n", strerror(errno)); ++ SAFE_FREE(cpuinf); ++ return ; + } +- ++ fgets(buffer, CPUINF_BUFSIZE, fp); ++ char *substr = "x"; ++ char *date = strstr(buffer, substr); ++ strcpy(buffer, date + 9); ++ sscanf(buffer,"%2x",&num) ; ++ ++ for (j = 0; hw_implementer[j].id != -1; j++) { ++ if (hw_implementer[j].id == num) { ++ parts = hw_implementer[j].parts; ++ cpuinf->vendor = strdup(hw_implementer[j].name); ++ break; ++ } ++ } ++ fclose(fp); + } + + if(cpuinf->model == NULL) + { +- char ret[MIDSIZE]; +- do_shell("lscpu | grep \"型号名称\"", ret); +- if(strcmp(ret, "")) ++ fp = fopen("/proc/cpuinfo", "rt"); ++ if (!fp) ++ { ++ klog_err("/proc/cpuinfo 读取失败:%s\n", strerror(errno)); ++ SAFE_FREE(cpuinf); ++ return ; ++ } ++ while(fgets(buffer, CPUINF_BUFSIZE, fp)) + { +- char *substr = ":"; +- char *date = strstr(ret, substr); +- strcpy(ret, date + 26); +- cpuinf->model = strdup(ret); ++ if (lookup(buffer, "Hardware", &cpuinf->model));//huawei 9A0 + } ++ fclose(fp); + } + + if(cpuinf->corenums == 0) + { +- char ret[MIDSIZE]; +- do_shell("lscpu | grep \"每个座的核数\"", ret); +- if(strcmp(ret, "")) +- { +- char *substr = ":"; +- 
char *date = strstr(ret, substr); +- strcpy(ret, date + 20); +- cpuinf->corenums = atoi(ret); +- } ++ cpuinf->corenums = sysconf(_SC_NPROCESSORS_ONLN); + } + + #endif +diff --git a/src/hardware/libkync.c b/src/hardware/libkync.c +index bfad819..5264355 100644 +--- a/src/hardware/libkync.c ++++ b/src/hardware/libkync.c +@@ -1,3 +1,4 @@ ++#define _GNU_SOURCE # required for NI_NUMERICHOST + #include "libkync.h" + #include + #include +@@ -12,6 +13,8 @@ + #include + #include + #include ++#include ++#include + + enum cardspec{ + NCSPEC_ALL, +@@ -201,9 +204,51 @@ char **_get_nc_cfg(const char *nc, enum cardcfg cfg) + memcpy(&sin, &stIf.ifr_ifru.ifru_addr, sizeof(sin)); + snprintf(res[0], NC_IPv4_SIZE, "%s", inet_ntoa(sin.sin_addr)); + }break; ++ case NCCFG_IPv6:{ ++ struct ifaddrs *ifap, *ifa; ++ struct sockaddr_in6 *sa; ++ char addr[INET6_ADDRSTRLEN] = {0}; ++ getifaddrs(&ifap); ++ for (ifa = ifap; ifa; ifa = ifa->ifa_next) ++ { ++ if (ifa->ifa_addr->sa_family == AF_INET6 && !strcmp(ifa->ifa_name, nc)) ++ { ++ sa = (struct sockaddr_in6 *)ifa->ifa_addr; ++ getnameinfo(ifa->ifa_addr, sizeof(struct sockaddr_in6), addr, ++ sizeof(addr), NULL, 0, NI_NUMERICHOST); ++ } ++ } ++ ++ res = malloc(sizeof(char*)); ++ if (!res) ++ { ++ klog_err("内存申请失败:%s\n", strerror(errno)); ++ close(sfd); ++ freeifaddrs(ifap); ++ return NULL; ++ } ++ res[0] = malloc(sizeof(char) * INET6_ADDRSTRLEN); ++ if (!res[0]) ++ { ++ klog_err("内存申请失败:%s\n", strerror(errno)); ++ close(sfd); ++ freeifaddrs(ifap); ++ return NULL; ++ } ++ ++ int i = 0; ++ while (addr[i] != '%' && addr[i] != '\0') ++ i++; ++ addr[i] = '\0'; ++ ++ sprintf(res[0], "%s",addr); ++ ++ freeifaddrs(ifap); ++ }break; + default: + break; + } ++ close(sfd); + return res; + } + +@@ -255,6 +300,19 @@ char* kdk_nc_get_private_ipv4(const char *nc) + return ipv4; + } + ++char* kdk_nc_get_private_ipv6(const char *nc) ++{ ++ if (!nc) ++ return NULL; ++ ++ char **ipv6list = _get_nc_cfg(nc, NCCFG_IPv6); ++ if (!ipv6list) ++ return NULL; ++ char *ipv6 = ipv6list[0]; ++ free(ipv6list); ++ return ipv6; ++} ++ + inline void kdk_nc_freeall(char **list) + { + if (! list) +diff --git a/src/hardware/test/kync-test.c b/src/hardware/test/kync-test.c +index 79f9916..eba8659 100644 +--- a/src/hardware/test/kync-test.c ++++ b/src/hardware/test/kync-test.c +@@ -11,9 +11,11 @@ int main() + { + char *mac = kdk_nc_get_phymac(cards[index]); + char *ipv4 = kdk_nc_get_private_ipv4(cards[index]); +- printf("Card %zd: %s\tStatus: %s\tMac: %s\tIPv4: %s\n", index + 1, cards[index], kdk_nc_is_up(cards[index]) == 1 ? "Up" : "Down", mac, ipv4); ++ char *ipv6 = kdk_nc_get_private_ipv6(cards[index]); ++ printf("Card %zd: %s\tStatus: %s\tMac: %s\tIPv4: %s\tIPv6: %s\n", index + 1, cards[index], kdk_nc_is_up(cards[index]) == 1 ? 
"Up" : "Down", mac, ipv4,ipv6); + free(mac); + free(ipv4); ++ free(ipv6); + index ++; + } + kdk_nc_freeall(cards); +diff --git a/src/kdkocr/CMakeLists.txt b/src/kdkocr/CMakeLists.txt +index a3ee095..e4de886 100644 +--- a/src/kdkocr/CMakeLists.txt ++++ b/src/kdkocr/CMakeLists.txt +@@ -64,7 +64,11 @@ list(APPEND kyocr_libraries ${TESSERACT_PKG_LIBRARIES}) + list(APPEND kyocr_libraries -llept) + message("kyocr_cflags is ${kyocr_libraries}") + +-if(CMAKE_HOST_SYSTEM_PROCESSOR MATCHES "loongarch64") ++if(NOT EXISTS "${CMAKE_CURRENT_SOURCE_DIR}/libs/amd64/paddle_inference/paddle/lib/libpaddle_inference.so") ++ add_definitions(-DLOONGARCH64) ++elseif(CMAKE_HOST_SYSTEM_PROCESSOR MATCHES "loongarch64") ++ add_definitions(-DLOONGARCH64) ++elseif(CMAKE_HOST_SYSTEM_PROCESSOR MATCHES "sw_64") + add_definitions(-DLOONGARCH64) + else() + if(CMAKE_HOST_SYSTEM_PROCESSOR MATCHES "i386") +@@ -162,23 +166,36 @@ include_directories(${FETCHCONTENT_BASE_DIR}/extern_autolog-src) + + endif() + +-if(CMAKE_HOST_SYSTEM_PROCESSOR MATCHES "loongarch64") ++if(NOT EXISTS "${CMAKE_CURRENT_SOURCE_DIR}/libs/amd64/paddle_inference/paddle/lib/libpaddle_inference.so") ++ message("no paddle lib") ++ add_library(kyocr SHARED libkyocr.cpp) ++ target_link_libraries(kyocr ${kyocr_libraries}) ++ target_compile_options(kyocr PUBLIC ${kyocr_cflags}) ++elseif(CMAKE_HOST_SYSTEM_PROCESSOR MATCHES "loongarch64") + message(loongarch64) + add_library(kyocr SHARED libkyocr.cpp) ++ target_link_libraries(kyocr ${kyocr_libraries}) ++ target_compile_options(kyocr PUBLIC ${kyocr_cflags}) ++elseif(CMAKE_HOST_SYSTEM_PROCESSOR MATCHES "sw_64") ++ message(loongarch64) ++ add_library(kyocr SHARED libkyocr.cpp) ++ target_link_libraries(kyocr ${kyocr_libraries}) ++ target_compile_options(kyocr PUBLIC ${kyocr_cflags}) + else() + add_library(kyocr SHARED libkyocr.cpp ocr_main.cpp utility.cpp preprocess_op.cpp postprocess_op.cpp) ++ target_link_libraries(kyocr ${OpenCV_LIBS} ${DEPS}) + endif() + set_target_properties(kyocr PROPERTIES VERSION 1.2.0 SOVERSION 1) + #target_link_libraries(kdkOCR -lleptonica) +-target_link_libraries(kyocr ${OpenCV_LIBS} ${DEPS}) +-target_link_libraries(kyocr ${kyocr_libraries}) +-target_compile_options(kyocr PUBLIC ${kyocr_cflags}) ++ + + + install(TARGETS kyocr LIBRARY DESTINATION lib/kysdk/kysdk-system) + install(FILES ${PROJECT_SOURCE_DIR}/libkyocr.hpp DESTINATION include/kysdk/kysdk-system) + +-if(CMAKE_HOST_SYSTEM_PROCESSOR MATCHES "x86_64") ++if(NOT EXISTS "${CMAKE_CURRENT_SOURCE_DIR}/libs/amd64/paddle_inference/paddle/lib/libpaddle_inference.so") ++ install(FILES ${PROJECT_SOURCE_DIR}/libs/libpaddle_inference DESTINATION lib/libpaddle_inference.so) ++elseif(CMAKE_HOST_SYSTEM_PROCESSOR MATCHES "x86_64") + install(FILES ${PROJECT_SOURCE_DIR}/libs/amd64/paddle_inference/paddle/lib/libpaddle_inference.so DESTINATION lib/) + elseif(CMAKE_HOST_SYSTEM_PROCESSOR MATCHES "AMD64") + install(FILES ${PROJECT_SOURCE_DIR}/libs/amd64/paddle_inference/paddle/lib/libpaddle_inference.so DESTINATION lib/) +@@ -187,6 +204,10 @@ elseif(CMAKE_HOST_SYSTEM_PROCESSOR MATCHES "aarch64") + elseif(CMAKE_HOST_SYSTEM_PROCESSOR MATCHES "mips64") + install(FILES ${PROJECT_SOURCE_DIR}/libs/mips64el/paddle_inference/paddle/lib/libpaddle_inference.so DESTINATION lib/) + elseif(CMAKE_HOST_SYSTEM_PROCESSOR MATCHES "loongarch64") +- install(FILES ${PROJECT_SOURCE_DIR}/libs/loongarch64/paddle_inference/paddle/lib/libpaddle_inference.so DESTINATION lib/) ++ install(FILES ${PROJECT_SOURCE_DIR}/libs/libpaddle_inference DESTINATION lib/libpaddle_inference.so) ++# 
install(FILES ${PROJECT_SOURCE_DIR}/libs/loongarch64/paddle_inference/paddle/lib/libpaddle_inference.so DESTINATION lib/) ++elseif(CMAKE_HOST_SYSTEM_PROCESSOR MATCHES "sw_64") ++ install(FILES ${PROJECT_SOURCE_DIR}/libs/libpaddle_inference DESTINATION lib/libpaddle_inference.so) ++# install(FILES ${PROJECT_SOURCE_DIR}/libs/sw64/paddle_inference/paddle/lib/libpaddle_inference.so DESTINATION lib/) + endif() + #target_link_libraries(test libkdkOCR.so) +diff --git a/src/kdkocr/libs/libpaddle_inference b/src/kdkocr/libs/libpaddle_inference +new file mode 100644 +index 0000000..e69de29 +diff --git a/src/kdkocr/libs/sw64/paddle_inference/paddle/include/crypto/cipher.h b/src/kdkocr/libs/sw64/paddle_inference/paddle/include/crypto/cipher.h +new file mode 100755 +index 0000000..fc31653 +--- /dev/null ++++ b/src/kdkocr/libs/sw64/paddle_inference/paddle/include/crypto/cipher.h +@@ -0,0 +1,50 @@ ++// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. ++// ++// Licensed under the Apache License, Version 2.0 (the "License"); ++// you may not use this file except in compliance with the License. ++// You may obtain a copy of the License at ++// ++// http://www.apache.org/licenses/LICENSE-2.0 ++// ++// Unless required by applicable law or agreed to in writing, software ++// distributed under the License is distributed on an "AS IS" BASIS, ++// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++// See the License for the specific language governing permissions and ++// limitations under the License. ++ ++#pragma once ++ ++#include ++#include ++#include ++ ++namespace paddle { ++namespace framework { ++ ++class Cipher { ++ public: ++ Cipher() = default; ++ virtual ~Cipher() {} ++ // encrypt string ++ virtual std::string Encrypt(const std::string& plaintext, ++ const std::string& key) = 0; ++ // decrypt string ++ virtual std::string Decrypt(const std::string& ciphertext, ++ const std::string& key) = 0; ++ ++ // encrypt strings and read them to file, ++ virtual void EncryptToFile(const std::string& plaintext, ++ const std::string& key, ++ const std::string& filename) = 0; ++ // read from file and decrypt them ++ virtual std::string DecryptFromFile(const std::string& key, ++ const std::string& filename) = 0; ++}; ++ ++class CipherFactory { ++ public: ++ CipherFactory() = default; ++ static std::shared_ptr CreateCipher(const std::string& config_file); ++}; ++} // namespace framework ++} // namespace paddle +diff --git a/src/kdkocr/libs/sw64/paddle_inference/paddle/include/experimental/ext_all.h b/src/kdkocr/libs/sw64/paddle_inference/paddle/include/experimental/ext_all.h +new file mode 100755 +index 0000000..f2b3bcf +--- /dev/null ++++ b/src/kdkocr/libs/sw64/paddle_inference/paddle/include/experimental/ext_all.h +@@ -0,0 +1,32 @@ ++/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. ++ ++Licensed under the Apache License, Version 2.0 (the "License"); ++you may not use this file except in compliance with the License. ++You may obtain a copy of the License at ++ ++ http://www.apache.org/licenses/LICENSE-2.0 ++ ++Unless required by applicable law or agreed to in writing, software ++distributed under the License is distributed on an "AS IS" BASIS, ++WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++See the License for the specific language governing permissions and ++limitations under the License. */ ++ ++#pragma once ++ ++#if !defined(_MSC_VER) && __cplusplus < 199711L ++#error C++11 or later compatible compiler is required to use Paddle. 
++#endif ++ ++#ifdef _WIN32 ++#ifndef NOMINMAX ++#define NOMINMAX // msvc max/min macro conflict with std::min/max ++#endif ++#endif ++ ++#include "ext_dispatch.h" // NOLINT ++#include "ext_dtype.h" // NOLINT ++#include "ext_exception.h" // NOLINT ++#include "ext_op_meta_info.h" // NOLINT ++#include "ext_place.h" // NOLINT ++#include "ext_tensor.h" // NOLINT +diff --git a/src/kdkocr/libs/sw64/paddle_inference/paddle/include/experimental/ext_dispatch.h b/src/kdkocr/libs/sw64/paddle_inference/paddle/include/experimental/ext_dispatch.h +new file mode 100755 +index 0000000..eed7360 +--- /dev/null ++++ b/src/kdkocr/libs/sw64/paddle_inference/paddle/include/experimental/ext_dispatch.h +@@ -0,0 +1,98 @@ ++/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. ++ ++Licensed under the Apache License, Version 2.0 (the "License"); ++you may not use this file except in compliance with the License. ++You may obtain a copy of the License at ++ ++ http://www.apache.org/licenses/LICENSE-2.0 ++ ++Unless required by applicable law or agreed to in writing, software ++distributed under the License is distributed on an "AS IS" BASIS, ++WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++See the License for the specific language governing permissions and ++limitations under the License. */ ++ ++#pragma once ++ ++#include "ext_dtype.h" // NOLINT ++#include "ext_exception.h" // NOLINT ++ ++namespace paddle { ++ ++///////// Basic Marco /////////// ++ ++#define PD_PRIVATE_CASE_TYPE_USING_HINT(NAME, enum_type, type, HINT, ...) \ ++ case enum_type: { \ ++ using HINT = type; \ ++ __VA_ARGS__(); \ ++ break; \ ++ } ++ ++#define PD_PRIVATE_CASE_TYPE(NAME, enum_type, type, ...) \ ++ PD_PRIVATE_CASE_TYPE_USING_HINT(NAME, enum_type, type, data_t, __VA_ARGS__) ++ ++///////// Floating Dispatch Marco /////////// ++ ++#define PD_DISPATCH_FLOATING_TYPES(TYPE, NAME, ...) \ ++ [&] { \ ++ const auto& __dtype__ = TYPE; \ ++ switch (__dtype__) { \ ++ PD_PRIVATE_CASE_TYPE(NAME, ::paddle::DataType::FLOAT32, float, \ ++ __VA_ARGS__) \ ++ PD_PRIVATE_CASE_TYPE(NAME, ::paddle::DataType::FLOAT64, double, \ ++ __VA_ARGS__) \ ++ default: \ ++ PD_THROW("function " #NAME " is not implemented for data type `", \ ++ ::paddle::ToString(__dtype__), "`"); \ ++ } \ ++ }() ++ ++///////// Integral Dispatch Marco /////////// ++ ++#define PD_DISPATCH_INTEGRAL_TYPES(TYPE, NAME, ...) \ ++ [&] { \ ++ const auto& __dtype__ = TYPE; \ ++ switch (__dtype__) { \ ++ PD_PRIVATE_CASE_TYPE(NAME, ::paddle::DataType::INT32, int, __VA_ARGS__) \ ++ PD_PRIVATE_CASE_TYPE(NAME, ::paddle::DataType::INT64, int64_t, \ ++ __VA_ARGS__) \ ++ PD_PRIVATE_CASE_TYPE(NAME, ::paddle::DataType::INT8, int8_t, \ ++ __VA_ARGS__) \ ++ PD_PRIVATE_CASE_TYPE(NAME, ::paddle::DataType::UINT8, uint8_t, \ ++ __VA_ARGS__) \ ++ PD_PRIVATE_CASE_TYPE(NAME, ::paddle::DataType::INT16, int16_t, \ ++ __VA_ARGS__) \ ++ default: \ ++ PD_THROW("function " #NAME " is not implemented for data type `" + \ ++ ::paddle::ToString(__dtype__) + "`"); \ ++ } \ ++ }() ++ ++///////// Floating and Integral Dispatch Marco /////////// ++ ++#define PD_DISPATCH_FLOATING_AND_INTEGRAL_TYPES(TYPE, NAME, ...) 
\ ++ [&] { \ ++ const auto& __dtype__ = TYPE; \ ++ switch (__dtype__) { \ ++ PD_PRIVATE_CASE_TYPE(NAME, ::paddle::DataType::FLOAT32, float, \ ++ __VA_ARGS__) \ ++ PD_PRIVATE_CASE_TYPE(NAME, ::paddle::DataType::FLOAT64, double, \ ++ __VA_ARGS__) \ ++ PD_PRIVATE_CASE_TYPE(NAME, ::paddle::DataType::INT32, int, __VA_ARGS__) \ ++ PD_PRIVATE_CASE_TYPE(NAME, ::paddle::DataType::INT64, int64_t, \ ++ __VA_ARGS__) \ ++ PD_PRIVATE_CASE_TYPE(NAME, ::paddle::DataType::INT8, int8_t, \ ++ __VA_ARGS__) \ ++ PD_PRIVATE_CASE_TYPE(NAME, ::paddle::DataType::UINT8, uint8_t, \ ++ __VA_ARGS__) \ ++ PD_PRIVATE_CASE_TYPE(NAME, ::paddle::DataType::INT16, int16_t, \ ++ __VA_ARGS__) \ ++ default: \ ++ PD_THROW("function " #NAME " is not implemented for data type `" + \ ++ ::paddle::ToString(__dtype__) + "`"); \ ++ } \ ++ }() ++ ++// TODO(chenweihang): Add more Marcos in the future if needed ++ ++} // namespace paddle +diff --git a/src/kdkocr/libs/sw64/paddle_inference/paddle/include/experimental/ext_dll_decl.h b/src/kdkocr/libs/sw64/paddle_inference/paddle/include/experimental/ext_dll_decl.h +new file mode 100755 +index 0000000..3dbea5e +--- /dev/null ++++ b/src/kdkocr/libs/sw64/paddle_inference/paddle/include/experimental/ext_dll_decl.h +@@ -0,0 +1,27 @@ ++// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. ++// ++// Licensed under the Apache License, Version 2.0 (the "License"); ++// you may not use this file except in compliance with the License. ++// You may obtain a copy of the License at ++// ++// http://www.apache.org/licenses/LICENSE-2.0 ++// ++// Unless required by applicable law or agreed to in writing, software ++// distributed under the License is distributed on an "AS IS" BASIS, ++// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++// See the License for the specific language governing permissions and ++// limitations under the License. ++ ++#pragma once ++ ++#if defined(_WIN32) ++#ifndef PD_DLL_DECL ++#ifdef PADDLE_DLL_EXPORT ++#define PD_DLL_DECL __declspec(dllexport) ++#else ++#define PD_DLL_DECL __declspec(dllimport) ++#endif // PADDLE_DLL_EXPORT ++#endif // PD_DLL_DECL ++#else ++#define PD_DLL_DECL ++#endif // _WIN32 +diff --git a/src/kdkocr/libs/sw64/paddle_inference/paddle/include/experimental/ext_dtype.h b/src/kdkocr/libs/sw64/paddle_inference/paddle/include/experimental/ext_dtype.h +new file mode 100755 +index 0000000..46c4bac +--- /dev/null ++++ b/src/kdkocr/libs/sw64/paddle_inference/paddle/include/experimental/ext_dtype.h +@@ -0,0 +1,81 @@ ++/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. ++ ++Licensed under the Apache License, Version 2.0 (the "License"); ++you may not use this file except in compliance with the License. ++You may obtain a copy of the License at ++ ++ http://www.apache.org/licenses/LICENSE-2.0 ++ ++Unless required by applicable law or agreed to in writing, software ++distributed under the License is distributed on an "AS IS" BASIS, ++WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++See the License for the specific language governing permissions and ++limitations under the License. */ ++#pragma once ++ ++#include ++#include ++ ++#include "ext_exception.h" // NOLINT ++ ++namespace paddle { ++ ++enum class DataType { ++ BOOL, ++ INT8, ++ UINT8, ++ INT16, ++ INT32, ++ INT64, ++ FLOAT32, ++ FLOAT64, ++ // TODO(JiabinYang) support more data types if needed. 
++}; ++ ++inline std::string ToString(DataType dtype) { ++ switch (dtype) { ++ case DataType::BOOL: ++ return "bool"; ++ case DataType::INT8: ++ return "int8_t"; ++ case DataType::UINT8: ++ return "uint8_t"; ++ case DataType::INT16: ++ return "int16_t"; ++ case DataType::INT32: ++ return "int32_t"; ++ case DataType::INT64: ++ return "int64_t"; ++ case DataType::FLOAT32: ++ return "float"; ++ case DataType::FLOAT64: ++ return "double"; ++ default: ++ PD_THROW("Unsupported paddle enum data type."); ++ } ++} ++ ++#define PD_FOR_EACH_DATA_TYPE(_) \ ++ _(bool, DataType::BOOL) \ ++ _(int8_t, DataType::INT8) \ ++ _(uint8_t, DataType::UINT8) \ ++ _(int16_t, DataType::INT16) \ ++ _(int, DataType::INT32) \ ++ _(int64_t, DataType::INT64) \ ++ _(float, DataType::FLOAT32) \ ++ _(double, DataType::FLOAT64) ++ ++template ++struct DataTypeToCPPType; ++ ++#define PD_SPECIALIZE_DataTypeToCPPType(cpp_type, data_type) \ ++ template <> \ ++ struct DataTypeToCPPType { \ ++ using type = cpp_type; \ ++ }; ++ ++PD_FOR_EACH_DATA_TYPE(PD_SPECIALIZE_DataTypeToCPPType) ++ ++#undef PD_SPECIALIZE_DataTypeToCPPType ++ ++} // namespace paddle +diff --git a/src/kdkocr/libs/sw64/paddle_inference/paddle/include/experimental/ext_exception.h b/src/kdkocr/libs/sw64/paddle_inference/paddle/include/experimental/ext_exception.h +new file mode 100755 +index 0000000..f6ea757 +--- /dev/null ++++ b/src/kdkocr/libs/sw64/paddle_inference/paddle/include/experimental/ext_exception.h +@@ -0,0 +1,108 @@ ++/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. ++ ++Licensed under the Apache License, Version 2.0 (the "License"); ++you may not use this file except in compliance with the License. ++You may obtain a copy of the License at ++ ++ http://www.apache.org/licenses/LICENSE-2.0 ++ ++Unless required by applicable law or agreed to in writing, software ++distributed under the License is distributed on an "AS IS" BASIS, ++WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++See the License for the specific language governing permissions and ++limitations under the License. */ ++ ++#pragma once ++ ++#include ++#include ++#include ++ ++namespace paddle { ++ ++//////////////// Exception handling and Error Message ///////////////// ++#if !defined(_WIN32) ++#define PD_UNLIKELY(expr) (__builtin_expect(static_cast(expr), 0)) ++#define PD_LIKELY(expr) (__builtin_expect(static_cast(expr), 1)) ++#else ++#define PD_UNLIKELY(expr) (expr) ++#define PD_LIKELY(expr) (expr) ++#endif ++ ++struct PD_Exception : public std::exception { ++ public: ++ template ++ explicit PD_Exception(const std::string& msg, const char* file, int line, ++ const char* default_msg) { ++ std::ostringstream sout; ++ if (msg.empty()) { ++ sout << default_msg << "\n [" << file << ":" << line << "]"; ++ } else { ++ sout << msg << "\n [" << file << ":" << line << "]"; ++ } ++ err_msg_ = sout.str(); ++ } ++ ++ const char* what() const noexcept override { return err_msg_.c_str(); } ++ ++ private: ++ std::string err_msg_; ++}; ++ ++class ErrorMessage { ++ public: ++ template ++ explicit ErrorMessage(const Args&... args) { ++ build_string(args...); ++ } ++ ++ void build_string() { oss << ""; } ++ ++ template ++ void build_string(const T& t) { ++ oss << t; ++ } ++ ++ template ++ void build_string(const T& t, const Args&... 
args) { ++ build_string(t); ++ build_string(args...); ++ } ++ ++ std::string to_string() { return oss.str(); } ++ ++ private: ++ std::ostringstream oss; ++}; ++ ++#if defined _WIN32 ++#define HANDLE_THE_ERROR try { ++#define END_HANDLE_THE_ERROR \ ++ } \ ++ catch (const std::exception& e) { \ ++ std::cerr << e.what() << std::endl; \ ++ throw e; \ ++ } ++#else ++#define HANDLE_THE_ERROR ++#define END_HANDLE_THE_ERROR ++#endif ++ ++#define PD_CHECK(COND, ...) \ ++ do { \ ++ if (PD_UNLIKELY(!(COND))) { \ ++ auto __message__ = ::paddle::ErrorMessage(__VA_ARGS__).to_string(); \ ++ throw ::paddle::PD_Exception(__message__, __FILE__, __LINE__, \ ++ "Expected " #COND \ ++ ", but it's not satisfied."); \ ++ } \ ++ } while (0) ++ ++#define PD_THROW(...) \ ++ do { \ ++ auto __message__ = ::paddle::ErrorMessage(__VA_ARGS__).to_string(); \ ++ throw ::paddle::PD_Exception(__message__, __FILE__, __LINE__, \ ++ "An error occured."); \ ++ } while (0) ++ ++} // namespace paddle +diff --git a/src/kdkocr/libs/sw64/paddle_inference/paddle/include/experimental/ext_op_meta_info.h b/src/kdkocr/libs/sw64/paddle_inference/paddle/include/experimental/ext_op_meta_info.h +new file mode 100755 +index 0000000..a3b9a4c +--- /dev/null ++++ b/src/kdkocr/libs/sw64/paddle_inference/paddle/include/experimental/ext_op_meta_info.h +@@ -0,0 +1,381 @@ ++/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. ++ ++Licensed under the Apache License, Version 2.0 (the "License"); ++you may not use this file except in compliance with the License. ++You may obtain a copy of the License at ++ ++ http://www.apache.org/licenses/LICENSE-2.0 ++ ++Unless required by applicable law or agreed to in writing, software ++distributed under the License is distributed on an "AS IS" BASIS, ++WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++See the License for the specific language governing permissions and ++limitations under the License. */ ++ ++#pragma once ++ ++#include ++#include ++#include ++#include ++ ++#include ++ ++#include "ext_dll_decl.h" // NOLINT ++#include "ext_exception.h" // NOLINT ++#include "ext_tensor.h" // NOLINT ++ ++/** ++ * Op Meta Info Related Define. ++ * ++ * Used to maintain operator core information. 
++ * ++ */ ++ ++namespace paddle { ++namespace framework { ++class PD_DLL_DECL OpMetaInfoHelper; ++} // namespace framework ++ ++using Tensor = paddle::Tensor; ++ ++///////////////// Util Marco Define //////////////// ++ ++#define PD_DISABLE_COPY_AND_ASSIGN(classname) \ ++ private: \ ++ classname(const classname&) = delete; \ ++ classname(classname&&) = delete; \ ++ classname& operator=(const classname&) = delete; \ ++ classname& operator=(classname&&) = delete ++ ++#define STATIC_ASSERT_GLOBAL_NAMESPACE(uniq_name, msg) \ ++ struct __test_global_namespace_##uniq_name##__ {}; \ ++ static_assert(std::is_same<::__test_global_namespace_##uniq_name##__, \ ++ __test_global_namespace_##uniq_name##__>::value, \ ++ msg) ++ ++///////////////// Util Define and Function //////////////// ++ ++inline std::string Grad(const std::string& var_name) { ++ std::string result; ++ result.reserve(var_name.size() + 5U); ++ result += var_name; ++ result += "@GRAD"; ++ return result; ++} ++ ++////////////////////// Kernel Function (PD_KERNEL) //////////////////////// ++ ++// Record Op kernel core function ++using KernelFunc = std::vector (*)(std::vector inputs, ++ std::vector attrs); ++ ++#define PD_SPECIALIZE_ComputeCallHelper(attr_type) \ ++ template \ ++ struct ComputeCallHelper { \ ++ template \ ++ static Return Compute(std::vector inputs, \ ++ std::vector attrs, \ ++ const PreviousArgs&... pargs) { \ ++ try { \ ++ attr_type arg = boost::any_cast(attrs[attr_idx]); \ ++ return ComputeCallHelper::template Compute( \ ++ inputs, attrs, pargs..., arg); \ ++ } catch (boost::bad_any_cast&) { \ ++ PD_THROW( \ ++ "Attribute cast error in custom operator. Expected " #attr_type \ ++ " value."); \ ++ } \ ++ } \ ++ } ++ ++template ++struct TypeTag {}; ++ ++template ++struct KernelFuncImpl; ++ ++template ++struct KernelFuncImpl { ++ static Return Compute(std::vector inputs, ++ std::vector attrs) { ++ return ComputeCallHelper>::template Compute<0, 0>( ++ inputs, attrs); ++ } ++ ++ private: ++ template ++ struct ComputeCallHelper; ++ ++ // for Tensor input ++ template ++ struct ComputeCallHelper { ++ template ++ static Return Compute(std::vector inputs, ++ std::vector attrs, ++ const PreviousArgs&... pargs) { ++ static_assert(attr_idx == 0, ++ "Input tensor should appear before attributes."); ++ const Tensor& arg = inputs[in_idx]; ++ return ComputeCallHelper::template Compute( ++ inputs, attrs, pargs..., arg); ++ } ++ }; ++ ++ PD_SPECIALIZE_ComputeCallHelper(bool); ++ PD_SPECIALIZE_ComputeCallHelper(int); ++ PD_SPECIALIZE_ComputeCallHelper(float); ++ PD_SPECIALIZE_ComputeCallHelper(int64_t); ++ PD_SPECIALIZE_ComputeCallHelper(std::string); ++ PD_SPECIALIZE_ComputeCallHelper(std::vector); ++ PD_SPECIALIZE_ComputeCallHelper(std::vector); ++ PD_SPECIALIZE_ComputeCallHelper(std::vector); ++ PD_SPECIALIZE_ComputeCallHelper(std::vector); ++ // TODO(chenweihang): support other attribute type if needed. ++ // Why not support other attribute type here? ++ // - boost::blank, std::vector and std::vector ++ // are not used in op ++ // - BlockDesc* and std::vector are used in framework ++ // end: base template ++ template ++ struct ComputeCallHelper> { ++ template ++ static Return Compute(std::vector inputs, ++ std::vector attrs, const Args&... args) { ++ return impl_fn(args...); ++ } ++ }; ++}; ++ ++#define PD_KERNEL(...) 
\ ++ ::paddle::KernelFuncImpl::Compute ++ ++/////////////// InferShape Function (PD_INFER_SHAPE) /////////////// ++ ++// Record Op infershape core function ++using InferShapeFunc = std::vector> (*)( ++ std::vector> input_shapes); ++ ++template ++struct InferShapeFuncImpl; ++ ++template ++struct InferShapeFuncImpl { ++ static Return InferShape(std::vector> input_shapes) { ++ return InferShapeCallHelper>::template InferShape<0>( ++ input_shapes); ++ } ++ ++ private: ++ template ++ struct InferShapeCallHelper; ++ ++ // only one type input: std::vector ++ template ++ struct InferShapeCallHelper, Tail...> { ++ template ++ static Return InferShape(std::vector> input_shapes, ++ const PreviousArgs&... pargs) { ++ std::vector arg = input_shapes[in_idx]; ++ return InferShapeCallHelper::template InferShape( ++ input_shapes, pargs..., arg); ++ } ++ }; ++ ++ // end: base template ++ template ++ struct InferShapeCallHelper> { ++ template ++ static Return InferShape(std::vector> input_shapes, ++ const Args&... args) { ++ return impl_fn(args...); ++ } ++ }; ++}; ++ ++#define PD_INFER_SHAPE(...) \ ++ ::paddle::InferShapeFuncImpl::InferShape ++ ++/////////////// InferDataType Function (PD_INFER_DTYPE) /////////////// ++ ++// Record Op Infer dtype core function ++using InferDtypeFunc = ++ std::vector (*)(std::vector input_dtypes); ++ ++template ++struct InferDtypeFuncImpl; ++ ++template ++struct InferDtypeFuncImpl { ++ static Return InferDtype(std::vector input_dtypes) { ++ return InferDtypeCallHelper>::template InferDtype<0>( ++ input_dtypes); ++ } ++ ++ private: ++ template ++ struct InferDtypeCallHelper; ++ ++ // Only one type input now: DataType ++ template ++ struct InferDtypeCallHelper { ++ template ++ static Return InferDtype(std::vector input_dtypes, ++ const PreviousArgs&... pargs) { ++ DataType arg = input_dtypes[in_idx]; ++ return InferDtypeCallHelper::template InferDtype( ++ input_dtypes, pargs..., arg); ++ } ++ }; ++ ++ // end: base template ++ template ++ struct InferDtypeCallHelper> { ++ template ++ static Return InferDtype(std::vector input_dtypes, ++ const Args&... args) { ++ return impl_fn(args...); ++ } ++ }; ++}; ++ ++#define PD_INFER_DTYPE(...) \ ++ ::paddle::InferDtypeFuncImpl::InferDtype ++ ++////////////////////// Op Meta Info ////////////////////// ++ ++class PD_DLL_DECL OpMetaInfo { ++ public: ++ explicit OpMetaInfo(const std::string& op_name) : name_(op_name) {} ++ ++ // format: {"", "", ...} ++ OpMetaInfo& Inputs(std::vector&& inputs); ++ ++ // format: {"", "", ...} ++ OpMetaInfo& Outputs(std::vector&& outputs); ++ ++ // format: {":", ":", ...} ++ OpMetaInfo& Attrs(std::vector&& attrs); ++ ++ // format: PD_KERNEL(...) ++ OpMetaInfo& SetKernelFn(KernelFunc&& func); ++ ++ // format: PD_INFER_SHAPE(...) ++ OpMetaInfo& SetInferShapeFn(InferShapeFunc&& func); ++ ++ // format: PD_INFER_DTYPE(...) ++ OpMetaInfo& SetInferDtypeFn(InferDtypeFunc&& func); ++ ++ private: ++ friend class framework::OpMetaInfoHelper; ++ ++ // 1. desc info ++ std::string name_; ++ std::vector inputs_; ++ std::vector outputs_; ++ std::vector attrs_; ++ ++ // 2. func info ++ KernelFunc kernel_fn_{nullptr}; ++ InferShapeFunc infer_shape_fn_{nullptr}; ++ InferDtypeFunc infer_dtype_fn_{nullptr}; ++}; ++ ++//////////////// Op Meta Info Map ///////////////// ++ ++class PD_DLL_DECL OpMetaInfoMap { ++ public: ++ // this function's impl should keep in header file. 
++ // if move to cc file, meta info can not be added ++ // into map ++ static OpMetaInfoMap& Instance() { ++ static OpMetaInfoMap g_custom_op_meta_info_map; ++ return g_custom_op_meta_info_map; ++ } ++ ++ std::vector& operator[](const std::string& name); ++ ++ const std::unordered_map>& GetMap() ++ const; ++ ++ private: ++ OpMetaInfoMap() = default; ++ std::unordered_map> map_; ++ ++ PD_DISABLE_COPY_AND_ASSIGN(OpMetaInfoMap); ++}; ++ ++//////////////// Op Meta Info Builder ///////////////// ++ ++class PD_DLL_DECL OpMetaInfoBuilder { ++ public: ++ explicit OpMetaInfoBuilder(std::string&& name, size_t index); ++ OpMetaInfoBuilder& Inputs(std::vector&& inputs); ++ OpMetaInfoBuilder& Outputs(std::vector&& outputs); ++ OpMetaInfoBuilder& Attrs(std::vector&& attrs); ++ OpMetaInfoBuilder& SetKernelFn(KernelFunc func); ++ OpMetaInfoBuilder& SetInferShapeFn(InferShapeFunc func); ++ OpMetaInfoBuilder& SetInferDtypeFn(InferDtypeFunc func); ++ ++ private: ++ // Forward Op name ++ std::string name_; ++ // ref current info ptr ++ OpMetaInfo* info_ptr_; ++ // The current op meta info index in vector ++ // - 0: op, 1: grad_op, 2: grad_grad_op ++ size_t index_; ++}; ++ ++/////////////////////// Op register API ///////////////////////// ++ ++// For inference: compile directly with framework ++// Call after PD_BUILD_OP(...) ++void RegisterAllCustomOperator(); ++ ++// Using this api to load compiled custom operator's dynamic library and ++// register Custom ++// Operator into it ++void LoadCustomOperatorLib(const std::string& dso_name); ++ ++/////////////////////// Op register Macro ///////////////////////// ++ ++#define PD_BUILD_OP(op_name) \ ++ STATIC_ASSERT_GLOBAL_NAMESPACE( \ ++ __reg_op__##op_name, "PD_BUILD_OP must be called in global namespace."); \ ++ static ::paddle::OpMetaInfoBuilder __op_meta_info_##op_name##__ = \ ++ ::paddle::OpMetaInfoBuilder(#op_name, 0) ++ ++#define PD_BUILD_GRAD_OP(op_name) \ ++ STATIC_ASSERT_GLOBAL_NAMESPACE( \ ++ __reg_grad_op__##op_name, \ ++ "PD_BUILD_GRAD_OP must be called in global namespace."); \ ++ static ::paddle::OpMetaInfoBuilder __grad_op_meta_info_##op_name##__ = \ ++ ::paddle::OpMetaInfoBuilder(#op_name, 1) ++ ++#define PD_BUILD_DOUBLE_GRAD_OP(op_name) \ ++ STATIC_ASSERT_GLOBAL_NAMESPACE( \ ++ __reg_grad_grad_op__##op_name, \ ++ "PD_BUILD_DOUBLE_GRAD_OP must be called in global namespace."); \ ++ static ::paddle::OpMetaInfoBuilder __grad_grad_op_meta_info_##op_name##__ = \ ++ ::paddle::OpMetaInfoBuilder(#op_name, 2) ++ ++} // namespace paddle ++ ++///////////////////// C API /////////////////// ++ ++#ifdef __cplusplus ++extern "C" { ++#endif ++ ++#if defined(_WIN32) ++// C-API to get global OpMetaInfoMap. ++__declspec(dllexport) inline paddle::OpMetaInfoMap& PD_GetOpMetaInfoMap() { ++ return paddle::OpMetaInfoMap::Instance(); ++} ++#endif // _WIN32 ++ ++#ifdef __cplusplus ++} ++#endif +diff --git a/src/kdkocr/libs/sw64/paddle_inference/paddle/include/experimental/ext_place.h b/src/kdkocr/libs/sw64/paddle_inference/paddle/include/experimental/ext_place.h +new file mode 100755 +index 0000000..91d4f41 +--- /dev/null ++++ b/src/kdkocr/libs/sw64/paddle_inference/paddle/include/experimental/ext_place.h +@@ -0,0 +1,22 @@ ++/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. ++ ++Licensed under the Apache License, Version 2.0 (the "License"); ++you may not use this file except in compliance with the License. 
++You may obtain a copy of the License at ++ ++ http://www.apache.org/licenses/LICENSE-2.0 ++ ++Unless required by applicable law or agreed to in writing, software ++distributed under the License is distributed on an "AS IS" BASIS, ++WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++See the License for the specific language governing permissions and ++limitations under the License. */ ++ ++#pragma once ++ ++namespace paddle { ++ ++// TODO(yangjiabin): Add other place support in next PR ++enum class PlaceType { kUNK = -1, kCPU, kGPU }; ++ ++} // namespace paddle +diff --git a/src/kdkocr/libs/sw64/paddle_inference/paddle/include/experimental/ext_tensor.h b/src/kdkocr/libs/sw64/paddle_inference/paddle/include/experimental/ext_tensor.h +new file mode 100755 +index 0000000..be492a6 +--- /dev/null ++++ b/src/kdkocr/libs/sw64/paddle_inference/paddle/include/experimental/ext_tensor.h +@@ -0,0 +1,125 @@ ++/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. ++ ++Licensed under the Apache License, Version 2.0 (the "License"); ++you may not use this file except in compliance with the License. ++You may obtain a copy of the License at ++ ++ http://www.apache.org/licenses/LICENSE-2.0 ++ ++Unless required by applicable law or agreed to in writing, software ++distributed under the License is distributed on an "AS IS" BASIS, ++WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ++See the License for the specific language governing permissions and ++limitations under the License. */ ++ ++#pragma once ++ ++#include ++#include ++#ifdef PADDLE_WITH_CUDA ++#include ++#endif ++ ++#include "ext_dll_decl.h" // NOLINT ++#include "ext_dtype.h" // NOLINT ++#include "ext_place.h" // NOLINT ++ ++namespace paddle { ++namespace framework { ++class CustomTensorUtils; ++} // namespace framework ++ ++class StreamWrapper { ++ public: ++ StreamWrapper() : stream_(nullptr), is_stream_set_(false) {} ++ void SetStream(void* stream) { ++ stream_ = stream; ++ is_stream_set_ = true; ++ } ++ ++ void* GetStream() const { return stream_; } ++ ++ bool IsStreamSet() const { return is_stream_set_; } ++ ++ private: ++ // cudaStream_t stream_; ++ void* stream_; ++ bool is_stream_set_; ++}; ++ ++class PD_DLL_DECL Tensor { ++ public: ++ /// \brief Construct a Tensor on target Place for CustomOp. ++ /// Generally it's only used for user to create Tensor. ++ explicit Tensor(const PlaceType& place); ++ /// \brief Reset the shape of the tensor. ++ /// Generally it's only used for the input tensor. ++ /// Reshape must be called before calling ++ /// mutable_data() or copy_to(const PlaceType& place) ++ /// \param shape The shape to set. ++ void reshape(const std::vector& shape); ++ ++ /// \brief Get the memory pointer in CPU or GPU with ++ /// specific data type. ++ /// Please Reshape the tensor first before call this. ++ /// It's usually used to get input data pointer. ++ /// \param place The place of the tensor this will ++ /// override the original place of current tensor. ++ template ++ T* mutable_data(const PlaceType& place); ++ ++ /// \brief Get the memory pointer in CPU or GPU with ++ /// specific data type. Please Reshape the tensor ++ /// first before call this.It's usually used to get ++ /// input data pointer. ++ template ++ T* mutable_data(); ++ ++ /// \brief Get the memory pointer directly. ++ /// It's usually used to get the output data pointer. ++ /// \return The tensor data buffer pointer. ++ template ++ T* data() const; ++ ++ /// \brief Copy the host memory to tensor data. 
++ /// It's usually used to set the input tensor data. ++ /// \param PlaceType of target place, of which ++ /// the tensor will copy to. ++ ++ template ++ Tensor copy_to(const PlaceType& place) const; ++ ++ /// \brief Return the shape of the Tensor. ++ std::vector shape() const; ++ ++ /// \brief Return the data type of the tensor. ++ /// It's usually used to get the output tensor data type. ++ /// \return The data type of the tensor. ++ DataType type() const; ++ ++ /// \brief Get the size of current tensor. ++ /// Use this method to get the size of tensor ++ /// \return int64_t. ++ int64_t size() const; ++ ++ /// \brief Get the place of current tensor. ++ /// Use this method to get the place of tensor ++ /// \return Place. ++ const PlaceType& place() const; ++ ++ /// \brief Cast datatype from one to another ++ Tensor cast(const DataType& target_type) const; ++ ++#ifdef PADDLE_WITH_CUDA ++ /// \bref Get current stream of Tensor ++ cudaStream_t stream() const; ++#endif ++ ++ private: ++ friend class framework::CustomTensorUtils; ++ mutable std::shared_ptr tensor_; ++ mutable PlaceType place_; ++ StreamWrapper stream_; ++}; ++ ++} // namespace paddle +diff --git a/src/kdkocr/libs/sw64/paddle_inference/paddle/include/internal/framework.pb.h b/src/kdkocr/libs/sw64/paddle_inference/paddle/include/internal/framework.pb.h +new file mode 100755 +index 0000000..d98b64c +--- /dev/null ++++ b/src/kdkocr/libs/sw64/paddle_inference/paddle/include/internal/framework.pb.h +@@ -0,0 +1,5315 @@ ++// Generated by the protocol buffer compiler. DO NOT EDIT! ++// source: framework.proto ++ ++#ifndef PROTOBUF_framework_2eproto__INCLUDED ++#define PROTOBUF_framework_2eproto__INCLUDED ++ ++#include ++ ++#include ++ ++#if GOOGLE_PROTOBUF_VERSION < 3001000 ++#error This file was generated by a newer version of protoc which is ++#error incompatible with your Protocol Buffer headers. Please update ++#error your headers. ++#endif ++#if 3001000 < GOOGLE_PROTOBUF_MIN_PROTOC_VERSION ++#error This file was generated by an older version of protoc which is ++#error incompatible with your Protocol Buffer headers. Please ++#error regenerate this file with a newer version of protoc. ++#endif ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++// @@protoc_insertion_point(includes) ++ ++namespace paddle { ++namespace framework { ++namespace proto { ++ ++// Internal implementation detail -- do not call these. 
++void protobuf_AddDesc_framework_2eproto(); ++void protobuf_InitDefaults_framework_2eproto(); ++void protobuf_AssignDesc_framework_2eproto(); ++void protobuf_ShutdownFile_framework_2eproto(); ++ ++class BlockDesc; ++class OpDesc; ++class OpDesc_Attr; ++class OpDesc_Var; ++class OpProto; ++class OpProto_Attr; ++class OpProto_Var; ++class OpVersion; ++class OpVersionMap; ++class OpVersionMap_OpVersionPair; ++class ProgramDesc; ++class VarDesc; ++class VarType; ++class VarType_LoDTensorArrayDesc; ++class VarType_LoDTensorDesc; ++class VarType_ReaderDesc; ++class VarType_TensorDesc; ++class VarType_Tuple; ++class Version; ++ ++enum VarType_Type { ++ VarType_Type_BOOL = 0, ++ VarType_Type_INT16 = 1, ++ VarType_Type_INT32 = 2, ++ VarType_Type_INT64 = 3, ++ VarType_Type_FP16 = 4, ++ VarType_Type_FP32 = 5, ++ VarType_Type_FP64 = 6, ++ VarType_Type_SIZE_T = 19, ++ VarType_Type_UINT8 = 20, ++ VarType_Type_INT8 = 21, ++ VarType_Type_BF16 = 22, ++ VarType_Type_COMPLEX64 = 23, ++ VarType_Type_COMPLEX128 = 24, ++ VarType_Type_LOD_TENSOR = 7, ++ VarType_Type_SELECTED_ROWS = 8, ++ VarType_Type_FEED_MINIBATCH = 9, ++ VarType_Type_FETCH_LIST = 10, ++ VarType_Type_STEP_SCOPES = 11, ++ VarType_Type_LOD_RANK_TABLE = 12, ++ VarType_Type_LOD_TENSOR_ARRAY = 13, ++ VarType_Type_PLACE_LIST = 14, ++ VarType_Type_READER = 15, ++ VarType_Type_RAW = 17, ++ VarType_Type_TUPLE = 18 ++}; ++bool VarType_Type_IsValid(int value); ++const VarType_Type VarType_Type_Type_MIN = VarType_Type_BOOL; ++const VarType_Type VarType_Type_Type_MAX = VarType_Type_COMPLEX128; ++const int VarType_Type_Type_ARRAYSIZE = VarType_Type_Type_MAX + 1; ++ ++const ::google::protobuf::EnumDescriptor* VarType_Type_descriptor(); ++inline const ::std::string& VarType_Type_Name(VarType_Type value) { ++ return ::google::protobuf::internal::NameOfEnum( ++ VarType_Type_descriptor(), value); ++} ++inline bool VarType_Type_Parse( ++ const ::std::string& name, VarType_Type* value) { ++ return ::google::protobuf::internal::ParseNamedEnum( ++ VarType_Type_descriptor(), name, value); ++} ++enum AttrType { ++ INT = 0, ++ FLOAT = 1, ++ STRING = 2, ++ INTS = 3, ++ FLOATS = 4, ++ STRINGS = 5, ++ BOOLEAN = 6, ++ BOOLEANS = 7, ++ BLOCK = 8, ++ LONG = 9, ++ BLOCKS = 10, ++ LONGS = 11, ++ FLOAT64S = 12 ++}; ++bool AttrType_IsValid(int value); ++const AttrType AttrType_MIN = INT; ++const AttrType AttrType_MAX = FLOAT64S; ++const int AttrType_ARRAYSIZE = AttrType_MAX + 1; ++ ++const ::google::protobuf::EnumDescriptor* AttrType_descriptor(); ++inline const ::std::string& AttrType_Name(AttrType value) { ++ return ::google::protobuf::internal::NameOfEnum( ++ AttrType_descriptor(), value); ++} ++inline bool AttrType_Parse( ++ const ::std::string& name, AttrType* value) { ++ return ::google::protobuf::internal::ParseNamedEnum( ++ AttrType_descriptor(), name, value); ++} ++// =================================================================== ++ ++class Version : public ::google::protobuf::Message /* @@protoc_insertion_point(class_definition:paddle.framework.proto.Version) */ { ++ public: ++ Version(); ++ virtual ~Version(); ++ ++ Version(const Version& from); ++ ++ inline Version& operator=(const Version& from) { ++ CopyFrom(from); ++ return *this; ++ } ++ ++ inline const ::google::protobuf::UnknownFieldSet& unknown_fields() const { ++ return _internal_metadata_.unknown_fields(); ++ } ++ ++ inline ::google::protobuf::UnknownFieldSet* mutable_unknown_fields() { ++ return _internal_metadata_.mutable_unknown_fields(); ++ } ++ ++ static const ::google::protobuf::Descriptor* 
descriptor(); ++ static const Version& default_instance(); ++ ++ static const Version* internal_default_instance(); ++ ++ void Swap(Version* other); ++ ++ // implements Message ---------------------------------------------- ++ ++ inline Version* New() const { return New(NULL); } ++ ++ Version* New(::google::protobuf::Arena* arena) const; ++ void CopyFrom(const ::google::protobuf::Message& from); ++ void MergeFrom(const ::google::protobuf::Message& from); ++ void CopyFrom(const Version& from); ++ void MergeFrom(const Version& from); ++ void Clear(); ++ bool IsInitialized() const; ++ ++ size_t ByteSizeLong() const; ++ bool MergePartialFromCodedStream( ++ ::google::protobuf::io::CodedInputStream* input); ++ void SerializeWithCachedSizes( ++ ::google::protobuf::io::CodedOutputStream* output) const; ++ ::google::protobuf::uint8* InternalSerializeWithCachedSizesToArray( ++ bool deterministic, ::google::protobuf::uint8* output) const; ++ ::google::protobuf::uint8* SerializeWithCachedSizesToArray(::google::protobuf::uint8* output) const { ++ return InternalSerializeWithCachedSizesToArray(false, output); ++ } ++ int GetCachedSize() const { return _cached_size_; } ++ private: ++ void SharedCtor(); ++ void SharedDtor(); ++ void SetCachedSize(int size) const; ++ void InternalSwap(Version* other); ++ void UnsafeMergeFrom(const Version& from); ++ private: ++ inline ::google::protobuf::Arena* GetArenaNoVirtual() const { ++ return _internal_metadata_.arena(); ++ } ++ inline void* MaybeArenaPtr() const { ++ return _internal_metadata_.raw_arena_ptr(); ++ } ++ public: ++ ++ ::google::protobuf::Metadata GetMetadata() const; ++ ++ // nested types ---------------------------------------------------- ++ ++ // accessors ------------------------------------------------------- ++ ++ // optional int64 version = 1 [default = 0]; ++ bool has_version() const; ++ void clear_version(); ++ static const int kVersionFieldNumber = 1; ++ ::google::protobuf::int64 version() const; ++ void set_version(::google::protobuf::int64 value); ++ ++ // @@protoc_insertion_point(class_scope:paddle.framework.proto.Version) ++ private: ++ inline void set_has_version(); ++ inline void clear_has_version(); ++ ++ ::google::protobuf::internal::InternalMetadataWithArena _internal_metadata_; ++ ::google::protobuf::internal::HasBits<1> _has_bits_; ++ mutable int _cached_size_; ++ ::google::protobuf::int64 version_; ++ friend void protobuf_InitDefaults_framework_2eproto_impl(); ++ friend void protobuf_AddDesc_framework_2eproto_impl(); ++ friend void protobuf_AssignDesc_framework_2eproto(); ++ friend void protobuf_ShutdownFile_framework_2eproto(); ++ ++ void InitAsDefaultInstance(); ++}; ++extern ::google::protobuf::internal::ExplicitlyConstructed Version_default_instance_; ++ ++// ------------------------------------------------------------------- ++ ++class OpDesc_Attr : public ::google::protobuf::Message /* @@protoc_insertion_point(class_definition:paddle.framework.proto.OpDesc.Attr) */ { ++ public: ++ OpDesc_Attr(); ++ virtual ~OpDesc_Attr(); ++ ++ OpDesc_Attr(const OpDesc_Attr& from); ++ ++ inline OpDesc_Attr& operator=(const OpDesc_Attr& from) { ++ CopyFrom(from); ++ return *this; ++ } ++ ++ inline const ::google::protobuf::UnknownFieldSet& unknown_fields() const { ++ return _internal_metadata_.unknown_fields(); ++ } ++ ++ inline ::google::protobuf::UnknownFieldSet* mutable_unknown_fields() { ++ return _internal_metadata_.mutable_unknown_fields(); ++ } ++ ++ static const ::google::protobuf::Descriptor* descriptor(); ++ static const 
OpDesc_Attr& default_instance(); ++ ++ static const OpDesc_Attr* internal_default_instance(); ++ ++ void Swap(OpDesc_Attr* other); ++ ++ // implements Message ---------------------------------------------- ++ ++ inline OpDesc_Attr* New() const { return New(NULL); } ++ ++ OpDesc_Attr* New(::google::protobuf::Arena* arena) const; ++ void CopyFrom(const ::google::protobuf::Message& from); ++ void MergeFrom(const ::google::protobuf::Message& from); ++ void CopyFrom(const OpDesc_Attr& from); ++ void MergeFrom(const OpDesc_Attr& from); ++ void Clear(); ++ bool IsInitialized() const; ++ ++ size_t ByteSizeLong() const; ++ bool MergePartialFromCodedStream( ++ ::google::protobuf::io::CodedInputStream* input); ++ void SerializeWithCachedSizes( ++ ::google::protobuf::io::CodedOutputStream* output) const; ++ ::google::protobuf::uint8* InternalSerializeWithCachedSizesToArray( ++ bool deterministic, ::google::protobuf::uint8* output) const; ++ ::google::protobuf::uint8* SerializeWithCachedSizesToArray(::google::protobuf::uint8* output) const { ++ return InternalSerializeWithCachedSizesToArray(false, output); ++ } ++ int GetCachedSize() const { return _cached_size_; } ++ private: ++ void SharedCtor(); ++ void SharedDtor(); ++ void SetCachedSize(int size) const; ++ void InternalSwap(OpDesc_Attr* other); ++ void UnsafeMergeFrom(const OpDesc_Attr& from); ++ private: ++ inline ::google::protobuf::Arena* GetArenaNoVirtual() const { ++ return _internal_metadata_.arena(); ++ } ++ inline void* MaybeArenaPtr() const { ++ return _internal_metadata_.raw_arena_ptr(); ++ } ++ public: ++ ++ ::google::protobuf::Metadata GetMetadata() const; ++ ++ // nested types ---------------------------------------------------- ++ ++ // accessors ------------------------------------------------------- ++ ++ // required string name = 1; ++ bool has_name() const; ++ void clear_name(); ++ static const int kNameFieldNumber = 1; ++ const ::std::string& name() const; ++ void set_name(const ::std::string& value); ++ void set_name(const char* value); ++ void set_name(const char* value, size_t size); ++ ::std::string* mutable_name(); ++ ::std::string* release_name(); ++ void set_allocated_name(::std::string* name); ++ ++ // required .paddle.framework.proto.AttrType type = 2; ++ bool has_type() const; ++ void clear_type(); ++ static const int kTypeFieldNumber = 2; ++ ::paddle::framework::proto::AttrType type() const; ++ void set_type(::paddle::framework::proto::AttrType value); ++ ++ // optional int32 i = 3; ++ bool has_i() const; ++ void clear_i(); ++ static const int kIFieldNumber = 3; ++ ::google::protobuf::int32 i() const; ++ void set_i(::google::protobuf::int32 value); ++ ++ // optional float f = 4; ++ bool has_f() const; ++ void clear_f(); ++ static const int kFFieldNumber = 4; ++ float f() const; ++ void set_f(float value); ++ ++ // optional string s = 5; ++ bool has_s() const; ++ void clear_s(); ++ static const int kSFieldNumber = 5; ++ const ::std::string& s() const; ++ void set_s(const ::std::string& value); ++ void set_s(const char* value); ++ void set_s(const char* value, size_t size); ++ ::std::string* mutable_s(); ++ ::std::string* release_s(); ++ void set_allocated_s(::std::string* s); ++ ++ // repeated int32 ints = 6; ++ int ints_size() const; ++ void clear_ints(); ++ static const int kIntsFieldNumber = 6; ++ ::google::protobuf::int32 ints(int index) const; ++ void set_ints(int index, ::google::protobuf::int32 value); ++ void add_ints(::google::protobuf::int32 value); ++ const ::google::protobuf::RepeatedField< 
::google::protobuf::int32 >& ++ ints() const; ++ ::google::protobuf::RepeatedField< ::google::protobuf::int32 >* ++ mutable_ints(); ++ ++ // repeated float floats = 7; ++ int floats_size() const; ++ void clear_floats(); ++ static const int kFloatsFieldNumber = 7; ++ float floats(int index) const; ++ void set_floats(int index, float value); ++ void add_floats(float value); ++ const ::google::protobuf::RepeatedField< float >& ++ floats() const; ++ ::google::protobuf::RepeatedField< float >* ++ mutable_floats(); ++ ++ // repeated string strings = 8; ++ int strings_size() const; ++ void clear_strings(); ++ static const int kStringsFieldNumber = 8; ++ const ::std::string& strings(int index) const; ++ ::std::string* mutable_strings(int index); ++ void set_strings(int index, const ::std::string& value); ++ void set_strings(int index, const char* value); ++ void set_strings(int index, const char* value, size_t size); ++ ::std::string* add_strings(); ++ void add_strings(const ::std::string& value); ++ void add_strings(const char* value); ++ void add_strings(const char* value, size_t size); ++ const ::google::protobuf::RepeatedPtrField< ::std::string>& strings() const; ++ ::google::protobuf::RepeatedPtrField< ::std::string>* mutable_strings(); ++ ++ // optional bool b = 10; ++ bool has_b() const; ++ void clear_b(); ++ static const int kBFieldNumber = 10; ++ bool b() const; ++ void set_b(bool value); ++ ++ // repeated bool bools = 11; ++ int bools_size() const; ++ void clear_bools(); ++ static const int kBoolsFieldNumber = 11; ++ bool bools(int index) const; ++ void set_bools(int index, bool value); ++ void add_bools(bool value); ++ const ::google::protobuf::RepeatedField< bool >& ++ bools() const; ++ ::google::protobuf::RepeatedField< bool >* ++ mutable_bools(); ++ ++ // optional int32 block_idx = 12; ++ bool has_block_idx() const; ++ void clear_block_idx(); ++ static const int kBlockIdxFieldNumber = 12; ++ ::google::protobuf::int32 block_idx() const; ++ void set_block_idx(::google::protobuf::int32 value); ++ ++ // optional int64 l = 13; ++ bool has_l() const; ++ void clear_l(); ++ static const int kLFieldNumber = 13; ++ ::google::protobuf::int64 l() const; ++ void set_l(::google::protobuf::int64 value); ++ ++ // repeated int32 blocks_idx = 14; ++ int blocks_idx_size() const; ++ void clear_blocks_idx(); ++ static const int kBlocksIdxFieldNumber = 14; ++ ::google::protobuf::int32 blocks_idx(int index) const; ++ void set_blocks_idx(int index, ::google::protobuf::int32 value); ++ void add_blocks_idx(::google::protobuf::int32 value); ++ const ::google::protobuf::RepeatedField< ::google::protobuf::int32 >& ++ blocks_idx() const; ++ ::google::protobuf::RepeatedField< ::google::protobuf::int32 >* ++ mutable_blocks_idx(); ++ ++ // repeated int64 longs = 15; ++ int longs_size() const; ++ void clear_longs(); ++ static const int kLongsFieldNumber = 15; ++ ::google::protobuf::int64 longs(int index) const; ++ void set_longs(int index, ::google::protobuf::int64 value); ++ void add_longs(::google::protobuf::int64 value); ++ const ::google::protobuf::RepeatedField< ::google::protobuf::int64 >& ++ longs() const; ++ ::google::protobuf::RepeatedField< ::google::protobuf::int64 >* ++ mutable_longs(); ++ ++ // repeated double float64s = 16; ++ int float64s_size() const; ++ void clear_float64s(); ++ static const int kFloat64SFieldNumber = 16; ++ double float64s(int index) const; ++ void set_float64s(int index, double value); ++ void add_float64s(double value); ++ const ::google::protobuf::RepeatedField< double >& ++ 
float64s() const; ++ ::google::protobuf::RepeatedField< double >* ++ mutable_float64s(); ++ ++ // @@protoc_insertion_point(class_scope:paddle.framework.proto.OpDesc.Attr) ++ private: ++ inline void set_has_name(); ++ inline void clear_has_name(); ++ inline void set_has_type(); ++ inline void clear_has_type(); ++ inline void set_has_i(); ++ inline void clear_has_i(); ++ inline void set_has_f(); ++ inline void clear_has_f(); ++ inline void set_has_s(); ++ inline void clear_has_s(); ++ inline void set_has_b(); ++ inline void clear_has_b(); ++ inline void set_has_block_idx(); ++ inline void clear_has_block_idx(); ++ inline void set_has_l(); ++ inline void clear_has_l(); ++ ++ // helper for ByteSizeLong() ++ size_t RequiredFieldsByteSizeFallback() const; ++ ++ ::google::protobuf::internal::InternalMetadataWithArena _internal_metadata_; ++ ::google::protobuf::internal::HasBits<1> _has_bits_; ++ mutable int _cached_size_; ++ ::google::protobuf::RepeatedField< ::google::protobuf::int32 > ints_; ++ ::google::protobuf::RepeatedField< float > floats_; ++ ::google::protobuf::RepeatedPtrField< ::std::string> strings_; ++ ::google::protobuf::RepeatedField< bool > bools_; ++ ::google::protobuf::RepeatedField< ::google::protobuf::int32 > blocks_idx_; ++ ::google::protobuf::RepeatedField< ::google::protobuf::int64 > longs_; ++ ::google::protobuf::RepeatedField< double > float64s_; ++ ::google::protobuf::internal::ArenaStringPtr name_; ++ ::google::protobuf::internal::ArenaStringPtr s_; ++ int type_; ++ ::google::protobuf::int32 i_; ++ float f_; ++ bool b_; ++ ::google::protobuf::int64 l_; ++ ::google::protobuf::int32 block_idx_; ++ friend void protobuf_InitDefaults_framework_2eproto_impl(); ++ friend void protobuf_AddDesc_framework_2eproto_impl(); ++ friend void protobuf_AssignDesc_framework_2eproto(); ++ friend void protobuf_ShutdownFile_framework_2eproto(); ++ ++ void InitAsDefaultInstance(); ++}; ++extern ::google::protobuf::internal::ExplicitlyConstructed OpDesc_Attr_default_instance_; ++ ++// ------------------------------------------------------------------- ++ ++class OpDesc_Var : public ::google::protobuf::Message /* @@protoc_insertion_point(class_definition:paddle.framework.proto.OpDesc.Var) */ { ++ public: ++ OpDesc_Var(); ++ virtual ~OpDesc_Var(); ++ ++ OpDesc_Var(const OpDesc_Var& from); ++ ++ inline OpDesc_Var& operator=(const OpDesc_Var& from) { ++ CopyFrom(from); ++ return *this; ++ } ++ ++ inline const ::google::protobuf::UnknownFieldSet& unknown_fields() const { ++ return _internal_metadata_.unknown_fields(); ++ } ++ ++ inline ::google::protobuf::UnknownFieldSet* mutable_unknown_fields() { ++ return _internal_metadata_.mutable_unknown_fields(); ++ } ++ ++ static const ::google::protobuf::Descriptor* descriptor(); ++ static const OpDesc_Var& default_instance(); ++ ++ static const OpDesc_Var* internal_default_instance(); ++ ++ void Swap(OpDesc_Var* other); ++ ++ // implements Message ---------------------------------------------- ++ ++ inline OpDesc_Var* New() const { return New(NULL); } ++ ++ OpDesc_Var* New(::google::protobuf::Arena* arena) const; ++ void CopyFrom(const ::google::protobuf::Message& from); ++ void MergeFrom(const ::google::protobuf::Message& from); ++ void CopyFrom(const OpDesc_Var& from); ++ void MergeFrom(const OpDesc_Var& from); ++ void Clear(); ++ bool IsInitialized() const; ++ ++ size_t ByteSizeLong() const; ++ bool MergePartialFromCodedStream( ++ ::google::protobuf::io::CodedInputStream* input); ++ void SerializeWithCachedSizes( ++ 
::google::protobuf::io::CodedOutputStream* output) const; ++ ::google::protobuf::uint8* InternalSerializeWithCachedSizesToArray( ++ bool deterministic, ::google::protobuf::uint8* output) const; ++ ::google::protobuf::uint8* SerializeWithCachedSizesToArray(::google::protobuf::uint8* output) const { ++ return InternalSerializeWithCachedSizesToArray(false, output); ++ } ++ int GetCachedSize() const { return _cached_size_; } ++ private: ++ void SharedCtor(); ++ void SharedDtor(); ++ void SetCachedSize(int size) const; ++ void InternalSwap(OpDesc_Var* other); ++ void UnsafeMergeFrom(const OpDesc_Var& from); ++ private: ++ inline ::google::protobuf::Arena* GetArenaNoVirtual() const { ++ return _internal_metadata_.arena(); ++ } ++ inline void* MaybeArenaPtr() const { ++ return _internal_metadata_.raw_arena_ptr(); ++ } ++ public: ++ ++ ::google::protobuf::Metadata GetMetadata() const; ++ ++ // nested types ---------------------------------------------------- ++ ++ // accessors ------------------------------------------------------- ++ ++ // required string parameter = 1; ++ bool has_parameter() const; ++ void clear_parameter(); ++ static const int kParameterFieldNumber = 1; ++ const ::std::string& parameter() const; ++ void set_parameter(const ::std::string& value); ++ void set_parameter(const char* value); ++ void set_parameter(const char* value, size_t size); ++ ::std::string* mutable_parameter(); ++ ::std::string* release_parameter(); ++ void set_allocated_parameter(::std::string* parameter); ++ ++ // repeated string arguments = 2; ++ int arguments_size() const; ++ void clear_arguments(); ++ static const int kArgumentsFieldNumber = 2; ++ const ::std::string& arguments(int index) const; ++ ::std::string* mutable_arguments(int index); ++ void set_arguments(int index, const ::std::string& value); ++ void set_arguments(int index, const char* value); ++ void set_arguments(int index, const char* value, size_t size); ++ ::std::string* add_arguments(); ++ void add_arguments(const ::std::string& value); ++ void add_arguments(const char* value); ++ void add_arguments(const char* value, size_t size); ++ const ::google::protobuf::RepeatedPtrField< ::std::string>& arguments() const; ++ ::google::protobuf::RepeatedPtrField< ::std::string>* mutable_arguments(); ++ ++ // @@protoc_insertion_point(class_scope:paddle.framework.proto.OpDesc.Var) ++ private: ++ inline void set_has_parameter(); ++ inline void clear_has_parameter(); ++ ++ ::google::protobuf::internal::InternalMetadataWithArena _internal_metadata_; ++ ::google::protobuf::internal::HasBits<1> _has_bits_; ++ mutable int _cached_size_; ++ ::google::protobuf::RepeatedPtrField< ::std::string> arguments_; ++ ::google::protobuf::internal::ArenaStringPtr parameter_; ++ friend void protobuf_InitDefaults_framework_2eproto_impl(); ++ friend void protobuf_AddDesc_framework_2eproto_impl(); ++ friend void protobuf_AssignDesc_framework_2eproto(); ++ friend void protobuf_ShutdownFile_framework_2eproto(); ++ ++ void InitAsDefaultInstance(); ++}; ++extern ::google::protobuf::internal::ExplicitlyConstructed OpDesc_Var_default_instance_; ++ ++// ------------------------------------------------------------------- ++ ++class OpDesc : public ::google::protobuf::Message /* @@protoc_insertion_point(class_definition:paddle.framework.proto.OpDesc) */ { ++ public: ++ OpDesc(); ++ virtual ~OpDesc(); ++ ++ OpDesc(const OpDesc& from); ++ ++ inline OpDesc& operator=(const OpDesc& from) { ++ CopyFrom(from); ++ return *this; ++ } ++ ++ inline const ::google::protobuf::UnknownFieldSet& 
unknown_fields() const { ++ return _internal_metadata_.unknown_fields(); ++ } ++ ++ inline ::google::protobuf::UnknownFieldSet* mutable_unknown_fields() { ++ return _internal_metadata_.mutable_unknown_fields(); ++ } ++ ++ static const ::google::protobuf::Descriptor* descriptor(); ++ static const OpDesc& default_instance(); ++ ++ static const OpDesc* internal_default_instance(); ++ ++ void Swap(OpDesc* other); ++ ++ // implements Message ---------------------------------------------- ++ ++ inline OpDesc* New() const { return New(NULL); } ++ ++ OpDesc* New(::google::protobuf::Arena* arena) const; ++ void CopyFrom(const ::google::protobuf::Message& from); ++ void MergeFrom(const ::google::protobuf::Message& from); ++ void CopyFrom(const OpDesc& from); ++ void MergeFrom(const OpDesc& from); ++ void Clear(); ++ bool IsInitialized() const; ++ ++ size_t ByteSizeLong() const; ++ bool MergePartialFromCodedStream( ++ ::google::protobuf::io::CodedInputStream* input); ++ void SerializeWithCachedSizes( ++ ::google::protobuf::io::CodedOutputStream* output) const; ++ ::google::protobuf::uint8* InternalSerializeWithCachedSizesToArray( ++ bool deterministic, ::google::protobuf::uint8* output) const; ++ ::google::protobuf::uint8* SerializeWithCachedSizesToArray(::google::protobuf::uint8* output) const { ++ return InternalSerializeWithCachedSizesToArray(false, output); ++ } ++ int GetCachedSize() const { return _cached_size_; } ++ private: ++ void SharedCtor(); ++ void SharedDtor(); ++ void SetCachedSize(int size) const; ++ void InternalSwap(OpDesc* other); ++ void UnsafeMergeFrom(const OpDesc& from); ++ private: ++ inline ::google::protobuf::Arena* GetArenaNoVirtual() const { ++ return _internal_metadata_.arena(); ++ } ++ inline void* MaybeArenaPtr() const { ++ return _internal_metadata_.raw_arena_ptr(); ++ } ++ public: ++ ++ ::google::protobuf::Metadata GetMetadata() const; ++ ++ // nested types ---------------------------------------------------- ++ ++ typedef OpDesc_Attr Attr; ++ typedef OpDesc_Var Var; ++ ++ // accessors ------------------------------------------------------- ++ ++ // required string type = 3; ++ bool has_type() const; ++ void clear_type(); ++ static const int kTypeFieldNumber = 3; ++ const ::std::string& type() const; ++ void set_type(const ::std::string& value); ++ void set_type(const char* value); ++ void set_type(const char* value, size_t size); ++ ::std::string* mutable_type(); ++ ::std::string* release_type(); ++ void set_allocated_type(::std::string* type); ++ ++ // repeated .paddle.framework.proto.OpDesc.Var inputs = 1; ++ int inputs_size() const; ++ void clear_inputs(); ++ static const int kInputsFieldNumber = 1; ++ const ::paddle::framework::proto::OpDesc_Var& inputs(int index) const; ++ ::paddle::framework::proto::OpDesc_Var* mutable_inputs(int index); ++ ::paddle::framework::proto::OpDesc_Var* add_inputs(); ++ ::google::protobuf::RepeatedPtrField< ::paddle::framework::proto::OpDesc_Var >* ++ mutable_inputs(); ++ const ::google::protobuf::RepeatedPtrField< ::paddle::framework::proto::OpDesc_Var >& ++ inputs() const; ++ ++ // repeated .paddle.framework.proto.OpDesc.Var outputs = 2; ++ int outputs_size() const; ++ void clear_outputs(); ++ static const int kOutputsFieldNumber = 2; ++ const ::paddle::framework::proto::OpDesc_Var& outputs(int index) const; ++ ::paddle::framework::proto::OpDesc_Var* mutable_outputs(int index); ++ ::paddle::framework::proto::OpDesc_Var* add_outputs(); ++ ::google::protobuf::RepeatedPtrField< ::paddle::framework::proto::OpDesc_Var >* ++ 
mutable_outputs(); ++ const ::google::protobuf::RepeatedPtrField< ::paddle::framework::proto::OpDesc_Var >& ++ outputs() const; ++ ++ // repeated .paddle.framework.proto.OpDesc.Attr attrs = 4; ++ int attrs_size() const; ++ void clear_attrs(); ++ static const int kAttrsFieldNumber = 4; ++ const ::paddle::framework::proto::OpDesc_Attr& attrs(int index) const; ++ ::paddle::framework::proto::OpDesc_Attr* mutable_attrs(int index); ++ ::paddle::framework::proto::OpDesc_Attr* add_attrs(); ++ ::google::protobuf::RepeatedPtrField< ::paddle::framework::proto::OpDesc_Attr >* ++ mutable_attrs(); ++ const ::google::protobuf::RepeatedPtrField< ::paddle::framework::proto::OpDesc_Attr >& ++ attrs() const; ++ ++ // optional bool is_target = 5 [default = false]; ++ bool has_is_target() const; ++ void clear_is_target(); ++ static const int kIsTargetFieldNumber = 5; ++ bool is_target() const; ++ void set_is_target(bool value); ++ ++ // @@protoc_insertion_point(class_scope:paddle.framework.proto.OpDesc) ++ private: ++ inline void set_has_type(); ++ inline void clear_has_type(); ++ inline void set_has_is_target(); ++ inline void clear_has_is_target(); ++ ++ ::google::protobuf::internal::InternalMetadataWithArena _internal_metadata_; ++ ::google::protobuf::internal::HasBits<1> _has_bits_; ++ mutable int _cached_size_; ++ ::google::protobuf::RepeatedPtrField< ::paddle::framework::proto::OpDesc_Var > inputs_; ++ ::google::protobuf::RepeatedPtrField< ::paddle::framework::proto::OpDesc_Var > outputs_; ++ ::google::protobuf::RepeatedPtrField< ::paddle::framework::proto::OpDesc_Attr > attrs_; ++ ::google::protobuf::internal::ArenaStringPtr type_; ++ bool is_target_; ++ friend void protobuf_InitDefaults_framework_2eproto_impl(); ++ friend void protobuf_AddDesc_framework_2eproto_impl(); ++ friend void protobuf_AssignDesc_framework_2eproto(); ++ friend void protobuf_ShutdownFile_framework_2eproto(); ++ ++ void InitAsDefaultInstance(); ++}; ++extern ::google::protobuf::internal::ExplicitlyConstructed OpDesc_default_instance_; ++ ++// ------------------------------------------------------------------- ++ ++class OpProto_Var : public ::google::protobuf::Message /* @@protoc_insertion_point(class_definition:paddle.framework.proto.OpProto.Var) */ { ++ public: ++ OpProto_Var(); ++ virtual ~OpProto_Var(); ++ ++ OpProto_Var(const OpProto_Var& from); ++ ++ inline OpProto_Var& operator=(const OpProto_Var& from) { ++ CopyFrom(from); ++ return *this; ++ } ++ ++ inline const ::google::protobuf::UnknownFieldSet& unknown_fields() const { ++ return _internal_metadata_.unknown_fields(); ++ } ++ ++ inline ::google::protobuf::UnknownFieldSet* mutable_unknown_fields() { ++ return _internal_metadata_.mutable_unknown_fields(); ++ } ++ ++ static const ::google::protobuf::Descriptor* descriptor(); ++ static const OpProto_Var& default_instance(); ++ ++ static const OpProto_Var* internal_default_instance(); ++ ++ void Swap(OpProto_Var* other); ++ ++ // implements Message ---------------------------------------------- ++ ++ inline OpProto_Var* New() const { return New(NULL); } ++ ++ OpProto_Var* New(::google::protobuf::Arena* arena) const; ++ void CopyFrom(const ::google::protobuf::Message& from); ++ void MergeFrom(const ::google::protobuf::Message& from); ++ void CopyFrom(const OpProto_Var& from); ++ void MergeFrom(const OpProto_Var& from); ++ void Clear(); ++ bool IsInitialized() const; ++ ++ size_t ByteSizeLong() const; ++ bool MergePartialFromCodedStream( ++ ::google::protobuf::io::CodedInputStream* input); ++ void SerializeWithCachedSizes( ++ 
::google::protobuf::io::CodedOutputStream* output) const; ++ ::google::protobuf::uint8* InternalSerializeWithCachedSizesToArray( ++ bool deterministic, ::google::protobuf::uint8* output) const; ++ ::google::protobuf::uint8* SerializeWithCachedSizesToArray(::google::protobuf::uint8* output) const { ++ return InternalSerializeWithCachedSizesToArray(false, output); ++ } ++ int GetCachedSize() const { return _cached_size_; } ++ private: ++ void SharedCtor(); ++ void SharedDtor(); ++ void SetCachedSize(int size) const; ++ void InternalSwap(OpProto_Var* other); ++ void UnsafeMergeFrom(const OpProto_Var& from); ++ private: ++ inline ::google::protobuf::Arena* GetArenaNoVirtual() const { ++ return _internal_metadata_.arena(); ++ } ++ inline void* MaybeArenaPtr() const { ++ return _internal_metadata_.raw_arena_ptr(); ++ } ++ public: ++ ++ ::google::protobuf::Metadata GetMetadata() const; ++ ++ // nested types ---------------------------------------------------- ++ ++ // accessors ------------------------------------------------------- ++ ++ // required string name = 1; ++ bool has_name() const; ++ void clear_name(); ++ static const int kNameFieldNumber = 1; ++ const ::std::string& name() const; ++ void set_name(const ::std::string& value); ++ void set_name(const char* value); ++ void set_name(const char* value, size_t size); ++ ::std::string* mutable_name(); ++ ::std::string* release_name(); ++ void set_allocated_name(::std::string* name); ++ ++ // required string comment = 2; ++ bool has_comment() const; ++ void clear_comment(); ++ static const int kCommentFieldNumber = 2; ++ const ::std::string& comment() const; ++ void set_comment(const ::std::string& value); ++ void set_comment(const char* value); ++ void set_comment(const char* value, size_t size); ++ ::std::string* mutable_comment(); ++ ::std::string* release_comment(); ++ void set_allocated_comment(::std::string* comment); ++ ++ // optional bool duplicable = 3 [default = false]; ++ bool has_duplicable() const; ++ void clear_duplicable(); ++ static const int kDuplicableFieldNumber = 3; ++ bool duplicable() const; ++ void set_duplicable(bool value); ++ ++ // optional bool intermediate = 4 [default = false]; ++ bool has_intermediate() const; ++ void clear_intermediate(); ++ static const int kIntermediateFieldNumber = 4; ++ bool intermediate() const; ++ void set_intermediate(bool value); ++ ++ // optional bool dispensable = 5 [default = false]; ++ bool has_dispensable() const; ++ void clear_dispensable(); ++ static const int kDispensableFieldNumber = 5; ++ bool dispensable() const; ++ void set_dispensable(bool value); ++ ++ // @@protoc_insertion_point(class_scope:paddle.framework.proto.OpProto.Var) ++ private: ++ inline void set_has_name(); ++ inline void clear_has_name(); ++ inline void set_has_comment(); ++ inline void clear_has_comment(); ++ inline void set_has_duplicable(); ++ inline void clear_has_duplicable(); ++ inline void set_has_intermediate(); ++ inline void clear_has_intermediate(); ++ inline void set_has_dispensable(); ++ inline void clear_has_dispensable(); ++ ++ // helper for ByteSizeLong() ++ size_t RequiredFieldsByteSizeFallback() const; ++ ++ ::google::protobuf::internal::InternalMetadataWithArena _internal_metadata_; ++ ::google::protobuf::internal::HasBits<1> _has_bits_; ++ mutable int _cached_size_; ++ ::google::protobuf::internal::ArenaStringPtr name_; ++ ::google::protobuf::internal::ArenaStringPtr comment_; ++ bool duplicable_; ++ bool intermediate_; ++ bool dispensable_; ++ friend void 
protobuf_InitDefaults_framework_2eproto_impl(); ++ friend void protobuf_AddDesc_framework_2eproto_impl(); ++ friend void protobuf_AssignDesc_framework_2eproto(); ++ friend void protobuf_ShutdownFile_framework_2eproto(); ++ ++ void InitAsDefaultInstance(); ++}; ++extern ::google::protobuf::internal::ExplicitlyConstructed OpProto_Var_default_instance_; ++ ++// ------------------------------------------------------------------- ++ ++class OpProto_Attr : public ::google::protobuf::Message /* @@protoc_insertion_point(class_definition:paddle.framework.proto.OpProto.Attr) */ { ++ public: ++ OpProto_Attr(); ++ virtual ~OpProto_Attr(); ++ ++ OpProto_Attr(const OpProto_Attr& from); ++ ++ inline OpProto_Attr& operator=(const OpProto_Attr& from) { ++ CopyFrom(from); ++ return *this; ++ } ++ ++ inline const ::google::protobuf::UnknownFieldSet& unknown_fields() const { ++ return _internal_metadata_.unknown_fields(); ++ } ++ ++ inline ::google::protobuf::UnknownFieldSet* mutable_unknown_fields() { ++ return _internal_metadata_.mutable_unknown_fields(); ++ } ++ ++ static const ::google::protobuf::Descriptor* descriptor(); ++ static const OpProto_Attr& default_instance(); ++ ++ static const OpProto_Attr* internal_default_instance(); ++ ++ void Swap(OpProto_Attr* other); ++ ++ // implements Message ---------------------------------------------- ++ ++ inline OpProto_Attr* New() const { return New(NULL); } ++ ++ OpProto_Attr* New(::google::protobuf::Arena* arena) const; ++ void CopyFrom(const ::google::protobuf::Message& from); ++ void MergeFrom(const ::google::protobuf::Message& from); ++ void CopyFrom(const OpProto_Attr& from); ++ void MergeFrom(const OpProto_Attr& from); ++ void Clear(); ++ bool IsInitialized() const; ++ ++ size_t ByteSizeLong() const; ++ bool MergePartialFromCodedStream( ++ ::google::protobuf::io::CodedInputStream* input); ++ void SerializeWithCachedSizes( ++ ::google::protobuf::io::CodedOutputStream* output) const; ++ ::google::protobuf::uint8* InternalSerializeWithCachedSizesToArray( ++ bool deterministic, ::google::protobuf::uint8* output) const; ++ ::google::protobuf::uint8* SerializeWithCachedSizesToArray(::google::protobuf::uint8* output) const { ++ return InternalSerializeWithCachedSizesToArray(false, output); ++ } ++ int GetCachedSize() const { return _cached_size_; } ++ private: ++ void SharedCtor(); ++ void SharedDtor(); ++ void SetCachedSize(int size) const; ++ void InternalSwap(OpProto_Attr* other); ++ void UnsafeMergeFrom(const OpProto_Attr& from); ++ private: ++ inline ::google::protobuf::Arena* GetArenaNoVirtual() const { ++ return _internal_metadata_.arena(); ++ } ++ inline void* MaybeArenaPtr() const { ++ return _internal_metadata_.raw_arena_ptr(); ++ } ++ public: ++ ++ ::google::protobuf::Metadata GetMetadata() const; ++ ++ // nested types ---------------------------------------------------- ++ ++ // accessors ------------------------------------------------------- ++ ++ // required string name = 1; ++ bool has_name() const; ++ void clear_name(); ++ static const int kNameFieldNumber = 1; ++ const ::std::string& name() const; ++ void set_name(const ::std::string& value); ++ void set_name(const char* value); ++ void set_name(const char* value, size_t size); ++ ::std::string* mutable_name(); ++ ::std::string* release_name(); ++ void set_allocated_name(::std::string* name); ++ ++ // required .paddle.framework.proto.AttrType type = 2; ++ bool has_type() const; ++ void clear_type(); ++ static const int kTypeFieldNumber = 2; ++ ::paddle::framework::proto::AttrType type() const; 
++ void set_type(::paddle::framework::proto::AttrType value); ++ ++ // required string comment = 3; ++ bool has_comment() const; ++ void clear_comment(); ++ static const int kCommentFieldNumber = 3; ++ const ::std::string& comment() const; ++ void set_comment(const ::std::string& value); ++ void set_comment(const char* value); ++ void set_comment(const char* value, size_t size); ++ ::std::string* mutable_comment(); ++ ::std::string* release_comment(); ++ void set_allocated_comment(::std::string* comment); ++ ++ // optional bool generated = 4 [default = false]; ++ bool has_generated() const; ++ void clear_generated(); ++ static const int kGeneratedFieldNumber = 4; ++ bool generated() const; ++ void set_generated(bool value); ++ ++ // @@protoc_insertion_point(class_scope:paddle.framework.proto.OpProto.Attr) ++ private: ++ inline void set_has_name(); ++ inline void clear_has_name(); ++ inline void set_has_type(); ++ inline void clear_has_type(); ++ inline void set_has_comment(); ++ inline void clear_has_comment(); ++ inline void set_has_generated(); ++ inline void clear_has_generated(); ++ ++ // helper for ByteSizeLong() ++ size_t RequiredFieldsByteSizeFallback() const; ++ ++ ::google::protobuf::internal::InternalMetadataWithArena _internal_metadata_; ++ ::google::protobuf::internal::HasBits<1> _has_bits_; ++ mutable int _cached_size_; ++ ::google::protobuf::internal::ArenaStringPtr name_; ++ ::google::protobuf::internal::ArenaStringPtr comment_; ++ int type_; ++ bool generated_; ++ friend void protobuf_InitDefaults_framework_2eproto_impl(); ++ friend void protobuf_AddDesc_framework_2eproto_impl(); ++ friend void protobuf_AssignDesc_framework_2eproto(); ++ friend void protobuf_ShutdownFile_framework_2eproto(); ++ ++ void InitAsDefaultInstance(); ++}; ++extern ::google::protobuf::internal::ExplicitlyConstructed OpProto_Attr_default_instance_; ++ ++// ------------------------------------------------------------------- ++ ++class OpProto : public ::google::protobuf::Message /* @@protoc_insertion_point(class_definition:paddle.framework.proto.OpProto) */ { ++ public: ++ OpProto(); ++ virtual ~OpProto(); ++ ++ OpProto(const OpProto& from); ++ ++ inline OpProto& operator=(const OpProto& from) { ++ CopyFrom(from); ++ return *this; ++ } ++ ++ inline const ::google::protobuf::UnknownFieldSet& unknown_fields() const { ++ return _internal_metadata_.unknown_fields(); ++ } ++ ++ inline ::google::protobuf::UnknownFieldSet* mutable_unknown_fields() { ++ return _internal_metadata_.mutable_unknown_fields(); ++ } ++ ++ static const ::google::protobuf::Descriptor* descriptor(); ++ static const OpProto& default_instance(); ++ ++ static const OpProto* internal_default_instance(); ++ ++ void Swap(OpProto* other); ++ ++ // implements Message ---------------------------------------------- ++ ++ inline OpProto* New() const { return New(NULL); } ++ ++ OpProto* New(::google::protobuf::Arena* arena) const; ++ void CopyFrom(const ::google::protobuf::Message& from); ++ void MergeFrom(const ::google::protobuf::Message& from); ++ void CopyFrom(const OpProto& from); ++ void MergeFrom(const OpProto& from); ++ void Clear(); ++ bool IsInitialized() const; ++ ++ size_t ByteSizeLong() const; ++ bool MergePartialFromCodedStream( ++ ::google::protobuf::io::CodedInputStream* input); ++ void SerializeWithCachedSizes( ++ ::google::protobuf::io::CodedOutputStream* output) const; ++ ::google::protobuf::uint8* InternalSerializeWithCachedSizesToArray( ++ bool deterministic, ::google::protobuf::uint8* output) const; ++ 
::google::protobuf::uint8* SerializeWithCachedSizesToArray(::google::protobuf::uint8* output) const { ++ return InternalSerializeWithCachedSizesToArray(false, output); ++ } ++ int GetCachedSize() const { return _cached_size_; } ++ private: ++ void SharedCtor(); ++ void SharedDtor(); ++ void SetCachedSize(int size) const; ++ void InternalSwap(OpProto* other); ++ void UnsafeMergeFrom(const OpProto& from); ++ private: ++ inline ::google::protobuf::Arena* GetArenaNoVirtual() const { ++ return _internal_metadata_.arena(); ++ } ++ inline void* MaybeArenaPtr() const { ++ return _internal_metadata_.raw_arena_ptr(); ++ } ++ public: ++ ++ ::google::protobuf::Metadata GetMetadata() const; ++ ++ // nested types ---------------------------------------------------- ++ ++ typedef OpProto_Var Var; ++ typedef OpProto_Attr Attr; ++ ++ // accessors ------------------------------------------------------- ++ ++ // required string type = 1; ++ bool has_type() const; ++ void clear_type(); ++ static const int kTypeFieldNumber = 1; ++ const ::std::string& type() const; ++ void set_type(const ::std::string& value); ++ void set_type(const char* value); ++ void set_type(const char* value, size_t size); ++ ::std::string* mutable_type(); ++ ::std::string* release_type(); ++ void set_allocated_type(::std::string* type); ++ ++ // repeated .paddle.framework.proto.OpProto.Var inputs = 2; ++ int inputs_size() const; ++ void clear_inputs(); ++ static const int kInputsFieldNumber = 2; ++ const ::paddle::framework::proto::OpProto_Var& inputs(int index) const; ++ ::paddle::framework::proto::OpProto_Var* mutable_inputs(int index); ++ ::paddle::framework::proto::OpProto_Var* add_inputs(); ++ ::google::protobuf::RepeatedPtrField< ::paddle::framework::proto::OpProto_Var >* ++ mutable_inputs(); ++ const ::google::protobuf::RepeatedPtrField< ::paddle::framework::proto::OpProto_Var >& ++ inputs() const; ++ ++ // repeated .paddle.framework.proto.OpProto.Var outputs = 3; ++ int outputs_size() const; ++ void clear_outputs(); ++ static const int kOutputsFieldNumber = 3; ++ const ::paddle::framework::proto::OpProto_Var& outputs(int index) const; ++ ::paddle::framework::proto::OpProto_Var* mutable_outputs(int index); ++ ::paddle::framework::proto::OpProto_Var* add_outputs(); ++ ::google::protobuf::RepeatedPtrField< ::paddle::framework::proto::OpProto_Var >* ++ mutable_outputs(); ++ const ::google::protobuf::RepeatedPtrField< ::paddle::framework::proto::OpProto_Var >& ++ outputs() const; ++ ++ // repeated .paddle.framework.proto.OpProto.Attr attrs = 4; ++ int attrs_size() const; ++ void clear_attrs(); ++ static const int kAttrsFieldNumber = 4; ++ const ::paddle::framework::proto::OpProto_Attr& attrs(int index) const; ++ ::paddle::framework::proto::OpProto_Attr* mutable_attrs(int index); ++ ::paddle::framework::proto::OpProto_Attr* add_attrs(); ++ ::google::protobuf::RepeatedPtrField< ::paddle::framework::proto::OpProto_Attr >* ++ mutable_attrs(); ++ const ::google::protobuf::RepeatedPtrField< ::paddle::framework::proto::OpProto_Attr >& ++ attrs() const; ++ ++ // required string comment = 5; ++ bool has_comment() const; ++ void clear_comment(); ++ static const int kCommentFieldNumber = 5; ++ const ::std::string& comment() const; ++ void set_comment(const ::std::string& value); ++ void set_comment(const char* value); ++ void set_comment(const char* value, size_t size); ++ ::std::string* mutable_comment(); ++ ::std::string* release_comment(); ++ void set_allocated_comment(::std::string* comment); ++ ++ // 
@@protoc_insertion_point(class_scope:paddle.framework.proto.OpProto) ++ private: ++ inline void set_has_type(); ++ inline void clear_has_type(); ++ inline void set_has_comment(); ++ inline void clear_has_comment(); ++ ++ // helper for ByteSizeLong() ++ size_t RequiredFieldsByteSizeFallback() const; ++ ++ ::google::protobuf::internal::InternalMetadataWithArena _internal_metadata_; ++ ::google::protobuf::internal::HasBits<1> _has_bits_; ++ mutable int _cached_size_; ++ ::google::protobuf::RepeatedPtrField< ::paddle::framework::proto::OpProto_Var > inputs_; ++ ::google::protobuf::RepeatedPtrField< ::paddle::framework::proto::OpProto_Var > outputs_; ++ ::google::protobuf::RepeatedPtrField< ::paddle::framework::proto::OpProto_Attr > attrs_; ++ ::google::protobuf::internal::ArenaStringPtr type_; ++ ::google::protobuf::internal::ArenaStringPtr comment_; ++ friend void protobuf_InitDefaults_framework_2eproto_impl(); ++ friend void protobuf_AddDesc_framework_2eproto_impl(); ++ friend void protobuf_AssignDesc_framework_2eproto(); ++ friend void protobuf_ShutdownFile_framework_2eproto(); ++ ++ void InitAsDefaultInstance(); ++}; ++extern ::google::protobuf::internal::ExplicitlyConstructed OpProto_default_instance_; ++ ++// ------------------------------------------------------------------- ++ ++class VarType_TensorDesc : public ::google::protobuf::Message /* @@protoc_insertion_point(class_definition:paddle.framework.proto.VarType.TensorDesc) */ { ++ public: ++ VarType_TensorDesc(); ++ virtual ~VarType_TensorDesc(); ++ ++ VarType_TensorDesc(const VarType_TensorDesc& from); ++ ++ inline VarType_TensorDesc& operator=(const VarType_TensorDesc& from) { ++ CopyFrom(from); ++ return *this; ++ } ++ ++ inline const ::google::protobuf::UnknownFieldSet& unknown_fields() const { ++ return _internal_metadata_.unknown_fields(); ++ } ++ ++ inline ::google::protobuf::UnknownFieldSet* mutable_unknown_fields() { ++ return _internal_metadata_.mutable_unknown_fields(); ++ } ++ ++ static const ::google::protobuf::Descriptor* descriptor(); ++ static const VarType_TensorDesc& default_instance(); ++ ++ static const VarType_TensorDesc* internal_default_instance(); ++ ++ void Swap(VarType_TensorDesc* other); ++ ++ // implements Message ---------------------------------------------- ++ ++ inline VarType_TensorDesc* New() const { return New(NULL); } ++ ++ VarType_TensorDesc* New(::google::protobuf::Arena* arena) const; ++ void CopyFrom(const ::google::protobuf::Message& from); ++ void MergeFrom(const ::google::protobuf::Message& from); ++ void CopyFrom(const VarType_TensorDesc& from); ++ void MergeFrom(const VarType_TensorDesc& from); ++ void Clear(); ++ bool IsInitialized() const; ++ ++ size_t ByteSizeLong() const; ++ bool MergePartialFromCodedStream( ++ ::google::protobuf::io::CodedInputStream* input); ++ void SerializeWithCachedSizes( ++ ::google::protobuf::io::CodedOutputStream* output) const; ++ ::google::protobuf::uint8* InternalSerializeWithCachedSizesToArray( ++ bool deterministic, ::google::protobuf::uint8* output) const; ++ ::google::protobuf::uint8* SerializeWithCachedSizesToArray(::google::protobuf::uint8* output) const { ++ return InternalSerializeWithCachedSizesToArray(false, output); ++ } ++ int GetCachedSize() const { return _cached_size_; } ++ private: ++ void SharedCtor(); ++ void SharedDtor(); ++ void SetCachedSize(int size) const; ++ void InternalSwap(VarType_TensorDesc* other); ++ void UnsafeMergeFrom(const VarType_TensorDesc& from); ++ private: ++ inline ::google::protobuf::Arena* GetArenaNoVirtual() const 
{ ++ return _internal_metadata_.arena(); ++ } ++ inline void* MaybeArenaPtr() const { ++ return _internal_metadata_.raw_arena_ptr(); ++ } ++ public: ++ ++ ::google::protobuf::Metadata GetMetadata() const; ++ ++ // nested types ---------------------------------------------------- ++ ++ // accessors ------------------------------------------------------- ++ ++ // required .paddle.framework.proto.VarType.Type data_type = 1; ++ bool has_data_type() const; ++ void clear_data_type(); ++ static const int kDataTypeFieldNumber = 1; ++ ::paddle::framework::proto::VarType_Type data_type() const; ++ void set_data_type(::paddle::framework::proto::VarType_Type value); ++ ++ // repeated int64 dims = 2; ++ int dims_size() const; ++ void clear_dims(); ++ static const int kDimsFieldNumber = 2; ++ ::google::protobuf::int64 dims(int index) const; ++ void set_dims(int index, ::google::protobuf::int64 value); ++ void add_dims(::google::protobuf::int64 value); ++ const ::google::protobuf::RepeatedField< ::google::protobuf::int64 >& ++ dims() const; ++ ::google::protobuf::RepeatedField< ::google::protobuf::int64 >* ++ mutable_dims(); ++ ++ // @@protoc_insertion_point(class_scope:paddle.framework.proto.VarType.TensorDesc) ++ private: ++ inline void set_has_data_type(); ++ inline void clear_has_data_type(); ++ ++ ::google::protobuf::internal::InternalMetadataWithArena _internal_metadata_; ++ ::google::protobuf::internal::HasBits<1> _has_bits_; ++ mutable int _cached_size_; ++ ::google::protobuf::RepeatedField< ::google::protobuf::int64 > dims_; ++ int data_type_; ++ friend void protobuf_InitDefaults_framework_2eproto_impl(); ++ friend void protobuf_AddDesc_framework_2eproto_impl(); ++ friend void protobuf_AssignDesc_framework_2eproto(); ++ friend void protobuf_ShutdownFile_framework_2eproto(); ++ ++ void InitAsDefaultInstance(); ++}; ++extern ::google::protobuf::internal::ExplicitlyConstructed VarType_TensorDesc_default_instance_; ++ ++// ------------------------------------------------------------------- ++ ++class VarType_LoDTensorDesc : public ::google::protobuf::Message /* @@protoc_insertion_point(class_definition:paddle.framework.proto.VarType.LoDTensorDesc) */ { ++ public: ++ VarType_LoDTensorDesc(); ++ virtual ~VarType_LoDTensorDesc(); ++ ++ VarType_LoDTensorDesc(const VarType_LoDTensorDesc& from); ++ ++ inline VarType_LoDTensorDesc& operator=(const VarType_LoDTensorDesc& from) { ++ CopyFrom(from); ++ return *this; ++ } ++ ++ inline const ::google::protobuf::UnknownFieldSet& unknown_fields() const { ++ return _internal_metadata_.unknown_fields(); ++ } ++ ++ inline ::google::protobuf::UnknownFieldSet* mutable_unknown_fields() { ++ return _internal_metadata_.mutable_unknown_fields(); ++ } ++ ++ static const ::google::protobuf::Descriptor* descriptor(); ++ static const VarType_LoDTensorDesc& default_instance(); ++ ++ static const VarType_LoDTensorDesc* internal_default_instance(); ++ ++ void Swap(VarType_LoDTensorDesc* other); ++ ++ // implements Message ---------------------------------------------- ++ ++ inline VarType_LoDTensorDesc* New() const { return New(NULL); } ++ ++ VarType_LoDTensorDesc* New(::google::protobuf::Arena* arena) const; ++ void CopyFrom(const ::google::protobuf::Message& from); ++ void MergeFrom(const ::google::protobuf::Message& from); ++ void CopyFrom(const VarType_LoDTensorDesc& from); ++ void MergeFrom(const VarType_LoDTensorDesc& from); ++ void Clear(); ++ bool IsInitialized() const; ++ ++ size_t ByteSizeLong() const; ++ bool MergePartialFromCodedStream( ++ 
::google::protobuf::io::CodedInputStream* input); ++ void SerializeWithCachedSizes( ++ ::google::protobuf::io::CodedOutputStream* output) const; ++ ::google::protobuf::uint8* InternalSerializeWithCachedSizesToArray( ++ bool deterministic, ::google::protobuf::uint8* output) const; ++ ::google::protobuf::uint8* SerializeWithCachedSizesToArray(::google::protobuf::uint8* output) const { ++ return InternalSerializeWithCachedSizesToArray(false, output); ++ } ++ int GetCachedSize() const { return _cached_size_; } ++ private: ++ void SharedCtor(); ++ void SharedDtor(); ++ void SetCachedSize(int size) const; ++ void InternalSwap(VarType_LoDTensorDesc* other); ++ void UnsafeMergeFrom(const VarType_LoDTensorDesc& from); ++ private: ++ inline ::google::protobuf::Arena* GetArenaNoVirtual() const { ++ return _internal_metadata_.arena(); ++ } ++ inline void* MaybeArenaPtr() const { ++ return _internal_metadata_.raw_arena_ptr(); ++ } ++ public: ++ ++ ::google::protobuf::Metadata GetMetadata() const; ++ ++ // nested types ---------------------------------------------------- ++ ++ // accessors ------------------------------------------------------- ++ ++ // required .paddle.framework.proto.VarType.TensorDesc tensor = 1; ++ bool has_tensor() const; ++ void clear_tensor(); ++ static const int kTensorFieldNumber = 1; ++ const ::paddle::framework::proto::VarType_TensorDesc& tensor() const; ++ ::paddle::framework::proto::VarType_TensorDesc* mutable_tensor(); ++ ::paddle::framework::proto::VarType_TensorDesc* release_tensor(); ++ void set_allocated_tensor(::paddle::framework::proto::VarType_TensorDesc* tensor); ++ ++ // optional int32 lod_level = 2 [default = 0]; ++ bool has_lod_level() const; ++ void clear_lod_level(); ++ static const int kLodLevelFieldNumber = 2; ++ ::google::protobuf::int32 lod_level() const; ++ void set_lod_level(::google::protobuf::int32 value); ++ ++ // @@protoc_insertion_point(class_scope:paddle.framework.proto.VarType.LoDTensorDesc) ++ private: ++ inline void set_has_tensor(); ++ inline void clear_has_tensor(); ++ inline void set_has_lod_level(); ++ inline void clear_has_lod_level(); ++ ++ ::google::protobuf::internal::InternalMetadataWithArena _internal_metadata_; ++ ::google::protobuf::internal::HasBits<1> _has_bits_; ++ mutable int _cached_size_; ++ ::paddle::framework::proto::VarType_TensorDesc* tensor_; ++ ::google::protobuf::int32 lod_level_; ++ friend void protobuf_InitDefaults_framework_2eproto_impl(); ++ friend void protobuf_AddDesc_framework_2eproto_impl(); ++ friend void protobuf_AssignDesc_framework_2eproto(); ++ friend void protobuf_ShutdownFile_framework_2eproto(); ++ ++ void InitAsDefaultInstance(); ++}; ++extern ::google::protobuf::internal::ExplicitlyConstructed VarType_LoDTensorDesc_default_instance_; ++ ++// ------------------------------------------------------------------- ++ ++class VarType_LoDTensorArrayDesc : public ::google::protobuf::Message /* @@protoc_insertion_point(class_definition:paddle.framework.proto.VarType.LoDTensorArrayDesc) */ { ++ public: ++ VarType_LoDTensorArrayDesc(); ++ virtual ~VarType_LoDTensorArrayDesc(); ++ ++ VarType_LoDTensorArrayDesc(const VarType_LoDTensorArrayDesc& from); ++ ++ inline VarType_LoDTensorArrayDesc& operator=(const VarType_LoDTensorArrayDesc& from) { ++ CopyFrom(from); ++ return *this; ++ } ++ ++ inline const ::google::protobuf::UnknownFieldSet& unknown_fields() const { ++ return _internal_metadata_.unknown_fields(); ++ } ++ ++ inline ::google::protobuf::UnknownFieldSet* mutable_unknown_fields() { ++ return 
_internal_metadata_.mutable_unknown_fields();
++  }
++
++  static const ::google::protobuf::Descriptor* descriptor();
++  static const VarType_LoDTensorArrayDesc& default_instance();
++
++  static const VarType_LoDTensorArrayDesc* internal_default_instance();
++
++  void Swap(VarType_LoDTensorArrayDesc* other);
++
++  // implements Message ----------------------------------------------
++
++  inline VarType_LoDTensorArrayDesc* New() const { return New(NULL); }
++
++  VarType_LoDTensorArrayDesc* New(::google::protobuf::Arena* arena) const;
++  void CopyFrom(const ::google::protobuf::Message& from);
++  void MergeFrom(const ::google::protobuf::Message& from);
++  void CopyFrom(const VarType_LoDTensorArrayDesc& from);
++  void MergeFrom(const VarType_LoDTensorArrayDesc& from);
++  void Clear();
++  bool IsInitialized() const;
++
++  size_t ByteSizeLong() const;
++  bool MergePartialFromCodedStream(
++      ::google::protobuf::io::CodedInputStream* input);
++  void SerializeWithCachedSizes(
++      ::google::protobuf::io::CodedOutputStream* output) const;
++  ::google::protobuf::uint8* InternalSerializeWithCachedSizesToArray(
++      bool deterministic, ::google::protobuf::uint8* output) const;
++  ::google::protobuf::uint8* SerializeWithCachedSizesToArray(::google::protobuf::uint8* output) const {
++    return InternalSerializeWithCachedSizesToArray(false, output);
++  }
++  int GetCachedSize() const { return _cached_size_; }
++ private:
++  void SharedCtor();
++  void SharedDtor();
++  void SetCachedSize(int size) const;
++  void InternalSwap(VarType_LoDTensorArrayDesc* other);
++  void UnsafeMergeFrom(const VarType_LoDTensorArrayDesc& from);
++ private:
++  inline ::google::protobuf::Arena* GetArenaNoVirtual() const {
++    return _internal_metadata_.arena();
++  }
++  inline void* MaybeArenaPtr() const {
++    return _internal_metadata_.raw_arena_ptr();
++  }
++ public:
++
++  ::google::protobuf::Metadata GetMetadata() const;
++
++  // nested types ----------------------------------------------------
++
++  // accessors -------------------------------------------------------
++
++  // required .paddle.framework.proto.VarType.TensorDesc tensor = 1;
++  bool has_tensor() const;
++  void clear_tensor();
++  static const int kTensorFieldNumber = 1;
++  const ::paddle::framework::proto::VarType_TensorDesc& tensor() const;
++  ::paddle::framework::proto::VarType_TensorDesc* mutable_tensor();
++  ::paddle::framework::proto::VarType_TensorDesc* release_tensor();
++  void set_allocated_tensor(::paddle::framework::proto::VarType_TensorDesc* tensor);
++
++  // optional int32 lod_level = 2 [default = 0];
++  bool has_lod_level() const;
++  void clear_lod_level();
++  static const int kLodLevelFieldNumber = 2;
++  ::google::protobuf::int32 lod_level() const;
++  void set_lod_level(::google::protobuf::int32 value);
++
++  // @@protoc_insertion_point(class_scope:paddle.framework.proto.VarType.LoDTensorArrayDesc)
++ private:
++  inline void set_has_tensor();
++  inline void clear_has_tensor();
++  inline void set_has_lod_level();
++  inline void clear_has_lod_level();
++
++  ::google::protobuf::internal::InternalMetadataWithArena _internal_metadata_;
++  ::google::protobuf::internal::HasBits<1> _has_bits_;
++  mutable int _cached_size_;
++  ::paddle::framework::proto::VarType_TensorDesc* tensor_;
++  ::google::protobuf::int32 lod_level_;
++  friend void protobuf_InitDefaults_framework_2eproto_impl();
++  friend void protobuf_AddDesc_framework_2eproto_impl();
++  friend void protobuf_AssignDesc_framework_2eproto();
++  friend void protobuf_ShutdownFile_framework_2eproto();
++
++  void InitAsDefaultInstance();
++};
++extern ::google::protobuf::internal::ExplicitlyConstructed<VarType_LoDTensorArrayDesc> VarType_LoDTensorArrayDesc_default_instance_;
++
++// -------------------------------------------------------------------
++
++class VarType_ReaderDesc : public ::google::protobuf::Message /* @@protoc_insertion_point(class_definition:paddle.framework.proto.VarType.ReaderDesc) */ {
++ public:
++  VarType_ReaderDesc();
++  virtual ~VarType_ReaderDesc();
++
++  VarType_ReaderDesc(const VarType_ReaderDesc& from);
++
++  inline VarType_ReaderDesc& operator=(const VarType_ReaderDesc& from) {
++    CopyFrom(from);
++    return *this;
++  }
++
++  inline const ::google::protobuf::UnknownFieldSet& unknown_fields() const {
++    return _internal_metadata_.unknown_fields();
++  }
++
++  inline ::google::protobuf::UnknownFieldSet* mutable_unknown_fields() {
++    return _internal_metadata_.mutable_unknown_fields();
++  }
++
++  static const ::google::protobuf::Descriptor* descriptor();
++  static const VarType_ReaderDesc& default_instance();
++
++  static const VarType_ReaderDesc* internal_default_instance();
++
++  void Swap(VarType_ReaderDesc* other);
++
++  // implements Message ----------------------------------------------
++
++  inline VarType_ReaderDesc* New() const { return New(NULL); }
++
++  VarType_ReaderDesc* New(::google::protobuf::Arena* arena) const;
++  void CopyFrom(const ::google::protobuf::Message& from);
++  void MergeFrom(const ::google::protobuf::Message& from);
++  void CopyFrom(const VarType_ReaderDesc& from);
++  void MergeFrom(const VarType_ReaderDesc& from);
++  void Clear();
++  bool IsInitialized() const;
++
++  size_t ByteSizeLong() const;
++  bool MergePartialFromCodedStream(
++      ::google::protobuf::io::CodedInputStream* input);
++  void SerializeWithCachedSizes(
++      ::google::protobuf::io::CodedOutputStream* output) const;
++  ::google::protobuf::uint8* InternalSerializeWithCachedSizesToArray(
++      bool deterministic, ::google::protobuf::uint8* output) const;
++  ::google::protobuf::uint8* SerializeWithCachedSizesToArray(::google::protobuf::uint8* output) const {
++    return InternalSerializeWithCachedSizesToArray(false, output);
++  }
++  int GetCachedSize() const { return _cached_size_; }
++ private:
++  void SharedCtor();
++  void SharedDtor();
++  void SetCachedSize(int size) const;
++  void InternalSwap(VarType_ReaderDesc* other);
++  void UnsafeMergeFrom(const VarType_ReaderDesc& from);
++ private:
++  inline ::google::protobuf::Arena* GetArenaNoVirtual() const {
++    return _internal_metadata_.arena();
++  }
++  inline void* MaybeArenaPtr() const {
++    return _internal_metadata_.raw_arena_ptr();
++  }
++ public:
++
++  ::google::protobuf::Metadata GetMetadata() const;
++
++  // nested types ----------------------------------------------------
++
++  // accessors -------------------------------------------------------
++
++  // repeated .paddle.framework.proto.VarType.LoDTensorDesc lod_tensor = 1;
++  int lod_tensor_size() const;
++  void clear_lod_tensor();
++  static const int kLodTensorFieldNumber = 1;
++  const ::paddle::framework::proto::VarType_LoDTensorDesc& lod_tensor(int index) const;
++  ::paddle::framework::proto::VarType_LoDTensorDesc* mutable_lod_tensor(int index);
++  ::paddle::framework::proto::VarType_LoDTensorDesc* add_lod_tensor();
++  ::google::protobuf::RepeatedPtrField< ::paddle::framework::proto::VarType_LoDTensorDesc >*
++      mutable_lod_tensor();
++  const ::google::protobuf::RepeatedPtrField< ::paddle::framework::proto::VarType_LoDTensorDesc >&
++      lod_tensor() const;
++
++  // @@protoc_insertion_point(class_scope:paddle.framework.proto.VarType.ReaderDesc)
++ private:
++
++  ::google::protobuf::internal::InternalMetadataWithArena _internal_metadata_;
++  ::google::protobuf::internal::HasBits<1> _has_bits_;
++  mutable int _cached_size_;
++  ::google::protobuf::RepeatedPtrField< ::paddle::framework::proto::VarType_LoDTensorDesc > lod_tensor_;
++  friend void protobuf_InitDefaults_framework_2eproto_impl();
++  friend void protobuf_AddDesc_framework_2eproto_impl();
++  friend void protobuf_AssignDesc_framework_2eproto();
++  friend void protobuf_ShutdownFile_framework_2eproto();
++
++  void InitAsDefaultInstance();
++};
++extern ::google::protobuf::internal::ExplicitlyConstructed<VarType_ReaderDesc> VarType_ReaderDesc_default_instance_;
++
++// -------------------------------------------------------------------
++
++class VarType_Tuple : public ::google::protobuf::Message /* @@protoc_insertion_point(class_definition:paddle.framework.proto.VarType.Tuple) */ {
++ public:
++  VarType_Tuple();
++  virtual ~VarType_Tuple();
++
++  VarType_Tuple(const VarType_Tuple& from);
++
++  inline VarType_Tuple& operator=(const VarType_Tuple& from) {
++    CopyFrom(from);
++    return *this;
++  }
++
++  inline const ::google::protobuf::UnknownFieldSet& unknown_fields() const {
++    return _internal_metadata_.unknown_fields();
++  }
++
++  inline ::google::protobuf::UnknownFieldSet* mutable_unknown_fields() {
++    return _internal_metadata_.mutable_unknown_fields();
++  }
++
++  static const ::google::protobuf::Descriptor* descriptor();
++  static const VarType_Tuple& default_instance();
++
++  static const VarType_Tuple* internal_default_instance();
++
++  void Swap(VarType_Tuple* other);
++
++  // implements Message ----------------------------------------------
++
++  inline VarType_Tuple* New() const { return New(NULL); }
++
++  VarType_Tuple* New(::google::protobuf::Arena* arena) const;
++  void CopyFrom(const ::google::protobuf::Message& from);
++  void MergeFrom(const ::google::protobuf::Message& from);
++  void CopyFrom(const VarType_Tuple& from);
++  void MergeFrom(const VarType_Tuple& from);
++  void Clear();
++  bool IsInitialized() const;
++
++  size_t ByteSizeLong() const;
++  bool MergePartialFromCodedStream(
++      ::google::protobuf::io::CodedInputStream* input);
++  void SerializeWithCachedSizes(
++      ::google::protobuf::io::CodedOutputStream* output) const;
++  ::google::protobuf::uint8* InternalSerializeWithCachedSizesToArray(
++      bool deterministic, ::google::protobuf::uint8* output) const;
++  ::google::protobuf::uint8* SerializeWithCachedSizesToArray(::google::protobuf::uint8* output) const {
++    return InternalSerializeWithCachedSizesToArray(false, output);
++  }
++  int GetCachedSize() const { return _cached_size_; }
++ private:
++  void SharedCtor();
++  void SharedDtor();
++  void SetCachedSize(int size) const;
++  void InternalSwap(VarType_Tuple* other);
++  void UnsafeMergeFrom(const VarType_Tuple& from);
++ private:
++  inline ::google::protobuf::Arena* GetArenaNoVirtual() const {
++    return _internal_metadata_.arena();
++  }
++  inline void* MaybeArenaPtr() const {
++    return _internal_metadata_.raw_arena_ptr();
++  }
++ public:
++
++  ::google::protobuf::Metadata GetMetadata() const;
++
++  // nested types ----------------------------------------------------
++
++  // accessors -------------------------------------------------------
++
++  // repeated .paddle.framework.proto.VarType.Type element_type = 1;
++  int element_type_size() const;
++  void clear_element_type();
++  static const int kElementTypeFieldNumber = 1;
++  ::paddle::framework::proto::VarType_Type element_type(int index) const;
++  void set_element_type(int index, ::paddle::framework::proto::VarType_Type value);
++  void add_element_type(::paddle::framework::proto::VarType_Type value);
++  const ::google::protobuf::RepeatedField<int>& element_type() const;
++  ::google::protobuf::RepeatedField<int>* mutable_element_type();
++
++  // @@protoc_insertion_point(class_scope:paddle.framework.proto.VarType.Tuple)
++ private:
++
++  ::google::protobuf::internal::InternalMetadataWithArena _internal_metadata_;
++  ::google::protobuf::internal::HasBits<1> _has_bits_;
++  mutable int _cached_size_;
++  ::google::protobuf::RepeatedField<int> element_type_;
++  friend void protobuf_InitDefaults_framework_2eproto_impl();
++  friend void protobuf_AddDesc_framework_2eproto_impl();
++  friend void protobuf_AssignDesc_framework_2eproto();
++  friend void protobuf_ShutdownFile_framework_2eproto();
++
++  void InitAsDefaultInstance();
++};
++extern ::google::protobuf::internal::ExplicitlyConstructed<VarType_Tuple> VarType_Tuple_default_instance_;
++
++// -------------------------------------------------------------------
++
++class VarType : public ::google::protobuf::Message /* @@protoc_insertion_point(class_definition:paddle.framework.proto.VarType) */ {
++ public:
++  VarType();
++  virtual ~VarType();
++
++  VarType(const VarType& from);
++
++  inline VarType& operator=(const VarType& from) {
++    CopyFrom(from);
++    return *this;
++  }
++
++  inline const ::google::protobuf::UnknownFieldSet& unknown_fields() const {
++    return _internal_metadata_.unknown_fields();
++  }
++
++  inline ::google::protobuf::UnknownFieldSet* mutable_unknown_fields() {
++    return _internal_metadata_.mutable_unknown_fields();
++  }
++
++  static const ::google::protobuf::Descriptor* descriptor();
++  static const VarType& default_instance();
++
++  static const VarType* internal_default_instance();
++
++  void Swap(VarType* other);
++
++  // implements Message ----------------------------------------------
++
++  inline VarType* New() const { return New(NULL); }
++
++  VarType* New(::google::protobuf::Arena* arena) const;
++  void CopyFrom(const ::google::protobuf::Message& from);
++  void MergeFrom(const ::google::protobuf::Message& from);
++  void CopyFrom(const VarType& from);
++  void MergeFrom(const VarType& from);
++  void Clear();
++  bool IsInitialized() const;
++
++  size_t ByteSizeLong() const;
++  bool MergePartialFromCodedStream(
++      ::google::protobuf::io::CodedInputStream* input);
++  void SerializeWithCachedSizes(
++      ::google::protobuf::io::CodedOutputStream* output) const;
++  ::google::protobuf::uint8* InternalSerializeWithCachedSizesToArray(
++      bool deterministic, ::google::protobuf::uint8* output) const;
++  ::google::protobuf::uint8* SerializeWithCachedSizesToArray(::google::protobuf::uint8* output) const {
++    return InternalSerializeWithCachedSizesToArray(false, output);
++  }
++  int GetCachedSize() const { return _cached_size_; }
++ private:
++  void SharedCtor();
++  void SharedDtor();
++  void SetCachedSize(int size) const;
++  void InternalSwap(VarType* other);
++  void UnsafeMergeFrom(const VarType& from);
++ private:
++  inline ::google::protobuf::Arena* GetArenaNoVirtual() const {
++    return _internal_metadata_.arena();
++  }
++  inline void* MaybeArenaPtr() const {
++    return _internal_metadata_.raw_arena_ptr();
++  }
++ public:
++
++  ::google::protobuf::Metadata GetMetadata() const;
++
++  // nested types ----------------------------------------------------
++
++  typedef VarType_TensorDesc TensorDesc;
++  typedef VarType_LoDTensorDesc LoDTensorDesc;
++  typedef VarType_LoDTensorArrayDesc LoDTensorArrayDesc;
++  typedef VarType_ReaderDesc ReaderDesc;
++  typedef VarType_Tuple Tuple;
++
++  typedef VarType_Type Type;
++  static const Type BOOL =
++    VarType_Type_BOOL;
++  static const Type INT16 =
++    VarType_Type_INT16;
++  static const Type INT32 =
++    VarType_Type_INT32;
++  static const Type INT64 =
++    VarType_Type_INT64;
++  static const Type FP16 =
++    VarType_Type_FP16;
++  static const Type FP32 =
++    VarType_Type_FP32;
++  static const Type FP64 =
++    VarType_Type_FP64;
++  static const Type SIZE_T =
++    VarType_Type_SIZE_T;
++  static const Type UINT8 =
++    VarType_Type_UINT8;
++  static const Type INT8 =
++    VarType_Type_INT8;
++  static const Type BF16 =
++    VarType_Type_BF16;
++  static const Type COMPLEX64 =
++    VarType_Type_COMPLEX64;
++  static const Type COMPLEX128 =
++    VarType_Type_COMPLEX128;
++  static const Type LOD_TENSOR =
++    VarType_Type_LOD_TENSOR;
++  static const Type SELECTED_ROWS =
++    VarType_Type_SELECTED_ROWS;
++  static const Type FEED_MINIBATCH =
++    VarType_Type_FEED_MINIBATCH;
++  static const Type FETCH_LIST =
++    VarType_Type_FETCH_LIST;
++  static const Type STEP_SCOPES =
++    VarType_Type_STEP_SCOPES;
++  static const Type LOD_RANK_TABLE =
++    VarType_Type_LOD_RANK_TABLE;
++  static const Type LOD_TENSOR_ARRAY =
++    VarType_Type_LOD_TENSOR_ARRAY;
++  static const Type PLACE_LIST =
++    VarType_Type_PLACE_LIST;
++  static const Type READER =
++    VarType_Type_READER;
++  static const Type RAW =
++    VarType_Type_RAW;
++  static const Type TUPLE =
++    VarType_Type_TUPLE;
++  static inline bool Type_IsValid(int value) {
++    return VarType_Type_IsValid(value);
++  }
++  static const Type Type_MIN =
++    VarType_Type_Type_MIN;
++  static const Type Type_MAX =
++    VarType_Type_Type_MAX;
++  static const int Type_ARRAYSIZE =
++    VarType_Type_Type_ARRAYSIZE;
++  static inline const ::google::protobuf::EnumDescriptor*
++  Type_descriptor() {
++    return VarType_Type_descriptor();
++  }
++  static inline const ::std::string& Type_Name(Type value) {
++    return VarType_Type_Name(value);
++  }
++  static inline bool Type_Parse(const ::std::string& name,
++      Type* value) {
++    return VarType_Type_Parse(name, value);
++  }
++
++  // accessors -------------------------------------------------------
++
++  // required .paddle.framework.proto.VarType.Type type = 1;
++  bool has_type() const;
++  void clear_type();
++  static const int kTypeFieldNumber = 1;
++  ::paddle::framework::proto::VarType_Type type() const;
++  void set_type(::paddle::framework::proto::VarType_Type value);
++
++  // optional .paddle.framework.proto.VarType.TensorDesc selected_rows = 2;
++  bool has_selected_rows() const;
++  void clear_selected_rows();
++  static const int kSelectedRowsFieldNumber = 2;
++  const ::paddle::framework::proto::VarType_TensorDesc& selected_rows() const;
++  ::paddle::framework::proto::VarType_TensorDesc* mutable_selected_rows();
++  ::paddle::framework::proto::VarType_TensorDesc* release_selected_rows();
++  void set_allocated_selected_rows(::paddle::framework::proto::VarType_TensorDesc* selected_rows);
++
++  // optional .paddle.framework.proto.VarType.LoDTensorDesc lod_tensor = 3;
++  bool has_lod_tensor() const;
++  void clear_lod_tensor();
++  static const int kLodTensorFieldNumber = 3;
++  const ::paddle::framework::proto::VarType_LoDTensorDesc& lod_tensor() const;
++  ::paddle::framework::proto::VarType_LoDTensorDesc* mutable_lod_tensor();
++  ::paddle::framework::proto::VarType_LoDTensorDesc* release_lod_tensor();
++  void set_allocated_lod_tensor(::paddle::framework::proto::VarType_LoDTensorDesc* lod_tensor);
++
++  // optional .paddle.framework.proto.VarType.LoDTensorArrayDesc tensor_array = 4;
++  bool has_tensor_array() const;
++  void clear_tensor_array();
++  static const int kTensorArrayFieldNumber = 4;
++  const ::paddle::framework::proto::VarType_LoDTensorArrayDesc& tensor_array() const;
++  ::paddle::framework::proto::VarType_LoDTensorArrayDesc* mutable_tensor_array();
++  ::paddle::framework::proto::VarType_LoDTensorArrayDesc* release_tensor_array();
++  void set_allocated_tensor_array(::paddle::framework::proto::VarType_LoDTensorArrayDesc* tensor_array);
++
++  // optional .paddle.framework.proto.VarType.ReaderDesc reader = 5;
++  bool has_reader() const;
++  void clear_reader();
++  static const int kReaderFieldNumber = 5;
++  const ::paddle::framework::proto::VarType_ReaderDesc& reader() const;
++  ::paddle::framework::proto::VarType_ReaderDesc* mutable_reader();
++  ::paddle::framework::proto::VarType_ReaderDesc* release_reader();
++  void set_allocated_reader(::paddle::framework::proto::VarType_ReaderDesc* reader);
++
++  // optional .paddle.framework.proto.VarType.Tuple tuple = 7;
++  bool has_tuple() const;
++  void clear_tuple();
++  static const int kTupleFieldNumber = 7;
++  const ::paddle::framework::proto::VarType_Tuple& tuple() const;
++  ::paddle::framework::proto::VarType_Tuple* mutable_tuple();
++  ::paddle::framework::proto::VarType_Tuple* release_tuple();
++  void set_allocated_tuple(::paddle::framework::proto::VarType_Tuple* tuple);
++
++  // @@protoc_insertion_point(class_scope:paddle.framework.proto.VarType)
++ private:
++  inline void set_has_type();
++  inline void clear_has_type();
++  inline void set_has_selected_rows();
++  inline void clear_has_selected_rows();
++  inline void set_has_lod_tensor();
++  inline void clear_has_lod_tensor();
++  inline void set_has_tensor_array();
++  inline void clear_has_tensor_array();
++  inline void set_has_reader();
++  inline void clear_has_reader();
++  inline void set_has_tuple();
++  inline void clear_has_tuple();
++
++  ::google::protobuf::internal::InternalMetadataWithArena _internal_metadata_;
++  ::google::protobuf::internal::HasBits<1> _has_bits_;
++  mutable int _cached_size_;
++  ::paddle::framework::proto::VarType_TensorDesc* selected_rows_;
++  ::paddle::framework::proto::VarType_LoDTensorDesc* lod_tensor_;
++  ::paddle::framework::proto::VarType_LoDTensorArrayDesc* tensor_array_;
++  ::paddle::framework::proto::VarType_ReaderDesc* reader_;
++  ::paddle::framework::proto::VarType_Tuple* tuple_;
++  int type_;
++  friend void protobuf_InitDefaults_framework_2eproto_impl();
++  friend void protobuf_AddDesc_framework_2eproto_impl();
++  friend void protobuf_AssignDesc_framework_2eproto();
++  friend void protobuf_ShutdownFile_framework_2eproto();
++
++  void InitAsDefaultInstance();
++};
++extern ::google::protobuf::internal::ExplicitlyConstructed<VarType> VarType_default_instance_;
++
++// -------------------------------------------------------------------
++
++class VarDesc : public ::google::protobuf::Message /* @@protoc_insertion_point(class_definition:paddle.framework.proto.VarDesc) */ {
++ public:
++  VarDesc();
++  virtual ~VarDesc();
++
++  VarDesc(const VarDesc& from);
++
++  inline VarDesc& operator=(const VarDesc& from) {
++    CopyFrom(from);
++    return *this;
++  }
++
++  inline const ::google::protobuf::UnknownFieldSet& unknown_fields() const {
++    return _internal_metadata_.unknown_fields();
++  }
++
++  inline ::google::protobuf::UnknownFieldSet* mutable_unknown_fields() {
++    return _internal_metadata_.mutable_unknown_fields();
++  }
++
++  static const ::google::protobuf::Descriptor* descriptor();
++  static const VarDesc& default_instance();
++
++  static const VarDesc* internal_default_instance();
++
++  void Swap(VarDesc* other);
++
++  // implements Message ----------------------------------------------
++
++  inline VarDesc* New() const { return New(NULL); }
++
++  VarDesc* New(::google::protobuf::Arena* arena) const;
++  void CopyFrom(const ::google::protobuf::Message& from);
++  void MergeFrom(const ::google::protobuf::Message& from);
++  void CopyFrom(const VarDesc& from);
++  void MergeFrom(const VarDesc& from);
++  void Clear();
++  bool IsInitialized() const;
++
++  size_t ByteSizeLong() const;
++  bool MergePartialFromCodedStream(
++      ::google::protobuf::io::CodedInputStream* input);
++  void SerializeWithCachedSizes(
++      ::google::protobuf::io::CodedOutputStream* output) const;
++  ::google::protobuf::uint8* InternalSerializeWithCachedSizesToArray(
++      bool deterministic, ::google::protobuf::uint8* output) const;
++  ::google::protobuf::uint8* SerializeWithCachedSizesToArray(::google::protobuf::uint8* output) const {
++    return InternalSerializeWithCachedSizesToArray(false, output);
++  }
++  int GetCachedSize() const { return _cached_size_; }
++ private:
++  void SharedCtor();
++  void SharedDtor();
++  void SetCachedSize(int size) const;
++  void InternalSwap(VarDesc* other);
++  void UnsafeMergeFrom(const VarDesc& from);
++ private:
++  inline ::google::protobuf::Arena* GetArenaNoVirtual() const {
++    return _internal_metadata_.arena();
++  }
++  inline void* MaybeArenaPtr() const {
++    return _internal_metadata_.raw_arena_ptr();
++  }
++ public:
++
++  ::google::protobuf::Metadata GetMetadata() const;
++
++  // nested types ----------------------------------------------------
++
++  // accessors -------------------------------------------------------
++
++  // required string name = 1;
++  bool has_name() const;
++  void clear_name();
++  static const int kNameFieldNumber = 1;
++  const ::std::string& name() const;
++  void set_name(const ::std::string& value);
++  void set_name(const char* value);
++  void set_name(const char* value, size_t size);
++  ::std::string* mutable_name();
++  ::std::string* release_name();
++  void set_allocated_name(::std::string* name);
++
++  // required .paddle.framework.proto.VarType type = 2;
++  bool has_type() const;
++  void clear_type();
++  static const int kTypeFieldNumber = 2;
++  const ::paddle::framework::proto::VarType& type() const;
++  ::paddle::framework::proto::VarType* mutable_type();
++  ::paddle::framework::proto::VarType* release_type();
++  void set_allocated_type(::paddle::framework::proto::VarType* type);
++
++  // optional bool persistable = 3 [default = false];
++  bool has_persistable() const;
++  void clear_persistable();
++  static const int kPersistableFieldNumber = 3;
++  bool persistable() const;
++  void set_persistable(bool value);
++
++  // optional bool need_check_feed = 4 [default = false];
++  bool has_need_check_feed() const;
++  void clear_need_check_feed();
++  static const int kNeedCheckFeedFieldNumber = 4;
++  bool need_check_feed() const;
++  void set_need_check_feed(bool value);
++
++  // @@protoc_insertion_point(class_scope:paddle.framework.proto.VarDesc)
++ private:
++  inline void set_has_name();
++  inline void clear_has_name();
++  inline void set_has_type();
++  inline void clear_has_type();
++  inline void set_has_persistable();
++  inline void clear_has_persistable();
++  inline void set_has_need_check_feed();
++  inline void clear_has_need_check_feed();
++
++  // helper for ByteSizeLong()
++  size_t RequiredFieldsByteSizeFallback() const;
++
++  ::google::protobuf::internal::InternalMetadataWithArena _internal_metadata_;
++  ::google::protobuf::internal::HasBits<1> _has_bits_;
++  mutable int _cached_size_;
++  ::google::protobuf::internal::ArenaStringPtr name_;
++  ::paddle::framework::proto::VarType* type_;
++  bool persistable_;
++  bool need_check_feed_;
++  friend void protobuf_InitDefaults_framework_2eproto_impl();
++  friend void protobuf_AddDesc_framework_2eproto_impl();
++  friend void protobuf_AssignDesc_framework_2eproto();
++  friend void protobuf_ShutdownFile_framework_2eproto();
++
++  void InitAsDefaultInstance();
++};
++extern ::google::protobuf::internal::ExplicitlyConstructed<VarDesc> VarDesc_default_instance_;
++
++// -------------------------------------------------------------------
++
++class BlockDesc : public ::google::protobuf::Message /* @@protoc_insertion_point(class_definition:paddle.framework.proto.BlockDesc) */ {
++ public:
++  BlockDesc();
++  virtual ~BlockDesc();
++
++  BlockDesc(const BlockDesc& from);
++
++  inline BlockDesc& operator=(const BlockDesc& from) {
++    CopyFrom(from);
++    return *this;
++  }
++
++  inline const ::google::protobuf::UnknownFieldSet& unknown_fields() const {
++    return _internal_metadata_.unknown_fields();
++  }
++
++  inline ::google::protobuf::UnknownFieldSet* mutable_unknown_fields() {
++    return _internal_metadata_.mutable_unknown_fields();
++  }
++
++  static const ::google::protobuf::Descriptor* descriptor();
++  static const BlockDesc& default_instance();
++
++  static const BlockDesc* internal_default_instance();
++
++  void Swap(BlockDesc* other);
++
++  // implements Message ----------------------------------------------
++
++  inline BlockDesc* New() const { return New(NULL); }
++
++  BlockDesc* New(::google::protobuf::Arena* arena) const;
++  void CopyFrom(const ::google::protobuf::Message& from);
++  void MergeFrom(const ::google::protobuf::Message& from);
++  void CopyFrom(const BlockDesc& from);
++  void MergeFrom(const BlockDesc& from);
++  void Clear();
++  bool IsInitialized() const;
++
++  size_t ByteSizeLong() const;
++  bool MergePartialFromCodedStream(
++      ::google::protobuf::io::CodedInputStream* input);
++  void SerializeWithCachedSizes(
++      ::google::protobuf::io::CodedOutputStream* output) const;
++  ::google::protobuf::uint8* InternalSerializeWithCachedSizesToArray(
++      bool deterministic, ::google::protobuf::uint8* output) const;
++  ::google::protobuf::uint8* SerializeWithCachedSizesToArray(::google::protobuf::uint8* output) const {
++    return InternalSerializeWithCachedSizesToArray(false, output);
++  }
++  int GetCachedSize() const { return _cached_size_; }
++ private:
++  void SharedCtor();
++  void SharedDtor();
++  void SetCachedSize(int size) const;
++  void InternalSwap(BlockDesc* other);
++  void UnsafeMergeFrom(const BlockDesc& from);
++ private:
++  inline ::google::protobuf::Arena* GetArenaNoVirtual() const {
++    return _internal_metadata_.arena();
++  }
++  inline void* MaybeArenaPtr() const {
++    return _internal_metadata_.raw_arena_ptr();
++  }
++ public:
++
++  ::google::protobuf::Metadata GetMetadata() const;
++
++  // nested types ----------------------------------------------------
++
++  // accessors -------------------------------------------------------
++
++  // required int32 idx = 1;
++  bool has_idx() const;
++  void clear_idx();
++  static const int kIdxFieldNumber = 1;
++  ::google::protobuf::int32 idx() const;
++  void set_idx(::google::protobuf::int32 value);
++
++  // required int32 parent_idx = 2;
++  bool has_parent_idx() const;
++  void clear_parent_idx();
++  static const int kParentIdxFieldNumber = 2;
++  ::google::protobuf::int32 parent_idx() const;
++  void set_parent_idx(::google::protobuf::int32 value);
++
++  // repeated .paddle.framework.proto.VarDesc vars = 3;
++  int vars_size() const;
++  void clear_vars();
++  static const int kVarsFieldNumber = 3;
++  const ::paddle::framework::proto::VarDesc& vars(int index) const;
++  ::paddle::framework::proto::VarDesc* mutable_vars(int index);
++  ::paddle::framework::proto::VarDesc* add_vars();
++  ::google::protobuf::RepeatedPtrField< ::paddle::framework::proto::VarDesc >*
++      mutable_vars();
++  const ::google::protobuf::RepeatedPtrField< ::paddle::framework::proto::VarDesc >&
++      vars() const;
++
++  // repeated .paddle.framework.proto.OpDesc ops = 4;
++  int ops_size() const;
++  void clear_ops();
++  static const int kOpsFieldNumber = 4;
++  const ::paddle::framework::proto::OpDesc& ops(int index) const;
++  ::paddle::framework::proto::OpDesc* mutable_ops(int index);
++  ::paddle::framework::proto::OpDesc* add_ops();
++  ::google::protobuf::RepeatedPtrField< ::paddle::framework::proto::OpDesc >*
++      mutable_ops();
++  const ::google::protobuf::RepeatedPtrField< ::paddle::framework::proto::OpDesc >&
++      ops() const;
++
++  // optional int32 forward_block_idx = 5 [default = -1];
++  bool has_forward_block_idx() const;
++  void clear_forward_block_idx();
++  static const int kForwardBlockIdxFieldNumber = 5;
++  ::google::protobuf::int32 forward_block_idx() const;
++  void set_forward_block_idx(::google::protobuf::int32 value);
++
++  // @@protoc_insertion_point(class_scope:paddle.framework.proto.BlockDesc)
++ private:
++  inline void set_has_idx();
++  inline void clear_has_idx();
++  inline void set_has_parent_idx();
++  inline void clear_has_parent_idx();
++  inline void set_has_forward_block_idx();
++  inline void clear_has_forward_block_idx();
++
++  // helper for ByteSizeLong()
++  size_t RequiredFieldsByteSizeFallback() const;
++
++  ::google::protobuf::internal::InternalMetadataWithArena _internal_metadata_;
++  ::google::protobuf::internal::HasBits<1> _has_bits_;
++  mutable int _cached_size_;
++  ::google::protobuf::RepeatedPtrField< ::paddle::framework::proto::VarDesc > vars_;
++  ::google::protobuf::RepeatedPtrField< ::paddle::framework::proto::OpDesc > ops_;
++  ::google::protobuf::int32 idx_;
++  ::google::protobuf::int32 parent_idx_;
++  ::google::protobuf::int32 forward_block_idx_;
++  friend void protobuf_InitDefaults_framework_2eproto_impl();
++  friend void protobuf_AddDesc_framework_2eproto_impl();
++  friend void protobuf_AssignDesc_framework_2eproto();
++  friend void protobuf_ShutdownFile_framework_2eproto();
++
++  void InitAsDefaultInstance();
++};
++extern ::google::protobuf::internal::ExplicitlyConstructed<BlockDesc> BlockDesc_default_instance_;
++
++// -------------------------------------------------------------------
++
++class OpVersion : public ::google::protobuf::Message /* @@protoc_insertion_point(class_definition:paddle.framework.proto.OpVersion) */ {
++ public:
++  OpVersion();
++  virtual ~OpVersion();
++
++  OpVersion(const OpVersion& from);
++
++  inline OpVersion& operator=(const OpVersion& from) {
++    CopyFrom(from);
++    return *this;
++  }
++
++  inline const ::google::protobuf::UnknownFieldSet& unknown_fields() const {
++    return _internal_metadata_.unknown_fields();
++  }
++
++  inline ::google::protobuf::UnknownFieldSet* mutable_unknown_fields() {
++    return _internal_metadata_.mutable_unknown_fields();
++  }
++
++  static const ::google::protobuf::Descriptor* descriptor();
++  static const OpVersion& default_instance();
++
++  static const OpVersion* internal_default_instance();
++
++  void Swap(OpVersion* other);
++
++  // implements Message ----------------------------------------------
++
++  inline OpVersion* New() const { return New(NULL); }
++
++  OpVersion* New(::google::protobuf::Arena* arena) const;
++  void CopyFrom(const ::google::protobuf::Message& from);
++  void MergeFrom(const ::google::protobuf::Message& from);
++  void CopyFrom(const OpVersion& from);
++  void MergeFrom(const OpVersion& from);
++  void Clear();
++  bool IsInitialized() const;
++
++  size_t ByteSizeLong() const;
++  bool MergePartialFromCodedStream(
++      ::google::protobuf::io::CodedInputStream* input);
++  void SerializeWithCachedSizes(
++      ::google::protobuf::io::CodedOutputStream* output) const;
++  ::google::protobuf::uint8* InternalSerializeWithCachedSizesToArray(
++      bool deterministic, ::google::protobuf::uint8* output) const;
++  ::google::protobuf::uint8* SerializeWithCachedSizesToArray(::google::protobuf::uint8* output) const {
++    return InternalSerializeWithCachedSizesToArray(false, output);
++  }
++  int GetCachedSize() const { return _cached_size_; }
++ private:
++  void SharedCtor();
++  void SharedDtor();
++  void SetCachedSize(int size) const;
++  void InternalSwap(OpVersion* other);
++  void UnsafeMergeFrom(const OpVersion& from);
++ private:
++  inline ::google::protobuf::Arena* GetArenaNoVirtual() const {
++    return _internal_metadata_.arena();
++  }
++  inline void* MaybeArenaPtr() const {
++    return _internal_metadata_.raw_arena_ptr();
++  }
++ public:
++
++  ::google::protobuf::Metadata GetMetadata() const;
++
++  // nested types ----------------------------------------------------
++
++  // accessors -------------------------------------------------------
++
++  // required int32 version = 1;
++  bool has_version() const;
++  void clear_version();
++  static const int kVersionFieldNumber = 1;
++  ::google::protobuf::int32 version() const;
++  void set_version(::google::protobuf::int32 value);
++
++  // @@protoc_insertion_point(class_scope:paddle.framework.proto.OpVersion)
++ private:
++  inline void set_has_version();
++  inline void clear_has_version();
++
++  ::google::protobuf::internal::InternalMetadataWithArena _internal_metadata_;
++  ::google::protobuf::internal::HasBits<1> _has_bits_;
++  mutable int _cached_size_;
++  ::google::protobuf::int32 version_;
++  friend void protobuf_InitDefaults_framework_2eproto_impl();
++  friend void protobuf_AddDesc_framework_2eproto_impl();
++  friend void protobuf_AssignDesc_framework_2eproto();
++  friend void protobuf_ShutdownFile_framework_2eproto();
++
++  void InitAsDefaultInstance();
++};
++extern ::google::protobuf::internal::ExplicitlyConstructed<OpVersion> OpVersion_default_instance_;
++
++// -------------------------------------------------------------------
++
++class OpVersionMap_OpVersionPair : public ::google::protobuf::Message /* @@protoc_insertion_point(class_definition:paddle.framework.proto.OpVersionMap.OpVersionPair) */ {
++ public:
++  OpVersionMap_OpVersionPair();
++  virtual ~OpVersionMap_OpVersionPair();
++
++  OpVersionMap_OpVersionPair(const OpVersionMap_OpVersionPair& from);
++
++  inline OpVersionMap_OpVersionPair& operator=(const OpVersionMap_OpVersionPair& from) {
++    CopyFrom(from);
++    return *this;
++  }
++
++  inline const ::google::protobuf::UnknownFieldSet& unknown_fields() const {
++    return _internal_metadata_.unknown_fields();
++  }
++
++  inline ::google::protobuf::UnknownFieldSet* mutable_unknown_fields() {
++    return _internal_metadata_.mutable_unknown_fields();
++  }
++
++  static const ::google::protobuf::Descriptor* descriptor();
++  static const OpVersionMap_OpVersionPair& default_instance();
++
++  static const OpVersionMap_OpVersionPair* internal_default_instance();
++
++  void Swap(OpVersionMap_OpVersionPair* other);
++
++  // implements Message ----------------------------------------------
++
++  inline OpVersionMap_OpVersionPair* New() const { return New(NULL); }
++
++  OpVersionMap_OpVersionPair* New(::google::protobuf::Arena* arena) const;
++  void CopyFrom(const ::google::protobuf::Message& from);
++  void MergeFrom(const ::google::protobuf::Message& from);
++  void CopyFrom(const OpVersionMap_OpVersionPair& from);
++  void MergeFrom(const OpVersionMap_OpVersionPair& from);
++  void Clear();
++  bool IsInitialized() const;
++
++  size_t ByteSizeLong() const;
++  bool MergePartialFromCodedStream(
++      ::google::protobuf::io::CodedInputStream* input);
++  void SerializeWithCachedSizes(
++      ::google::protobuf::io::CodedOutputStream* output) const;
++  ::google::protobuf::uint8* InternalSerializeWithCachedSizesToArray(
++      bool deterministic, ::google::protobuf::uint8* output) const;
++  ::google::protobuf::uint8* SerializeWithCachedSizesToArray(::google::protobuf::uint8* output) const {
++    return InternalSerializeWithCachedSizesToArray(false, output);
++  }
++  int GetCachedSize() const { return _cached_size_; }
++ private:
++  void SharedCtor();
++  void SharedDtor();
++  void SetCachedSize(int size) const;
++  void InternalSwap(OpVersionMap_OpVersionPair* other);
++  void UnsafeMergeFrom(const OpVersionMap_OpVersionPair& from);
++ private:
++  inline ::google::protobuf::Arena* GetArenaNoVirtual() const {
++    return _internal_metadata_.arena();
++  }
++  inline void* MaybeArenaPtr() const {
++    return _internal_metadata_.raw_arena_ptr();
++  }
++ public:
++
++  ::google::protobuf::Metadata GetMetadata() const;
++
++  // nested types ----------------------------------------------------
++
++  // accessors -------------------------------------------------------
++
++  // required string op_name = 1;
++  bool has_op_name() const;
++  void clear_op_name();
++  static const int kOpNameFieldNumber = 1;
++  const ::std::string& op_name() const;
++  void set_op_name(const ::std::string& value);
++  void set_op_name(const char* value);
++  void set_op_name(const char* value, size_t size);
++  ::std::string* mutable_op_name();
++  ::std::string* release_op_name();
++  void set_allocated_op_name(::std::string* op_name);
++
++  // required .paddle.framework.proto.OpVersion op_version = 2;
++  bool has_op_version() const;
++  void clear_op_version();
++  static const int kOpVersionFieldNumber = 2;
++  const ::paddle::framework::proto::OpVersion& op_version() const;
++  ::paddle::framework::proto::OpVersion* mutable_op_version();
++  ::paddle::framework::proto::OpVersion* release_op_version();
++  void set_allocated_op_version(::paddle::framework::proto::OpVersion* op_version);
++
++  // @@protoc_insertion_point(class_scope:paddle.framework.proto.OpVersionMap.OpVersionPair)
++ private:
++  inline void set_has_op_name();
++  inline void clear_has_op_name();
++  inline void set_has_op_version();
++  inline void clear_has_op_version();
++
++  // helper for ByteSizeLong()
++  size_t RequiredFieldsByteSizeFallback() const;
++
++  ::google::protobuf::internal::InternalMetadataWithArena _internal_metadata_;
++  ::google::protobuf::internal::HasBits<1> _has_bits_;
++  mutable int _cached_size_;
++  ::google::protobuf::internal::ArenaStringPtr op_name_;
++  ::paddle::framework::proto::OpVersion* op_version_;
++  friend void protobuf_InitDefaults_framework_2eproto_impl();
++  friend void protobuf_AddDesc_framework_2eproto_impl();
++  friend void protobuf_AssignDesc_framework_2eproto();
++  friend void protobuf_ShutdownFile_framework_2eproto();
++
++  void InitAsDefaultInstance();
++};
++extern ::google::protobuf::internal::ExplicitlyConstructed<OpVersionMap_OpVersionPair> OpVersionMap_OpVersionPair_default_instance_;
++
++// -------------------------------------------------------------------
++
++class OpVersionMap : public ::google::protobuf::Message /* @@protoc_insertion_point(class_definition:paddle.framework.proto.OpVersionMap) */ {
++ public:
++  OpVersionMap();
++  virtual ~OpVersionMap();
++
++  OpVersionMap(const OpVersionMap& from);
++
++  inline OpVersionMap& operator=(const OpVersionMap& from) {
++    CopyFrom(from);
++    return *this;
++  }
++
++  inline const ::google::protobuf::UnknownFieldSet& unknown_fields() const {
++    return _internal_metadata_.unknown_fields();
++  }
++
++  inline ::google::protobuf::UnknownFieldSet* mutable_unknown_fields() {
++    return _internal_metadata_.mutable_unknown_fields();
++  }
++
++  static const ::google::protobuf::Descriptor* descriptor();
++  static const OpVersionMap& default_instance();
++
++  static const OpVersionMap* internal_default_instance();
++
++  void Swap(OpVersionMap* other);
++
++  // implements Message ----------------------------------------------
++
++  inline OpVersionMap* New() const { return New(NULL); }
++
++  OpVersionMap* New(::google::protobuf::Arena* arena) const;
++  void CopyFrom(const ::google::protobuf::Message& from);
++  void MergeFrom(const ::google::protobuf::Message& from);
++  void CopyFrom(const OpVersionMap& from);
++  void MergeFrom(const OpVersionMap& from);
++  void Clear();
++  bool IsInitialized() const;
++
++  size_t ByteSizeLong() const;
++  bool MergePartialFromCodedStream(
++      ::google::protobuf::io::CodedInputStream* input);
++  void SerializeWithCachedSizes(
++      ::google::protobuf::io::CodedOutputStream* output) const;
++  ::google::protobuf::uint8* InternalSerializeWithCachedSizesToArray(
++      bool deterministic, ::google::protobuf::uint8* output) const;
++  ::google::protobuf::uint8* SerializeWithCachedSizesToArray(::google::protobuf::uint8* output) const {
++    return InternalSerializeWithCachedSizesToArray(false, output);
++  }
++  int GetCachedSize() const { return _cached_size_; }
++ private:
++  void SharedCtor();
++  void SharedDtor();
++  void SetCachedSize(int size) const;
++  void InternalSwap(OpVersionMap* other);
++  void UnsafeMergeFrom(const OpVersionMap& from);
++ private:
++  inline ::google::protobuf::Arena* GetArenaNoVirtual() const {
++    return _internal_metadata_.arena();
++  }
++  inline void* MaybeArenaPtr() const {
++    return _internal_metadata_.raw_arena_ptr();
++  }
++ public:
++
++  ::google::protobuf::Metadata GetMetadata() const;
++
++  // nested types ----------------------------------------------------
++
++  typedef OpVersionMap_OpVersionPair OpVersionPair;
++
++  // accessors -------------------------------------------------------
++
++  // repeated .paddle.framework.proto.OpVersionMap.OpVersionPair pair = 1;
++  int pair_size() const;
++  void clear_pair();
++  static const int kPairFieldNumber = 1;
++  const ::paddle::framework::proto::OpVersionMap_OpVersionPair& pair(int index) const;
++  ::paddle::framework::proto::OpVersionMap_OpVersionPair* mutable_pair(int index);
++  ::paddle::framework::proto::OpVersionMap_OpVersionPair* add_pair();
++  ::google::protobuf::RepeatedPtrField< ::paddle::framework::proto::OpVersionMap_OpVersionPair >*
++      mutable_pair();
++  const ::google::protobuf::RepeatedPtrField< ::paddle::framework::proto::OpVersionMap_OpVersionPair >&
++      pair() const;
++
++  // @@protoc_insertion_point(class_scope:paddle.framework.proto.OpVersionMap)
++ private:
++
++  ::google::protobuf::internal::InternalMetadataWithArena _internal_metadata_;
++  ::google::protobuf::internal::HasBits<1> _has_bits_;
++  mutable int _cached_size_;
++  ::google::protobuf::RepeatedPtrField< ::paddle::framework::proto::OpVersionMap_OpVersionPair > pair_;
++  friend void protobuf_InitDefaults_framework_2eproto_impl();
++  friend void protobuf_AddDesc_framework_2eproto_impl();
++  friend void protobuf_AssignDesc_framework_2eproto();
++  friend void protobuf_ShutdownFile_framework_2eproto();
++
++  void InitAsDefaultInstance();
++};
++extern ::google::protobuf::internal::ExplicitlyConstructed<OpVersionMap> OpVersionMap_default_instance_;
++
++// -------------------------------------------------------------------
++
++class ProgramDesc : public ::google::protobuf::Message /* @@protoc_insertion_point(class_definition:paddle.framework.proto.ProgramDesc) */ {
++ public:
++  ProgramDesc();
++  virtual ~ProgramDesc();
++
++  ProgramDesc(const ProgramDesc& from);
++
++  inline ProgramDesc& operator=(const ProgramDesc& from) {
++    CopyFrom(from);
++    return *this;
++  }
++
++  inline const ::google::protobuf::UnknownFieldSet& unknown_fields() const {
++    return _internal_metadata_.unknown_fields();
++  }
++
++  inline ::google::protobuf::UnknownFieldSet* mutable_unknown_fields() {
++    return _internal_metadata_.mutable_unknown_fields();
++  }
++
++  static const ::google::protobuf::Descriptor* descriptor();
++  static const ProgramDesc& default_instance();
++
++  static const ProgramDesc* internal_default_instance();
++
++  void Swap(ProgramDesc* other);
++
++  // implements Message ----------------------------------------------
++
++  inline ProgramDesc* New() const { return New(NULL); }
++
++  ProgramDesc* New(::google::protobuf::Arena* arena) const;
++  void CopyFrom(const ::google::protobuf::Message& from);
++  void MergeFrom(const ::google::protobuf::Message& from);
++  void CopyFrom(const ProgramDesc& from);
++  void MergeFrom(const ProgramDesc& from);
++  void Clear();
++  bool IsInitialized() const;
++
++  size_t ByteSizeLong() const;
++  bool MergePartialFromCodedStream(
++      ::google::protobuf::io::CodedInputStream* input);
++  void SerializeWithCachedSizes(
++      ::google::protobuf::io::CodedOutputStream* output) const;
++  ::google::protobuf::uint8* InternalSerializeWithCachedSizesToArray(
++      bool deterministic, ::google::protobuf::uint8* output) const;
++  ::google::protobuf::uint8* SerializeWithCachedSizesToArray(::google::protobuf::uint8* output) const {
++    return InternalSerializeWithCachedSizesToArray(false, output);
++  }
++  int GetCachedSize() const { return _cached_size_; }
++ private:
++  void SharedCtor();
++  void SharedDtor();
++  void SetCachedSize(int size) const;
++  void InternalSwap(ProgramDesc* other);
++  void UnsafeMergeFrom(const ProgramDesc& from);
++ private:
++  inline ::google::protobuf::Arena* GetArenaNoVirtual() const {
++    return _internal_metadata_.arena();
++  }
++  inline void* MaybeArenaPtr() const {
++    return _internal_metadata_.raw_arena_ptr();
++  }
++ public:
++
++  ::google::protobuf::Metadata GetMetadata() const;
++
++  // nested types ----------------------------------------------------
++
++  // accessors -------------------------------------------------------
++
++  // repeated .paddle.framework.proto.BlockDesc blocks = 1;
++  int blocks_size() const;
++  void clear_blocks();
++  static const int kBlocksFieldNumber = 1;
++  const ::paddle::framework::proto::BlockDesc& blocks(int index) const;
++  ::paddle::framework::proto::BlockDesc* mutable_blocks(int index);
++  ::paddle::framework::proto::BlockDesc* add_blocks();
++  ::google::protobuf::RepeatedPtrField< ::paddle::framework::proto::BlockDesc >*
++      mutable_blocks();
++  const ::google::protobuf::RepeatedPtrField< ::paddle::framework::proto::BlockDesc >&
++      blocks() const;
++
++  // optional .paddle.framework.proto.Version version = 4;
++  bool has_version() const;
++  void clear_version();
++  static const int kVersionFieldNumber = 4;
++  const ::paddle::framework::proto::Version& version() const;
++  ::paddle::framework::proto::Version* mutable_version();
++  ::paddle::framework::proto::Version* release_version();
++  void set_allocated_version(::paddle::framework::proto::Version* version);
++
++  // optional .paddle.framework.proto.OpVersionMap op_version_map = 5;
++  bool has_op_version_map() const;
++  void clear_op_version_map();
++  static const int kOpVersionMapFieldNumber = 5;
++  const ::paddle::framework::proto::OpVersionMap& op_version_map() const;
++  ::paddle::framework::proto::OpVersionMap* mutable_op_version_map();
++  ::paddle::framework::proto::OpVersionMap* release_op_version_map();
++  void set_allocated_op_version_map(::paddle::framework::proto::OpVersionMap* op_version_map);
++
++  // @@protoc_insertion_point(class_scope:paddle.framework.proto.ProgramDesc)
++ private:
++  inline void set_has_version();
++  inline void clear_has_version();
++  inline void set_has_op_version_map();
++  inline void clear_has_op_version_map();
++
++  ::google::protobuf::internal::InternalMetadataWithArena _internal_metadata_;
++  ::google::protobuf::internal::HasBits<1> _has_bits_;
++  mutable int _cached_size_;
++  ::google::protobuf::RepeatedPtrField< ::paddle::framework::proto::BlockDesc > blocks_;
++  ::paddle::framework::proto::Version* version_;
++  ::paddle::framework::proto::OpVersionMap* op_version_map_;
++  friend void protobuf_InitDefaults_framework_2eproto_impl();
++  friend void protobuf_AddDesc_framework_2eproto_impl();
++  friend void protobuf_AssignDesc_framework_2eproto();
++  friend void protobuf_ShutdownFile_framework_2eproto();
++
++  void InitAsDefaultInstance();
++};
++extern ::google::protobuf::internal::ExplicitlyConstructed<ProgramDesc> ProgramDesc_default_instance_;
++
++// ===================================================================
++
++
++// ===================================================================
++
++#if !PROTOBUF_INLINE_NOT_IN_HEADERS
++// Version
++
++// optional int64 version = 1 [default = 0];
++inline bool Version::has_version() const {
++  return (_has_bits_[0] & 0x00000001u) != 0;
++}
++inline void Version::set_has_version() {
++  _has_bits_[0] |= 0x00000001u;
++}
++inline void Version::clear_has_version() {
++  _has_bits_[0] &= ~0x00000001u;
++}
++inline void Version::clear_version() {
++  version_ = GOOGLE_LONGLONG(0);
++  clear_has_version();
++}
++inline ::google::protobuf::int64 Version::version() const {
++  // @@protoc_insertion_point(field_get:paddle.framework.proto.Version.version)
++  return version_;
++}
++inline void Version::set_version(::google::protobuf::int64 value) {
++  set_has_version();
++  version_ = value;
++  // @@protoc_insertion_point(field_set:paddle.framework.proto.Version.version)
++}
++
++inline const Version* Version::internal_default_instance() {
++  return &Version_default_instance_.get();
++}
++// -------------------------------------------------------------------
++
++// OpDesc_Attr
++
++// required string name = 1;
++inline bool OpDesc_Attr::has_name() const {
++  return (_has_bits_[0] & 0x00000001u) != 0;
++}
++inline void OpDesc_Attr::set_has_name() {
++  _has_bits_[0] |= 0x00000001u;
++}
++inline void OpDesc_Attr::clear_has_name() {
++  _has_bits_[0] &= ~0x00000001u;
++}
++inline void OpDesc_Attr::clear_name() {
++  name_.ClearToEmptyNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());
++  clear_has_name();
++}
++inline const ::std::string& OpDesc_Attr::name() const {
++  // @@protoc_insertion_point(field_get:paddle.framework.proto.OpDesc.Attr.name)
++  return name_.GetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());
++}
++inline void OpDesc_Attr::set_name(const ::std::string& value) {
++  set_has_name();
++  name_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), value);
++  // @@protoc_insertion_point(field_set:paddle.framework.proto.OpDesc.Attr.name)
++}
++inline void OpDesc_Attr::set_name(const char* value) {
++  set_has_name();
++  name_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), ::std::string(value));
++  // @@protoc_insertion_point(field_set_char:paddle.framework.proto.OpDesc.Attr.name)
++}
++inline void OpDesc_Attr::set_name(const char* value, size_t size) {
++  set_has_name();
++  name_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(),
++      ::std::string(reinterpret_cast<const char*>(value), size));
++  // @@protoc_insertion_point(field_set_pointer:paddle.framework.proto.OpDesc.Attr.name)
++}
++inline ::std::string* OpDesc_Attr::mutable_name() {
++  set_has_name();
++  // @@protoc_insertion_point(field_mutable:paddle.framework.proto.OpDesc.Attr.name)
++  return name_.MutableNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());
++}
++inline ::std::string* OpDesc_Attr::release_name() {
++  // @@protoc_insertion_point(field_release:paddle.framework.proto.OpDesc.Attr.name)
++  clear_has_name();
++  return name_.ReleaseNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());
++}
++inline void OpDesc_Attr::set_allocated_name(::std::string* name) {
++  if (name != NULL) {
++    set_has_name();
++  } else {
++    clear_has_name();
++  }
++  name_.SetAllocatedNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), name);
++  // @@protoc_insertion_point(field_set_allocated:paddle.framework.proto.OpDesc.Attr.name)
++}
++
++// required .paddle.framework.proto.AttrType type = 2;
++inline bool OpDesc_Attr::has_type() const {
++  return (_has_bits_[0] & 0x00000002u) != 0;
++}
++inline void OpDesc_Attr::set_has_type() {
++  _has_bits_[0] |= 0x00000002u;
++}
++inline void OpDesc_Attr::clear_has_type() {
++  _has_bits_[0] &= ~0x00000002u;
++}
++inline void OpDesc_Attr::clear_type() {
++  type_ = 0;
++  clear_has_type();
++}
++inline ::paddle::framework::proto::AttrType OpDesc_Attr::type() const {
++  // @@protoc_insertion_point(field_get:paddle.framework.proto.OpDesc.Attr.type)
++  return static_cast< ::paddle::framework::proto::AttrType >(type_);
++}
++inline void OpDesc_Attr::set_type(::paddle::framework::proto::AttrType value) {
++  assert(::paddle::framework::proto::AttrType_IsValid(value));
++  set_has_type();
++  type_ = value;
++  // @@protoc_insertion_point(field_set:paddle.framework.proto.OpDesc.Attr.type)
++}
++
++// optional int32 i = 3;
++inline bool OpDesc_Attr::has_i() const {
++  return (_has_bits_[0] & 0x00000004u) != 0;
++}
++inline void OpDesc_Attr::set_has_i() {
++  _has_bits_[0] |= 0x00000004u;
++}
++inline void OpDesc_Attr::clear_has_i() {
++  _has_bits_[0] &= ~0x00000004u;
++}
++inline void OpDesc_Attr::clear_i() {
++  i_ = 0;
++  clear_has_i();
++}
++inline ::google::protobuf::int32 OpDesc_Attr::i() const {
++  // @@protoc_insertion_point(field_get:paddle.framework.proto.OpDesc.Attr.i)
++  return i_;
++}
++inline void OpDesc_Attr::set_i(::google::protobuf::int32 value) {
++  set_has_i();
++  i_ = value;
++  // @@protoc_insertion_point(field_set:paddle.framework.proto.OpDesc.Attr.i)
++}
++
++// optional float f = 4;
++inline bool OpDesc_Attr::has_f() const {
++  return (_has_bits_[0] & 0x00000008u) != 0;
++}
++inline void OpDesc_Attr::set_has_f() {
++  _has_bits_[0] |= 0x00000008u;
++}
++inline void OpDesc_Attr::clear_has_f() {
++  _has_bits_[0] &= ~0x00000008u;
++}
++inline void OpDesc_Attr::clear_f() {
++  f_ = 0;
++  clear_has_f();
++}
++inline float OpDesc_Attr::f() const {
++  // @@protoc_insertion_point(field_get:paddle.framework.proto.OpDesc.Attr.f)
++  return f_;
++}
++inline void OpDesc_Attr::set_f(float value) {
++  set_has_f();
++  f_ = value;
++  // @@protoc_insertion_point(field_set:paddle.framework.proto.OpDesc.Attr.f)
++}
++
++// optional string s = 5;
++inline bool OpDesc_Attr::has_s() const {
++  return (_has_bits_[0] & 0x00000010u) != 0;
++}
++inline void OpDesc_Attr::set_has_s() {
++  _has_bits_[0] |= 0x00000010u;
++}
++inline void OpDesc_Attr::clear_has_s() {
++  _has_bits_[0] &= ~0x00000010u;
++}
++inline void OpDesc_Attr::clear_s() {
++  s_.ClearToEmptyNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());
++  clear_has_s();
++}
++inline const ::std::string& OpDesc_Attr::s() const {
++  // @@protoc_insertion_point(field_get:paddle.framework.proto.OpDesc.Attr.s)
++  return s_.GetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());
++}
++inline void OpDesc_Attr::set_s(const ::std::string& value) {
++  set_has_s();
++  s_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), value);
++  // @@protoc_insertion_point(field_set:paddle.framework.proto.OpDesc.Attr.s)
++}
++inline void OpDesc_Attr::set_s(const char* value) {
++  set_has_s();
++  s_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), ::std::string(value));
++  // @@protoc_insertion_point(field_set_char:paddle.framework.proto.OpDesc.Attr.s)
++}
++inline void OpDesc_Attr::set_s(const char* value, size_t size) {
++  set_has_s();
++  s_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(),
++      ::std::string(reinterpret_cast<const char*>(value), size));
++  // @@protoc_insertion_point(field_set_pointer:paddle.framework.proto.OpDesc.Attr.s)
++}
++inline ::std::string* OpDesc_Attr::mutable_s() {
++  set_has_s();
++  // @@protoc_insertion_point(field_mutable:paddle.framework.proto.OpDesc.Attr.s)
++  return s_.MutableNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());
++}
++inline ::std::string* OpDesc_Attr::release_s() {
++  // @@protoc_insertion_point(field_release:paddle.framework.proto.OpDesc.Attr.s)
++  clear_has_s();
++  return s_.ReleaseNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());
++}
++inline void OpDesc_Attr::set_allocated_s(::std::string* s) {
++  if (s != NULL) {
++    set_has_s();
++  } else {
++    clear_has_s();
++  }
++  s_.SetAllocatedNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), s);
++  // @@protoc_insertion_point(field_set_allocated:paddle.framework.proto.OpDesc.Attr.s)
++}
++
++// repeated int32 ints = 6;
++inline int OpDesc_Attr::ints_size() const {
++  return ints_.size();
++}
++inline void OpDesc_Attr::clear_ints() {
++  ints_.Clear();
++}
++inline ::google::protobuf::int32 OpDesc_Attr::ints(int index) const {
++  // @@protoc_insertion_point(field_get:paddle.framework.proto.OpDesc.Attr.ints)
++  return ints_.Get(index);
++}
++inline void OpDesc_Attr::set_ints(int index, ::google::protobuf::int32 value) {
++  ints_.Set(index, value);
++  // @@protoc_insertion_point(field_set:paddle.framework.proto.OpDesc.Attr.ints)
++}
++inline void OpDesc_Attr::add_ints(::google::protobuf::int32 value) {
++  ints_.Add(value);
++  // @@protoc_insertion_point(field_add:paddle.framework.proto.OpDesc.Attr.ints)
++}
++inline const ::google::protobuf::RepeatedField< ::google::protobuf::int32 >&
++OpDesc_Attr::ints() const {
++  // @@protoc_insertion_point(field_list:paddle.framework.proto.OpDesc.Attr.ints)
++  return ints_;
++}
++inline ::google::protobuf::RepeatedField< ::google::protobuf::int32 >*
++OpDesc_Attr::mutable_ints() {
++  // @@protoc_insertion_point(field_mutable_list:paddle.framework.proto.OpDesc.Attr.ints)
++  return &ints_;
++}
++
++// repeated float floats = 7;
++inline int OpDesc_Attr::floats_size() const {
++  return floats_.size();
++}
++inline void OpDesc_Attr::clear_floats() {
++  floats_.Clear();
++}
++inline float OpDesc_Attr::floats(int index) const {
++  // @@protoc_insertion_point(field_get:paddle.framework.proto.OpDesc.Attr.floats)
++  return floats_.Get(index);
++}
++inline void OpDesc_Attr::set_floats(int index, float value) {
++  floats_.Set(index, value);
++  // @@protoc_insertion_point(field_set:paddle.framework.proto.OpDesc.Attr.floats)
++}
++inline void OpDesc_Attr::add_floats(float value) {
++  floats_.Add(value);
++  // @@protoc_insertion_point(field_add:paddle.framework.proto.OpDesc.Attr.floats)
++}
++inline const ::google::protobuf::RepeatedField< float >&
++OpDesc_Attr::floats() const {
++  // @@protoc_insertion_point(field_list:paddle.framework.proto.OpDesc.Attr.floats)
++  return floats_;
++}
++inline ::google::protobuf::RepeatedField< float >*
++OpDesc_Attr::mutable_floats() {
++  // @@protoc_insertion_point(field_mutable_list:paddle.framework.proto.OpDesc.Attr.floats)
++  return &floats_;
++}
++
++// repeated string strings = 8;
++inline int OpDesc_Attr::strings_size() const {
++  return strings_.size();
++}
++inline void OpDesc_Attr::clear_strings() {
++  strings_.Clear();
++}
++inline const ::std::string& OpDesc_Attr::strings(int index) const {
++  // @@protoc_insertion_point(field_get:paddle.framework.proto.OpDesc.Attr.strings)
++  return strings_.Get(index);
++}
++inline ::std::string* OpDesc_Attr::mutable_strings(int index) {
++  // @@protoc_insertion_point(field_mutable:paddle.framework.proto.OpDesc.Attr.strings)
++  return strings_.Mutable(index);
++}
++inline void OpDesc_Attr::set_strings(int index, const ::std::string& value) {
++  // @@protoc_insertion_point(field_set:paddle.framework.proto.OpDesc.Attr.strings)
++  strings_.Mutable(index)->assign(value);
++}
++inline void OpDesc_Attr::set_strings(int index, const char* value) {
++  strings_.Mutable(index)->assign(value);
++  // @@protoc_insertion_point(field_set_char:paddle.framework.proto.OpDesc.Attr.strings)
++}
++inline void OpDesc_Attr::set_strings(int index, const char* value, size_t size) {
++  strings_.Mutable(index)->assign(
++      reinterpret_cast<const char*>(value), size);
++  // @@protoc_insertion_point(field_set_pointer:paddle.framework.proto.OpDesc.Attr.strings)
++}
++inline ::std::string* OpDesc_Attr::add_strings() {
++  // @@protoc_insertion_point(field_add_mutable:paddle.framework.proto.OpDesc.Attr.strings)
++  return strings_.Add();
++}
++inline void OpDesc_Attr::add_strings(const ::std::string& value) {
++  strings_.Add()->assign(value);
++  // @@protoc_insertion_point(field_add:paddle.framework.proto.OpDesc.Attr.strings)
++}
++inline void OpDesc_Attr::add_strings(const char* value) {
++  strings_.Add()->assign(value);
++  // @@protoc_insertion_point(field_add_char:paddle.framework.proto.OpDesc.Attr.strings)
++}
++inline void OpDesc_Attr::add_strings(const char* value, size_t size) {
++  strings_.Add()->assign(reinterpret_cast<const char*>(value), size);
++  // @@protoc_insertion_point(field_add_pointer:paddle.framework.proto.OpDesc.Attr.strings)
++}
++inline const ::google::protobuf::RepeatedPtrField< ::std::string>&
++OpDesc_Attr::strings() const {
++  // @@protoc_insertion_point(field_list:paddle.framework.proto.OpDesc.Attr.strings)
++  return strings_;
++}
++inline ::google::protobuf::RepeatedPtrField< ::std::string>*
++OpDesc_Attr::mutable_strings() {
++  // @@protoc_insertion_point(field_mutable_list:paddle.framework.proto.OpDesc.Attr.strings)
++  return &strings_;
++}
++
++// optional bool b = 10;
++inline bool OpDesc_Attr::has_b() const {
++  return (_has_bits_[0] & 0x00000100u) != 0;
++}
++inline void OpDesc_Attr::set_has_b() {
++  _has_bits_[0] |= 0x00000100u;
++}
++inline void OpDesc_Attr::clear_has_b() {
++  _has_bits_[0] &= ~0x00000100u;
++}
++inline void OpDesc_Attr::clear_b() {
++  b_ = false;
++  clear_has_b();
++}
++inline bool OpDesc_Attr::b() const {
++  // @@protoc_insertion_point(field_get:paddle.framework.proto.OpDesc.Attr.b)
++  return b_;
++}
++inline void OpDesc_Attr::set_b(bool value) {
++  set_has_b();
++  b_ = value;
++  // @@protoc_insertion_point(field_set:paddle.framework.proto.OpDesc.Attr.b)
++}
++
++// repeated bool bools = 11;
++inline int OpDesc_Attr::bools_size() const {
++  return bools_.size();
++}
++inline void OpDesc_Attr::clear_bools() {
++  bools_.Clear();
++}
++inline bool OpDesc_Attr::bools(int index) const {
++  // @@protoc_insertion_point(field_get:paddle.framework.proto.OpDesc.Attr.bools)
++  return bools_.Get(index);
++}
++inline void OpDesc_Attr::set_bools(int index, bool value) {
++  bools_.Set(index, value);
++  // @@protoc_insertion_point(field_set:paddle.framework.proto.OpDesc.Attr.bools)
++}
++inline void OpDesc_Attr::add_bools(bool value) {
++  bools_.Add(value);
++  // @@protoc_insertion_point(field_add:paddle.framework.proto.OpDesc.Attr.bools)
++}
++inline const ::google::protobuf::RepeatedField< bool >&
++OpDesc_Attr::bools() const {
++  // @@protoc_insertion_point(field_list:paddle.framework.proto.OpDesc.Attr.bools)
++  return bools_;
++}
++inline ::google::protobuf::RepeatedField< bool >*
++OpDesc_Attr::mutable_bools() {
++  // @@protoc_insertion_point(field_mutable_list:paddle.framework.proto.OpDesc.Attr.bools)
++  return &bools_;
++}
++
++// optional int32 block_idx = 12;
++inline bool OpDesc_Attr::has_block_idx() const {
++  return (_has_bits_[0] & 0x00000400u) != 0;
++}
++inline void OpDesc_Attr::set_has_block_idx() {
++  _has_bits_[0] |= 0x00000400u;
++}
++inline void OpDesc_Attr::clear_has_block_idx() {
++  _has_bits_[0] &= ~0x00000400u;
++}
++inline void OpDesc_Attr::clear_block_idx() {
++  block_idx_ = 0;
++  clear_has_block_idx();
++}
++inline ::google::protobuf::int32 OpDesc_Attr::block_idx() const {
++  // @@protoc_insertion_point(field_get:paddle.framework.proto.OpDesc.Attr.block_idx)
++  return block_idx_;
++}
++inline void OpDesc_Attr::set_block_idx(::google::protobuf::int32 value) {
++ set_has_block_idx(); ++ block_idx_ = value; ++ // @@protoc_insertion_point(field_set:paddle.framework.proto.OpDesc.Attr.block_idx) ++} ++ ++// optional int64 l = 13; ++inline bool OpDesc_Attr::has_l() const { ++ return (_has_bits_[0] & 0x00000800u) != 0; ++} ++inline void OpDesc_Attr::set_has_l() { ++ _has_bits_[0] |= 0x00000800u; ++} ++inline void OpDesc_Attr::clear_has_l() { ++ _has_bits_[0] &= ~0x00000800u; ++} ++inline void OpDesc_Attr::clear_l() { ++ l_ = GOOGLE_LONGLONG(0); ++ clear_has_l(); ++} ++inline ::google::protobuf::int64 OpDesc_Attr::l() const { ++ // @@protoc_insertion_point(field_get:paddle.framework.proto.OpDesc.Attr.l) ++ return l_; ++} ++inline void OpDesc_Attr::set_l(::google::protobuf::int64 value) { ++ set_has_l(); ++ l_ = value; ++ // @@protoc_insertion_point(field_set:paddle.framework.proto.OpDesc.Attr.l) ++} ++ ++// repeated int32 blocks_idx = 14; ++inline int OpDesc_Attr::blocks_idx_size() const { ++ return blocks_idx_.size(); ++} ++inline void OpDesc_Attr::clear_blocks_idx() { ++ blocks_idx_.Clear(); ++} ++inline ::google::protobuf::int32 OpDesc_Attr::blocks_idx(int index) const { ++ // @@protoc_insertion_point(field_get:paddle.framework.proto.OpDesc.Attr.blocks_idx) ++ return blocks_idx_.Get(index); ++} ++inline void OpDesc_Attr::set_blocks_idx(int index, ::google::protobuf::int32 value) { ++ blocks_idx_.Set(index, value); ++ // @@protoc_insertion_point(field_set:paddle.framework.proto.OpDesc.Attr.blocks_idx) ++} ++inline void OpDesc_Attr::add_blocks_idx(::google::protobuf::int32 value) { ++ blocks_idx_.Add(value); ++ // @@protoc_insertion_point(field_add:paddle.framework.proto.OpDesc.Attr.blocks_idx) ++} ++inline const ::google::protobuf::RepeatedField< ::google::protobuf::int32 >& ++OpDesc_Attr::blocks_idx() const { ++ // @@protoc_insertion_point(field_list:paddle.framework.proto.OpDesc.Attr.blocks_idx) ++ return blocks_idx_; ++} ++inline ::google::protobuf::RepeatedField< ::google::protobuf::int32 >* ++OpDesc_Attr::mutable_blocks_idx() { ++ // @@protoc_insertion_point(field_mutable_list:paddle.framework.proto.OpDesc.Attr.blocks_idx) ++ return &blocks_idx_; ++} ++ ++// repeated int64 longs = 15; ++inline int OpDesc_Attr::longs_size() const { ++ return longs_.size(); ++} ++inline void OpDesc_Attr::clear_longs() { ++ longs_.Clear(); ++} ++inline ::google::protobuf::int64 OpDesc_Attr::longs(int index) const { ++ // @@protoc_insertion_point(field_get:paddle.framework.proto.OpDesc.Attr.longs) ++ return longs_.Get(index); ++} ++inline void OpDesc_Attr::set_longs(int index, ::google::protobuf::int64 value) { ++ longs_.Set(index, value); ++ // @@protoc_insertion_point(field_set:paddle.framework.proto.OpDesc.Attr.longs) ++} ++inline void OpDesc_Attr::add_longs(::google::protobuf::int64 value) { ++ longs_.Add(value); ++ // @@protoc_insertion_point(field_add:paddle.framework.proto.OpDesc.Attr.longs) ++} ++inline const ::google::protobuf::RepeatedField< ::google::protobuf::int64 >& ++OpDesc_Attr::longs() const { ++ // @@protoc_insertion_point(field_list:paddle.framework.proto.OpDesc.Attr.longs) ++ return longs_; ++} ++inline ::google::protobuf::RepeatedField< ::google::protobuf::int64 >* ++OpDesc_Attr::mutable_longs() { ++ // @@protoc_insertion_point(field_mutable_list:paddle.framework.proto.OpDesc.Attr.longs) ++ return &longs_; ++} ++ ++// repeated double float64s = 16; ++inline int OpDesc_Attr::float64s_size() const { ++ return float64s_.size(); ++} ++inline void OpDesc_Attr::clear_float64s() { ++ float64s_.Clear(); ++} ++inline double OpDesc_Attr::float64s(int 
index) const { ++ // @@protoc_insertion_point(field_get:paddle.framework.proto.OpDesc.Attr.float64s) ++ return float64s_.Get(index); ++} ++inline void OpDesc_Attr::set_float64s(int index, double value) { ++ float64s_.Set(index, value); ++ // @@protoc_insertion_point(field_set:paddle.framework.proto.OpDesc.Attr.float64s) ++} ++inline void OpDesc_Attr::add_float64s(double value) { ++ float64s_.Add(value); ++ // @@protoc_insertion_point(field_add:paddle.framework.proto.OpDesc.Attr.float64s) ++} ++inline const ::google::protobuf::RepeatedField< double >& ++OpDesc_Attr::float64s() const { ++ // @@protoc_insertion_point(field_list:paddle.framework.proto.OpDesc.Attr.float64s) ++ return float64s_; ++} ++inline ::google::protobuf::RepeatedField< double >* ++OpDesc_Attr::mutable_float64s() { ++ // @@protoc_insertion_point(field_mutable_list:paddle.framework.proto.OpDesc.Attr.float64s) ++ return &float64s_; ++} ++ ++inline const OpDesc_Attr* OpDesc_Attr::internal_default_instance() { ++ return &OpDesc_Attr_default_instance_.get(); ++} ++// ------------------------------------------------------------------- ++ ++// OpDesc_Var ++ ++// required string parameter = 1; ++inline bool OpDesc_Var::has_parameter() const { ++ return (_has_bits_[0] & 0x00000001u) != 0; ++} ++inline void OpDesc_Var::set_has_parameter() { ++ _has_bits_[0] |= 0x00000001u; ++} ++inline void OpDesc_Var::clear_has_parameter() { ++ _has_bits_[0] &= ~0x00000001u; ++} ++inline void OpDesc_Var::clear_parameter() { ++ parameter_.ClearToEmptyNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited()); ++ clear_has_parameter(); ++} ++inline const ::std::string& OpDesc_Var::parameter() const { ++ // @@protoc_insertion_point(field_get:paddle.framework.proto.OpDesc.Var.parameter) ++ return parameter_.GetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited()); ++} ++inline void OpDesc_Var::set_parameter(const ::std::string& value) { ++ set_has_parameter(); ++ parameter_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), value); ++ // @@protoc_insertion_point(field_set:paddle.framework.proto.OpDesc.Var.parameter) ++} ++inline void OpDesc_Var::set_parameter(const char* value) { ++ set_has_parameter(); ++ parameter_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), ::std::string(value)); ++ // @@protoc_insertion_point(field_set_char:paddle.framework.proto.OpDesc.Var.parameter) ++} ++inline void OpDesc_Var::set_parameter(const char* value, size_t size) { ++ set_has_parameter(); ++ parameter_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), ++ ::std::string(reinterpret_cast<const char*>(value), size)); ++ // @@protoc_insertion_point(field_set_pointer:paddle.framework.proto.OpDesc.Var.parameter) ++} ++inline ::std::string* OpDesc_Var::mutable_parameter() { ++ set_has_parameter(); ++ // @@protoc_insertion_point(field_mutable:paddle.framework.proto.OpDesc.Var.parameter) ++ return parameter_.MutableNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited()); ++} ++inline ::std::string* OpDesc_Var::release_parameter() { ++ // @@protoc_insertion_point(field_release:paddle.framework.proto.OpDesc.Var.parameter) ++ clear_has_parameter(); ++ return parameter_.ReleaseNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited()); ++} ++inline void OpDesc_Var::set_allocated_parameter(::std::string* parameter) { ++ if (parameter != NULL) { ++ set_has_parameter(); ++ } else { ++ clear_has_parameter(); ++ } ++ 
parameter_.SetAllocatedNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), parameter); ++ // @@protoc_insertion_point(field_set_allocated:paddle.framework.proto.OpDesc.Var.parameter) ++} ++ ++// repeated string arguments = 2; ++inline int OpDesc_Var::arguments_size() const { ++ return arguments_.size(); ++} ++inline void OpDesc_Var::clear_arguments() { ++ arguments_.Clear(); ++} ++inline const ::std::string& OpDesc_Var::arguments(int index) const { ++ // @@protoc_insertion_point(field_get:paddle.framework.proto.OpDesc.Var.arguments) ++ return arguments_.Get(index); ++} ++inline ::std::string* OpDesc_Var::mutable_arguments(int index) { ++ // @@protoc_insertion_point(field_mutable:paddle.framework.proto.OpDesc.Var.arguments) ++ return arguments_.Mutable(index); ++} ++inline void OpDesc_Var::set_arguments(int index, const ::std::string& value) { ++ // @@protoc_insertion_point(field_set:paddle.framework.proto.OpDesc.Var.arguments) ++ arguments_.Mutable(index)->assign(value); ++} ++inline void OpDesc_Var::set_arguments(int index, const char* value) { ++ arguments_.Mutable(index)->assign(value); ++ // @@protoc_insertion_point(field_set_char:paddle.framework.proto.OpDesc.Var.arguments) ++} ++inline void OpDesc_Var::set_arguments(int index, const char* value, size_t size) { ++ arguments_.Mutable(index)->assign( ++ reinterpret_cast<const char*>(value), size); ++ // @@protoc_insertion_point(field_set_pointer:paddle.framework.proto.OpDesc.Var.arguments) ++} ++inline ::std::string* OpDesc_Var::add_arguments() { ++ // @@protoc_insertion_point(field_add_mutable:paddle.framework.proto.OpDesc.Var.arguments) ++ return arguments_.Add(); ++} ++inline void OpDesc_Var::add_arguments(const ::std::string& value) { ++ arguments_.Add()->assign(value); ++ // @@protoc_insertion_point(field_add:paddle.framework.proto.OpDesc.Var.arguments) ++} ++inline void OpDesc_Var::add_arguments(const char* value) { ++ arguments_.Add()->assign(value); ++ // @@protoc_insertion_point(field_add_char:paddle.framework.proto.OpDesc.Var.arguments) ++} ++inline void OpDesc_Var::add_arguments(const char* value, size_t size) { ++ arguments_.Add()->assign(reinterpret_cast<const char*>(value), size); ++ // @@protoc_insertion_point(field_add_pointer:paddle.framework.proto.OpDesc.Var.arguments) ++} ++inline const ::google::protobuf::RepeatedPtrField< ::std::string>& ++OpDesc_Var::arguments() const { ++ // @@protoc_insertion_point(field_list:paddle.framework.proto.OpDesc.Var.arguments) ++ return arguments_; ++} ++inline ::google::protobuf::RepeatedPtrField< ::std::string>* ++OpDesc_Var::mutable_arguments() { ++ // @@protoc_insertion_point(field_mutable_list:paddle.framework.proto.OpDesc.Var.arguments) ++ return &arguments_; ++} ++ ++inline const OpDesc_Var* OpDesc_Var::internal_default_instance() { ++ return &OpDesc_Var_default_instance_.get(); ++} ++// ------------------------------------------------------------------- ++ ++// OpDesc ++ ++// required string type = 3; ++inline bool OpDesc::has_type() const { ++ return (_has_bits_[0] & 0x00000001u) != 0; ++} ++inline void OpDesc::set_has_type() { ++ _has_bits_[0] |= 0x00000001u; ++} ++inline void OpDesc::clear_has_type() { ++ _has_bits_[0] &= ~0x00000001u; ++} ++inline void OpDesc::clear_type() { ++ type_.ClearToEmptyNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited()); ++ clear_has_type(); ++} ++inline const ::std::string& OpDesc::type() const { ++ // @@protoc_insertion_point(field_get:paddle.framework.proto.OpDesc.type) ++ return 
type_.GetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited()); ++} ++inline void OpDesc::set_type(const ::std::string& value) { ++ set_has_type(); ++ type_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), value); ++ // @@protoc_insertion_point(field_set:paddle.framework.proto.OpDesc.type) ++} ++inline void OpDesc::set_type(const char* value) { ++ set_has_type(); ++ type_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), ::std::string(value)); ++ // @@protoc_insertion_point(field_set_char:paddle.framework.proto.OpDesc.type) ++} ++inline void OpDesc::set_type(const char* value, size_t size) { ++ set_has_type(); ++ type_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), ++ ::std::string(reinterpret_cast<const char*>(value), size)); ++ // @@protoc_insertion_point(field_set_pointer:paddle.framework.proto.OpDesc.type) ++} ++inline ::std::string* OpDesc::mutable_type() { ++ set_has_type(); ++ // @@protoc_insertion_point(field_mutable:paddle.framework.proto.OpDesc.type) ++ return type_.MutableNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited()); ++} ++inline ::std::string* OpDesc::release_type() { ++ // @@protoc_insertion_point(field_release:paddle.framework.proto.OpDesc.type) ++ clear_has_type(); ++ return type_.ReleaseNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited()); ++} ++inline void OpDesc::set_allocated_type(::std::string* type) { ++ if (type != NULL) { ++ set_has_type(); ++ } else { ++ clear_has_type(); ++ } ++ type_.SetAllocatedNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), type); ++ // @@protoc_insertion_point(field_set_allocated:paddle.framework.proto.OpDesc.type) ++} ++ ++// repeated .paddle.framework.proto.OpDesc.Var inputs = 1; ++inline int OpDesc::inputs_size() const { ++ return inputs_.size(); ++} ++inline void OpDesc::clear_inputs() { ++ inputs_.Clear(); ++} ++inline const ::paddle::framework::proto::OpDesc_Var& OpDesc::inputs(int index) const { ++ // @@protoc_insertion_point(field_get:paddle.framework.proto.OpDesc.inputs) ++ return inputs_.Get(index); ++} ++inline ::paddle::framework::proto::OpDesc_Var* OpDesc::mutable_inputs(int index) { ++ // @@protoc_insertion_point(field_mutable:paddle.framework.proto.OpDesc.inputs) ++ return inputs_.Mutable(index); ++} ++inline ::paddle::framework::proto::OpDesc_Var* OpDesc::add_inputs() { ++ // @@protoc_insertion_point(field_add:paddle.framework.proto.OpDesc.inputs) ++ return inputs_.Add(); ++} ++inline ::google::protobuf::RepeatedPtrField< ::paddle::framework::proto::OpDesc_Var >* ++OpDesc::mutable_inputs() { ++ // @@protoc_insertion_point(field_mutable_list:paddle.framework.proto.OpDesc.inputs) ++ return &inputs_; ++} ++inline const ::google::protobuf::RepeatedPtrField< ::paddle::framework::proto::OpDesc_Var >& ++OpDesc::inputs() const { ++ // @@protoc_insertion_point(field_list:paddle.framework.proto.OpDesc.inputs) ++ return inputs_; ++} ++ ++// repeated .paddle.framework.proto.OpDesc.Var outputs = 2; ++inline int OpDesc::outputs_size() const { ++ return outputs_.size(); ++} ++inline void OpDesc::clear_outputs() { ++ outputs_.Clear(); ++} ++inline const ::paddle::framework::proto::OpDesc_Var& OpDesc::outputs(int index) const { ++ // @@protoc_insertion_point(field_get:paddle.framework.proto.OpDesc.outputs) ++ return outputs_.Get(index); ++} ++inline ::paddle::framework::proto::OpDesc_Var* OpDesc::mutable_outputs(int index) { ++ // @@protoc_insertion_point(field_mutable:paddle.framework.proto.OpDesc.outputs) 
++ return outputs_.Mutable(index); ++} ++inline ::paddle::framework::proto::OpDesc_Var* OpDesc::add_outputs() { ++ // @@protoc_insertion_point(field_add:paddle.framework.proto.OpDesc.outputs) ++ return outputs_.Add(); ++} ++inline ::google::protobuf::RepeatedPtrField< ::paddle::framework::proto::OpDesc_Var >* ++OpDesc::mutable_outputs() { ++ // @@protoc_insertion_point(field_mutable_list:paddle.framework.proto.OpDesc.outputs) ++ return &outputs_; ++} ++inline const ::google::protobuf::RepeatedPtrField< ::paddle::framework::proto::OpDesc_Var >& ++OpDesc::outputs() const { ++ // @@protoc_insertion_point(field_list:paddle.framework.proto.OpDesc.outputs) ++ return outputs_; ++} ++ ++// repeated .paddle.framework.proto.OpDesc.Attr attrs = 4; ++inline int OpDesc::attrs_size() const { ++ return attrs_.size(); ++} ++inline void OpDesc::clear_attrs() { ++ attrs_.Clear(); ++} ++inline const ::paddle::framework::proto::OpDesc_Attr& OpDesc::attrs(int index) const { ++ // @@protoc_insertion_point(field_get:paddle.framework.proto.OpDesc.attrs) ++ return attrs_.Get(index); ++} ++inline ::paddle::framework::proto::OpDesc_Attr* OpDesc::mutable_attrs(int index) { ++ // @@protoc_insertion_point(field_mutable:paddle.framework.proto.OpDesc.attrs) ++ return attrs_.Mutable(index); ++} ++inline ::paddle::framework::proto::OpDesc_Attr* OpDesc::add_attrs() { ++ // @@protoc_insertion_point(field_add:paddle.framework.proto.OpDesc.attrs) ++ return attrs_.Add(); ++} ++inline ::google::protobuf::RepeatedPtrField< ::paddle::framework::proto::OpDesc_Attr >* ++OpDesc::mutable_attrs() { ++ // @@protoc_insertion_point(field_mutable_list:paddle.framework.proto.OpDesc.attrs) ++ return &attrs_; ++} ++inline const ::google::protobuf::RepeatedPtrField< ::paddle::framework::proto::OpDesc_Attr >& ++OpDesc::attrs() const { ++ // @@protoc_insertion_point(field_list:paddle.framework.proto.OpDesc.attrs) ++ return attrs_; ++} ++ ++// optional bool is_target = 5 [default = false]; ++inline bool OpDesc::has_is_target() const { ++ return (_has_bits_[0] & 0x00000010u) != 0; ++} ++inline void OpDesc::set_has_is_target() { ++ _has_bits_[0] |= 0x00000010u; ++} ++inline void OpDesc::clear_has_is_target() { ++ _has_bits_[0] &= ~0x00000010u; ++} ++inline void OpDesc::clear_is_target() { ++ is_target_ = false; ++ clear_has_is_target(); ++} ++inline bool OpDesc::is_target() const { ++ // @@protoc_insertion_point(field_get:paddle.framework.proto.OpDesc.is_target) ++ return is_target_; ++} ++inline void OpDesc::set_is_target(bool value) { ++ set_has_is_target(); ++ is_target_ = value; ++ // @@protoc_insertion_point(field_set:paddle.framework.proto.OpDesc.is_target) ++} ++ ++inline const OpDesc* OpDesc::internal_default_instance() { ++ return &OpDesc_default_instance_.get(); ++} ++// ------------------------------------------------------------------- ++ ++// OpProto_Var ++ ++// required string name = 1; ++inline bool OpProto_Var::has_name() const { ++ return (_has_bits_[0] & 0x00000001u) != 0; ++} ++inline void OpProto_Var::set_has_name() { ++ _has_bits_[0] |= 0x00000001u; ++} ++inline void OpProto_Var::clear_has_name() { ++ _has_bits_[0] &= ~0x00000001u; ++} ++inline void OpProto_Var::clear_name() { ++ name_.ClearToEmptyNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited()); ++ clear_has_name(); ++} ++inline const ::std::string& OpProto_Var::name() const { ++ // @@protoc_insertion_point(field_get:paddle.framework.proto.OpProto.Var.name) ++ return name_.GetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited()); ++} 
++inline void OpProto_Var::set_name(const ::std::string& value) { ++ set_has_name(); ++ name_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), value); ++ // @@protoc_insertion_point(field_set:paddle.framework.proto.OpProto.Var.name) ++} ++inline void OpProto_Var::set_name(const char* value) { ++ set_has_name(); ++ name_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), ::std::string(value)); ++ // @@protoc_insertion_point(field_set_char:paddle.framework.proto.OpProto.Var.name) ++} ++inline void OpProto_Var::set_name(const char* value, size_t size) { ++ set_has_name(); ++ name_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), ++ ::std::string(reinterpret_cast<const char*>(value), size)); ++ // @@protoc_insertion_point(field_set_pointer:paddle.framework.proto.OpProto.Var.name) ++} ++inline ::std::string* OpProto_Var::mutable_name() { ++ set_has_name(); ++ // @@protoc_insertion_point(field_mutable:paddle.framework.proto.OpProto.Var.name) ++ return name_.MutableNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited()); ++} ++inline ::std::string* OpProto_Var::release_name() { ++ // @@protoc_insertion_point(field_release:paddle.framework.proto.OpProto.Var.name) ++ clear_has_name(); ++ return name_.ReleaseNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited()); ++} ++inline void OpProto_Var::set_allocated_name(::std::string* name) { ++ if (name != NULL) { ++ set_has_name(); ++ } else { ++ clear_has_name(); ++ } ++ name_.SetAllocatedNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), name); ++ // @@protoc_insertion_point(field_set_allocated:paddle.framework.proto.OpProto.Var.name) ++} ++ ++// required string comment = 2; ++inline bool OpProto_Var::has_comment() const { ++ return (_has_bits_[0] & 0x00000002u) != 0; ++} ++inline void OpProto_Var::set_has_comment() { ++ _has_bits_[0] |= 0x00000002u; ++} ++inline void OpProto_Var::clear_has_comment() { ++ _has_bits_[0] &= ~0x00000002u; ++} ++inline void OpProto_Var::clear_comment() { ++ comment_.ClearToEmptyNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited()); ++ clear_has_comment(); ++} ++inline const ::std::string& OpProto_Var::comment() const { ++ // @@protoc_insertion_point(field_get:paddle.framework.proto.OpProto.Var.comment) ++ return comment_.GetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited()); ++} ++inline void OpProto_Var::set_comment(const ::std::string& value) { ++ set_has_comment(); ++ comment_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), value); ++ // @@protoc_insertion_point(field_set:paddle.framework.proto.OpProto.Var.comment) ++} ++inline void OpProto_Var::set_comment(const char* value) { ++ set_has_comment(); ++ comment_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), ::std::string(value)); ++ // @@protoc_insertion_point(field_set_char:paddle.framework.proto.OpProto.Var.comment) ++} ++inline void OpProto_Var::set_comment(const char* value, size_t size) { ++ set_has_comment(); ++ comment_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), ++ ::std::string(reinterpret_cast<const char*>(value), size)); ++ // @@protoc_insertion_point(field_set_pointer:paddle.framework.proto.OpProto.Var.comment) ++} ++inline ::std::string* OpProto_Var::mutable_comment() { ++ set_has_comment(); ++ // @@protoc_insertion_point(field_mutable:paddle.framework.proto.OpProto.Var.comment) ++ return 
comment_.MutableNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited()); ++} ++inline ::std::string* OpProto_Var::release_comment() { ++ // @@protoc_insertion_point(field_release:paddle.framework.proto.OpProto.Var.comment) ++ clear_has_comment(); ++ return comment_.ReleaseNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited()); ++} ++inline void OpProto_Var::set_allocated_comment(::std::string* comment) { ++ if (comment != NULL) { ++ set_has_comment(); ++ } else { ++ clear_has_comment(); ++ } ++ comment_.SetAllocatedNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), comment); ++ // @@protoc_insertion_point(field_set_allocated:paddle.framework.proto.OpProto.Var.comment) ++} ++ ++// optional bool duplicable = 3 [default = false]; ++inline bool OpProto_Var::has_duplicable() const { ++ return (_has_bits_[0] & 0x00000004u) != 0; ++} ++inline void OpProto_Var::set_has_duplicable() { ++ _has_bits_[0] |= 0x00000004u; ++} ++inline void OpProto_Var::clear_has_duplicable() { ++ _has_bits_[0] &= ~0x00000004u; ++} ++inline void OpProto_Var::clear_duplicable() { ++ duplicable_ = false; ++ clear_has_duplicable(); ++} ++inline bool OpProto_Var::duplicable() const { ++ // @@protoc_insertion_point(field_get:paddle.framework.proto.OpProto.Var.duplicable) ++ return duplicable_; ++} ++inline void OpProto_Var::set_duplicable(bool value) { ++ set_has_duplicable(); ++ duplicable_ = value; ++ // @@protoc_insertion_point(field_set:paddle.framework.proto.OpProto.Var.duplicable) ++} ++ ++// optional bool intermediate = 4 [default = false]; ++inline bool OpProto_Var::has_intermediate() const { ++ return (_has_bits_[0] & 0x00000008u) != 0; ++} ++inline void OpProto_Var::set_has_intermediate() { ++ _has_bits_[0] |= 0x00000008u; ++} ++inline void OpProto_Var::clear_has_intermediate() { ++ _has_bits_[0] &= ~0x00000008u; ++} ++inline void OpProto_Var::clear_intermediate() { ++ intermediate_ = false; ++ clear_has_intermediate(); ++} ++inline bool OpProto_Var::intermediate() const { ++ // @@protoc_insertion_point(field_get:paddle.framework.proto.OpProto.Var.intermediate) ++ return intermediate_; ++} ++inline void OpProto_Var::set_intermediate(bool value) { ++ set_has_intermediate(); ++ intermediate_ = value; ++ // @@protoc_insertion_point(field_set:paddle.framework.proto.OpProto.Var.intermediate) ++} ++ ++// optional bool dispensable = 5 [default = false]; ++inline bool OpProto_Var::has_dispensable() const { ++ return (_has_bits_[0] & 0x00000010u) != 0; ++} ++inline void OpProto_Var::set_has_dispensable() { ++ _has_bits_[0] |= 0x00000010u; ++} ++inline void OpProto_Var::clear_has_dispensable() { ++ _has_bits_[0] &= ~0x00000010u; ++} ++inline void OpProto_Var::clear_dispensable() { ++ dispensable_ = false; ++ clear_has_dispensable(); ++} ++inline bool OpProto_Var::dispensable() const { ++ // @@protoc_insertion_point(field_get:paddle.framework.proto.OpProto.Var.dispensable) ++ return dispensable_; ++} ++inline void OpProto_Var::set_dispensable(bool value) { ++ set_has_dispensable(); ++ dispensable_ = value; ++ // @@protoc_insertion_point(field_set:paddle.framework.proto.OpProto.Var.dispensable) ++} ++ ++inline const OpProto_Var* OpProto_Var::internal_default_instance() { ++ return &OpProto_Var_default_instance_.get(); ++} ++// ------------------------------------------------------------------- ++ ++// OpProto_Attr ++ ++// required string name = 1; ++inline bool OpProto_Attr::has_name() const { ++ return (_has_bits_[0] & 0x00000001u) != 0; ++} ++inline void 
OpProto_Attr::set_has_name() { ++ _has_bits_[0] |= 0x00000001u; ++} ++inline void OpProto_Attr::clear_has_name() { ++ _has_bits_[0] &= ~0x00000001u; ++} ++inline void OpProto_Attr::clear_name() { ++ name_.ClearToEmptyNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited()); ++ clear_has_name(); ++} ++inline const ::std::string& OpProto_Attr::name() const { ++ // @@protoc_insertion_point(field_get:paddle.framework.proto.OpProto.Attr.name) ++ return name_.GetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited()); ++} ++inline void OpProto_Attr::set_name(const ::std::string& value) { ++ set_has_name(); ++ name_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), value); ++ // @@protoc_insertion_point(field_set:paddle.framework.proto.OpProto.Attr.name) ++} ++inline void OpProto_Attr::set_name(const char* value) { ++ set_has_name(); ++ name_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), ::std::string(value)); ++ // @@protoc_insertion_point(field_set_char:paddle.framework.proto.OpProto.Attr.name) ++} ++inline void OpProto_Attr::set_name(const char* value, size_t size) { ++ set_has_name(); ++ name_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), ++ ::std::string(reinterpret_cast<const char*>(value), size)); ++ // @@protoc_insertion_point(field_set_pointer:paddle.framework.proto.OpProto.Attr.name) ++} ++inline ::std::string* OpProto_Attr::mutable_name() { ++ set_has_name(); ++ // @@protoc_insertion_point(field_mutable:paddle.framework.proto.OpProto.Attr.name) ++ return name_.MutableNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited()); ++} ++inline ::std::string* OpProto_Attr::release_name() { ++ // @@protoc_insertion_point(field_release:paddle.framework.proto.OpProto.Attr.name) ++ clear_has_name(); ++ return name_.ReleaseNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited()); ++} ++inline void OpProto_Attr::set_allocated_name(::std::string* name) { ++ if (name != NULL) { ++ set_has_name(); ++ } else { ++ clear_has_name(); ++ } ++ name_.SetAllocatedNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), name); ++ // @@protoc_insertion_point(field_set_allocated:paddle.framework.proto.OpProto.Attr.name) ++} ++ ++// required .paddle.framework.proto.AttrType type = 2; ++inline bool OpProto_Attr::has_type() const { ++ return (_has_bits_[0] & 0x00000002u) != 0; ++} ++inline void OpProto_Attr::set_has_type() { ++ _has_bits_[0] |= 0x00000002u; ++} ++inline void OpProto_Attr::clear_has_type() { ++ _has_bits_[0] &= ~0x00000002u; ++} ++inline void OpProto_Attr::clear_type() { ++ type_ = 0; ++ clear_has_type(); ++} ++inline ::paddle::framework::proto::AttrType OpProto_Attr::type() const { ++ // @@protoc_insertion_point(field_get:paddle.framework.proto.OpProto.Attr.type) ++ return static_cast< ::paddle::framework::proto::AttrType >(type_); ++} ++inline void OpProto_Attr::set_type(::paddle::framework::proto::AttrType value) { ++ assert(::paddle::framework::proto::AttrType_IsValid(value)); ++ set_has_type(); ++ type_ = value; ++ // @@protoc_insertion_point(field_set:paddle.framework.proto.OpProto.Attr.type) ++} ++ ++// required string comment = 3; ++inline bool OpProto_Attr::has_comment() const { ++ return (_has_bits_[0] & 0x00000004u) != 0; ++} ++inline void OpProto_Attr::set_has_comment() { ++ _has_bits_[0] |= 0x00000004u; ++} ++inline void OpProto_Attr::clear_has_comment() { ++ _has_bits_[0] &= ~0x00000004u; ++} ++inline void OpProto_Attr::clear_comment() { ++ 
comment_.ClearToEmptyNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited()); ++ clear_has_comment(); ++} ++inline const ::std::string& OpProto_Attr::comment() const { ++ // @@protoc_insertion_point(field_get:paddle.framework.proto.OpProto.Attr.comment) ++ return comment_.GetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited()); ++} ++inline void OpProto_Attr::set_comment(const ::std::string& value) { ++ set_has_comment(); ++ comment_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), value); ++ // @@protoc_insertion_point(field_set:paddle.framework.proto.OpProto.Attr.comment) ++} ++inline void OpProto_Attr::set_comment(const char* value) { ++ set_has_comment(); ++ comment_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), ::std::string(value)); ++ // @@protoc_insertion_point(field_set_char:paddle.framework.proto.OpProto.Attr.comment) ++} ++inline void OpProto_Attr::set_comment(const char* value, size_t size) { ++ set_has_comment(); ++ comment_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), ++ ::std::string(reinterpret_cast<const char*>(value), size)); ++ // @@protoc_insertion_point(field_set_pointer:paddle.framework.proto.OpProto.Attr.comment) ++} ++inline ::std::string* OpProto_Attr::mutable_comment() { ++ set_has_comment(); ++ // @@protoc_insertion_point(field_mutable:paddle.framework.proto.OpProto.Attr.comment) ++ return comment_.MutableNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited()); ++} ++inline ::std::string* OpProto_Attr::release_comment() { ++ // @@protoc_insertion_point(field_release:paddle.framework.proto.OpProto.Attr.comment) ++ clear_has_comment(); ++ return comment_.ReleaseNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited()); ++} ++inline void OpProto_Attr::set_allocated_comment(::std::string* comment) { ++ if (comment != NULL) { ++ set_has_comment(); ++ } else { ++ clear_has_comment(); ++ } ++ comment_.SetAllocatedNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), comment); ++ // @@protoc_insertion_point(field_set_allocated:paddle.framework.proto.OpProto.Attr.comment) ++} ++ ++// optional bool generated = 4 [default = false]; ++inline bool OpProto_Attr::has_generated() const { ++ return (_has_bits_[0] & 0x00000008u) != 0; ++} ++inline void OpProto_Attr::set_has_generated() { ++ _has_bits_[0] |= 0x00000008u; ++} ++inline void OpProto_Attr::clear_has_generated() { ++ _has_bits_[0] &= ~0x00000008u; ++} ++inline void OpProto_Attr::clear_generated() { ++ generated_ = false; ++ clear_has_generated(); ++} ++inline bool OpProto_Attr::generated() const { ++ // @@protoc_insertion_point(field_get:paddle.framework.proto.OpProto.Attr.generated) ++ return generated_; ++} ++inline void OpProto_Attr::set_generated(bool value) { ++ set_has_generated(); ++ generated_ = value; ++ // @@protoc_insertion_point(field_set:paddle.framework.proto.OpProto.Attr.generated) ++} ++ ++inline const OpProto_Attr* OpProto_Attr::internal_default_instance() { ++ return &OpProto_Attr_default_instance_.get(); ++} ++// ------------------------------------------------------------------- ++ ++// OpProto ++ ++// required string type = 1; ++inline bool OpProto::has_type() const { ++ return (_has_bits_[0] & 0x00000001u) != 0; ++} ++inline void OpProto::set_has_type() { ++ _has_bits_[0] |= 0x00000001u; ++} ++inline void OpProto::clear_has_type() { ++ _has_bits_[0] &= ~0x00000001u; ++} ++inline void OpProto::clear_type() { ++ 
type_.ClearToEmptyNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited()); ++ clear_has_type(); ++} ++inline const ::std::string& OpProto::type() const { ++ // @@protoc_insertion_point(field_get:paddle.framework.proto.OpProto.type) ++ return type_.GetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited()); ++} ++inline void OpProto::set_type(const ::std::string& value) { ++ set_has_type(); ++ type_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), value); ++ // @@protoc_insertion_point(field_set:paddle.framework.proto.OpProto.type) ++} ++inline void OpProto::set_type(const char* value) { ++ set_has_type(); ++ type_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), ::std::string(value)); ++ // @@protoc_insertion_point(field_set_char:paddle.framework.proto.OpProto.type) ++} ++inline void OpProto::set_type(const char* value, size_t size) { ++ set_has_type(); ++ type_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), ++ ::std::string(reinterpret_cast<const char*>(value), size)); ++ // @@protoc_insertion_point(field_set_pointer:paddle.framework.proto.OpProto.type) ++} ++inline ::std::string* OpProto::mutable_type() { ++ set_has_type(); ++ // @@protoc_insertion_point(field_mutable:paddle.framework.proto.OpProto.type) ++ return type_.MutableNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited()); ++} ++inline ::std::string* OpProto::release_type() { ++ // @@protoc_insertion_point(field_release:paddle.framework.proto.OpProto.type) ++ clear_has_type(); ++ return type_.ReleaseNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited()); ++} ++inline void OpProto::set_allocated_type(::std::string* type) { ++ if (type != NULL) { ++ set_has_type(); ++ } else { ++ clear_has_type(); ++ } ++ type_.SetAllocatedNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), type); ++ // @@protoc_insertion_point(field_set_allocated:paddle.framework.proto.OpProto.type) ++} ++ ++// repeated .paddle.framework.proto.OpProto.Var inputs = 2; ++inline int OpProto::inputs_size() const { ++ return inputs_.size(); ++} ++inline void OpProto::clear_inputs() { ++ inputs_.Clear(); ++} ++inline const ::paddle::framework::proto::OpProto_Var& OpProto::inputs(int index) const { ++ // @@protoc_insertion_point(field_get:paddle.framework.proto.OpProto.inputs) ++ return inputs_.Get(index); ++} ++inline ::paddle::framework::proto::OpProto_Var* OpProto::mutable_inputs(int index) { ++ // @@protoc_insertion_point(field_mutable:paddle.framework.proto.OpProto.inputs) ++ return inputs_.Mutable(index); ++} ++inline ::paddle::framework::proto::OpProto_Var* OpProto::add_inputs() { ++ // @@protoc_insertion_point(field_add:paddle.framework.proto.OpProto.inputs) ++ return inputs_.Add(); ++} ++inline ::google::protobuf::RepeatedPtrField< ::paddle::framework::proto::OpProto_Var >* ++OpProto::mutable_inputs() { ++ // @@protoc_insertion_point(field_mutable_list:paddle.framework.proto.OpProto.inputs) ++ return &inputs_; ++} ++inline const ::google::protobuf::RepeatedPtrField< ::paddle::framework::proto::OpProto_Var >& ++OpProto::inputs() const { ++ // @@protoc_insertion_point(field_list:paddle.framework.proto.OpProto.inputs) ++ return inputs_; ++} ++ ++// repeated .paddle.framework.proto.OpProto.Var outputs = 3; ++inline int OpProto::outputs_size() const { ++ return outputs_.size(); ++} ++inline void OpProto::clear_outputs() { ++ outputs_.Clear(); ++} ++inline const ::paddle::framework::proto::OpProto_Var& OpProto::outputs(int index) 
const { ++ // @@protoc_insertion_point(field_get:paddle.framework.proto.OpProto.outputs) ++ return outputs_.Get(index); ++} ++inline ::paddle::framework::proto::OpProto_Var* OpProto::mutable_outputs(int index) { ++ // @@protoc_insertion_point(field_mutable:paddle.framework.proto.OpProto.outputs) ++ return outputs_.Mutable(index); ++} ++inline ::paddle::framework::proto::OpProto_Var* OpProto::add_outputs() { ++ // @@protoc_insertion_point(field_add:paddle.framework.proto.OpProto.outputs) ++ return outputs_.Add(); ++} ++inline ::google::protobuf::RepeatedPtrField< ::paddle::framework::proto::OpProto_Var >* ++OpProto::mutable_outputs() { ++ // @@protoc_insertion_point(field_mutable_list:paddle.framework.proto.OpProto.outputs) ++ return &outputs_; ++} ++inline const ::google::protobuf::RepeatedPtrField< ::paddle::framework::proto::OpProto_Var >& ++OpProto::outputs() const { ++ // @@protoc_insertion_point(field_list:paddle.framework.proto.OpProto.outputs) ++ return outputs_; ++} ++ ++// repeated .paddle.framework.proto.OpProto.Attr attrs = 4; ++inline int OpProto::attrs_size() const { ++ return attrs_.size(); ++} ++inline void OpProto::clear_attrs() { ++ attrs_.Clear(); ++} ++inline const ::paddle::framework::proto::OpProto_Attr& OpProto::attrs(int index) const { ++ // @@protoc_insertion_point(field_get:paddle.framework.proto.OpProto.attrs) ++ return attrs_.Get(index); ++} ++inline ::paddle::framework::proto::OpProto_Attr* OpProto::mutable_attrs(int index) { ++ // @@protoc_insertion_point(field_mutable:paddle.framework.proto.OpProto.attrs) ++ return attrs_.Mutable(index); ++} ++inline ::paddle::framework::proto::OpProto_Attr* OpProto::add_attrs() { ++ // @@protoc_insertion_point(field_add:paddle.framework.proto.OpProto.attrs) ++ return attrs_.Add(); ++} ++inline ::google::protobuf::RepeatedPtrField< ::paddle::framework::proto::OpProto_Attr >* ++OpProto::mutable_attrs() { ++ // @@protoc_insertion_point(field_mutable_list:paddle.framework.proto.OpProto.attrs) ++ return &attrs_; ++} ++inline const ::google::protobuf::RepeatedPtrField< ::paddle::framework::proto::OpProto_Attr >& ++OpProto::attrs() const { ++ // @@protoc_insertion_point(field_list:paddle.framework.proto.OpProto.attrs) ++ return attrs_; ++} ++ ++// required string comment = 5; ++inline bool OpProto::has_comment() const { ++ return (_has_bits_[0] & 0x00000010u) != 0; ++} ++inline void OpProto::set_has_comment() { ++ _has_bits_[0] |= 0x00000010u; ++} ++inline void OpProto::clear_has_comment() { ++ _has_bits_[0] &= ~0x00000010u; ++} ++inline void OpProto::clear_comment() { ++ comment_.ClearToEmptyNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited()); ++ clear_has_comment(); ++} ++inline const ::std::string& OpProto::comment() const { ++ // @@protoc_insertion_point(field_get:paddle.framework.proto.OpProto.comment) ++ return comment_.GetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited()); ++} ++inline void OpProto::set_comment(const ::std::string& value) { ++ set_has_comment(); ++ comment_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), value); ++ // @@protoc_insertion_point(field_set:paddle.framework.proto.OpProto.comment) ++} ++inline void OpProto::set_comment(const char* value) { ++ set_has_comment(); ++ comment_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), ::std::string(value)); ++ // @@protoc_insertion_point(field_set_char:paddle.framework.proto.OpProto.comment) ++} ++inline void OpProto::set_comment(const char* value, size_t size) { ++ 
set_has_comment(); ++ comment_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), ++ ::std::string(reinterpret_cast<const char*>(value), size)); ++ // @@protoc_insertion_point(field_set_pointer:paddle.framework.proto.OpProto.comment) ++} ++inline ::std::string* OpProto::mutable_comment() { ++ set_has_comment(); ++ // @@protoc_insertion_point(field_mutable:paddle.framework.proto.OpProto.comment) ++ return comment_.MutableNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited()); ++} ++inline ::std::string* OpProto::release_comment() { ++ // @@protoc_insertion_point(field_release:paddle.framework.proto.OpProto.comment) ++ clear_has_comment(); ++ return comment_.ReleaseNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited()); ++} ++inline void OpProto::set_allocated_comment(::std::string* comment) { ++ if (comment != NULL) { ++ set_has_comment(); ++ } else { ++ clear_has_comment(); ++ } ++ comment_.SetAllocatedNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), comment); ++ // @@protoc_insertion_point(field_set_allocated:paddle.framework.proto.OpProto.comment) ++} ++ ++inline const OpProto* OpProto::internal_default_instance() { ++ return &OpProto_default_instance_.get(); ++} ++// ------------------------------------------------------------------- ++ ++// VarType_TensorDesc ++ ++// required .paddle.framework.proto.VarType.Type data_type = 1; ++inline bool VarType_TensorDesc::has_data_type() const { ++ return (_has_bits_[0] & 0x00000001u) != 0; ++} ++inline void VarType_TensorDesc::set_has_data_type() { ++ _has_bits_[0] |= 0x00000001u; ++} ++inline void VarType_TensorDesc::clear_has_data_type() { ++ _has_bits_[0] &= ~0x00000001u; ++} ++inline void VarType_TensorDesc::clear_data_type() { ++ data_type_ = 0; ++ clear_has_data_type(); ++} ++inline ::paddle::framework::proto::VarType_Type VarType_TensorDesc::data_type() const { ++ // @@protoc_insertion_point(field_get:paddle.framework.proto.VarType.TensorDesc.data_type) ++ return static_cast< ::paddle::framework::proto::VarType_Type >(data_type_); ++} ++inline void VarType_TensorDesc::set_data_type(::paddle::framework::proto::VarType_Type value) { ++ assert(::paddle::framework::proto::VarType_Type_IsValid(value)); ++ set_has_data_type(); ++ data_type_ = value; ++ // @@protoc_insertion_point(field_set:paddle.framework.proto.VarType.TensorDesc.data_type) ++} ++ ++// repeated int64 dims = 2; ++inline int VarType_TensorDesc::dims_size() const { ++ return dims_.size(); ++} ++inline void VarType_TensorDesc::clear_dims() { ++ dims_.Clear(); ++} ++inline ::google::protobuf::int64 VarType_TensorDesc::dims(int index) const { ++ // @@protoc_insertion_point(field_get:paddle.framework.proto.VarType.TensorDesc.dims) ++ return dims_.Get(index); ++} ++inline void VarType_TensorDesc::set_dims(int index, ::google::protobuf::int64 value) { ++ dims_.Set(index, value); ++ // @@protoc_insertion_point(field_set:paddle.framework.proto.VarType.TensorDesc.dims) ++} ++inline void VarType_TensorDesc::add_dims(::google::protobuf::int64 value) { ++ dims_.Add(value); ++ // @@protoc_insertion_point(field_add:paddle.framework.proto.VarType.TensorDesc.dims) ++} ++inline const ::google::protobuf::RepeatedField< ::google::protobuf::int64 >& ++VarType_TensorDesc::dims() const { ++ // @@protoc_insertion_point(field_list:paddle.framework.proto.VarType.TensorDesc.dims) ++ return dims_; ++} ++inline ::google::protobuf::RepeatedField< ::google::protobuf::int64 >* ++VarType_TensorDesc::mutable_dims() { ++ // 
@@protoc_insertion_point(field_mutable_list:paddle.framework.proto.VarType.TensorDesc.dims) ++ return &dims_; ++} ++ ++inline const VarType_TensorDesc* VarType_TensorDesc::internal_default_instance() { ++ return &VarType_TensorDesc_default_instance_.get(); ++} ++// ------------------------------------------------------------------- ++ ++// VarType_LoDTensorDesc ++ ++// required .paddle.framework.proto.VarType.TensorDesc tensor = 1; ++inline bool VarType_LoDTensorDesc::has_tensor() const { ++ return (_has_bits_[0] & 0x00000001u) != 0; ++} ++inline void VarType_LoDTensorDesc::set_has_tensor() { ++ _has_bits_[0] |= 0x00000001u; ++} ++inline void VarType_LoDTensorDesc::clear_has_tensor() { ++ _has_bits_[0] &= ~0x00000001u; ++} ++inline void VarType_LoDTensorDesc::clear_tensor() { ++ if (tensor_ != NULL) tensor_->::paddle::framework::proto::VarType_TensorDesc::Clear(); ++ clear_has_tensor(); ++} ++inline const ::paddle::framework::proto::VarType_TensorDesc& VarType_LoDTensorDesc::tensor() const { ++ // @@protoc_insertion_point(field_get:paddle.framework.proto.VarType.LoDTensorDesc.tensor) ++ return tensor_ != NULL ? *tensor_ ++ : *::paddle::framework::proto::VarType_TensorDesc::internal_default_instance(); ++} ++inline ::paddle::framework::proto::VarType_TensorDesc* VarType_LoDTensorDesc::mutable_tensor() { ++ set_has_tensor(); ++ if (tensor_ == NULL) { ++ tensor_ = new ::paddle::framework::proto::VarType_TensorDesc; ++ } ++ // @@protoc_insertion_point(field_mutable:paddle.framework.proto.VarType.LoDTensorDesc.tensor) ++ return tensor_; ++} ++inline ::paddle::framework::proto::VarType_TensorDesc* VarType_LoDTensorDesc::release_tensor() { ++ // @@protoc_insertion_point(field_release:paddle.framework.proto.VarType.LoDTensorDesc.tensor) ++ clear_has_tensor(); ++ ::paddle::framework::proto::VarType_TensorDesc* temp = tensor_; ++ tensor_ = NULL; ++ return temp; ++} ++inline void VarType_LoDTensorDesc::set_allocated_tensor(::paddle::framework::proto::VarType_TensorDesc* tensor) { ++ delete tensor_; ++ tensor_ = tensor; ++ if (tensor) { ++ set_has_tensor(); ++ } else { ++ clear_has_tensor(); ++ } ++ // @@protoc_insertion_point(field_set_allocated:paddle.framework.proto.VarType.LoDTensorDesc.tensor) ++} ++ ++// optional int32 lod_level = 2 [default = 0]; ++inline bool VarType_LoDTensorDesc::has_lod_level() const { ++ return (_has_bits_[0] & 0x00000002u) != 0; ++} ++inline void VarType_LoDTensorDesc::set_has_lod_level() { ++ _has_bits_[0] |= 0x00000002u; ++} ++inline void VarType_LoDTensorDesc::clear_has_lod_level() { ++ _has_bits_[0] &= ~0x00000002u; ++} ++inline void VarType_LoDTensorDesc::clear_lod_level() { ++ lod_level_ = 0; ++ clear_has_lod_level(); ++} ++inline ::google::protobuf::int32 VarType_LoDTensorDesc::lod_level() const { ++ // @@protoc_insertion_point(field_get:paddle.framework.proto.VarType.LoDTensorDesc.lod_level) ++ return lod_level_; ++} ++inline void VarType_LoDTensorDesc::set_lod_level(::google::protobuf::int32 value) { ++ set_has_lod_level(); ++ lod_level_ = value; ++ // @@protoc_insertion_point(field_set:paddle.framework.proto.VarType.LoDTensorDesc.lod_level) ++} ++ ++inline const VarType_LoDTensorDesc* VarType_LoDTensorDesc::internal_default_instance() { ++ return &VarType_LoDTensorDesc_default_instance_.get(); ++} ++// ------------------------------------------------------------------- ++ ++// VarType_LoDTensorArrayDesc ++ ++// required .paddle.framework.proto.VarType.TensorDesc tensor = 1; ++inline bool VarType_LoDTensorArrayDesc::has_tensor() const { ++ return (_has_bits_[0] & 
0x00000001u) != 0; ++} ++inline void VarType_LoDTensorArrayDesc::set_has_tensor() { ++ _has_bits_[0] |= 0x00000001u; ++} ++inline void VarType_LoDTensorArrayDesc::clear_has_tensor() { ++ _has_bits_[0] &= ~0x00000001u; ++} ++inline void VarType_LoDTensorArrayDesc::clear_tensor() { ++ if (tensor_ != NULL) tensor_->::paddle::framework::proto::VarType_TensorDesc::Clear(); ++ clear_has_tensor(); ++} ++inline const ::paddle::framework::proto::VarType_TensorDesc& VarType_LoDTensorArrayDesc::tensor() const { ++ // @@protoc_insertion_point(field_get:paddle.framework.proto.VarType.LoDTensorArrayDesc.tensor) ++ return tensor_ != NULL ? *tensor_ ++ : *::paddle::framework::proto::VarType_TensorDesc::internal_default_instance(); ++} ++inline ::paddle::framework::proto::VarType_TensorDesc* VarType_LoDTensorArrayDesc::mutable_tensor() { ++ set_has_tensor(); ++ if (tensor_ == NULL) { ++ tensor_ = new ::paddle::framework::proto::VarType_TensorDesc; ++ } ++ // @@protoc_insertion_point(field_mutable:paddle.framework.proto.VarType.LoDTensorArrayDesc.tensor) ++ return tensor_; ++} ++inline ::paddle::framework::proto::VarType_TensorDesc* VarType_LoDTensorArrayDesc::release_tensor() { ++ // @@protoc_insertion_point(field_release:paddle.framework.proto.VarType.LoDTensorArrayDesc.tensor) ++ clear_has_tensor(); ++ ::paddle::framework::proto::VarType_TensorDesc* temp = tensor_; ++ tensor_ = NULL; ++ return temp; ++} ++inline void VarType_LoDTensorArrayDesc::set_allocated_tensor(::paddle::framework::proto::VarType_TensorDesc* tensor) { ++ delete tensor_; ++ tensor_ = tensor; ++ if (tensor) { ++ set_has_tensor(); ++ } else { ++ clear_has_tensor(); ++ } ++ // @@protoc_insertion_point(field_set_allocated:paddle.framework.proto.VarType.LoDTensorArrayDesc.tensor) ++} ++ ++// optional int32 lod_level = 2 [default = 0]; ++inline bool VarType_LoDTensorArrayDesc::has_lod_level() const { ++ return (_has_bits_[0] & 0x00000002u) != 0; ++} ++inline void VarType_LoDTensorArrayDesc::set_has_lod_level() { ++ _has_bits_[0] |= 0x00000002u; ++} ++inline void VarType_LoDTensorArrayDesc::clear_has_lod_level() { ++ _has_bits_[0] &= ~0x00000002u; ++} ++inline void VarType_LoDTensorArrayDesc::clear_lod_level() { ++ lod_level_ = 0; ++ clear_has_lod_level(); ++} ++inline ::google::protobuf::int32 VarType_LoDTensorArrayDesc::lod_level() const { ++ // @@protoc_insertion_point(field_get:paddle.framework.proto.VarType.LoDTensorArrayDesc.lod_level) ++ return lod_level_; ++} ++inline void VarType_LoDTensorArrayDesc::set_lod_level(::google::protobuf::int32 value) { ++ set_has_lod_level(); ++ lod_level_ = value; ++ // @@protoc_insertion_point(field_set:paddle.framework.proto.VarType.LoDTensorArrayDesc.lod_level) ++} ++ ++inline const VarType_LoDTensorArrayDesc* VarType_LoDTensorArrayDesc::internal_default_instance() { ++ return &VarType_LoDTensorArrayDesc_default_instance_.get(); ++} ++// ------------------------------------------------------------------- ++ ++// VarType_ReaderDesc ++ ++// repeated .paddle.framework.proto.VarType.LoDTensorDesc lod_tensor = 1; ++inline int VarType_ReaderDesc::lod_tensor_size() const { ++ return lod_tensor_.size(); ++} ++inline void VarType_ReaderDesc::clear_lod_tensor() { ++ lod_tensor_.Clear(); ++} ++inline const ::paddle::framework::proto::VarType_LoDTensorDesc& VarType_ReaderDesc::lod_tensor(int index) const { ++ // @@protoc_insertion_point(field_get:paddle.framework.proto.VarType.ReaderDesc.lod_tensor) ++ return lod_tensor_.Get(index); ++} ++inline ::paddle::framework::proto::VarType_LoDTensorDesc* 
VarType_ReaderDesc::mutable_lod_tensor(int index) { ++ // @@protoc_insertion_point(field_mutable:paddle.framework.proto.VarType.ReaderDesc.lod_tensor) ++ return lod_tensor_.Mutable(index); ++} ++inline ::paddle::framework::proto::VarType_LoDTensorDesc* VarType_ReaderDesc::add_lod_tensor() { ++ // @@protoc_insertion_point(field_add:paddle.framework.proto.VarType.ReaderDesc.lod_tensor) ++ return lod_tensor_.Add(); ++} ++inline ::google::protobuf::RepeatedPtrField< ::paddle::framework::proto::VarType_LoDTensorDesc >* ++VarType_ReaderDesc::mutable_lod_tensor() { ++ // @@protoc_insertion_point(field_mutable_list:paddle.framework.proto.VarType.ReaderDesc.lod_tensor) ++ return &lod_tensor_; ++} ++inline const ::google::protobuf::RepeatedPtrField< ::paddle::framework::proto::VarType_LoDTensorDesc >& ++VarType_ReaderDesc::lod_tensor() const { ++ // @@protoc_insertion_point(field_list:paddle.framework.proto.VarType.ReaderDesc.lod_tensor) ++ return lod_tensor_; ++} ++ ++inline const VarType_ReaderDesc* VarType_ReaderDesc::internal_default_instance() { ++ return &VarType_ReaderDesc_default_instance_.get(); ++} ++// ------------------------------------------------------------------- ++ ++// VarType_Tuple ++ ++// repeated .paddle.framework.proto.VarType.Type element_type = 1; ++inline int VarType_Tuple::element_type_size() const { ++ return element_type_.size(); ++} ++inline void VarType_Tuple::clear_element_type() { ++ element_type_.Clear(); ++} ++inline ::paddle::framework::proto::VarType_Type VarType_Tuple::element_type(int index) const { ++ // @@protoc_insertion_point(field_get:paddle.framework.proto.VarType.Tuple.element_type) ++ return static_cast< ::paddle::framework::proto::VarType_Type >(element_type_.Get(index)); ++} ++inline void VarType_Tuple::set_element_type(int index, ::paddle::framework::proto::VarType_Type value) { ++ assert(::paddle::framework::proto::VarType_Type_IsValid(value)); ++ element_type_.Set(index, value); ++ // @@protoc_insertion_point(field_set:paddle.framework.proto.VarType.Tuple.element_type) ++} ++inline void VarType_Tuple::add_element_type(::paddle::framework::proto::VarType_Type value) { ++ assert(::paddle::framework::proto::VarType_Type_IsValid(value)); ++ element_type_.Add(value); ++ // @@protoc_insertion_point(field_add:paddle.framework.proto.VarType.Tuple.element_type) ++} ++inline const ::google::protobuf::RepeatedField<int>& ++VarType_Tuple::element_type() const { ++ // @@protoc_insertion_point(field_list:paddle.framework.proto.VarType.Tuple.element_type) ++ return element_type_; ++} ++inline ::google::protobuf::RepeatedField<int>* ++VarType_Tuple::mutable_element_type() { ++ // @@protoc_insertion_point(field_mutable_list:paddle.framework.proto.VarType.Tuple.element_type) ++ return &element_type_; ++} ++ ++inline const VarType_Tuple* VarType_Tuple::internal_default_instance() { ++ return &VarType_Tuple_default_instance_.get(); ++} ++// ------------------------------------------------------------------- ++ ++// VarType ++ ++// required .paddle.framework.proto.VarType.Type type = 1; ++inline bool VarType::has_type() const { ++ return (_has_bits_[0] & 0x00000001u) != 0; ++} ++inline void VarType::set_has_type() { ++ _has_bits_[0] |= 0x00000001u; ++} ++inline void VarType::clear_has_type() { ++ _has_bits_[0] &= ~0x00000001u; ++} ++inline void VarType::clear_type() { ++ type_ = 0; ++ clear_has_type(); ++} ++inline ::paddle::framework::proto::VarType_Type VarType::type() const { ++ // @@protoc_insertion_point(field_get:paddle.framework.proto.VarType.type) ++ return static_cast< 
::paddle::framework::proto::VarType_Type >(type_); ++} ++inline void VarType::set_type(::paddle::framework::proto::VarType_Type value) { ++ assert(::paddle::framework::proto::VarType_Type_IsValid(value)); ++ set_has_type(); ++ type_ = value; ++ // @@protoc_insertion_point(field_set:paddle.framework.proto.VarType.type) ++} ++ ++// optional .paddle.framework.proto.VarType.TensorDesc selected_rows = 2; ++inline bool VarType::has_selected_rows() const { ++ return (_has_bits_[0] & 0x00000002u) != 0; ++} ++inline void VarType::set_has_selected_rows() { ++ _has_bits_[0] |= 0x00000002u; ++} ++inline void VarType::clear_has_selected_rows() { ++ _has_bits_[0] &= ~0x00000002u; ++} ++inline void VarType::clear_selected_rows() { ++ if (selected_rows_ != NULL) selected_rows_->::paddle::framework::proto::VarType_TensorDesc::Clear(); ++ clear_has_selected_rows(); ++} ++inline const ::paddle::framework::proto::VarType_TensorDesc& VarType::selected_rows() const { ++ // @@protoc_insertion_point(field_get:paddle.framework.proto.VarType.selected_rows) ++ return selected_rows_ != NULL ? *selected_rows_ ++ : *::paddle::framework::proto::VarType_TensorDesc::internal_default_instance(); ++} ++inline ::paddle::framework::proto::VarType_TensorDesc* VarType::mutable_selected_rows() { ++ set_has_selected_rows(); ++ if (selected_rows_ == NULL) { ++ selected_rows_ = new ::paddle::framework::proto::VarType_TensorDesc; ++ } ++ // @@protoc_insertion_point(field_mutable:paddle.framework.proto.VarType.selected_rows) ++ return selected_rows_; ++} ++inline ::paddle::framework::proto::VarType_TensorDesc* VarType::release_selected_rows() { ++ // @@protoc_insertion_point(field_release:paddle.framework.proto.VarType.selected_rows) ++ clear_has_selected_rows(); ++ ::paddle::framework::proto::VarType_TensorDesc* temp = selected_rows_; ++ selected_rows_ = NULL; ++ return temp; ++} ++inline void VarType::set_allocated_selected_rows(::paddle::framework::proto::VarType_TensorDesc* selected_rows) { ++ delete selected_rows_; ++ selected_rows_ = selected_rows; ++ if (selected_rows) { ++ set_has_selected_rows(); ++ } else { ++ clear_has_selected_rows(); ++ } ++ // @@protoc_insertion_point(field_set_allocated:paddle.framework.proto.VarType.selected_rows) ++} ++ ++// optional .paddle.framework.proto.VarType.LoDTensorDesc lod_tensor = 3; ++inline bool VarType::has_lod_tensor() const { ++ return (_has_bits_[0] & 0x00000004u) != 0; ++} ++inline void VarType::set_has_lod_tensor() { ++ _has_bits_[0] |= 0x00000004u; ++} ++inline void VarType::clear_has_lod_tensor() { ++ _has_bits_[0] &= ~0x00000004u; ++} ++inline void VarType::clear_lod_tensor() { ++ if (lod_tensor_ != NULL) lod_tensor_->::paddle::framework::proto::VarType_LoDTensorDesc::Clear(); ++ clear_has_lod_tensor(); ++} ++inline const ::paddle::framework::proto::VarType_LoDTensorDesc& VarType::lod_tensor() const { ++ // @@protoc_insertion_point(field_get:paddle.framework.proto.VarType.lod_tensor) ++ return lod_tensor_ != NULL ? 
*lod_tensor_ ++ : *::paddle::framework::proto::VarType_LoDTensorDesc::internal_default_instance(); ++} ++inline ::paddle::framework::proto::VarType_LoDTensorDesc* VarType::mutable_lod_tensor() { ++ set_has_lod_tensor(); ++ if (lod_tensor_ == NULL) { ++ lod_tensor_ = new ::paddle::framework::proto::VarType_LoDTensorDesc; ++ } ++ // @@protoc_insertion_point(field_mutable:paddle.framework.proto.VarType.lod_tensor) ++ return lod_tensor_; ++} ++inline ::paddle::framework::proto::VarType_LoDTensorDesc* VarType::release_lod_tensor() { ++ // @@protoc_insertion_point(field_release:paddle.framework.proto.VarType.lod_tensor) ++ clear_has_lod_tensor(); ++ ::paddle::framework::proto::VarType_LoDTensorDesc* temp = lod_tensor_; ++ lod_tensor_ = NULL; ++ return temp; ++} ++inline void VarType::set_allocated_lod_tensor(::paddle::framework::proto::VarType_LoDTensorDesc* lod_tensor) { ++ delete lod_tensor_; ++ lod_tensor_ = lod_tensor; ++ if (lod_tensor) { ++ set_has_lod_tensor(); ++ } else { ++ clear_has_lod_tensor(); ++ } ++ // @@protoc_insertion_point(field_set_allocated:paddle.framework.proto.VarType.lod_tensor) ++} ++ ++// optional .paddle.framework.proto.VarType.LoDTensorArrayDesc tensor_array = 4; ++inline bool VarType::has_tensor_array() const { ++ return (_has_bits_[0] & 0x00000008u) != 0; ++} ++inline void VarType::set_has_tensor_array() { ++ _has_bits_[0] |= 0x00000008u; ++} ++inline void VarType::clear_has_tensor_array() { ++ _has_bits_[0] &= ~0x00000008u; ++} ++inline void VarType::clear_tensor_array() { ++ if (tensor_array_ != NULL) tensor_array_->::paddle::framework::proto::VarType_LoDTensorArrayDesc::Clear(); ++ clear_has_tensor_array(); ++} ++inline const ::paddle::framework::proto::VarType_LoDTensorArrayDesc& VarType::tensor_array() const { ++ // @@protoc_insertion_point(field_get:paddle.framework.proto.VarType.tensor_array) ++ return tensor_array_ != NULL ? 
*tensor_array_ ++ : *::paddle::framework::proto::VarType_LoDTensorArrayDesc::internal_default_instance(); ++} ++inline ::paddle::framework::proto::VarType_LoDTensorArrayDesc* VarType::mutable_tensor_array() { ++ set_has_tensor_array(); ++ if (tensor_array_ == NULL) { ++ tensor_array_ = new ::paddle::framework::proto::VarType_LoDTensorArrayDesc; ++ } ++ // @@protoc_insertion_point(field_mutable:paddle.framework.proto.VarType.tensor_array) ++ return tensor_array_; ++} ++inline ::paddle::framework::proto::VarType_LoDTensorArrayDesc* VarType::release_tensor_array() { ++ // @@protoc_insertion_point(field_release:paddle.framework.proto.VarType.tensor_array) ++ clear_has_tensor_array(); ++ ::paddle::framework::proto::VarType_LoDTensorArrayDesc* temp = tensor_array_; ++ tensor_array_ = NULL; ++ return temp; ++} ++inline void VarType::set_allocated_tensor_array(::paddle::framework::proto::VarType_LoDTensorArrayDesc* tensor_array) { ++ delete tensor_array_; ++ tensor_array_ = tensor_array; ++ if (tensor_array) { ++ set_has_tensor_array(); ++ } else { ++ clear_has_tensor_array(); ++ } ++ // @@protoc_insertion_point(field_set_allocated:paddle.framework.proto.VarType.tensor_array) ++} ++ ++// optional .paddle.framework.proto.VarType.ReaderDesc reader = 5; ++inline bool VarType::has_reader() const { ++ return (_has_bits_[0] & 0x00000010u) != 0; ++} ++inline void VarType::set_has_reader() { ++ _has_bits_[0] |= 0x00000010u; ++} ++inline void VarType::clear_has_reader() { ++ _has_bits_[0] &= ~0x00000010u; ++} ++inline void VarType::clear_reader() { ++ if (reader_ != NULL) reader_->::paddle::framework::proto::VarType_ReaderDesc::Clear(); ++ clear_has_reader(); ++} ++inline const ::paddle::framework::proto::VarType_ReaderDesc& VarType::reader() const { ++ // @@protoc_insertion_point(field_get:paddle.framework.proto.VarType.reader) ++ return reader_ != NULL ? *reader_ ++ : *::paddle::framework::proto::VarType_ReaderDesc::internal_default_instance(); ++} ++inline ::paddle::framework::proto::VarType_ReaderDesc* VarType::mutable_reader() { ++ set_has_reader(); ++ if (reader_ == NULL) { ++ reader_ = new ::paddle::framework::proto::VarType_ReaderDesc; ++ } ++ // @@protoc_insertion_point(field_mutable:paddle.framework.proto.VarType.reader) ++ return reader_; ++} ++inline ::paddle::framework::proto::VarType_ReaderDesc* VarType::release_reader() { ++ // @@protoc_insertion_point(field_release:paddle.framework.proto.VarType.reader) ++ clear_has_reader(); ++ ::paddle::framework::proto::VarType_ReaderDesc* temp = reader_; ++ reader_ = NULL; ++ return temp; ++} ++inline void VarType::set_allocated_reader(::paddle::framework::proto::VarType_ReaderDesc* reader) { ++ delete reader_; ++ reader_ = reader; ++ if (reader) { ++ set_has_reader(); ++ } else { ++ clear_has_reader(); ++ } ++ // @@protoc_insertion_point(field_set_allocated:paddle.framework.proto.VarType.reader) ++} ++ ++// optional .paddle.framework.proto.VarType.Tuple tuple = 7; ++inline bool VarType::has_tuple() const { ++ return (_has_bits_[0] & 0x00000020u) != 0; ++} ++inline void VarType::set_has_tuple() { ++ _has_bits_[0] |= 0x00000020u; ++} ++inline void VarType::clear_has_tuple() { ++ _has_bits_[0] &= ~0x00000020u; ++} ++inline void VarType::clear_tuple() { ++ if (tuple_ != NULL) tuple_->::paddle::framework::proto::VarType_Tuple::Clear(); ++ clear_has_tuple(); ++} ++inline const ::paddle::framework::proto::VarType_Tuple& VarType::tuple() const { ++ // @@protoc_insertion_point(field_get:paddle.framework.proto.VarType.tuple) ++ return tuple_ != NULL ? 
*tuple_
++                : *::paddle::framework::proto::VarType_Tuple::internal_default_instance();
++}
++inline ::paddle::framework::proto::VarType_Tuple* VarType::mutable_tuple() {
++  set_has_tuple();
++  if (tuple_ == NULL) {
++    tuple_ = new ::paddle::framework::proto::VarType_Tuple;
++  }
++  // @@protoc_insertion_point(field_mutable:paddle.framework.proto.VarType.tuple)
++  return tuple_;
++}
++inline ::paddle::framework::proto::VarType_Tuple* VarType::release_tuple() {
++  // @@protoc_insertion_point(field_release:paddle.framework.proto.VarType.tuple)
++  clear_has_tuple();
++  ::paddle::framework::proto::VarType_Tuple* temp = tuple_;
++  tuple_ = NULL;
++  return temp;
++}
++inline void VarType::set_allocated_tuple(::paddle::framework::proto::VarType_Tuple* tuple) {
++  delete tuple_;
++  tuple_ = tuple;
++  if (tuple) {
++    set_has_tuple();
++  } else {
++    clear_has_tuple();
++  }
++  // @@protoc_insertion_point(field_set_allocated:paddle.framework.proto.VarType.tuple)
++}
++
++inline const VarType* VarType::internal_default_instance() {
++  return &VarType_default_instance_.get();
++}
++// -------------------------------------------------------------------
++
++// VarDesc
++
++// required string name = 1;
++inline bool VarDesc::has_name() const {
++  return (_has_bits_[0] & 0x00000001u) != 0;
++}
++inline void VarDesc::set_has_name() {
++  _has_bits_[0] |= 0x00000001u;
++}
++inline void VarDesc::clear_has_name() {
++  _has_bits_[0] &= ~0x00000001u;
++}
++inline void VarDesc::clear_name() {
++  name_.ClearToEmptyNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());
++  clear_has_name();
++}
++inline const ::std::string& VarDesc::name() const {
++  // @@protoc_insertion_point(field_get:paddle.framework.proto.VarDesc.name)
++  return name_.GetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());
++}
++inline void VarDesc::set_name(const ::std::string& value) {
++  set_has_name();
++  name_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), value);
++  // @@protoc_insertion_point(field_set:paddle.framework.proto.VarDesc.name)
++}
++inline void VarDesc::set_name(const char* value) {
++  set_has_name();
++  name_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), ::std::string(value));
++  // @@protoc_insertion_point(field_set_char:paddle.framework.proto.VarDesc.name)
++}
++inline void VarDesc::set_name(const char* value, size_t size) {
++  set_has_name();
++  name_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(),
++      ::std::string(reinterpret_cast<const char*>(value), size));
++  // @@protoc_insertion_point(field_set_pointer:paddle.framework.proto.VarDesc.name)
++}
++inline ::std::string* VarDesc::mutable_name() {
++  set_has_name();
++  // @@protoc_insertion_point(field_mutable:paddle.framework.proto.VarDesc.name)
++  return name_.MutableNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());
++}
++inline ::std::string* VarDesc::release_name() {
++  // @@protoc_insertion_point(field_release:paddle.framework.proto.VarDesc.name)
++  clear_has_name();
++  return name_.ReleaseNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());
++}
++inline void VarDesc::set_allocated_name(::std::string* name) {
++  if (name != NULL) {
++    set_has_name();
++  } else {
++    clear_has_name();
++  }
++  name_.SetAllocatedNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), name);
++  // @@protoc_insertion_point(field_set_allocated:paddle.framework.proto.VarDesc.name)
++}
++
++// required .paddle.framework.proto.VarType type = 2;
++inline bool VarDesc::has_type() const { ++ return (_has_bits_[0] & 0x00000002u) != 0; ++} ++inline void VarDesc::set_has_type() { ++ _has_bits_[0] |= 0x00000002u; ++} ++inline void VarDesc::clear_has_type() { ++ _has_bits_[0] &= ~0x00000002u; ++} ++inline void VarDesc::clear_type() { ++ if (type_ != NULL) type_->::paddle::framework::proto::VarType::Clear(); ++ clear_has_type(); ++} ++inline const ::paddle::framework::proto::VarType& VarDesc::type() const { ++ // @@protoc_insertion_point(field_get:paddle.framework.proto.VarDesc.type) ++ return type_ != NULL ? *type_ ++ : *::paddle::framework::proto::VarType::internal_default_instance(); ++} ++inline ::paddle::framework::proto::VarType* VarDesc::mutable_type() { ++ set_has_type(); ++ if (type_ == NULL) { ++ type_ = new ::paddle::framework::proto::VarType; ++ } ++ // @@protoc_insertion_point(field_mutable:paddle.framework.proto.VarDesc.type) ++ return type_; ++} ++inline ::paddle::framework::proto::VarType* VarDesc::release_type() { ++ // @@protoc_insertion_point(field_release:paddle.framework.proto.VarDesc.type) ++ clear_has_type(); ++ ::paddle::framework::proto::VarType* temp = type_; ++ type_ = NULL; ++ return temp; ++} ++inline void VarDesc::set_allocated_type(::paddle::framework::proto::VarType* type) { ++ delete type_; ++ type_ = type; ++ if (type) { ++ set_has_type(); ++ } else { ++ clear_has_type(); ++ } ++ // @@protoc_insertion_point(field_set_allocated:paddle.framework.proto.VarDesc.type) ++} ++ ++// optional bool persistable = 3 [default = false]; ++inline bool VarDesc::has_persistable() const { ++ return (_has_bits_[0] & 0x00000004u) != 0; ++} ++inline void VarDesc::set_has_persistable() { ++ _has_bits_[0] |= 0x00000004u; ++} ++inline void VarDesc::clear_has_persistable() { ++ _has_bits_[0] &= ~0x00000004u; ++} ++inline void VarDesc::clear_persistable() { ++ persistable_ = false; ++ clear_has_persistable(); ++} ++inline bool VarDesc::persistable() const { ++ // @@protoc_insertion_point(field_get:paddle.framework.proto.VarDesc.persistable) ++ return persistable_; ++} ++inline void VarDesc::set_persistable(bool value) { ++ set_has_persistable(); ++ persistable_ = value; ++ // @@protoc_insertion_point(field_set:paddle.framework.proto.VarDesc.persistable) ++} ++ ++// optional bool need_check_feed = 4 [default = false]; ++inline bool VarDesc::has_need_check_feed() const { ++ return (_has_bits_[0] & 0x00000008u) != 0; ++} ++inline void VarDesc::set_has_need_check_feed() { ++ _has_bits_[0] |= 0x00000008u; ++} ++inline void VarDesc::clear_has_need_check_feed() { ++ _has_bits_[0] &= ~0x00000008u; ++} ++inline void VarDesc::clear_need_check_feed() { ++ need_check_feed_ = false; ++ clear_has_need_check_feed(); ++} ++inline bool VarDesc::need_check_feed() const { ++ // @@protoc_insertion_point(field_get:paddle.framework.proto.VarDesc.need_check_feed) ++ return need_check_feed_; ++} ++inline void VarDesc::set_need_check_feed(bool value) { ++ set_has_need_check_feed(); ++ need_check_feed_ = value; ++ // @@protoc_insertion_point(field_set:paddle.framework.proto.VarDesc.need_check_feed) ++} ++ ++inline const VarDesc* VarDesc::internal_default_instance() { ++ return &VarDesc_default_instance_.get(); ++} ++// ------------------------------------------------------------------- ++ ++// BlockDesc ++ ++// required int32 idx = 1; ++inline bool BlockDesc::has_idx() const { ++ return (_has_bits_[0] & 0x00000001u) != 0; ++} ++inline void BlockDesc::set_has_idx() { ++ _has_bits_[0] |= 0x00000001u; ++} ++inline void BlockDesc::clear_has_idx() { ++ 
_has_bits_[0] &= ~0x00000001u; ++} ++inline void BlockDesc::clear_idx() { ++ idx_ = 0; ++ clear_has_idx(); ++} ++inline ::google::protobuf::int32 BlockDesc::idx() const { ++ // @@protoc_insertion_point(field_get:paddle.framework.proto.BlockDesc.idx) ++ return idx_; ++} ++inline void BlockDesc::set_idx(::google::protobuf::int32 value) { ++ set_has_idx(); ++ idx_ = value; ++ // @@protoc_insertion_point(field_set:paddle.framework.proto.BlockDesc.idx) ++} ++ ++// required int32 parent_idx = 2; ++inline bool BlockDesc::has_parent_idx() const { ++ return (_has_bits_[0] & 0x00000002u) != 0; ++} ++inline void BlockDesc::set_has_parent_idx() { ++ _has_bits_[0] |= 0x00000002u; ++} ++inline void BlockDesc::clear_has_parent_idx() { ++ _has_bits_[0] &= ~0x00000002u; ++} ++inline void BlockDesc::clear_parent_idx() { ++ parent_idx_ = 0; ++ clear_has_parent_idx(); ++} ++inline ::google::protobuf::int32 BlockDesc::parent_idx() const { ++ // @@protoc_insertion_point(field_get:paddle.framework.proto.BlockDesc.parent_idx) ++ return parent_idx_; ++} ++inline void BlockDesc::set_parent_idx(::google::protobuf::int32 value) { ++ set_has_parent_idx(); ++ parent_idx_ = value; ++ // @@protoc_insertion_point(field_set:paddle.framework.proto.BlockDesc.parent_idx) ++} ++ ++// repeated .paddle.framework.proto.VarDesc vars = 3; ++inline int BlockDesc::vars_size() const { ++ return vars_.size(); ++} ++inline void BlockDesc::clear_vars() { ++ vars_.Clear(); ++} ++inline const ::paddle::framework::proto::VarDesc& BlockDesc::vars(int index) const { ++ // @@protoc_insertion_point(field_get:paddle.framework.proto.BlockDesc.vars) ++ return vars_.Get(index); ++} ++inline ::paddle::framework::proto::VarDesc* BlockDesc::mutable_vars(int index) { ++ // @@protoc_insertion_point(field_mutable:paddle.framework.proto.BlockDesc.vars) ++ return vars_.Mutable(index); ++} ++inline ::paddle::framework::proto::VarDesc* BlockDesc::add_vars() { ++ // @@protoc_insertion_point(field_add:paddle.framework.proto.BlockDesc.vars) ++ return vars_.Add(); ++} ++inline ::google::protobuf::RepeatedPtrField< ::paddle::framework::proto::VarDesc >* ++BlockDesc::mutable_vars() { ++ // @@protoc_insertion_point(field_mutable_list:paddle.framework.proto.BlockDesc.vars) ++ return &vars_; ++} ++inline const ::google::protobuf::RepeatedPtrField< ::paddle::framework::proto::VarDesc >& ++BlockDesc::vars() const { ++ // @@protoc_insertion_point(field_list:paddle.framework.proto.BlockDesc.vars) ++ return vars_; ++} ++ ++// repeated .paddle.framework.proto.OpDesc ops = 4; ++inline int BlockDesc::ops_size() const { ++ return ops_.size(); ++} ++inline void BlockDesc::clear_ops() { ++ ops_.Clear(); ++} ++inline const ::paddle::framework::proto::OpDesc& BlockDesc::ops(int index) const { ++ // @@protoc_insertion_point(field_get:paddle.framework.proto.BlockDesc.ops) ++ return ops_.Get(index); ++} ++inline ::paddle::framework::proto::OpDesc* BlockDesc::mutable_ops(int index) { ++ // @@protoc_insertion_point(field_mutable:paddle.framework.proto.BlockDesc.ops) ++ return ops_.Mutable(index); ++} ++inline ::paddle::framework::proto::OpDesc* BlockDesc::add_ops() { ++ // @@protoc_insertion_point(field_add:paddle.framework.proto.BlockDesc.ops) ++ return ops_.Add(); ++} ++inline ::google::protobuf::RepeatedPtrField< ::paddle::framework::proto::OpDesc >* ++BlockDesc::mutable_ops() { ++ // @@protoc_insertion_point(field_mutable_list:paddle.framework.proto.BlockDesc.ops) ++ return &ops_; ++} ++inline const ::google::protobuf::RepeatedPtrField< ::paddle::framework::proto::OpDesc >& 
++BlockDesc::ops() const { ++ // @@protoc_insertion_point(field_list:paddle.framework.proto.BlockDesc.ops) ++ return ops_; ++} ++ ++// optional int32 forward_block_idx = 5 [default = -1]; ++inline bool BlockDesc::has_forward_block_idx() const { ++ return (_has_bits_[0] & 0x00000010u) != 0; ++} ++inline void BlockDesc::set_has_forward_block_idx() { ++ _has_bits_[0] |= 0x00000010u; ++} ++inline void BlockDesc::clear_has_forward_block_idx() { ++ _has_bits_[0] &= ~0x00000010u; ++} ++inline void BlockDesc::clear_forward_block_idx() { ++ forward_block_idx_ = -1; ++ clear_has_forward_block_idx(); ++} ++inline ::google::protobuf::int32 BlockDesc::forward_block_idx() const { ++ // @@protoc_insertion_point(field_get:paddle.framework.proto.BlockDesc.forward_block_idx) ++ return forward_block_idx_; ++} ++inline void BlockDesc::set_forward_block_idx(::google::protobuf::int32 value) { ++ set_has_forward_block_idx(); ++ forward_block_idx_ = value; ++ // @@protoc_insertion_point(field_set:paddle.framework.proto.BlockDesc.forward_block_idx) ++} ++ ++inline const BlockDesc* BlockDesc::internal_default_instance() { ++ return &BlockDesc_default_instance_.get(); ++} ++// ------------------------------------------------------------------- ++ ++// OpVersion ++ ++// required int32 version = 1; ++inline bool OpVersion::has_version() const { ++ return (_has_bits_[0] & 0x00000001u) != 0; ++} ++inline void OpVersion::set_has_version() { ++ _has_bits_[0] |= 0x00000001u; ++} ++inline void OpVersion::clear_has_version() { ++ _has_bits_[0] &= ~0x00000001u; ++} ++inline void OpVersion::clear_version() { ++ version_ = 0; ++ clear_has_version(); ++} ++inline ::google::protobuf::int32 OpVersion::version() const { ++ // @@protoc_insertion_point(field_get:paddle.framework.proto.OpVersion.version) ++ return version_; ++} ++inline void OpVersion::set_version(::google::protobuf::int32 value) { ++ set_has_version(); ++ version_ = value; ++ // @@protoc_insertion_point(field_set:paddle.framework.proto.OpVersion.version) ++} ++ ++inline const OpVersion* OpVersion::internal_default_instance() { ++ return &OpVersion_default_instance_.get(); ++} ++// ------------------------------------------------------------------- ++ ++// OpVersionMap_OpVersionPair ++ ++// required string op_name = 1; ++inline bool OpVersionMap_OpVersionPair::has_op_name() const { ++ return (_has_bits_[0] & 0x00000001u) != 0; ++} ++inline void OpVersionMap_OpVersionPair::set_has_op_name() { ++ _has_bits_[0] |= 0x00000001u; ++} ++inline void OpVersionMap_OpVersionPair::clear_has_op_name() { ++ _has_bits_[0] &= ~0x00000001u; ++} ++inline void OpVersionMap_OpVersionPair::clear_op_name() { ++ op_name_.ClearToEmptyNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited()); ++ clear_has_op_name(); ++} ++inline const ::std::string& OpVersionMap_OpVersionPair::op_name() const { ++ // @@protoc_insertion_point(field_get:paddle.framework.proto.OpVersionMap.OpVersionPair.op_name) ++ return op_name_.GetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited()); ++} ++inline void OpVersionMap_OpVersionPair::set_op_name(const ::std::string& value) { ++ set_has_op_name(); ++ op_name_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), value); ++ // @@protoc_insertion_point(field_set:paddle.framework.proto.OpVersionMap.OpVersionPair.op_name) ++} ++inline void OpVersionMap_OpVersionPair::set_op_name(const char* value) { ++ set_has_op_name(); ++ op_name_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), 
::std::string(value));
++  // @@protoc_insertion_point(field_set_char:paddle.framework.proto.OpVersionMap.OpVersionPair.op_name)
++}
++inline void OpVersionMap_OpVersionPair::set_op_name(const char* value, size_t size) {
++  set_has_op_name();
++  op_name_.SetNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(),
++      ::std::string(reinterpret_cast<const char*>(value), size));
++  // @@protoc_insertion_point(field_set_pointer:paddle.framework.proto.OpVersionMap.OpVersionPair.op_name)
++}
++inline ::std::string* OpVersionMap_OpVersionPair::mutable_op_name() {
++  set_has_op_name();
++  // @@protoc_insertion_point(field_mutable:paddle.framework.proto.OpVersionMap.OpVersionPair.op_name)
++  return op_name_.MutableNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());
++}
++inline ::std::string* OpVersionMap_OpVersionPair::release_op_name() {
++  // @@protoc_insertion_point(field_release:paddle.framework.proto.OpVersionMap.OpVersionPair.op_name)
++  clear_has_op_name();
++  return op_name_.ReleaseNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited());
++}
++inline void OpVersionMap_OpVersionPair::set_allocated_op_name(::std::string* op_name) {
++  if (op_name != NULL) {
++    set_has_op_name();
++  } else {
++    clear_has_op_name();
++  }
++  op_name_.SetAllocatedNoArena(&::google::protobuf::internal::GetEmptyStringAlreadyInited(), op_name);
++  // @@protoc_insertion_point(field_set_allocated:paddle.framework.proto.OpVersionMap.OpVersionPair.op_name)
++}
++
++// required .paddle.framework.proto.OpVersion op_version = 2;
++inline bool OpVersionMap_OpVersionPair::has_op_version() const {
++  return (_has_bits_[0] & 0x00000002u) != 0;
++}
++inline void OpVersionMap_OpVersionPair::set_has_op_version() {
++  _has_bits_[0] |= 0x00000002u;
++}
++inline void OpVersionMap_OpVersionPair::clear_has_op_version() {
++  _has_bits_[0] &= ~0x00000002u;
++}
++inline void OpVersionMap_OpVersionPair::clear_op_version() {
++  if (op_version_ != NULL) op_version_->::paddle::framework::proto::OpVersion::Clear();
++  clear_has_op_version();
++}
++inline const ::paddle::framework::proto::OpVersion& OpVersionMap_OpVersionPair::op_version() const {
++  // @@protoc_insertion_point(field_get:paddle.framework.proto.OpVersionMap.OpVersionPair.op_version)
++  return op_version_ != NULL ?
*op_version_ ++ : *::paddle::framework::proto::OpVersion::internal_default_instance(); ++} ++inline ::paddle::framework::proto::OpVersion* OpVersionMap_OpVersionPair::mutable_op_version() { ++ set_has_op_version(); ++ if (op_version_ == NULL) { ++ op_version_ = new ::paddle::framework::proto::OpVersion; ++ } ++ // @@protoc_insertion_point(field_mutable:paddle.framework.proto.OpVersionMap.OpVersionPair.op_version) ++ return op_version_; ++} ++inline ::paddle::framework::proto::OpVersion* OpVersionMap_OpVersionPair::release_op_version() { ++ // @@protoc_insertion_point(field_release:paddle.framework.proto.OpVersionMap.OpVersionPair.op_version) ++ clear_has_op_version(); ++ ::paddle::framework::proto::OpVersion* temp = op_version_; ++ op_version_ = NULL; ++ return temp; ++} ++inline void OpVersionMap_OpVersionPair::set_allocated_op_version(::paddle::framework::proto::OpVersion* op_version) { ++ delete op_version_; ++ op_version_ = op_version; ++ if (op_version) { ++ set_has_op_version(); ++ } else { ++ clear_has_op_version(); ++ } ++ // @@protoc_insertion_point(field_set_allocated:paddle.framework.proto.OpVersionMap.OpVersionPair.op_version) ++} ++ ++inline const OpVersionMap_OpVersionPair* OpVersionMap_OpVersionPair::internal_default_instance() { ++ return &OpVersionMap_OpVersionPair_default_instance_.get(); ++} ++// ------------------------------------------------------------------- ++ ++// OpVersionMap ++ ++// repeated .paddle.framework.proto.OpVersionMap.OpVersionPair pair = 1; ++inline int OpVersionMap::pair_size() const { ++ return pair_.size(); ++} ++inline void OpVersionMap::clear_pair() { ++ pair_.Clear(); ++} ++inline const ::paddle::framework::proto::OpVersionMap_OpVersionPair& OpVersionMap::pair(int index) const { ++ // @@protoc_insertion_point(field_get:paddle.framework.proto.OpVersionMap.pair) ++ return pair_.Get(index); ++} ++inline ::paddle::framework::proto::OpVersionMap_OpVersionPair* OpVersionMap::mutable_pair(int index) { ++ // @@protoc_insertion_point(field_mutable:paddle.framework.proto.OpVersionMap.pair) ++ return pair_.Mutable(index); ++} ++inline ::paddle::framework::proto::OpVersionMap_OpVersionPair* OpVersionMap::add_pair() { ++ // @@protoc_insertion_point(field_add:paddle.framework.proto.OpVersionMap.pair) ++ return pair_.Add(); ++} ++inline ::google::protobuf::RepeatedPtrField< ::paddle::framework::proto::OpVersionMap_OpVersionPair >* ++OpVersionMap::mutable_pair() { ++ // @@protoc_insertion_point(field_mutable_list:paddle.framework.proto.OpVersionMap.pair) ++ return &pair_; ++} ++inline const ::google::protobuf::RepeatedPtrField< ::paddle::framework::proto::OpVersionMap_OpVersionPair >& ++OpVersionMap::pair() const { ++ // @@protoc_insertion_point(field_list:paddle.framework.proto.OpVersionMap.pair) ++ return pair_; ++} ++ ++inline const OpVersionMap* OpVersionMap::internal_default_instance() { ++ return &OpVersionMap_default_instance_.get(); ++} ++// ------------------------------------------------------------------- ++ ++// ProgramDesc ++ ++// repeated .paddle.framework.proto.BlockDesc blocks = 1; ++inline int ProgramDesc::blocks_size() const { ++ return blocks_.size(); ++} ++inline void ProgramDesc::clear_blocks() { ++ blocks_.Clear(); ++} ++inline const ::paddle::framework::proto::BlockDesc& ProgramDesc::blocks(int index) const { ++ // @@protoc_insertion_point(field_get:paddle.framework.proto.ProgramDesc.blocks) ++ return blocks_.Get(index); ++} ++inline ::paddle::framework::proto::BlockDesc* ProgramDesc::mutable_blocks(int index) { ++ // 
@@protoc_insertion_point(field_mutable:paddle.framework.proto.ProgramDesc.blocks) ++ return blocks_.Mutable(index); ++} ++inline ::paddle::framework::proto::BlockDesc* ProgramDesc::add_blocks() { ++ // @@protoc_insertion_point(field_add:paddle.framework.proto.ProgramDesc.blocks) ++ return blocks_.Add(); ++} ++inline ::google::protobuf::RepeatedPtrField< ::paddle::framework::proto::BlockDesc >* ++ProgramDesc::mutable_blocks() { ++ // @@protoc_insertion_point(field_mutable_list:paddle.framework.proto.ProgramDesc.blocks) ++ return &blocks_; ++} ++inline const ::google::protobuf::RepeatedPtrField< ::paddle::framework::proto::BlockDesc >& ++ProgramDesc::blocks() const { ++ // @@protoc_insertion_point(field_list:paddle.framework.proto.ProgramDesc.blocks) ++ return blocks_; ++} ++ ++// optional .paddle.framework.proto.Version version = 4; ++inline bool ProgramDesc::has_version() const { ++ return (_has_bits_[0] & 0x00000002u) != 0; ++} ++inline void ProgramDesc::set_has_version() { ++ _has_bits_[0] |= 0x00000002u; ++} ++inline void ProgramDesc::clear_has_version() { ++ _has_bits_[0] &= ~0x00000002u; ++} ++inline void ProgramDesc::clear_version() { ++ if (version_ != NULL) version_->::paddle::framework::proto::Version::Clear(); ++ clear_has_version(); ++} ++inline const ::paddle::framework::proto::Version& ProgramDesc::version() const { ++ // @@protoc_insertion_point(field_get:paddle.framework.proto.ProgramDesc.version) ++ return version_ != NULL ? *version_ ++ : *::paddle::framework::proto::Version::internal_default_instance(); ++} ++inline ::paddle::framework::proto::Version* ProgramDesc::mutable_version() { ++ set_has_version(); ++ if (version_ == NULL) { ++ version_ = new ::paddle::framework::proto::Version; ++ } ++ // @@protoc_insertion_point(field_mutable:paddle.framework.proto.ProgramDesc.version) ++ return version_; ++} ++inline ::paddle::framework::proto::Version* ProgramDesc::release_version() { ++ // @@protoc_insertion_point(field_release:paddle.framework.proto.ProgramDesc.version) ++ clear_has_version(); ++ ::paddle::framework::proto::Version* temp = version_; ++ version_ = NULL; ++ return temp; ++} ++inline void ProgramDesc::set_allocated_version(::paddle::framework::proto::Version* version) { ++ delete version_; ++ version_ = version; ++ if (version) { ++ set_has_version(); ++ } else { ++ clear_has_version(); ++ } ++ // @@protoc_insertion_point(field_set_allocated:paddle.framework.proto.ProgramDesc.version) ++} ++ ++// optional .paddle.framework.proto.OpVersionMap op_version_map = 5; ++inline bool ProgramDesc::has_op_version_map() const { ++ return (_has_bits_[0] & 0x00000004u) != 0; ++} ++inline void ProgramDesc::set_has_op_version_map() { ++ _has_bits_[0] |= 0x00000004u; ++} ++inline void ProgramDesc::clear_has_op_version_map() { ++ _has_bits_[0] &= ~0x00000004u; ++} ++inline void ProgramDesc::clear_op_version_map() { ++ if (op_version_map_ != NULL) op_version_map_->::paddle::framework::proto::OpVersionMap::Clear(); ++ clear_has_op_version_map(); ++} ++inline const ::paddle::framework::proto::OpVersionMap& ProgramDesc::op_version_map() const { ++ // @@protoc_insertion_point(field_get:paddle.framework.proto.ProgramDesc.op_version_map) ++ return op_version_map_ != NULL ? 
*op_version_map_ ++ : *::paddle::framework::proto::OpVersionMap::internal_default_instance(); ++} ++inline ::paddle::framework::proto::OpVersionMap* ProgramDesc::mutable_op_version_map() { ++ set_has_op_version_map(); ++ if (op_version_map_ == NULL) { ++ op_version_map_ = new ::paddle::framework::proto::OpVersionMap; ++ } ++ // @@protoc_insertion_point(field_mutable:paddle.framework.proto.ProgramDesc.op_version_map) ++ return op_version_map_; ++} ++inline ::paddle::framework::proto::OpVersionMap* ProgramDesc::release_op_version_map() { ++ // @@protoc_insertion_point(field_release:paddle.framework.proto.ProgramDesc.op_version_map) ++ clear_has_op_version_map(); ++ ::paddle::framework::proto::OpVersionMap* temp = op_version_map_; ++ op_version_map_ = NULL; ++ return temp; ++} ++inline void ProgramDesc::set_allocated_op_version_map(::paddle::framework::proto::OpVersionMap* op_version_map) { ++ delete op_version_map_; ++ op_version_map_ = op_version_map; ++ if (op_version_map) { ++ set_has_op_version_map(); ++ } else { ++ clear_has_op_version_map(); ++ } ++ // @@protoc_insertion_point(field_set_allocated:paddle.framework.proto.ProgramDesc.op_version_map) ++} ++ ++inline const ProgramDesc* ProgramDesc::internal_default_instance() { ++ return &ProgramDesc_default_instance_.get(); ++} ++#endif // !PROTOBUF_INLINE_NOT_IN_HEADERS ++// ------------------------------------------------------------------- ++ ++// ------------------------------------------------------------------- ++ ++// ------------------------------------------------------------------- ++ ++// ------------------------------------------------------------------- ++ ++// ------------------------------------------------------------------- ++ ++// ------------------------------------------------------------------- ++ ++// ------------------------------------------------------------------- ++ ++// ------------------------------------------------------------------- ++ ++// ------------------------------------------------------------------- ++ ++// ------------------------------------------------------------------- ++ ++// ------------------------------------------------------------------- ++ ++// ------------------------------------------------------------------- ++ ++// ------------------------------------------------------------------- ++ ++// ------------------------------------------------------------------- ++ ++// ------------------------------------------------------------------- ++ ++// ------------------------------------------------------------------- ++ ++// ------------------------------------------------------------------- ++ ++// ------------------------------------------------------------------- ++ ++ ++// @@protoc_insertion_point(namespace_scope) ++ ++} // namespace proto ++} // namespace framework ++} // namespace paddle ++ ++#ifndef SWIG ++namespace google { ++namespace protobuf { ++ ++template <> struct is_proto_enum< ::paddle::framework::proto::VarType_Type> : ::google::protobuf::internal::true_type {}; ++template <> ++inline const EnumDescriptor* GetEnumDescriptor< ::paddle::framework::proto::VarType_Type>() { ++ return ::paddle::framework::proto::VarType_Type_descriptor(); ++} ++template <> struct is_proto_enum< ::paddle::framework::proto::AttrType> : ::google::protobuf::internal::true_type {}; ++template <> ++inline const EnumDescriptor* GetEnumDescriptor< ::paddle::framework::proto::AttrType>() { ++ return ::paddle::framework::proto::AttrType_descriptor(); ++} ++ ++} // namespace protobuf ++} // namespace google 
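++// A hedged usage sketch of the generated ProgramDesc API declared above
++// (illustrative only; variable names and values are the editor's, not part of
++// the generated header):
++//
++//   paddle::framework::proto::ProgramDesc program;
++//   paddle::framework::proto::BlockDesc* block = program.add_blocks();
++//   block->set_idx(0);
++//   block->set_parent_idx(-1);
++//   paddle::framework::proto::VarDesc* var = block->add_vars();
++//   var->set_name("image");
++//   var->set_persistable(false);
++//   var->mutable_type();  // lazily allocates the required VarType submessage
++//   program.mutable_op_version_map()->add_pair()->set_op_name("conv2d");
++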
++#endif  // SWIG
++
++// @@protoc_insertion_point(global_scope)
++
++#endif  // PROTOBUF_framework_2eproto__INCLUDED
+diff --git a/src/kdkocr/libs/sw64/paddle_inference/paddle/include/paddle_analysis_config.h b/src/kdkocr/libs/sw64/paddle_inference/paddle/include/paddle_analysis_config.h
+new file mode 100755
+index 0000000..e492b32
+--- /dev/null
++++ b/src/kdkocr/libs/sw64/paddle_inference/paddle/include/paddle_analysis_config.h
+@@ -0,0 +1,680 @@
++// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
++//
++// Licensed under the Apache License, Version 2.0 (the "License");
++// you may not use this file except in compliance with the License.
++// You may obtain a copy of the License at
++//
++// http://www.apache.org/licenses/LICENSE-2.0
++//
++// Unless required by applicable law or agreed to in writing, software
++// distributed under the License is distributed on an "AS IS" BASIS,
++// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++// See the License for the specific language governing permissions and
++// limitations under the License.
++
++///
++/// \file paddle_analysis_config.h
++///
++/// \brief Paddle Analysis Config API information
++///
++/// \author paddle-infer@baidu.com
++/// \date 2020-03-20
++/// \since 1.7
++///
++
++#pragma once
++
++#include <cassert>
++#include <map>
++#include <memory>
++#include <string>
++#include <unordered_set>
++#include <utility>
++#include <vector>
++#include "paddle_infer_declare.h"  // NOLINT
++
++/*! \file */
++// Here we include some header files with relative paths, because in
++// deployment the absolute path of this header file may change.
++#include "paddle_api.h"           // NOLINT
++#include "paddle_pass_builder.h"  // NOLINT
++#ifdef PADDLE_WITH_MKLDNN
++#include "paddle_mkldnn_quantizer_config.h"  // NOLINT
++#endif
++
++namespace paddle {
++
++class AnalysisPredictor;
++struct MkldnnQuantizerConfig;
++
++///
++/// \brief configuration manager for AnalysisPredictor.
++/// \since 1.7.0
++///
++/// AnalysisConfig manages configurations of AnalysisPredictor.
++/// During inference procedure, there are many parameters (model/params path,
++/// place of inference, etc.) to be specified, and various optimizations
++/// (subgraph fusion, memory optimization, TensorRT engine, etc.) to be done.
++/// Users can manage these settings by creating and modifying an
++/// AnalysisConfig, and loading it into AnalysisPredictor.
++///
++struct PD_INFER_DECL AnalysisConfig {
++  AnalysisConfig() = default;
++  ///
++  /// \brief Construct a new AnalysisConfig from another
++  /// AnalysisConfig.
++  ///
++  /// \param[in] other another AnalysisConfig
++  ///
++  explicit AnalysisConfig(const AnalysisConfig& other);
++  ///
++  /// \brief Construct a new AnalysisConfig from a non-combined model.
++  ///
++  /// \param[in] model_dir model directory of the non-combined model.
++  ///
++  explicit AnalysisConfig(const std::string& model_dir);
++  ///
++  /// \brief Construct a new AnalysisConfig from a combined model.
++  ///
++  /// \param[in] prog_file model file path of the combined model.
++  /// \param[in] params_file params file path of the combined model.
++  ///
++  explicit AnalysisConfig(const std::string& prog_file,
++                          const std::string& params_file);
++  ///
++  /// \brief Precision of inference in TensorRT.
++  ///
++  enum class Precision {
++    kFloat32 = 0,  ///< fp32
++    kInt8,         ///< int8
++    kHalf,         ///< fp16
++  };
++
++  ///
++  /// \brief Set the non-combined model dir path.
++  ///
++  /// \param model_dir model dir path.
++  ///
++  void SetModel(const std::string& model_dir) { model_dir_ = model_dir; }
++
++  ///
++  /// \brief Set the combined model with two specific paths for program and
++  /// parameters.
++  ///
++  /// \param prog_file_path model file path of the combined model.
++  /// \param params_file_path params file path of the combined model.
++  ///
++  void SetModel(const std::string& prog_file_path,
++                const std::string& params_file_path);
++  ///
++  /// \brief Set the model file path of a combined model.
++  ///
++  /// \param x model file path.
++  ///
++  void SetProgFile(const std::string& x) { prog_file_ = x; }
++  ///
++  /// \brief Set the params file path of a combined model.
++  ///
++  /// \param x params file path.
++  ///
++  void SetParamsFile(const std::string& x) { params_file_ = x; }
++
++  ///
++  /// \brief Set the path of optimization cache directory.
++  ///
++  /// \param opt_cache_dir the path of optimization cache directory.
++  ///
++  void SetOptimCacheDir(const std::string& opt_cache_dir) {
++    opt_cache_dir_ = opt_cache_dir;
++  }
++  ///
++  /// \brief Get the model directory path.
++  ///
++  /// \return const std::string& The model directory path.
++  ///
++  const std::string& model_dir() const { return model_dir_; }
++  ///
++  /// \brief Get the program file path.
++  ///
++  /// \return const std::string& The program file path.
++  ///
++  const std::string& prog_file() const { return prog_file_; }
++  ///
++  /// \brief Get the combined parameters file.
++  ///
++  /// \return const std::string& The combined parameters file.
++  ///
++  const std::string& params_file() const { return params_file_; }
++
++  // Padding related.
++
++  ///
++  /// \brief Turn off FC Padding.
++  ///
++  ///
++  void DisableFCPadding();
++  ///
++  /// \brief A boolean state telling whether fc padding is used.
++  ///
++  /// \return bool Whether fc padding is used.
++  ///
++  bool use_fc_padding() const { return use_fc_padding_; }
++
++  // GPU related.
++
++  ///
++  /// \brief Turn on GPU.
++  ///
++  /// \param memory_pool_init_size_mb initial size of the GPU memory pool in MB.
++  /// \param device_id the GPU card to use (default is 0).
++  ///
++  void EnableUseGpu(uint64_t memory_pool_init_size_mb, int device_id = 0);
++  ///
++  /// \brief Turn off GPU.
++  ///
++  ///
++  void DisableGpu();
++
++  void EnableXpu(int l3_workspace_size = 0xfffc00);
++  ///
++  /// \brief A boolean state telling whether the GPU is turned on.
++  ///
++  /// \return bool Whether the GPU is turned on.
++  ///
++  bool use_gpu() const { return use_gpu_; }
++  ///
++  /// \brief A boolean state telling whether the XPU is turned on.
++  ///
++  /// \return bool Whether the XPU is turned on.
++  ///
++  bool use_xpu() const { return use_xpu_; }
++  ///
++  /// \brief Get the GPU device id.
++  ///
++  /// \return int The GPU device id.
++  ///
++  int gpu_device_id() const { return gpu_device_id_; }
++  ///
++  /// \brief Get the XPU device id.
++  ///
++  /// \return int The XPU device id.
++  ///
++  int xpu_device_id() const { return xpu_device_id_; }
++  ///
++  /// \brief Get the initial size in MB of the GPU memory pool.
++  ///
++  /// \return int The initial size in MB of the GPU memory pool.
++  ///
++  int memory_pool_init_size_mb() const { return memory_pool_init_size_mb_; }
++  ///
++  /// \brief Get the proportion of the initial memory pool size compared to the
++  /// device.
++  ///
++  /// \return float The proportion of the initial memory pool size.
++  ///
++  float fraction_of_gpu_memory_for_pool() const;
++
++  // CUDNN related.
++  ///
++  /// \brief Turn on CUDNN.
++  ///
++  ///
++  void EnableCUDNN();
++  ///
++  /// \brief A boolean state telling whether to use CUDNN.
++  ///
++  /// \return bool Whether to use CUDNN.
++  ///
++  bool cudnn_enabled() const { return use_cudnn_; }
++
++  ///
++  /// \brief Control whether to perform IR graph optimization.
++  /// If turned off, the AnalysisConfig will act just like a NativeConfig.
++  ///
++  /// \param x Whether the ir graph optimization is activated.
++  ///
++  void SwitchIrOptim(int x = true) { enable_ir_optim_ = x; }
++  ///
++  /// \brief A boolean state telling whether the ir graph optimization is
++  /// activated.
++  ///
++  /// \return bool Whether to use ir graph optimization.
++  ///
++  bool ir_optim() const { return enable_ir_optim_; }
++
++  ///
++  /// \brief INTERNAL Determine whether to use the feed and fetch operators.
++  /// Just for internal development, not stable yet.
++  /// When ZeroCopyTensor is used, this should be turned off.
++  ///
++  /// \param x Whether to use the feed and fetch operators.
++  ///
++  void SwitchUseFeedFetchOps(int x = true) { use_feed_fetch_ops_ = x; }
++  ///
++  /// \brief A boolean state telling whether to use the feed and fetch
++  /// operators.
++  ///
++  /// \return bool Whether to use the feed and fetch operators.
++  ///
++  bool use_feed_fetch_ops_enabled() const { return use_feed_fetch_ops_; }
++
++  ///
++  /// \brief Control whether to specify the inputs' names.
++  /// The ZeroCopyTensor type has a name member; assign it with the
++  /// corresponding variable name. This is used only when the input
++  /// ZeroCopyTensors passed to AnalysisPredictor.ZeroCopyRun() cannot
++  /// follow the order in the training phase.
++  ///
++  /// \param x Whether to specify the inputs' names.
++  ///
++  void SwitchSpecifyInputNames(bool x = true) { specify_input_name_ = x; }
++  ///
++  /// \brief A boolean state telling whether the specified input
++  /// ZeroCopyTensor names should be used to reorder the inputs in
++  /// AnalysisPredictor.ZeroCopyRun().
++  ///
++  /// \return bool Whether to specify the inputs' names.
++  ///
++  bool specify_input_name() const { return specify_input_name_; }
++
++  ///
++  /// \brief Turn on the TensorRT engine.
++  /// The TensorRT engine will accelerate some subgraphs in the original Fluid
++  /// computation graph. In some models such as resnet50, GoogleNet and so on,
++  /// it gains significant performance acceleration.
++  ///
++  /// \param workspace_size The memory size (in bytes) used for the TensorRT
++  /// workspace.
++  /// \param max_batch_size The maximum batch size of this prediction task;
++  /// better set as small as possible for less performance loss.
++  /// \param min_subgraph_size The minimum TensorRT subgraph size needed; if a
++  /// subgraph is smaller than this, it will not be transferred to the TensorRT
++  /// engine.
++  /// \param precision The precision used in TensorRT.
++  /// \param use_static Serialize optimization information to disk for reusing.
++  /// \param use_calib_mode Use TRT int8 calibration (post training
++  /// quantization).
++  ///
++  ///
++  void EnableTensorRtEngine(int workspace_size = 1 << 20,
++                            int max_batch_size = 1, int min_subgraph_size = 3,
++                            Precision precision = Precision::kFloat32,
++                            bool use_static = false,
++                            bool use_calib_mode = true);
++  ///
++  /// \brief A boolean state telling whether the TensorRT engine is used.
++  ///
++  /// \return bool Whether the TensorRT engine is used.
++  ///
++  bool tensorrt_engine_enabled() const { return use_tensorrt_; }
++  ///
++  /// \brief Set min, max, opt shape for TensorRT Dynamic shape mode.
++  /// \param min_input_shape The min input shape of the subgraph input.
++  /// \param max_input_shape The max input shape of the subgraph input.
++  /// \param optim_input_shape The opt input shape of the subgraph input.
++  /// \param disable_trt_plugin_fp16 Setting this parameter to true means that
++  /// TRT plugin will not run fp16.
++  ///
++  void SetTRTDynamicShapeInfo(
++      std::map<std::string, std::vector<int>> min_input_shape,
++      std::map<std::string, std::vector<int>> max_input_shape,
++      std::map<std::string, std::vector<int>> optim_input_shape,
++      bool disable_trt_plugin_fp16 = false);
++
++  ///
++  /// \brief Prevent ops running in Paddle-TRT.
++  /// NOTE: just experimental, not an official stable API, easy to be broken.
++  ///
++  void Exp_DisableTensorRtOPs(const std::vector<std::string>& ops);
++
++  ///
++  /// \brief Replace some TensorRT plugins with TensorRT OSS (
++  /// https://github.com/NVIDIA/TensorRT), with which some models' inference
++  /// may achieve higher performance. Libnvinfer_plugin.so greater than
++  /// V7.2.1 is needed.
++  ///
++  void EnableTensorRtOSS();
++
++  ///
++  /// \brief A boolean state telling whether to use the TensorRT OSS.
++  ///
++  /// \return bool Whether to use the TensorRT OSS.
++  ///
++  bool tensorrt_oss_enabled() { return trt_use_oss_; }
++
++  ///
++  /// \brief Enable TensorRT DLA
++  /// \param dla_core ID of DLACore, which should be 0, 1,
++  ///        ..., IBuilder.getNbDLACores() - 1
++  ///
++  void EnableTensorRtDLA(int dla_core = 0);
++
++  ///
++  /// \brief A boolean state telling whether to use the TensorRT DLA.
++  ///
++  /// \return bool Whether to use the TensorRT DLA.
++  ///
++  bool tensorrt_dla_enabled() { return trt_use_dla_; }
++
++  ///
++  /// \brief Turn on the usage of Lite sub-graph engine.
++  ///
++  /// \param precision_mode Precision used in Lite sub-graph engine.
++  /// \param passes_filter Set the passes used in Lite sub-graph engine.
++  /// \param ops_filter Operators not supported by Lite.
++  ///
++  void EnableLiteEngine(
++      AnalysisConfig::Precision precision_mode = Precision::kFloat32,
++      bool zero_copy = false,
++      const std::vector<std::string>& passes_filter = {},
++      const std::vector<std::string>& ops_filter = {});
++
++  ///
++  /// \brief A boolean state indicating whether the Lite sub-graph engine is
++  /// used.
++  ///
++  /// \return bool whether the Lite sub-graph engine is used.
++  ///
++  bool lite_engine_enabled() const { return use_lite_; }
++
++  ///
++  /// \brief Control whether to debug IR graph analysis phase.
++  /// This will generate DOT files for visualizing the computation graph after
++  /// each analysis pass applied.
++  ///
++  /// \param x whether to debug IR graph analysis phase.
++  ///
++  void SwitchIrDebug(int x = true);
++
++  ///
++  /// \brief Turn on MKLDNN.
++  ///
++  ///
++  void EnableMKLDNN();
++  ///
++  /// \brief Set the cache capacity of different input shapes for MKLDNN.
++  /// Default value 0 means not caching any shape.
++  /// Please see MKL-DNN Data Caching Design Document:
++  /// https://github.com/PaddlePaddle/FluidDoc/blob/develop/doc/fluid/design/mkldnn/caching/caching.md
++  ///
++  /// \param capacity The cache capacity.
++  ///
++  void SetMkldnnCacheCapacity(int capacity);
++  ///
++  /// \brief A boolean state telling whether to use the MKLDNN.
++  ///
++  /// \return bool Whether to use the MKLDNN.
++  ///
++  bool mkldnn_enabled() const { return use_mkldnn_; }
++
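++  // A hedged usage sketch for CPU inference with MKLDNN, using only methods
++  // declared in this header (the model path is a placeholder):
++  //
++  //   paddle::AnalysisConfig cfg("/path/to/model_dir");
++  //   cfg.EnableMKLDNN();
++  //   cfg.SetMkldnnCacheCapacity(10);      // cache up to 10 input shapes
++  //   cfg.SetCpuMathLibraryNumThreads(4);  // see the setter below
++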
++  ///
++  /// \brief Set the number of cpu math library threads.
++  ///
++  /// \param cpu_math_library_num_threads The number of cpu math library
++  /// threads.
++  ///
++  void SetCpuMathLibraryNumThreads(int cpu_math_library_num_threads);
++  ///
++  /// \brief An int state telling how many threads are used in the CPU math
++  /// library.
++  ///
++  /// \return int The number of threads used in the CPU math library.
++  ///
++  int cpu_math_library_num_threads() const {
++    return cpu_math_library_num_threads_;
++  }
++
++  ///
++  /// \brief Transform the AnalysisConfig to NativeConfig.
++  ///
++  /// \return NativeConfig The NativeConfig transformed.
++  ///
++  NativeConfig ToNativeConfig() const;
++  ///
++  /// \brief Specify the operator type list to use MKLDNN acceleration.
++  ///
++  /// \param op_list The operator type list.
++  ///
++  void SetMKLDNNOp(std::unordered_set<std::string> op_list) {
++    mkldnn_enabled_op_types_ = op_list;
++  }
++
++  ///
++  /// \brief Turn on MKLDNN quantization.
++  ///
++  ///
++  void EnableMkldnnQuantizer();
++
++  ///
++  /// \brief Turn on MKLDNN bfloat16.
++  ///
++  ///
++  void EnableMkldnnBfloat16();
++
++  ///
++  /// \brief A boolean state telling whether to use the MKLDNN Bfloat16.
++  ///
++  /// \return bool Whether to use the MKLDNN Bfloat16.
++  ///
++  bool mkldnn_bfloat16_enabled() const { return use_mkldnn_bfloat16_; }
++
++  /// \brief Specify the operator type list to use Bfloat16 acceleration.
++  ///
++  /// \param op_list The operator type list.
++  ///
++  void SetBfloat16Op(std::unordered_set<std::string> op_list) {
++    bfloat16_enabled_op_types_ = op_list;
++  }
++
++  ///
++  /// \brief A boolean state telling whether the thread local CUDA stream is
++  /// enabled.
++  ///
++  /// \return bool Whether the thread local CUDA stream is enabled.
++  ///
++  bool thread_local_stream_enabled() const { return thread_local_stream_; }
++
++  ///
++  /// \brief A boolean state telling whether the MKLDNN quantization is
++  /// enabled.
++  ///
++  /// \return bool Whether the MKLDNN quantization is enabled.
++  ///
++  bool mkldnn_quantizer_enabled() const { return use_mkldnn_quantizer_; }
++
++  ///
++  /// \brief Get MKLDNN quantizer config.
++  ///
++  /// \return MkldnnQuantizerConfig* MKLDNN quantizer config.
++  ///
++  MkldnnQuantizerConfig* mkldnn_quantizer_config() const;
++
++  ///
++  /// \brief Specify the memory buffer of program and parameter.
++  /// Used when model and params are loaded directly from memory.
++  ///
++  /// \param prog_buffer The memory buffer of program.
++  /// \param prog_buffer_size The size of the model data.
++  /// \param params_buffer The memory buffer of the combined parameters file.
++  /// \param params_buffer_size The size of the combined parameters data.
++  ///
++  void SetModelBuffer(const char* prog_buffer, size_t prog_buffer_size,
++                      const char* params_buffer, size_t params_buffer_size);
++  ///
++  /// \brief A boolean state telling whether the model is set from the CPU
++  /// memory.
++  ///
++  /// \return bool Whether model and params are loaded directly from memory.
++  ///
++  bool model_from_memory() const { return model_from_memory_; }
++
++  ///
++  /// \brief Turn on memory optimization.
++  /// NOTE: still in development.
++  ///
++  void EnableMemoryOptim();
++  ///
++  /// \brief A boolean state telling whether the memory optimization is
++  /// activated.
++  ///
++  /// \return bool Whether the memory optimization is activated.
++  ///
++  bool enable_memory_optim() const;
++
++  ///
++  /// \brief Turn on profiling report.
++  /// If not turned on, no profiling report will be generated.
++  ///
++  void EnableProfile();
++  ///
++  /// \brief A boolean state telling whether the profiler is activated.
++  ///
++  /// \return bool Whether the profiler is activated.
++  ///
++  bool profile_enabled() const { return with_profile_; }
++
++  ///
++  /// \brief Mute all logs in Paddle inference.
++  ///
++  void DisableGlogInfo();
++  ///
++  /// \brief A boolean state telling whether logs in Paddle inference are
++  /// muted.
++  ///
++  /// \return bool Whether logs in Paddle inference are muted.
++  ///
++  bool glog_info_disabled() const { return !with_glog_info_; }
++
++  ///
++  /// \brief Set the AnalysisConfig to be invalid.
++  /// This is to ensure that an AnalysisConfig can only be used in one
++  /// AnalysisPredictor.
++  ///
++  void SetInValid() const { is_valid_ = false; }
++  ///
++  /// \brief A boolean state telling whether the AnalysisConfig is valid.
++  ///
++  /// \return bool Whether the AnalysisConfig is valid.
++  ///
++  bool is_valid() const { return is_valid_; }
++
++  friend class ::paddle::AnalysisPredictor;
++
++  ///
++  /// \brief Get a pass builder for customizing the passes in the IR analysis
++  /// phase.
++  /// NOTE: Just for developers, not an official API, easy to be broken.
++  ///
++  ///
++  PassStrategy* pass_builder() const;
++
++  ///
++  /// \brief Enable the GPU multi-computing stream feature.
++  /// NOTE: The current behavior of this interface is to bind the computation
++  /// stream to the thread, and this behavior may be changed in the future.
++  ///
++  void EnableGpuMultiStream();
++  void PartiallyRelease();
++
++ protected:
++  // Update the config.
++  void Update();
++
++  std::string SerializeInfoCache();
++
++ protected:
++  // Model paths.
++  std::string model_dir_;
++  mutable std::string prog_file_;
++  mutable std::string params_file_;
++
++  // GPU related.
++  bool use_gpu_{false};
++  int gpu_device_id_{0};
++  int xpu_device_id_{0};
++  uint64_t memory_pool_init_size_mb_{100};  // initial size is 100MB.
++
++  bool use_cudnn_{false};
++
++  // Padding related
++  bool use_fc_padding_{true};
++
++  // TensorRT related.
++  bool use_tensorrt_{false};
++  // For workspace_size, refer it from here:
++  // https://docs.nvidia.com/deeplearning/sdk/tensorrt-developer-guide/index.html#troubleshooting
++  int tensorrt_workspace_size_{1 << 30};
++  // While TensorRT allows an engine optimized for a given max batch size
++  // to run at any smaller size, the performance for those smaller
++  // sizes may not be as well-optimized. Therefore, the max batch size is
++  // best set equal to the runtime batch size.
++  int tensorrt_max_batchsize_{1};
++  // We transform the Ops that can be converted into TRT layer in the model,
++  // and aggregate these Ops into subgraphs for TRT execution.
++  // We set this variable to control the minimum number of nodes in the
++  // subgraph, 3 as default value.
++  int tensorrt_min_subgraph_size_{3};
++  Precision tensorrt_precision_mode_{Precision::kFloat32};
++  bool trt_use_static_engine_{false};
++  bool trt_use_calib_mode_{true};
++  bool trt_use_oss_{false};
++  bool trt_use_dla_{false};
++  int trt_dla_core_{0};
++  std::map<std::string, std::vector<int>> min_input_shape_{};
++  std::map<std::string, std::vector<int>> max_input_shape_{};
++  std::map<std::string, std::vector<int>> optim_input_shape_{};
++  std::vector<std::string> trt_disabled_ops_{};
++  bool disable_trt_plugin_fp16_{false};
++
++  // memory reuse related.
++  bool enable_memory_optim_{false};
++
++  bool use_mkldnn_{false};
++  std::unordered_set<std::string> mkldnn_enabled_op_types_;
++
++  bool model_from_memory_{false};
++
++  bool enable_ir_optim_{true};
++  bool use_feed_fetch_ops_{true};
++  bool ir_debug_{false};
++
++  bool specify_input_name_{false};
++
++  int cpu_math_library_num_threads_{1};
++
++  bool with_profile_{false};
++
++  bool with_glog_info_{true};
++
++  // A runtime cache, shouldn't be transferred to others.
++  std::string serialized_info_cache_;
++
++  mutable std::unique_ptr<PassStrategy> pass_builder_;
++
++  bool use_lite_{false};
++  std::vector<std::string> lite_passes_filter_;
++  std::vector<std::string> lite_ops_filter_;
++  Precision lite_precision_mode_;
++  bool lite_zero_copy_;
++
++  bool thread_local_stream_{false};
++  bool use_xpu_{false};
++  int xpu_l3_workspace_size_;
++
++  // mkldnn related.
++  int mkldnn_cache_capacity_{0};
++  bool use_mkldnn_quantizer_{false};
++  std::shared_ptr<MkldnnQuantizerConfig> mkldnn_quantizer_config_;
++  bool use_mkldnn_bfloat16_{false};
++  std::unordered_set<std::string> bfloat16_enabled_op_types_;
++
++  // If the config is already used on a predictor, it becomes invalid.
++  // Any config can only be used with one predictor.
++  // Variables held by config can take up a lot of memory in some cases.
++  // So we release the memory when the predictor is set up.
++  mutable bool is_valid_{true};
++  std::string opt_cache_dir_;
++};
++
++}  // namespace paddle
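++
++// A hedged end-to-end sketch of driving AnalysisConfig (combined model; the
++// file names are placeholders, and CreatePaddlePredictor is declared in
++// paddle_api.h below):
++//
++//   paddle::AnalysisConfig config("model/__model__", "model/params");
++//   config.EnableUseGpu(100 /* memory_pool_init_size_mb */, 0 /* device_id */);
++//   config.SwitchUseFeedFetchOps(false);  // required for the zero-copy API
++//   auto predictor = paddle::CreatePaddlePredictor(config);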
+diff --git a/src/kdkocr/libs/sw64/paddle_inference/paddle/include/paddle_api.h b/src/kdkocr/libs/sw64/paddle_inference/paddle/include/paddle_api.h
+new file mode 100755
+index 0000000..daba578
+--- /dev/null
++++ b/src/kdkocr/libs/sw64/paddle_inference/paddle/include/paddle_api.h
+@@ -0,0 +1,457 @@
++// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
++//
++// Licensed under the Apache License, Version 2.0 (the "License");
++// you may not use this file except in compliance with the License.
++// You may obtain a copy of the License at
++//
++// http://www.apache.org/licenses/LICENSE-2.0
++//
++// Unless required by applicable law or agreed to in writing, software
++// distributed under the License is distributed on an "AS IS" BASIS,
++// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++// See the License for the specific language governing permissions and
++// limitations under the License.
++#pragma once
++
++/*! \file paddle_api.h
++ */
++
++/*! \mainpage Paddle Inference APIs
++ * \section intro_sec Introduction
++ * The Paddle inference library aims to offer a high-performance inference SDK
++ * for Paddle users.
++ */
++
++#include <cassert>
++#include <map>
++#include <memory>
++#include <string>
++#include <utility>
++#include <vector>
++#include "crypto/cipher.h"
++#include "paddle_infer_declare.h"  // NOLINT
++
++/*! \namespace paddle
++ */
++namespace paddle {
++
++/// \brief Paddle data type.
++enum PaddleDType {
++  FLOAT32,
++  INT64,
++  INT32,
++  UINT8,
++  // TODO(Superjomn) support more data types if needed.
++};
++
++/// \brief Memory manager for PaddleTensor.
++///
++/// The PaddleBuf holds a buffer for data input or output. The memory can be
++/// allocated by the user or by PaddleBuf itself, but in any case, the
++/// PaddleBuf should be reused for better performance.
++///
++/// For user allocated memory, the following API can be used:
++/// - PaddleBuf(void* data, size_t length) to set an external memory by
++/// specifying the memory address and length.
++/// - Reset(void* data, size_t length) to reset the PaddleBuf with an external
++/// memory.
++/// ATTENTION, for user allocated memory, deallocation should be done by the
++/// user externally after the program finishes. The PaddleBuf won't do any
++/// allocation or deallocation.
++///
++/// To have the PaddleBuf allocate and manage the memory:
++/// - PaddleBuf(size_t length) will allocate a memory of size `length`.
++/// - Resize(size_t length) resizes the memory to no less than `length`.
++/// ATTENTION: if the allocated memory is larger than `length`, nothing will
++/// be done.
++///
++/// Usage:
++///
++/// Let PaddleBuf manage the memory internally.
++/// \code{cpp}
++/// const int num_elements = 128;
++/// PaddleBuf buf(num_elements * sizeof(float));
++/// \endcode
++///
++/// Or
++/// \code{cpp}
++/// PaddleBuf buf;
++/// buf.Resize(num_elements * sizeof(float));
++/// \endcode
++/// Works exactly the same.
++///
++/// One can also make the `PaddleBuf` use the external memory.
++/// \code{cpp}
++/// PaddleBuf buf;
++/// void* external_memory = new float[num_elements];
++/// buf.Reset(external_memory, num_elements * sizeof(float));
++/// ...
++/// delete[] external_memory;  // manage the memory lifetime outside.
++/// \endcode
++///
++class PD_INFER_DECL PaddleBuf {
++ public:
++  ///
++  /// \brief PaddleBuf allocates memory internally, and manages it.
++  ///
++  /// \param[in] length The length of data.
++  ///
++  explicit PaddleBuf(size_t length)
++      : data_(new char[length]), length_(length), memory_owned_(true) {}
++  ///
++  /// \brief Set external memory; the PaddleBuf won't manage it.
++  ///
++  /// \param[in] data The start address of the external memory.
++  /// \param[in] length The length of data.
++  ///
++  PaddleBuf(void* data, size_t length)
++      : data_(data), length_(length), memory_owned_{false} {}
++  ///
++  /// \brief Copy only available when memory is managed externally.
++  ///
++  /// \param[in] other another `PaddleBuf`
++  ///
++  explicit PaddleBuf(const PaddleBuf& other);
++  ///
++  /// \brief Resize the memory.
++  ///
++  /// \param[in] length The length of data.
++  ///
++  void Resize(size_t length);
++  ///
++  /// \brief Reset to external memory, with address and length set.
++  ///
++  /// \param[in] data The start address of the external memory.
++  /// \param[in] length The length of data.
++  ///
++  void Reset(void* data, size_t length);
++  ///
++  /// \brief Tell whether the buffer is empty.
++  ///
++  bool empty() const { return length_ == 0; }
++  ///
++  /// \brief Get the data's memory address.
++  ///
++  void* data() const { return data_; }
++  ///
++  /// \brief Get the memory length.
++  ///
++  size_t length() const { return length_; }
++
++  ~PaddleBuf() { Free(); }
++  PaddleBuf& operator=(const PaddleBuf&);
++  PaddleBuf& operator=(PaddleBuf&&);
++  PaddleBuf() = default;
++  PaddleBuf(PaddleBuf&& other);
++
++ private:
++  void Free();
++  void* data_{nullptr};  ///< pointer to the data memory.
++  size_t length_{0};     ///< number of memory bytes.
++  bool memory_owned_{true};
++};
++
++///
++/// \brief Basic input and output data structure for PaddlePredictor.
++///
++struct PD_INFER_DECL PaddleTensor {
++  PaddleTensor() = default;
++  std::string name;  ///< variable name.
++  std::vector<int> shape;
++  PaddleBuf data;  ///< blob of data.
++  PaddleDType dtype;
++  std::vector<std::vector<size_t>> lod;  ///< Tensor+LoD equals LoDTensor
++};
++
++enum class PaddlePlace { kUNK = -1, kCPU, kGPU, kXPU };
++
++/// \brief Represents an n-dimensional array of values.
++/// The ZeroCopyTensor is used to store the input or output of the network.
++/// Zero copy means that the tensor supports direct copy of host or device data
++/// to device, eliminating additional CPU copy. ZeroCopyTensor is only used in
++/// the AnalysisPredictor.
++/// It is obtained through the PaddlePredictor::GetInputTensor()
++/// and PaddlePredictor::GetOutputTensor() interfaces.
++class PD_INFER_DECL ZeroCopyTensor {
++ public:
++  /// \brief Reset the shape of the tensor.
++  /// Generally it's only used for the input tensor.
++  /// Reshape must be called before calling mutable_data() or copy_from_cpu().
++  /// \param shape The shape to set.
++  void Reshape(const std::vector<int>& shape);
++
++  /// \brief Get the memory pointer in CPU or GPU with specific data type.
++  /// Please Reshape the tensor first before calling this.
++  /// It's usually used to get the input data pointer.
++  /// \param place The place of the tensor.
++  template <typename T>
++  T* mutable_data(PaddlePlace place);
++
++  /// \brief Get the memory pointer directly.
++  /// It's usually used to get the output data pointer.
++  /// \param[out] place To get the device type of the tensor.
++  /// \param[out] size To get the data size of the tensor.
++  /// \return The tensor data buffer pointer.
++  template <typename T>
++  T* data(PaddlePlace* place, int* size) const;
++
++  /// \brief Copy the host memory to tensor data.
++  /// It's usually used to set the input tensor data.
++  /// \param data The pointer of the data, from which the tensor will copy.
++  template <typename T>
++  void copy_from_cpu(const T* data);
++
++  /// \brief Copy the tensor data to the host memory.
++  /// It's usually used to get the output tensor data.
++  /// \param[out] data The tensor will copy the data to the address.
++  template <typename T>
++  void copy_to_cpu(T* data);
++
++  /// \brief Return the shape of the Tensor.
++  std::vector<int> shape() const;
++
++  /// \brief Set lod info of the tensor.
++  /// More about LOD can be seen here:
++  /// https://www.paddlepaddle.org.cn/documentation/docs/zh/beginners_guide/basic_concept/lod_tensor.html#lodtensor
++  /// \param x the lod info.
++  void SetLoD(const std::vector<std::vector<size_t>>& x);
++  /// \brief Return the lod info of the tensor.
++  std::vector<std::vector<size_t>> lod() const;
++  /// \brief Return the name of the tensor.
++  const std::string& name() const { return name_; }
++  void SetPlace(PaddlePlace place, int device = -1) {
++    place_ = place;
++    device_ = device;
++  }
++
++  /// \brief Return the data type of the tensor.
++  /// It's usually used to get the output tensor data type.
++  /// \return The data type of the tensor.
++  PaddleDType type() const;
++
++ protected:
++  explicit ZeroCopyTensor(void* scope) : scope_{scope} {}
++  void SetName(const std::string& name) { name_ = name; }
++  void* FindTensor() const;
++
++ private:
++  std::string name_;
++  bool input_or_output_;
++  friend class AnalysisPredictor;
++  void* scope_{nullptr};
++  // The corresponding tensor pointer inside Paddle workspace is cached for
++  // performance.
++  mutable void* tensor_{nullptr};
++  PaddlePlace place_;
++  PaddleDType dtype_;
++  int device_;
++};
++
++/// \brief A Predictor for executing inference on a model.
++/// Base class for AnalysisPredictor and NativePaddlePredictor.
++class PD_INFER_DECL PaddlePredictor {
++ public:
++  struct Config;
++  PaddlePredictor() = default;
++  PaddlePredictor(const PaddlePredictor&) = delete;
++  PaddlePredictor& operator=(const PaddlePredictor&) = delete;
++
++  /// \brief This interface takes input and runs the network.
++
++/// \brief A Predictor for executing inference on a model.
++/// Base class for AnalysisPredictor and NativePaddlePredictor.
++class PD_INFER_DECL PaddlePredictor {
++ public:
++  struct Config;
++  PaddlePredictor() = default;
++  PaddlePredictor(const PaddlePredictor&) = delete;
++  PaddlePredictor& operator=(const PaddlePredictor&) = delete;
++
++  /// \brief This interface takes input and runs the network.
++  /// This operation makes redundant copies of the data, so the ZeroCopyRun
++  /// interface is recommended instead.
++  /// \param[in] inputs A list of PaddleTensor as the input to the network.
++  /// \param[out] output_data Pointer to the tensor list that holds the output
++  /// PaddleTensors.
++  /// \param[in] batch_size This setting has been deprecated and can be ignored.
++  /// \return Whether the run is successful.
++  virtual bool Run(const std::vector<PaddleTensor>& inputs,
++                   std::vector<PaddleTensor>* output_data,
++                   int batch_size = -1) = 0;
++
++  /// \brief Used to get the names of the network inputs.
++  /// Overridden by AnalysisPredictor; only used in ZeroCopy scenarios.
++  /// \return Input tensor names.
++  virtual std::vector<std::string> GetInputNames() { return {}; }
++
++  /// \brief Get the input shapes of the model.
++  /// \return A map containing all the input names and shapes defined in the
++  /// model.
++  virtual std::map<std::string, std::vector<int64_t>> GetInputTensorShape() {
++    return {};
++  }
++
++  /// \brief Used to get the names of the network outputs.
++  /// Overridden by AnalysisPredictor; only used in ZeroCopy scenarios.
++  /// \return Output tensor names.
++  virtual std::vector<std::string> GetOutputNames() { return {}; }
++
++  /// \brief Get the input ZeroCopyTensor by name.
++  /// Overridden by AnalysisPredictor; only used in ZeroCopy scenarios.
++  /// The name is obtained from the GetInputNames() interface.
++  /// \param name The input tensor name.
++  /// \return The corresponding input ZeroCopyTensor.
++  virtual std::unique_ptr<ZeroCopyTensor> GetInputTensor(
++      const std::string& name) {
++    return nullptr;
++  }
++
++  /// \brief Get the output ZeroCopyTensor by name.
++  /// Overridden by AnalysisPredictor; only used in ZeroCopy scenarios.
++  /// The name is obtained from the GetOutputNames() interface.
++  /// \param name The output tensor name.
++  /// \return The corresponding output ZeroCopyTensor.
++  virtual std::unique_ptr<ZeroCopyTensor> GetOutputTensor(
++      const std::string& name) {
++    return nullptr;
++  }
++  /// \brief Run the network with zero-copied inputs and outputs.
++  /// Overridden by AnalysisPredictor and only used in ZeroCopy scenarios.
++  /// This saves the I/O copies used to transfer inputs and outputs to the
++  /// predictor workspace, giving some performance improvement.
++  /// To use it, one should call AnalysisConfig::SwitchUseFeedFetchOp(false)
++  /// and then use `GetInputTensor` and `GetOutputTensor`
++  /// to directly write or read the input/output tensors.
++  /// \return Whether the run is successful.
++  virtual bool ZeroCopyRun() { return false; }
++
++  ///
++  /// \brief Clear the intermediate tensors of the predictor.
++  ///
++  virtual void ClearIntermediateTensor() {}
++
++  ///
++  /// \brief Release all temporary tensors to compress the size of the memory
++  /// pool. The memory pool is considered to be composed of a list of chunks;
++  /// if a chunk is not occupied, it can be released.
++  ///
++  /// \return Number of bytes released. It may be smaller than the actually
++  /// released memory, because part of the memory is not managed by the
++  /// MemoryPool.
++  ///
++  virtual uint64_t TryShrinkMemory() { return 0; }
++
++  /// \brief Clone an existing predictor.
++  /// When using Clone, the same network will be created, and the parameters
++  /// are shared between the instances.
++  /// \return A unique_ptr owning the cloned predictor.
++  virtual std::unique_ptr<PaddlePredictor> Clone() = 0;
++
++  /// \brief Destroy the Predictor.
++  virtual ~PaddlePredictor() = default;
++
++  virtual std::string GetSerializedProgram() const {
++    assert(false);  // Force raise error.
++    return "NotImplemented";
++  }
++
++  /// \brief Base class for NativeConfig and AnalysisConfig.
++  struct Config {
++    std::string model_dir;  /*!< path to the model directory. */
++  };
++};
++
++///
++/// \brief Configuration manager for `NativePredictor`.
++///
++/// `NativeConfig` manages the configuration of `NativePredictor`.
++/// During the inference procedure, there are many parameters
++/// (model/params path, place of inference, etc.).
++///
++struct PD_INFER_DECL NativeConfig : public PaddlePredictor::Config {
++  NativeConfig();
++  /// Device-related fields.
++  bool use_xpu{false};
++  bool use_gpu{false};
++  int device{0};
++  float fraction_of_gpu_memory{
++      -1.f};  ///< Change to a float in (0,1] if needed.
++
++  std::string prog_file;
++  std::string
++      param_file;  ///< Specify the exact paths of the program and parameter
++                   ///< files.
++
++  bool specify_input_name{false};  ///< Specify the variable's name of each
++                                   ///< input if input tensors don't follow the
++                                   ///< `feeds` and `fetches` of the phase
++                                   ///< `save_inference_model`.
++
++  /// Set and get the number of cpu math library threads.
++  void SetCpuMathLibraryNumThreads(int cpu_math_library_num_threads) {
++    cpu_math_library_num_threads_ = cpu_math_library_num_threads;
++  }
++  int cpu_math_library_num_threads() const {
++    return cpu_math_library_num_threads_;
++  }
++
++ protected:
++  int cpu_math_library_num_threads_{1};  ///< number of cpu math library (such
++                                         ///< as MKL, OpenBlas) threads for
++                                         ///< each instance.
++};
++
++///
++/// \brief A factory to help create different predictors.
++///
++/// Usage:
++///
++/// \code{.cpp}
++/// NativeConfig config;
++/// ... // change the configs.
++/// auto native_predictor = CreatePaddlePredictor(config);
++/// \endcode
++///
++/// FOR EXTENSION DEVELOPERS:
++/// Different predictors are designated by the config type. Similar configs
++/// can be merged, but there shouldn't be a huge config containing different
++/// fields for more than one kind of predictor.
++///
++template <typename ConfigT>
++std::unique_ptr<PaddlePredictor> CreatePaddlePredictor(const ConfigT& config);
++
++struct AnalysisConfig;
++struct NativeConfig;
++struct DemoConfig;
++
++template <>
++PD_INFER_DECL std::unique_ptr<PaddlePredictor>
++CreatePaddlePredictor<AnalysisConfig>(const AnalysisConfig& config);
++
++template <>
++PD_INFER_DECL std::unique_ptr<PaddlePredictor>
++CreatePaddlePredictor<NativeConfig>(const NativeConfig& config);
++
++template <>
++PD_INFER_DECL std::unique_ptr<PaddlePredictor>
++CreatePaddlePredictor<DemoConfig>(const DemoConfig& config);
++
++/// NOTE: The following APIs are too trivial; we will discard them in future
++/// versions.
++///
++enum class PaddleEngineKind {
++  kNative = 0,         ///< Use the native Fluid facility.
++  kAutoMixedTensorRT,  ///< Automatically mix Fluid with TensorRT.
++  kAnalysis,           ///< More optimization.
++};
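++
++/// A minimal end-to-end sketch with the legacy PaddleTensor API (illustrative
++/// only; the model directory is an assumption):
++/// \code{cpp}
++/// NativeConfig config;
++/// config.model_dir = "./mobilenet";  // hypothetical model directory
++/// auto predictor = CreatePaddlePredictor<NativeConfig>(config);
++/// std::vector<PaddleTensor> inputs = {t};  // t prepared as in the
++///                                          // PaddleTensor example above
++/// std::vector<PaddleTensor> outputs;
++/// bool ok = predictor->Run(inputs, &outputs);
++/// \endcode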
++
++template <typename ConfigT, PaddleEngineKind engine>
++PD_INFER_DECL std::unique_ptr<PaddlePredictor> CreatePaddlePredictor(
++    const ConfigT& config);
++
++template <>
++PD_INFER_DECL std::unique_ptr<PaddlePredictor> CreatePaddlePredictor<
++    NativeConfig, PaddleEngineKind::kNative>(const NativeConfig& config);
++
++template <>
++PD_INFER_DECL std::unique_ptr<PaddlePredictor> CreatePaddlePredictor<
++    AnalysisConfig, PaddleEngineKind::kAnalysis>(const AnalysisConfig& config);
++
++PD_INFER_DECL int PaddleDtypeSize(PaddleDType dtype);
++
++PD_INFER_DECL std::string get_version();
++
++PD_INFER_DECL std::string UpdateDllFlag(const char* name, const char* value);
++
++PD_INFER_DECL std::shared_ptr<framework::Cipher> MakeCipher(
++    const std::string& config_file);
++
++}  // namespace paddle
+diff --git a/src/kdkocr/libs/sw64/paddle_inference/paddle/include/paddle_infer_declare.h b/src/kdkocr/libs/sw64/paddle_inference/paddle/include/paddle_infer_declare.h
+new file mode 100755
+index 0000000..e8525f4
+--- /dev/null
++++ b/src/kdkocr/libs/sw64/paddle_inference/paddle/include/paddle_infer_declare.h
+@@ -0,0 +1,27 @@
++// Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
++//
++// Licensed under the Apache License, Version 2.0 (the "License");
++// you may not use this file except in compliance with the License.
++// You may obtain a copy of the License at
++//
++//     http://www.apache.org/licenses/LICENSE-2.0
++//
++// Unless required by applicable law or agreed to in writing, software
++// distributed under the License is distributed on an "AS IS" BASIS,
++// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++// See the License for the specific language governing permissions and
++// limitations under the License.
++
++#pragma once
++
++#if defined(_WIN32)
++#ifndef PD_INFER_DECL
++#ifdef PADDLE_DLL_INFERENCE
++#define PD_INFER_DECL __declspec(dllexport)
++#else
++#define PD_INFER_DECL __declspec(dllimport)
++#endif  // PADDLE_DLL_INFERENCE
++#endif  // PD_INFER_DECL
++#else
++#define PD_INFER_DECL __attribute__((visibility("default")))
++#endif  // _WIN32
+diff --git a/src/kdkocr/libs/sw64/paddle_inference/paddle/include/paddle_inference_api.h b/src/kdkocr/libs/sw64/paddle_inference/paddle/include/paddle_inference_api.h
+new file mode 100755
+index 0000000..2e1e3b8
+--- /dev/null
++++ b/src/kdkocr/libs/sw64/paddle_inference/paddle/include/paddle_inference_api.h
+@@ -0,0 +1,311 @@
++/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
++
++Licensed under the Apache License, Version 2.0 (the "License");
++you may not use this file except in compliance with the License.
++You may obtain a copy of the License at
++
++http://www.apache.org/licenses/LICENSE-2.0
++
++Unless required by applicable law or agreed to in writing, software
++distributed under the License is distributed on an "AS IS" BASIS,
++WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++See the License for the specific language governing permissions and
++limitations under the License. */
++
++/*
++ * This file contains the definition of a simple Inference API for Paddle.
++ *
++ * ATTENTION: It requires some C++11 features; for lower-version C++ or C, we
++ * might release another API.
++ */
++
++#pragma once
++
++#include <cassert>
++#include <map>
++#include <memory>
++#include <string>
++#include <utility>
++#include <vector>
++
++#include "paddle_analysis_config.h"  // NOLINT
++#include "paddle_api.h"              // NOLINT
++
++///
++/// \file paddle_inference_api.h
++///
++/// \brief Paddle Inference API
++///
++/// \author paddle-infer@baidu.com
++/// \date 2020-09-01
++/// \since 2.0.0-beta
++///
++
++namespace paddle_infer {
++using DataType = paddle::PaddleDType;
++using PlaceType = paddle::PaddlePlace;
++using PrecisionType = paddle::AnalysisConfig::Precision;
++using Config = paddle::AnalysisConfig;
++
++///
++/// \class Tensor
++///
++/// \brief Represents an n-dimensional array of values.
++/// The Tensor is used to store the input or output of the network.
++/// It is obtained through the Predictor::GetInputHandle()
++/// and Predictor::GetOutputHandle() interfaces.
++///
++class PD_INFER_DECL Tensor {
++ public:
++  // Can only be created by predictor->GetInputHandle(const std::string& name)
++  // or predictor->GetOutputHandle(const std::string& name)
++  Tensor() = delete;
++  explicit Tensor(std::unique_ptr<paddle::ZeroCopyTensor>&& tensor)
++      : tensor_(std::move(tensor)) {}
++
++  ///
++  /// \brief Reset the shape of the tensor.
++  /// Generally it's only used for the input tensor.
++  /// Reshape must be called before calling mutable_data() or CopyFromCpu().
++  /// \param shape The shape to set.
++  ///
++  void Reshape(const std::vector<int>& shape);
++
++  ///
++  /// \brief Copy the host memory to the tensor data.
++  /// It's usually used to set the input tensor data.
++  /// \param data The pointer of the data, from which the tensor will copy.
++  ///
++  template <typename T>
++  void CopyFromCpu(const T* data);
++
++  ///
++  /// \brief Get the memory pointer in CPU or GPU with the specific data type.
++  /// Please Reshape the tensor before calling this.
++  /// It's usually used to get the input data pointer.
++  /// \param place The place of the tensor.
++  /// \return The tensor data buffer pointer.
++  ///
++  template <typename T>
++  T* mutable_data(PlaceType place);
++
++  ///
++  /// \brief Copy the tensor data to the host memory.
++  /// It's usually used to get the output tensor data.
++  /// \param[out] data The tensor will copy the data to the address.
++  ///
++  template <typename T>
++  void CopyToCpu(T* data);
++
++  ///
++  /// \brief Get the memory pointer directly.
++  /// It's usually used to get the output data pointer.
++  /// \param[out] place To get the device type of the tensor.
++  /// \param[out] size To get the data size of the tensor.
++  /// \return The tensor data buffer pointer.
++  ///
++  template <typename T>
++  T* data(PlaceType* place, int* size) const;
++
++  ///
++  /// \brief Set the LoD info of the tensor.
++  /// More about LoD can be seen here:
++  /// https://www.paddlepaddle.org.cn/documentation/docs/zh/beginners_guide/basic_concept/lod_tensor.html#lodtensor
++  /// \param x the LoD info.
++  ///
++  void SetLoD(const std::vector<std::vector<size_t>>& x);
++
++  /// \brief Return the LoD info of the tensor.
++  std::vector<std::vector<size_t>> lod() const;
++
++  /// \brief Return the data type of the tensor.
++  /// It's usually used to get the output tensor data type.
++  /// \return The data type of the tensor.
++  DataType type() const;
++
++  /// \brief Return the shape of the Tensor.
++  std::vector<int> shape() const;
++
++  /// \brief Return the name of the tensor.
++  const std::string& name() const;
++
++ private:
++  std::unique_ptr<paddle::ZeroCopyTensor> tensor_;
++};
++
++///
++/// \class Predictor
++///
++/// \brief Predictor is the interface for model prediction.
++///
++/// The predictor has the following typical uses:
++///
++/// Get the predictor
++/// \code{cpp}
++/// auto predictor = CreatePredictor(config);
++/// \endcode
++///
++/// Get input or output names
++/// \code{cpp}
++/// auto input_names = predictor->GetInputNames();
++/// auto output_names = predictor->GetOutputNames();
++/// \endcode
++///
++/// Get input or output handles
++/// \code{cpp}
++/// auto input_t = predictor->GetInputHandle(input_names[0]);
++/// auto output_t = predictor->GetOutputHandle(output_names[0]);
++/// \endcode
++///
++/// Run the predictor
++/// \code{cpp}
++/// predictor->Run();
++/// \endcode
++///
++class PD_INFER_DECL Predictor {
++ public:
++  Predictor() = delete;
++  ~Predictor() {}
++  // Used for Clone.
++  explicit Predictor(std::unique_ptr<paddle::PaddlePredictor>&& pred)
++      : predictor_(std::move(pred)) {}
++
++  ///
++  /// \brief Construct a new Predictor object
++  ///
++  /// \param[in] config the Config object
++  ///
++  explicit Predictor(const Config& config);
++
++  ///
++  /// \brief Get the input names
++  ///
++  /// \return input names
++  ///
++  std::vector<std::string> GetInputNames();
++
++  ///
++  /// \brief Get the input Tensor object
++  ///
++  /// \param[in] name input name
++  /// \return input tensor
++  ///
++  std::unique_ptr<Tensor> GetInputHandle(const std::string& name);
++
++  ///
++  /// \brief Run the prediction engine
++  ///
++  /// \return Whether the function executed successfully
++  ///
++  bool Run();
++
++  ///
++  /// \brief Get the output names
++  ///
++  /// \return output names
++  ///
++  std::vector<std::string> GetOutputNames();
++
++  ///
++  /// \brief Get the output Tensor object
++  ///
++  /// \param[in] name output name
++  /// \return output tensor
++  ///
++  std::unique_ptr<Tensor> GetOutputHandle(const std::string& name);
++
++  ///
++  /// \brief Clone to get a new predictor. Thread safe.
++  ///
++  /// \return a new predictor
++  ///
++  std::unique_ptr<Predictor> Clone();
++
++  /// \brief Clear the intermediate tensors of the predictor
++  void ClearIntermediateTensor();
++
++  ///
++  /// \brief Release all temporary tensors to compress the size of the memory
++  /// pool. The memory pool is considered to be composed of a list of chunks;
++  /// if a chunk is not occupied, it can be released.
++  ///
++  /// \return Number of bytes released. It may be smaller than the actually
++  /// released memory, because part of the memory is not managed by the
++  /// MemoryPool.
++  ///
++  uint64_t TryShrinkMemory();
++
++ private:
++  std::unique_ptr<paddle::PaddlePredictor> predictor_;
++};
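++
++/// A minimal single-threaded inference sketch with the paddle_infer API
++/// (illustrative only; the model file names and the shape are assumptions):
++/// \code{cpp}
++/// Config config;
++/// config.SetModel("model.pdmodel", "model.pdiparams");  // hypothetical paths
++/// auto predictor = CreatePredictor(config);
++/// auto input = predictor->GetInputHandle(predictor->GetInputNames()[0]);
++/// input->Reshape({1, 3, 224, 224});
++/// std::vector<float> in(1 * 3 * 224 * 224, 0.f);
++/// input->CopyFromCpu(in.data());
++/// predictor->Run();
++/// auto output = predictor->GetOutputHandle(predictor->GetOutputNames()[0]);
++/// \endcode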
++
++///
++/// \brief A factory to help create predictors.
++///
++/// Usage:
++///
++/// \code{.cpp}
++/// Config config;
++/// ... // change the configs.
++/// auto predictor = CreatePredictor(config);
++/// \endcode
++///
++PD_INFER_DECL std::shared_ptr<Predictor> CreatePredictor(
++    const Config& config);  // NOLINT
++
++PD_INFER_DECL int GetNumBytesOfDataType(DataType dtype);
++
++PD_INFER_DECL std::string GetVersion();
++PD_INFER_DECL std::string UpdateDllFlag(const char* name, const char* value);
++
++template <typename T>
++void Tensor::CopyFromCpu(const T* data) {
++  tensor_->copy_from_cpu(data);
++}
++
++template <typename T>
++void Tensor::CopyToCpu(T* data) {
++  return tensor_->copy_to_cpu(data);
++}
++
++template <typename T>
++T* Tensor::mutable_data(PlaceType place) {
++  return tensor_->mutable_data<T>(place);
++}
++
++template <typename T>
++T* Tensor::data(PlaceType* place, int* size) const {
++  return tensor_->data<T>(place, size);
++}
++
++}  // namespace paddle_infer
++
++namespace paddle_infer {
++namespace services {
++
++///
++/// \class PredictorPool
++///
++/// \brief PredictorPool is a simple encapsulation of Predictor, suitable for
++/// use in multi-threaded situations. According to the thread id, the
++/// corresponding Predictor is taken out of the PredictorPool to complete the
++/// prediction.
++///
++class PD_INFER_DECL PredictorPool {
++ public:
++  PredictorPool() = delete;
++  PredictorPool(const PredictorPool&) = delete;
++  PredictorPool& operator=(const PredictorPool&) = delete;
++
++  /// \brief Construct the predictor pool with \param size predictor instances.
++  explicit PredictorPool(const Config& config, size_t size = 1);
++
++  /// \brief Get the \param idx-th predictor.
++  Predictor* Retrive(size_t idx);
++
++ private:
++  std::shared_ptr<Predictor> main_pred_;
++  std::vector<std::unique_ptr<Predictor>> preds_;
++};
++}  // namespace services
++}  // namespace paddle_infer
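
A brief usage sketch of the PredictorPool wrapper above (illustrative only; the pool size and the per-thread index `i` are assumptions):

    // One predictor instance per worker thread, created once up front.
    paddle_infer::services::PredictorPool pool(config, 4);
    // In worker thread i, use a dedicated predictor instance:
    paddle_infer::Predictor* p = pool.Retrive(i);
    p->Run();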
+diff --git a/src/kdkocr/libs/sw64/paddle_inference/paddle/include/paddle_mkldnn_quantizer_config.h b/src/kdkocr/libs/sw64/paddle_inference/paddle/include/paddle_mkldnn_quantizer_config.h
+new file mode 100755
+index 0000000..8bad8f3
+--- /dev/null
++++ b/src/kdkocr/libs/sw64/paddle_inference/paddle/include/paddle_mkldnn_quantizer_config.h
+@@ -0,0 +1,199 @@
++// Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
++//
++// Licensed under the Apache License, Version 2.0 (the "License");
++// you may not use this file except in compliance with the License.
++// You may obtain a copy of the License at
++//
++// http://www.apache.org/licenses/LICENSE-2.0
++//
++// Unless required by applicable law or agreed to in writing, software
++// distributed under the License is distributed on an "AS IS" BASIS,
++// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++// See the License for the specific language governing permissions and
++// limitations under the License.
++
++///
++/// \file paddle_mkldnn_quantizer_config.h
++///
++/// \brief MKL-DNN quantizer config.
++///
++/// \author paddle-infer@baidu.com
++/// \date 2020-01-01
++/// \since 1.7.0
++///
++
++#pragma once
++
++#include <cassert>
++#include <map>
++#include <memory>
++#include <string>
++#include <unordered_set>
++#include <vector>
++
++#include "paddle_api.h"            // NOLINT
++#include "paddle_infer_declare.h"  // NOLINT
++
++namespace paddle {
++
++///
++/// \brief Algorithms for finding the scale of quantized Tensors.
++///
++enum class ScaleAlgo {
++  NONE,      ///< Do not compute scale
++  MAX,       ///< Find scale based on the max absolute value
++  MAX_CH,    ///< Find scale based on the max absolute value per output channel
++  MAX_CH_T,  ///< Find scale based on the max absolute value per output channel
++             ///< of a transposed tensor
++  KL,        ///< Find scale based on KL Divergence
++};
++
++///
++/// \class MkldnnQuantizerConfig
++///
++/// \brief Config for mkldnn quantization.
++///
++/// The MkldnnQuantizerConfig is used to configure MKL-DNN's quantization
++/// parameters, including the scale algorithm, warmup data, warmup batch size,
++/// quantized op list, etc.
++///
++/// It is not recommended to use this config directly; please refer to
++/// AnalysisConfig::mkldnn_quantizer_config().
++///
++struct PD_INFER_DECL MkldnnQuantizerConfig {
++  ///
++  /// \brief Construct a new MkldnnQuantizerConfig object
++  ///
++  MkldnnQuantizerConfig();
++
++  ///
++  /// \brief Set the scale algo
++  ///
++  /// Specify a quantization algorithm for a connection (input/output) of the
++  /// operator type.
++  /// \param[in] op_type_name the operator's name.
++  /// \param[in] conn_name name of the connection (input/output) of the
++  /// operator.
++  /// \param[in] algo the algorithm for computing scale.
++  ///
++  void SetScaleAlgo(std::string op_type_name, std::string conn_name,
++                    ScaleAlgo algo) {
++    rules_[op_type_name][conn_name] = algo;
++  }
++
++  ///
++  /// \brief Get the scale algo
++  ///
++  /// Get the quantization algorithm for a connection (input/output) of the
++  /// operator type.
++  ///
++  /// \param[in] op_type_name the operator's name.
++  /// \param[in] conn_name name of the connection (input/output) of the
++  /// operator.
++  /// \return the scale algo.
++  ///
++  ScaleAlgo scale_algo(const std::string& op_type_name,
++                       const std::string& conn_name) const;
++
++  ///
++  /// \brief Set the warmup data
++  ///
++  /// Set the batch of data to be used for the warm-up iteration.
++  ///
++  /// \param[in] data batch of data.
++  ///
++  void SetWarmupData(std::shared_ptr<std::vector<PaddleTensor>> data) {
++    warmup_data_ = data;
++  }
++
++  ///
++  /// \brief Get the warmup data
++  ///
++  /// Get the batch of data used for the warm-up iteration.
++  ///
++  /// \return the warm-up data
++  ///
++  std::shared_ptr<std::vector<PaddleTensor>> warmup_data() const {
++    return warmup_data_;
++  }
++
++  ///
++  /// \brief Set the warmup batch size
++  ///
++  /// Set the batch size for the warm-up iteration.
++  ///
++  /// \param[in] batch_size warm-up batch size
++  ///
++  void SetWarmupBatchSize(int batch_size) { warmup_bs_ = batch_size; }
++
++  ///
++  /// \brief Get the warmup batch size
++  ///
++  /// Get the batch size for the warm-up iteration.
++  ///
++  /// \return the warm-up batch size
++  int warmup_batch_size() const { return warmup_bs_; }
++
++  ///
++  /// \brief Set the quantized op list
++  ///
++  /// In the quantization process, set the list of ops that support
++  /// quantization.
++  ///
++  /// \param[in] op_list List of quantized ops
++  ///
++  void SetEnabledOpTypes(std::unordered_set<std::string> op_list) {
++    enabled_op_types_ = op_list;
++  }
++
++  ///
++  /// \brief Get the quantized op list
++  ///
++  /// \return list of quantized ops
++  ///
++  const std::unordered_set<std::string>& enabled_op_types() const {
++    return enabled_op_types_;
++  }
++
++  ///
++  /// \brief Set the excluded op ids
++  ///
++  /// \param[in] op_ids_list excluded op ids
++  ///
++  void SetExcludedOpIds(std::unordered_set<int> op_ids_list) {
++    excluded_op_ids_ = op_ids_list;
++  }
++
++  ///
++  /// \brief Get the excluded op ids
++  ///
++  /// \return excluded op ids
++  ///
++  const std::unordered_set<int>& excluded_op_ids() const {
++    return excluded_op_ids_;
++  }
++
++  ///
++  /// \brief Set the default scale algorithm
++  ///
++  /// \param[in] algo Method for calculating scale in the quantization process
++  ///
++  void SetDefaultScaleAlgo(ScaleAlgo algo) { default_scale_algo_ = algo; }
++
++  ///
++  /// \brief Get the default scale algorithm
++  ///
++  /// \return Method for calculating scale in the quantization process
++  ///
++  ScaleAlgo default_scale_algo() const { return default_scale_algo_; }
++
++ protected:
++  std::map<std::string, std::map<std::string, ScaleAlgo>> rules_;
++  std::unordered_set<std::string> enabled_op_types_;
++  std::unordered_set<int> excluded_op_ids_;
++  std::shared_ptr<std::vector<PaddleTensor>> warmup_data_;
++  int warmup_bs_{1};
++  ScaleAlgo default_scale_algo_{ScaleAlgo::MAX};
++};
++
++}  // namespace paddle
+diff --git a/src/kdkocr/libs/sw64/paddle_inference/paddle/include/paddle_pass_builder.h b/src/kdkocr/libs/sw64/paddle_inference/paddle/include/paddle_pass_builder.h
+new file mode 100755
+index 0000000..a725eba
+--- /dev/null
++++ b/src/kdkocr/libs/sw64/paddle_inference/paddle/include/paddle_pass_builder.h
+@@ -0,0 +1,248 @@
++// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
++//
++// Licensed under the Apache License, Version 2.0 (the "License");
++// you may not use this file except in compliance with the License.
++// You may obtain a copy of the License at
++//
++// http://www.apache.org/licenses/LICENSE-2.0
++//
++// Unless required by applicable law or agreed to in writing, software
++// distributed under the License is distributed on an "AS IS" BASIS,
++// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++// See the License for the specific language governing permissions and
++// limitations under the License.
++
++#pragma once
++
++#include <memory>
++#include <string>
++#include <vector>
++
++#include "paddle_infer_declare.h"  // NOLINT
++
++///
++/// \file paddle_pass_builder.h
++///
++/// \brief Class PaddlePassBuilder and its subclasses (pass strategies).
++/// \section sec_intro Introduction
++/// This class aims to build passes for paddle and define passes' strategies.
++///
++/// \author paddle-infer@baidu.com
++/// \date 2020-3-23
++/// \since 1.7
++
++/// \namespace paddle
++namespace paddle {
++
++/// \class PaddlePassBuilder
++/// \brief This class builds passes based on a vector input. It is part of the
++/// inference API. Users can build passes, insert new passes, and delete passes
++/// using this class and its functions.
++///
++/// Example Usage:
++/// Build a new pass.
++/// \code{cpp}
++/// const std::vector<std::string> passes(1, "conv_relu_mkldnn_fuse_pass");
++/// PaddlePassBuilder builder(passes);
++/// \endcode
++class PD_INFER_DECL PaddlePassBuilder {
++ public:
++  /// \brief Constructor of the class. It stores the input passes.
++  /// \param[in] passes passes' types.
++  explicit PaddlePassBuilder(const std::vector<std::string> &passes)
++      : passes_(passes) {}
++
++  /// \brief Stores the input passes.
++  /// \param[in] passes passes' types.
++  void SetPasses(std::initializer_list<std::string> passes) {
++    passes_ = passes;
++  }
++
++  /// \brief Append a pass to the end of the passes.
++  /// \param[in] pass_type the type of the new pass.
++  void AppendPass(const std::string &pass_type);
++
++  /// \brief Insert a pass at a specific position.
++  /// \param[in] idx the position to insert at.
++  /// \param[in] pass_type the type of the inserted pass.
++  void InsertPass(size_t idx, const std::string &pass_type);
++
++  /// \brief Delete the pass at a certain position 'idx'.
++  /// \param[in] idx the position to delete.
++  void DeletePass(size_t idx);
++
++  /// \brief Delete all passes that have a certain type 'pass_type'.
++  /// \param[in] pass_type the pass type to be deleted.
++  void DeletePass(const std::string &pass_type);
++
++  /// \brief Delete all the passes.
++  void ClearPasses();
++
++  /// \brief Append an analysis pass.
++  /// \param[in] pass the type of the new analysis pass.
++  void AppendAnalysisPass(const std::string &pass);
++
++  /// \brief Visualize the computation graph after each pass by generating a
++  /// DOT language file; one can draw them with the Graphviz toolkit.
++  void TurnOnDebug();
++  /// \brief Human-readable information about the passes.
++  std::string DebugString();
++
++  /// \brief Get information about the passes.
++  /// \return Return the list of passes.
++  const std::vector<std::string> &AllPasses() const { return passes_; }
++
++  /// \brief Get information about the analysis passes.
++  /// \return Return the list of analysis passes.
++  std::vector<std::string> AnalysisPasses() const {
++    auto passes = analysis_passes_;
++    // Make sure ir_graph_to_program is the last pass so that any
++    // modification of the IR will persist to the program.
++    passes.push_back("ir_graph_to_program_pass");
++    return passes;
++  }
++
++ protected:
++  /// \cond Protected
++  std::vector<std::string> analysis_passes_{
++      {"ir_graph_build_pass", "ir_graph_clean_pass", "ir_analysis_pass",
++       "ir_params_sync_among_devices_pass", "adjust_cudnn_workspace_size_pass",
++       "inference_op_replace_pass"}};
++  std::vector<std::string> passes_;
++  /// \endcond
++};
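++
++/// A minimal sketch of tweaking a pass list (illustrative only; the pass
++/// names are examples, not a recommendation):
++/// \code{cpp}
++/// PaddlePassBuilder builder({"conv_bn_fuse_pass"});
++/// builder.AppendPass("fc_fuse_pass");
++/// builder.DeletePass("conv_bn_fuse_pass");
++/// std::string info = builder.DebugString();
++/// \endcode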
++
++/// \class PassStrategy
++/// \brief This class defines the pass strategies, like whether to use
++/// gpu/cuDNN kernels/MKLDNN.
++class PD_INFER_DECL PassStrategy : public PaddlePassBuilder {
++ public:
++  /// \brief Constructor of the PassStrategy class. It works the same as the
++  /// PaddlePassBuilder class.
++  /// \param[in] passes passes' types.
++  explicit PassStrategy(const std::vector<std::string> &passes)
++      : PaddlePassBuilder(passes) {}
++
++  /// \brief Enable the use of cuDNN kernels.
++  virtual void EnableCUDNN() {}
++
++  /// \brief Enable the use of MKLDNN.
++  /// The MKLDNN control exists in both CPU and GPU mode, because there can
++  /// still be some CPU kernels running in GPU mode.
++  virtual void EnableMKLDNN() {}
++
++  /// \brief Enable MKLDNN quantize optimization.
++  virtual void EnableMkldnnQuantizer() {}
++
++  /// \brief Enable MKLDNN bfloat16.
++  virtual void EnableMkldnnBfloat16() {}
++
++  /// \brief Check if we are using the gpu.
++  /// \return A bool variable implying whether we are in gpu mode.
++  bool use_gpu() const { return use_gpu_; }
++
++  /// \brief Check if we are using the xpu.
++  /// \return A bool variable implying whether we are in xpu mode.
++  bool use_xpu() const { return use_xpu_; }
++
++  /// \brief Default destructor.
++  virtual ~PassStrategy() = default;
++
++ protected:
++  /// \cond Protected
++  bool use_xpu_{false};
++  bool use_gpu_{false};
++  bool use_mkldnn_{false};
++  /// \endcond
++};
++
++/// \class CpuPassStrategy
++/// \brief The CPU passes controller. It is used in AnalysisPredictor with CPU
++/// mode.
++class PD_INFER_DECL CpuPassStrategy : public PassStrategy {
++ public:
++  /// \brief Default constructor of CpuPassStrategy.
++  CpuPassStrategy();
++
++  /// \brief Construct by copying another CpuPassStrategy object.
++  /// \param[in] other The CpuPassStrategy object we want to copy.
++  explicit CpuPassStrategy(const CpuPassStrategy &other)
++      : PassStrategy(other.AllPasses()) {
++    use_gpu_ = other.use_gpu_;
++    use_mkldnn_ = other.use_mkldnn_;
++    use_mkldnn_quantizer_ = other.use_mkldnn_quantizer_;
++    use_mkldnn_bfloat16_ = other.use_mkldnn_bfloat16_;
++  }
++  /// \brief Default destructor.
++  virtual ~CpuPassStrategy() = default;
++
++  /// \brief Enable the use of cuDNN kernels.
++  void EnableCUDNN() override;
++
++  /// \brief Enable the use of MKLDNN.
++  void EnableMKLDNN() override;
++
++  /// \brief Enable MKLDNN quantize optimization.
++  void EnableMkldnnQuantizer() override;
++
++  /// \brief Enable MKLDNN bfloat16.
++  void EnableMkldnnBfloat16() override;
++
++ protected:
++  /// \cond Protected
++  bool use_mkldnn_quantizer_{false};
++  bool use_mkldnn_bfloat16_{false};
++  /// \endcond
++};
++
++/// \class GpuPassStrategy
++/// \brief The GPU passes controller. It is used in AnalysisPredictor with GPU
++/// mode.
++class PD_INFER_DECL GpuPassStrategy : public PassStrategy {
++ public:
++  /// \brief Default constructor of GpuPassStrategy.
++  GpuPassStrategy();
++
++  /// \brief Construct by copying another GpuPassStrategy object.
++  /// \param[in] other The GpuPassStrategy object we want to copy.
++  explicit GpuPassStrategy(const GpuPassStrategy &other)
++      : PassStrategy(other.AllPasses()) {
++    use_gpu_ = true;
++    use_cudnn_ = other.use_cudnn_;
++  }
++
++  /// \brief Enable the use of cuDNN kernels.
++  void EnableCUDNN() override;
++
++  /// \brief Not supported in GPU mode yet.
++  void EnableMKLDNN() override;
++
++  /// \brief Not supported in GPU mode yet.
++  void EnableMkldnnQuantizer() override;
++
++  /// \brief Not supported in GPU mode yet.
++  void EnableMkldnnBfloat16() override;
++
++  /// \brief Default destructor.
++  virtual ~GpuPassStrategy() = default;
++
++ protected:
++  /// \cond Protected
++  bool use_cudnn_{false};
++  /// \endcond
++};
++
++/// \class XpuPassStrategy
++/// \brief The XPU passes controller. It is used in AnalysisPredictor with XPU
++/// mode.
++class PD_INFER_DECL XpuPassStrategy final : public PassStrategy {
++ public:
++  XpuPassStrategy() : PassStrategy({}) {}
++};
++
++/// \brief List of TensorRT subgraph passes.
++PD_INFER_DECL extern const std::vector<std::string> kTRTSubgraphPasses;
++
++/// \brief List of Lite subgraph passes.
++PD_INFER_DECL extern const std::vector<std::string> kLiteSubgraphPasses;
++
++}  // namespace paddle
+diff --git a/src/kdkocr/libs/sw64/paddle_inference/paddle/lib/libpaddle_inference.so b/src/kdkocr/libs/sw64/paddle_inference/paddle/lib/libpaddle_inference.so
+new file mode 100644
+index 0000000..e69de29
+diff --git a/src/kdkocr/ocr_main.cpp b/src/kdkocr/ocr_main.cpp
+index b2d67c1..189844a 100644
+--- a/src/kdkocr/ocr_main.cpp
++++ b/src/kdkocr/ocr_main.cpp
+@@ -377,7 +377,7 @@ namespace kdkocr_infer {
+ 
+     boxes = post_processor_.FilterTagDetRes(boxes, ratio_h, ratio_w, srcimg);
+     auto postprocess_end = std::chrono::steady_clock::now();
+-    std::cout << "Detected boxes num: " << boxes.size() << endl;
++    //std::cout << "Detected boxes num: " << boxes.size() << endl;
+ 
+     std::chrono::duration<float> preprocess_diff = preprocess_end - preprocess_start;
+     times->push_back(double(preprocess_diff.count() * 1000));
+diff --git a/src/powermanagement/lockscreen.cpp b/src/powermanagement/lockscreen.cpp
+index fd50af1..5164984 100644
+--- a/src/powermanagement/lockscreen.cpp
++++ b/src/powermanagement/lockscreen.cpp
+@@ -129,7 +129,7 @@ bool LockScreen::unInhibitLockScreen(uint32_t flag)
+     DBusPendingCall *sendMsgPending = NULL;
+ 
+     sendMsg = dbus_message_new_method_call(dbusServiceName , dbusObjectPath , dbusInterfaceName , dbusUnInhibitLockScreenMethod);
+-    if (!dbus_message_append_args(sendMsg , DBUS_TYPE_UINT32 , &flag)) {
++    if (!dbus_message_append_args(sendMsg , DBUS_TYPE_UINT32 , &flag , DBUS_TYPE_INVALID)) {
+         klog_err("kdk : d-bus append args fail !\n");
+         return false;
+     }
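
The lockscreen fix above works because dbus_message_append_args() takes a variadic argument list that must be terminated with DBUS_TYPE_INVALID; without the sentinel, libdbus reads past the last real argument. A minimal sketch of the corrected call pattern (illustrative only; connection setup and message construction are omitted):

    #include <dbus/dbus.h>

    /* Append a single uint32 argument; the trailing DBUS_TYPE_INVALID tells
     * libdbus where the argument list ends. */
    static dbus_bool_t append_flag(DBusMessage *msg, dbus_uint32_t flag)
    {
        return dbus_message_append_args(msg,
                                        DBUS_TYPE_UINT32, &flag,
                                        DBUS_TYPE_INVALID);
    }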
+diff --git a/src/systeminfo/CMakeLists.txt b/src/systeminfo/CMakeLists.txt
+index c1392eb..ea2e129 100644
+--- a/src/systeminfo/CMakeLists.txt
++++ b/src/systeminfo/CMakeLists.txt
+@@ -6,7 +6,7 @@ find_library(DBUS_GLIB_LIB dbus-glib-1)
+ add_library(kysysinfo SHARED ${SOURCESCODE})
+ set_target_properties(kysysinfo PROPERTIES VERSION 1.2.0 SOVERSION 1)
+ add_executable(kysysinfo-test test/kysysinfo_test.c)
+-target_link_libraries(kysysinfo kylog systemd kyconf ${GLIBC_LIB} ${DBUS_LIB} ${DBUS_GLIB_LIB})
++target_link_libraries(kysysinfo dl kylog systemd kyconf ${GLIBC_LIB} ${DBUS_LIB} ${DBUS_GLIB_LIB})
+ target_link_libraries(kysysinfo-test kysysinfo)
+ # target_link_libraries(kysysinfo-test kysysinfo kylin-activation kylog systemd kyconf ${GLIBC_LIB} ${DBUS_LIB} ${DBUS_GLIB_LIB})
+ 
+diff --git a/src/systeminfo/libkysysinfo.c b/src/systeminfo/libkysysinfo.c
+index f325bad..21c43cb 100644
+--- a/src/systeminfo/libkysysinfo.c
++++ b/src/systeminfo/libkysysinfo.c
+@@ -4,7 +4,9 @@
+ #include 
+ #include 
+ #include 
++#include 
+ #include 
++#include 
+ 
+ #define KYLIN_ACTIVATION_DBUS_ADDRESS "org.freedesktop.activation"
+ 
+@@ -77,7 +79,10 @@ char* kdk_system_get_version(bool verbose)
+ 
+         sysversion = get_val_from_file(fp, "milestone");
+         if (!sysversion)
++        {
++            fclose(fp);
+             return NULL;
++        }
+         fclose(fp);
+     }
+     else
+@@ -88,7 +93,10 @@ char* kdk_system_get_version(bool verbose)
+ 
+         sysversion = get_val_from_file(fp, "VERSION");
+         if (!sysversion)
++        {
++            fclose(fp);
+             return NULL;
++        }
+         strstrip(sysversion, '\"');
+         fclose(fp);
+     }
+     else
+@@ -107,33 +115,63 @@ int kdk_system_get_activationStatus(int *status_error_num,int *date_error_num)
+     }
+ 
+     int res = 0;
++
++// #define _KYLIN_ACTIVATION_H_
+ #ifdef __linux__
+ #ifdef _KYLIN_ACTIVATION_H_
+     int err;
+-    res = kylin_activation_activate_status(&err);
+-    if (err != NO_ERROR)
+-    {
+-        klog_err("激活状态获取失败:%d\n", err);
+-        return 0;
+-    }
+-    if (res)
+-    {
+-        return 1;
+-    }
++    int ret = -1;
++
++    typedef int (*kylin_activation_activate_status)(int *);
++    typedef int (*kylin_activation_trial_status)(int *);
++    void *hwnd = dlopen("/usr/lib/libkylin-activation.so", RTLD_LAZY);
+ 
+-    res = kylin_activation_trial_status(&err);
+-    if (err != NO_ERROR)
++    if (!hwnd)
+     {
+-        klog_err("试用状态获取失败:%d\n", err);
+-        return 0;
++        klog_err("加载libkylin-activation.so失败\n");
++        return ret;
+     }
+ 
+-    if (res == 1)
++    do
+     {
+-        return 0;
+-    }
++        kylin_activation_activate_status pkylin_activation_activate_status = (kylin_activation_activate_status)dlsym(hwnd, "kylin_activation_activate_status");
++        kylin_activation_trial_status pkylin_activation_trial_status = (kylin_activation_trial_status)dlsym(hwnd, "kylin_activation_trial_status");
+ 
+-    return -1;
++        if (!pkylin_activation_activate_status || !pkylin_activation_trial_status)
++        {
++            klog_err("获取接口地址失败\n");
++            break;
++        }
++        res = pkylin_activation_activate_status(&err);
++        if (err != 0)
++        {
++            klog_err("激活状态获取失败:%d\n", err);
++            ret = 0;
++            break;
++        }
++        if (res)
++        {
++            ret = 1;
++            break;
++        }
++
++        res = pkylin_activation_trial_status(&err);
++        if (err != 0)
++        {
++            klog_err("试用状态获取失败:%d\n", err);
++            ret = 0;
++            break;
++        }
++
++        if (res == 1)
++        {
++            ret = 0;
++            break;
++        }
++    } while (false);
++
++    dlclose(hwnd);
++    return ret;
+ #else  // 修改dbus通信
+     DBusConnection *conn;
+     DBusError err;
+@@ -166,6 +204,12 @@ int kdk_system_get_activationStatus(int *status_error_num,int *date_error_num)
+          "org.freedesktop.activation.interface",  // interface to call on
+          "status");  // method name
+ 
++    if (!status_msg)
++    {
++        klog_err("status_msg:dbus_message_new_method_call调用失败\n");
++        return -1;
++    }
++
+     if (!dbus_connection_send_with_reply (conn, status_msg, &status_pending, -1)) { // -1 is default timeout
+         klog_err("status_msg:dbus_connection_send_with_reply调用失败\n");
+         return -1;
+     }
+@@ -269,8 +313,8 @@ int kdk_system_get_activationStatus(int *status_error_num,int *date_error_num)
+         return 1;
+     }
+ 
+-#endif // _KYLIN_ACTIVATION_H_
+-#endif // __linux__
++// #endif // _KYLIN_ACTIVATION_H_
++// #endif // __linux__
+     if (status_msg)
+     {
+         dbus_message_unref(status_msg);
+@@ -296,6 +340,8 @@ int kdk_system_get_activationStatus(int *status_error_num,int *date_error_num)
+         dbus_pending_call_unref(date_pending);
+     }
+     return 0;
++#endif // _KYLIN_ACTIVATION_H_
++#endif // __linux__
+ }
+ 
+ char* kdk_system_get_serialNumber()
+@@ -313,7 +359,22 @@ char* kdk_system_get_serialNumber()
+     strskipspace(serial);
+ #else
+     int err;
+-    serial = kylin_activation_get_serial_number(&err);
++
++    typedef char *(*kylin_activation_get_serial_number)(int *);
++    void *hwnd = dlopen("/usr/lib/libkylin-activation.so", RTLD_LAZY);
++
++    if (!hwnd)
++    {
++        klog_err("加载libkylin-activation.so失败\n");
++        return serial;
++    }
++    kylin_activation_get_serial_number pkylin_activation_get_serial_number = (kylin_activation_get_serial_number)dlsym(hwnd, "kylin_activation_get_serial_number");
++    if (!pkylin_activation_get_serial_number)
++    {
++        klog_err("加载接口kylin_activation_get_serial_number失败\n");
++        return serial;
++    }
++    serial = pkylin_activation_get_serial_number(&err);
+     if (!serial)
+     {
+         klog_err("序列号获取失败:%d\n", err);
+@@ -322,6 +383,7 @@ char* kdk_system_get_serialNumber()
+     {
+         strskipspace(serial);
+     }
++    dlclose(hwnd);
+     return serial;
+ #endif // _KYLIN_ACTIVATION_H_
+ #endif // __linux__
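
The activation changes above replace the hard link-time dependency on libkylin-activation with dlopen()/dlsym() lookups (hence the added `dl` in target_link_libraries), so the library stays optional at runtime. A condensed sketch of the same pattern (illustrative only; the library path and symbol name follow the patch above):

    #include <dlfcn.h>

    typedef int (*activate_status_fn)(int *);

    static int query_activate_status(int *err)
    {
        void *h = dlopen("/usr/lib/libkylin-activation.so", RTLD_LAZY);
        if (!h)
            return -1;                   /* library not installed */
        activate_status_fn fn =
            (activate_status_fn)dlsym(h, "kylin_activation_activate_status");
        int res = fn ? fn(err) : -1;     /* -1 if the symbol is missing */
        dlclose(h);
        return res;
    }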
+@@ -389,6 +451,59 @@ char* kdk_system_get_projectName()
+     return project_codename;
+ }
+ 
++char* kdk_system_get_projectSubName()
++{
++    char *project_subcodename = NULL;
++#ifdef __linux__
++    FILE *fp = fopen("/etc/lsb-release", "rt");
++    if (fp)
++    {
++        project_subcodename = get_val_from_file(fp, "SUB_PROJECT_CODENAME");
++        fclose(fp);
++    }
++
++    if (!project_subcodename)
++    {
++        fp = fopen("/etc/os-release", "rt");
++        ASSERT_NOT_NULL(fp, NULL);
++        project_subcodename = get_val_from_file(fp, "SUB_PROJECT_CODENAME");
++        fclose(fp);
++    }
++    if (project_subcodename)
++        strstripspace(project_subcodename);
++#endif
++    return project_subcodename;
++}
++
++unsigned int kdk_system_get_productFeatures()
++{
++    char *product_features = NULL;
++    unsigned int res = 0;
++#ifdef __linux__
++    FILE *fp = fopen("/etc/lsb-release", "rt");
++    if (fp)
++    {
++        product_features = get_val_from_file(fp, "PRODUCT_FEATURES");
++        fclose(fp);
++    }
++
++    if (!product_features)
++    {
++        fp = fopen("/etc/os-release", "rt");
++        ASSERT_NOT_NULL(fp, 0);
++        product_features = get_val_from_file(fp, "PRODUCT_FEATURES");
++        fclose(fp);
++    }
++    if (product_features)
++    {
++        strstripspace(product_features);
++        res = atoi(product_features);
++        free(product_features);  /* value was heap-allocated by get_val_from_file */
++    }
++#endif
++
++    return res;
++}
++
+ char* kdk_system_get_hostVirtType()
+ {
+     char *virtType = (char*)malloc(sizeof(char) * 65);
+diff --git a/src/systeminfo/libkysysinfo.h b/src/systeminfo/libkysysinfo.h
+index 230563a..ee74a56 100644
+--- a/src/systeminfo/libkysysinfo.h
++++ b/src/systeminfo/libkysysinfo.h
+@@ -82,6 +82,24 @@ extern char* kdk_system_get_eUser();
+  */
+ extern char* kdk_system_get_projectName();
+ 
++/**
++ * @brief Get the sub-codename of the OS project
++ *
++ * @return char* the string on success, NULL on failure. The returned string
++ *         must be released with free()
++ */
++extern char* kdk_system_get_projectSubName();
++
++/**
++ * @brief Get the product feature code of the OS
++ *
++ * @return unsigned int the feature code:
++ *         0000  information error
++ *         0001  PC features only
++ *         0010  tablet features only
++ *         0011  both tablet and PC features
++ */
++extern unsigned int kdk_system_get_productFeatures();
++
+ /**
+ * @brief 获取操作系统宿主机的虚拟机类型
+ *
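
A minimal caller-side sketch of the new feature-code API (illustrative only). Note that the implementation above parses the file value with atoi(), so the documented codes arrive as the decimal numbers 0, 1, 10, and 11 rather than as bit masks:

    #include <stdio.h>

    extern unsigned int kdk_system_get_productFeatures(void);

    int main(void)
    {
        /* atoi("0011") yields decimal 11, so compare whole values. */
        switch (kdk_system_get_productFeatures()) {
        case 1:  puts("PC features only");        break;
        case 10: puts("tablet features only");    break;
        case 11: puts("tablet and PC features");  break;
        default: puts("information unavailable"); break;
        }
        return 0;
    }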
+diff --git a/src/systemtime/m_systime.c b/src/systemtime/m_systime.c
+index f567292..993a972 100644
+--- a/src/systemtime/m_systime.c
++++ b/src/systemtime/m_systime.c
+@@ -15,13 +15,13 @@
+ #include 
+ 
+ pthread_mutex_t lock;
+-u_int8_t g_Flag;
++u_int8_t g_Flag;        // selects the persistent timer or the one-shot timer
+ 
+-u_int8_t g_Quit;
++u_int8_t g_Quit;        // quit signal
+ sem_t g_Wait;
+ 
+-u_int8_t g_TimeChanged;
+-u_int8_t g_TimeSync;
++u_int8_t g_TimeChanged; // a time change occurred
++u_int8_t g_TimeSync;    // re-synchronization is needed
+ 
+ void sig_Handler(int sig)
+ {
+@@ -40,10 +40,7 @@ static void *printClock(void *ptr)
+     time(&current);
+     now = localtime(&current);
+ 
+-    // printf("%04d/%02d/%02d %02d:%02d:%02d\n", now->tm_year + 1900, now->tm_mon, now->tm_mday, now->tm_hour, now->tm_min, now->tm_sec);
+-    // printf("g_TimeChanged is %d\n", g_TimeChanged);
+-    // printf("%d",now->tm_sec);
+-    // 如果时间发生改变发送TimeVhangeSignal信号
++    // if the time changed, emit the TimeChangeSignal signal
+     if (g_TimeChanged == 1)
+     {
+         char *buf = calloc(1, 128);
+@@ -63,9 +60,10 @@ static void *printClock(void *ptr)
+         dbus_connection_send(conn, msg, &serial);
+         dbus_connection_flush(conn);
+         dbus_message_unref(msg);
++        free(buf);
+     }
+-    // 非整点情况
+ 
++    // not exactly on the minute
+     if (now->tm_sec != 0)
+     {
+         pthread_mutex_lock(&lock);
+@@ -108,6 +106,7 @@ static void *printClock(void *ptr)
+         pthread_mutex_unlock(&lock);
+         sem_post(&g_Wait);
+     }
++    free(buf);
+     }
+ 
+     return NULL;
+@@ -117,26 +116,29 @@ static void *printClock(void *ptr)
+ void *startBroadcastSystemTimePerMin(void *tmp)
+ {
+     DBusConnection *conn = tmp;
+-    size_t timerID = -1;
++    size_t periodicTimerID = 0;
+     while (!g_Quit)
+     {
+         sem_wait(&g_Wait);
+ 
+         if (g_TimeChanged || g_TimeSync)
+         {
+-            printf("Get Time Changed signal or mis-synced. stop timerID %zd\n", timerID);
+-            kdk_timer_stop(timerID);
++            // If the one-shot timer is already running, nothing else is done.
++            // The clock changed and must be re-synced: stop the persistent
++            // timer periodicTimerID and start a one-shot timer.
++            printf("Get Time Changed signal or mis-synced. stop timerID %zd\n", periodicTimerID);
++            kdk_timer_stop(periodicTimerID);
+             g_TimeChanged = 0;
+             g_TimeSync = 0;
+-            timerID = -1;
++            periodicTimerID = 0;
+         }
+ 
+         if (!g_Flag)
+             kdk_timer_start(200, printClock, KTIMER_SINGLESHOT, KTIMER_RELATIVE, conn, 0);
+         else
+         {
+-            timerID = kdk_timer_start(1000 * 60, printClock, KTIMER_PERIODIC, KTIMER_RELATIVE, conn, 0);
+-            printf("start periodic timer with ID %zd\n", timerID);
++            // Once the persistent timer is running, the one-shot timer is no
++            // longer needed.
++            periodicTimerID = kdk_timer_start(1000 * 60, printClock, KTIMER_PERIODIC, KTIMER_RELATIVE, conn, 0);
++            printf("start periodic timer with ID %zd\n", periodicTimerID);
+         }
+     }
+ 
+@@ -162,6 +164,7 @@ int monitorSystemTimeChange()
+ 
+     u_int64_t dep;
+     ssize_t ret = read(fd, &dep, sizeof(u_int64_t));
++    close(fd);
+     if (ret == -1 && errno == ECANCELED)
+         return 1;
+ 
+@@ -176,9 +179,12 @@ void *actionTimeChanged(void *ptr)
+     if (monitorSystemTimeChange() == 1)
+     {
+         printf("System Time Changed.\n");
+-        g_TimeChanged = 1;
+-        g_Flag = 0;
+-        printClock(conn);
++        if (g_Flag)
++        {
++            g_TimeChanged = 1;
++            g_Flag = 0;
++            printClock(conn);
++        }
+     }
+ }
+ 
+@@ -227,15 +233,7 @@ const char *server_introspection_xml =
+     " \n"
+ 
+     " \n"
+-    " \n"
+-    " \n"
+-    " \n"
+-    " \n"
+-    " \n"
+-    " \n"
+-    " \n"
+-    " \n"
+-    " \n"
++    " \n"
+ 
+     " \n"
diff --git a/debian/patches/0004-4-systime-dbus-interface.patch b/debian/patches/0004-4-systime-dbus-interface.patch
new file mode 100644
index 0000000..3facfa4
--- /dev/null
+++ b/debian/patches/0004-4-systime-dbus-interface.patch
@@ -0,0 +1,30 @@
+From: szm-min
+Date: Thu, 28 Jul 2022 06:58:47 +0000
+Subject: =?utf-8?q?!4_=E4=BF=AE=E6=94=B9systime=E7=9A=84dbus=E7=9A=84interf?=
+ =?utf-8?q?ace_Merge_pull_request_!4_from_szm-min/openkylin/yangtze?=
+
+---
+ src/systemtime/m_systime.c | 10 +++++++++-
+ 1 file changed, 9 insertions(+), 1 deletion(-)
+
+diff --git a/src/systemtime/m_systime.c b/src/systemtime/m_systime.c
+index 993a972..dbb3ac2 100644
+--- a/src/systemtime/m_systime.c
++++ b/src/systemtime/m_systime.c
+@@ -233,7 +233,15 @@ const char *server_introspection_xml =
+ " \n"
+ 
+ " \n"
+- " \n"
++ " \n"
++ " \n"
++ " \n"
++ " \n"
++ " \n"
++ " \n"
++ " \n"
++ " \n"
+ " \n"
+ 
+ " \n"
diff --git a/src/systeminfo/CMakeLists.txt b/src/systeminfo/CMakeLists.txt
index ea2e129..d623f59 100644
--- a/src/systeminfo/CMakeLists.txt
+++ b/src/systeminfo/CMakeLists.txt
@@ -17,4 +17,4 @@ install(FILES
   libkysysinfo.hpp
   DESTINATION include/kysdk/kysdk-system)
 install(FILES libkysysinfo.h
-  DESTINATION include/kysdk/kysdk-system)
+  DESTINATION include/kysdk/kysdk-system)
\ No newline at end of file
diff --git a/src/systemtime/m_systime.c b/src/systemtime/m_systime.c
index dbb3ac2..85a5ae1 100644
--- a/src/systemtime/m_systime.c
+++ b/src/systemtime/m_systime.c
@@ -13,6 +13,8 @@
 #include 
 #include 
 #include 
+// 20220721: added time-zone change monitoring
+#include <sys/inotify.h>
 
 pthread_mutex_t lock;
 u_int8_t g_Flag;        // selects the persistent timer or the one-shot timer
@@ -39,12 +41,16 @@ static void *printClock(void *ptr)
     time_t current;
     time(&current);
     now = localtime(&current);
-
+    // struct timeval tx;
+    // struct timezone tz;
+    // gettimeofday(&tx,&tz);
+    // zone = tz.tz_minuteswest/60;
+    // printf("time-zone offset: %d\n", zone);
     // if the time changed, emit the TimeChangeSignal signal
     if (g_TimeChanged == 1)
     {
         char *buf = calloc(1, 128);
-        sprintf(buf, "%04d/%02d/%02d %02d:%02d:%02d", now->tm_year + 1900, now->tm_mon, now->tm_mday, now->tm_hour, now->tm_min, now->tm_sec);
+        sprintf(buf, "%04d/%02d/%02d %02d:%02d:%02d", now->tm_year + 1900, now->tm_mon+1, now->tm_mday, now->tm_hour, now->tm_min, now->tm_sec);
"%04d/%02d/%02d %02d:%02d:%02d", now->tm_year + 1900, now->tm_mon+1, now->tm_mday, now->tm_hour, now->tm_min, now->tm_sec); printf("%s\n", buf); msg = dbus_message_new_signal("/com/kylin/kysdk/Timer", @@ -83,7 +89,7 @@ static void *printClock(void *ptr) else { char *buf = calloc(1, 128); - sprintf(buf, "%04d/%02d/%02d %02d:%02d:%02d", now->tm_year + 1900, now->tm_mon, now->tm_mday, now->tm_hour, now->tm_min, now->tm_sec); + sprintf(buf, "%04d/%02d/%02d %02d:%02d:%02d", now->tm_year + 1900, now->tm_mon+1, now->tm_mday, now->tm_hour, now->tm_min, now->tm_sec); // printf("%s\n", buf); msg = dbus_message_new_signal("/com/kylin/kysdk/Timer", @@ -148,7 +154,7 @@ void *startBroadcastSystemTimePerMin(void *tmp) int monitorSystemTimeChange() { #define TIME_T_MAX (time_t)((1UL << ((sizeof(time_t) << 3) - 1)) - 1) - + // printf("monitorSystemTimeChange\n"); struct itimerspec its = {.it_value.tv_sec = TIME_T_MAX}; int fd = timerfd_create(CLOCK_REALTIME, TFD_CLOEXEC); if (fd < 0) @@ -190,6 +196,57 @@ void *actionTimeChanged(void *ptr) return NULL; } +// 20020721新增时区变化监听 +int monitorSystemTimeZoneChange(){ + + char buf[BUFSIZ]; + int fd = inotify_init(); + buf[sizeof(buf) - 1] = 0; + + struct inotify_event *event; + + if (fd < 0) + { + return -1; + } + + int ftimezone = inotify_add_watch(fd,"/etc/timezone",IN_DELETE_SELF); + + if (ftimezone < 0) + { + close(fd); + return -1; + } + int ret = read(fd, buf, sizeof(buf) - 1); + close(fd); + event = (struct inotify_event *)&buf[0]; + if (ret) + { + fprintf(stdout, "%s --- %s\n", event->name, "IN_DELETE_SELF"); + return 1; + } + return 0; +} +// 20020721新增时区变化监听 +void *actionTimeZoneChanged(void *ptr) +{ + DBusConnection *conn = ptr; + while (!g_Quit) + { + if (monitorSystemTimeZoneChange() == 1) + { + printf("System Time Changed.\n"); + if (g_Flag) + { + g_TimeChanged = 1; + g_Flag = 0; + printClock(conn); + } + } + } + + return NULL; +} const char *version = "0.1"; GMainLoop *mainloop; @@ -450,13 +507,20 @@ int main(void) sem_init(&g_Wait, 0, 1); + pthread_mutex_init(&lock, NULL); pthread_attr_t attr; pthread_t tid; pthread_attr_init(&attr); pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED); pthread_create(&tid, &attr, actionTimeChanged, conn); + + // 20020721新增时区变化监听 + pthread_attr_t timezone_attr; + pthread_t timezone_id; + pthread_attr_init(&timezone_attr); + pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED); + pthread_create(&timezone_id, &timezone_attr, actionTimeZoneChanged, conn); - pthread_mutex_init(&lock, NULL); /* connect to the daemon bus */