Remove projects being moved from system/core into system/memory

The following directories are being moved into separate projects under
system/memory:
  platform/system/memory/libion
  platform/system/memory/libmeminfo
  platform/system/memory/libmemtrack
  platform/system/memory/libmemunreachable
  platform/system/memory/lmkd

Remove them from system/core.

Bug: 141634854
Test: build and boot
Change-Id: I0ecc0668a281ec360133c8472bbf12ece92116d2
Signed-off-by: Suren Baghdasaryan <surenb@google.com>
Merged-In: Ica64900cd8af273fdffd321591acf3a566f04355
Merged-In: I98793edd10954058448727635f101219f71d3ccf
This commit is contained in:
Suren Baghdasaryan 2019-11-19 11:29:53 -08:00 committed by Thomas Joseph Avila
parent b407aca123
commit c19f63004d
123 changed files with 0 additions and 77417 deletions

View File

@ -1,29 +0,0 @@
// Build rules for libion, the userspace wrapper around the ION kernel
// memory allocator (relocated to system/memory/libion).
// vendor_available + vndk.enabled + support_system_process: the library is
// part of the VNDK and may also be loaded into same-process HALs.
cc_library {
    name: "libion",
    vendor_available: true,
    vndk: {
        enabled: true,
        support_system_process: true,
    },
    srcs: ["ion.c"],
    shared_libs: ["liblog"],
    // kernel-headers holds local copies of the ION UAPI headers so the
    // library builds without matching kernel headers on the host.
    local_include_dirs: [
        "include",
        "kernel-headers",
    ],
    export_include_dirs: [
        "include",
        "kernel-headers",
    ],
    cflags: ["-Werror"],
}
// iontest: standalone command-line exerciser for the libion API
// (alloc/map/share tests implemented in ion_test.c).
cc_binary {
    name: "iontest",
    srcs: ["ion_test.c"],
    static_libs: ["libion"],
    shared_libs: ["liblog"],
    cflags: ["-Werror"],
}

// NOTE(review): `subdirs` is a legacy Soong carry-over; current Soong
// discovers Android.bp files automatically — confirm this is still needed.
subdirs = ["tests"]

View File

@ -1,2 +0,0 @@
sspatil@google.com
hridya@google.com

View File

@ -1,56 +0,0 @@
/*
 * ion.h
 *
 * Memory Allocator functions for ion
 *
 * Copyright 2011 Google, Inc
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef __SYS_CORE_ION_H
#define __SYS_CORE_ION_H

#include <sys/types.h>

#include <linux/ion.h>

__BEGIN_DECLS

struct ion_handle;

/* Open /dev/ion; returns an fd, or a negative value on failure. */
int ion_open();
/* Close an fd from ion_open(); returns 0 on success or -errno. */
int ion_close(int fd);
/* Legacy (pre-4.12 kernel) only: allocate a buffer, returning its handle. */
int ion_alloc(int fd, size_t len, size_t align, unsigned int heap_mask,
              unsigned int flags, ion_user_handle_t *handle);
/* Allocate a buffer and return it as a dma-buf fd; works on all kernels. */
int ion_alloc_fd(int fd, size_t len, size_t align, unsigned int heap_mask,
                 unsigned int flags, int *handle_fd);
/* Legacy only: perform cache sync on the buffer behind handle_fd. */
int ion_sync_fd(int fd, int handle_fd);
/* Legacy only: release a handle from ion_alloc()/ion_import(). */
int ion_free(int fd, ion_user_handle_t handle);
/* Legacy only: mmap a handle; fills *ptr with the mapping and *map_fd
 * with the fd backing it. */
int ion_map(int fd, ion_user_handle_t handle, size_t length, int prot,
            int flags, off_t offset, unsigned char **ptr, int *map_fd);
/* Legacy only: export a handle as a shareable dma-buf fd. */
int ion_share(int fd, ion_user_handle_t handle, int *share_fd);
/* Legacy only: import a shared fd back into a handle. */
int ion_import(int fd, int share_fd, ion_user_handle_t *handle);

/**
 * Add 4.12+ kernel ION interfaces here for forward compatibility
 * This should be needed till the pre-4.12+ ION interfaces are backported.
 */
/* Report the number of ION heaps in *cnt. */
int ion_query_heap_cnt(int fd, int* cnt);
/* Fill `buffers` (array of struct ion_heap_data, `cnt` entries) with heap
 * descriptions. */
int ion_query_get_heaps(int fd, int cnt, void* buffers);
/* Nonzero when fd talks to a legacy (handle-based, pre-4.12) ION driver. */
int ion_is_legacy(int fd);
/* Nonzero when the driver's ABI version supports modular heaps. */
int ion_is_using_modular_heaps(int fd);

__END_DECLS

#endif /* __SYS_CORE_ION_H */

View File

@ -1,234 +0,0 @@
/*
* ion.c
*
* Memory Allocator functions for ion
*
* Copyright 2011 Google, Inc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#define LOG_TAG "ion"
#include <errno.h>
#include <fcntl.h>
#include <linux/ion.h>
#include <stdatomic.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <unistd.h>
#include <ion/ion.h>
#include <linux/ion_4.19.h>
#include <log/log.h>
#define ION_ABI_VERSION_MODULAR_HEAPS 2
enum ion_version { ION_VERSION_UNKNOWN, ION_VERSION_MODERN, ION_VERSION_LEGACY };
static atomic_int g_ion_version = ATOMIC_VAR_INIT(ION_VERSION_UNKNOWN);
/*
 * Detect whether `fd` talks to a legacy (pre-4.12) ION driver.
 *
 * Probes at most once per process: it issues ION_IOC_FREE with a NULL
 * handle — modern kernels do not implement that ioctl and fail with
 * -ENOTTY, while legacy kernels recognize the request.  The verdict is
 * cached in g_ion_version using acquire/release atomics; a benign race
 * may probe twice, but both writers store the same value.
 *
 * Returns nonzero when the legacy handle-based API must be used.
 */
int ion_is_legacy(int fd) {
    int version = atomic_load_explicit(&g_ion_version, memory_order_acquire);
    if (version == ION_VERSION_UNKNOWN) {
        /**
         * Check for FREE IOCTL here; it is available only in the old
         * kernels, not the new ones.
         */
        int err = ion_free(fd, (ion_user_handle_t)NULL);
        version = (err == -ENOTTY) ? ION_VERSION_MODERN : ION_VERSION_LEGACY;
        atomic_store_explicit(&g_ion_version, version, memory_order_release);
    }
    return version == ION_VERSION_LEGACY;
}
/*
 * Open the ION device node.
 * Returns the new fd on success; on failure logs the error and returns
 * the negative result of open() (errno is left set by open()).
 */
int ion_open() {
    int devfd = open("/dev/ion", O_RDONLY | O_CLOEXEC);
    if (devfd < 0) {
        ALOGE("open /dev/ion failed: %s", strerror(errno));
    }
    return devfd;
}
/*
 * Close an fd obtained from ion_open().
 * Returns 0 on success, or -errno if close() fails.
 */
int ion_close(int fd) {
    if (close(fd) < 0) {
        return -errno;
    }
    return 0;
}
/*
 * Thin wrapper around ioctl(): on failure, logs the request and returns
 * -errno instead of -1; on success, passes the ioctl result through.
 */
static int ion_ioctl(int fd, int req, void* arg) {
    int rc = ioctl(fd, req, arg);
    if (rc >= 0) {
        return rc;
    }
    ALOGE("ioctl %x failed with code %d: %s", req, rc, strerror(errno));
    return -errno;
}
/*
 * Nonzero when the driver reports an ABI version new enough to support
 * modular heaps (>= ION_ABI_VERSION_MODULAR_HEAPS).
 */
int ion_is_using_modular_heaps(int fd) {
    int abi = 0;
    return ion_ioctl(fd, ION_IOC_ABI_VERSION, &abi) == 0 &&
           abi >= ION_ABI_VERSION_MODULAR_HEAPS;
}
/*
 * Legacy-kernel-only allocation: fills *handle with the new buffer's
 * opaque handle.  Returns -EINVAL if handle is NULL or the kernel uses
 * the modern (handle-less) ION API; otherwise 0 or -errno.
 */
int ion_alloc(int fd, size_t len, size_t align, unsigned int heap_mask, unsigned int flags,
              ion_user_handle_t* handle) {
    if (handle == NULL || !ion_is_legacy(fd)) {
        return -EINVAL;
    }

    struct ion_allocation_data req = {
            .len = len,
            .align = align,
            .heap_id_mask = heap_mask,
            .flags = flags,
    };
    int rc = ion_ioctl(fd, ION_IOC_ALLOC, &req);
    if (rc < 0) {
        return rc;
    }
    *handle = req.handle;
    return rc;
}
/* Release a legacy ION handle.  Returns 0 or -errno (-ENOTTY on modern
 * kernels, which ion_is_legacy() exploits as a version probe). */
int ion_free(int fd, ion_user_handle_t handle) {
    struct ion_handle_data req = {.handle = handle};
    return ion_ioctl(fd, ION_IOC_FREE, &req);
}
/*
 * Legacy-kernel-only: map the buffer behind `handle` into this process.
 * On success, *ptr receives the mapping and *map_fd the fd backing it
 * (caller owns both: munmap(*ptr) and close(*map_fd)).
 * Returns 0 on success, -EINVAL for bad arguments or a modern kernel,
 * or -errno from the ioctl/mmap.
 */
int ion_map(int fd, ion_user_handle_t handle, size_t length, int prot, int flags, off_t offset,
            unsigned char** ptr, int* map_fd) {
    if (!ion_is_legacy(fd)) return -EINVAL;
    if (map_fd == NULL) return -EINVAL;
    if (ptr == NULL) return -EINVAL;

    struct ion_fd_data data = {
            .handle = handle,
    };
    int ret = ion_ioctl(fd, ION_IOC_MAP, &data);
    if (ret < 0) return ret;
    if (data.fd < 0) {
        ALOGE("map ioctl returned negative fd");
        return -EINVAL;
    }

    unsigned char* tmp_ptr = mmap(NULL, length, prot, flags, data.fd, offset);
    if (tmp_ptr == MAP_FAILED) {
        int err = errno; /* close()/ALOGE below may clobber errno */
        ALOGE("mmap failed: %s", strerror(err));
        close(data.fd); /* fix: don't leak the fd returned by ION_IOC_MAP */
        return -err;
    }
    *map_fd = data.fd;
    *ptr = tmp_ptr;
    return ret;
}
/*
 * Legacy-kernel-only: export `handle` as a shareable dma-buf fd in
 * *share_fd (caller owns it).  Returns 0, -EINVAL, or -errno.
 */
int ion_share(int fd, ion_user_handle_t handle, int* share_fd) {
    if (!ion_is_legacy(fd)) {
        return -EINVAL;
    }
    if (share_fd == NULL) {
        return -EINVAL;
    }

    struct ion_fd_data req = {.handle = handle};
    int rc = ion_ioctl(fd, ION_IOC_SHARE, &req);
    if (rc < 0) {
        return rc;
    }
    if (req.fd < 0) {
        ALOGE("share ioctl returned negative fd");
        return -EINVAL;
    }
    *share_fd = req.fd;
    return rc;
}
/*
 * Allocate a buffer and hand it back as a dma-buf fd in *handle_fd.
 * Works on both ION ABIs: modern kernels allocate straight to an fd
 * (`align` is ignored there, matching the new UAPI); legacy kernels
 * allocate a handle, export it, then drop the handle so the fd is the
 * sole reference.  Returns 0 or a negative errno value.
 */
int ion_alloc_fd(int fd, size_t len, size_t align, unsigned int heap_mask, unsigned int flags,
                 int* handle_fd) {
    if (!handle_fd) {
        return -EINVAL;
    }

    if (!ion_is_legacy(fd)) {
        struct ion_new_allocation_data req = {
                .len = len,
                .heap_id_mask = heap_mask,
                .flags = flags,
        };
        int rc = ion_ioctl(fd, ION_IOC_NEW_ALLOC, &req);
        if (rc < 0) {
            return rc;
        }
        *handle_fd = req.fd;
        return rc;
    }

    /* Legacy path: handle -> shared fd -> free the handle. */
    ion_user_handle_t handle;
    int rc = ion_alloc(fd, len, align, heap_mask, flags, &handle);
    if (rc < 0) {
        return rc;
    }
    rc = ion_share(fd, handle, handle_fd);
    ion_free(fd, handle);
    return rc;
}
/*
 * Legacy-kernel-only: convert a shared dma-buf fd back into an ION
 * handle in *handle.  Returns 0, -EINVAL, or -errno.
 */
int ion_import(int fd, int share_fd, ion_user_handle_t* handle) {
    if (!ion_is_legacy(fd)) {
        return -EINVAL;
    }
    if (handle == NULL) {
        return -EINVAL;
    }

    struct ion_fd_data req = {.fd = share_fd};
    int rc = ion_ioctl(fd, ION_IOC_IMPORT, &req);
    if (rc < 0) {
        return rc;
    }
    *handle = req.handle;
    return rc;
}
int ion_sync_fd(int fd, int handle_fd) {
struct ion_fd_data data = {
.fd = handle_fd,
};
if (!ion_is_legacy(fd)) return -EINVAL;
return ion_ioctl(fd, ION_IOC_SYNC, &data);
}
/*
 * Store the number of available ION heaps in *cnt.
 * A zeroed query (cnt == 0) asks the kernel only for the count.
 * Returns 0 or a negative errno value.
 */
int ion_query_heap_cnt(int fd, int* cnt) {
    if (!cnt) {
        return -EINVAL;
    }

    struct ion_heap_query query;
    memset(&query, 0, sizeof(query));

    int rc = ion_ioctl(fd, ION_IOC_HEAP_QUERY, &query);
    if (rc < 0) {
        return rc;
    }
    *cnt = query.cnt;
    return rc;
}
/*
 * Fill `buffers` — an array of `cnt` struct ion_heap_data supplied by
 * the caller — with descriptions of the available heaps.
 * Returns 0 or a negative errno value.
 */
int ion_query_get_heaps(int fd, int cnt, void* buffers) {
    struct ion_heap_query query = {
            .cnt = cnt,
            .heaps = (uintptr_t)buffers,
    };
    return ion_ioctl(fd, ION_IOC_HEAP_QUERY, &query);
}

View File

@ -1,289 +0,0 @@
/*
* Copyright 2013 Google, Inc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <errno.h>
#include <fcntl.h>
#include <getopt.h>
#include <string.h>
#include <stdlib.h>
#include <stdio.h>
#include <sys/mman.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>
#include <ion/ion.h>
#include <linux/ion.h>
/* Tunable test parameters; overridden by command-line options in main(). */
size_t len = 1024*1024, align = 0;  /* allocation size and alignment */
int prot = PROT_READ | PROT_WRITE;  /* mmap protection bits (--prot) */
int map_flags = MAP_SHARED;         /* mmap flags (--map_flags) */
int alloc_flags = 0;                /* ION allocation flags (--alloc_flags) */
int heap_mask = 1;                  /* ION heap id mask (--heap_mask) */
int test = -1;                      /* selected test; -1 = none chosen */
size_t stride;                      /* NOTE(review): never used in this file */
/*
 * Open /dev/ion and allocate one buffer using the global test parameters.
 * On success, *fd and *handle are populated and 0 is returned; on failure
 * a negative errno value is returned and the device fd (if it was opened)
 * is closed.
 */
int _ion_alloc_test(int *fd, ion_user_handle_t *handle)
{
	int ret;

	*fd = ion_open();
	if (*fd < 0)
		return *fd;

	ret = ion_alloc(*fd, len, align, heap_mask, alloc_flags, handle);

	if (ret) {
		/* fix: ion_alloc returns -errno; negate before strerror() */
		printf("%s failed: %s\n", __func__, strerror(-ret));
		/* fix: don't leak the device fd when the allocation fails */
		ion_close(*fd);
	}
	return ret;
}
/*
 * Smoke test: allocate one buffer and free it, reporting pass/fail on
 * stdout.  The device fd is closed on both the success and error paths.
 */
void ion_alloc_test()
{
	int fd, ret;
	ion_user_handle_t handle;

	if(_ion_alloc_test(&fd, &handle))
		return;

	ret = ion_free(fd, handle);
	if (ret) {
		/* fix: ion_free returns -errno; negate before strerror() */
		printf("%s failed: %s %d\n", __func__, strerror(-ret), handle);
		ion_close(fd); /* fix: don't leak the device fd on failure */
		return;
	}
	ion_close(fd);
	printf("ion alloc test: passed\n");
}
/*
 * Allocate, map, and verify a buffer: writes a repeating byte pattern
 * through the mapping and checks it reads back unchanged.  The tail of
 * the function intentionally tears down in a non-obvious order and then
 * re-allocates without freeing — NOTE(review): presumably to exercise
 * kernel-side cleanup of leaked handles; the #if 0 block preserves
 * further leak experiments that are compiled out.
 */
void ion_map_test()
{
	int fd, map_fd, ret;
	size_t i;
	ion_user_handle_t handle;
	unsigned char *ptr;

	if(_ion_alloc_test(&fd, &handle))
		return;

	ret = ion_map(fd, handle, len, prot, map_flags, 0, &ptr, &map_fd);
	if (ret)
		return;

	/* Write a pattern through the mapping... */
	for (i = 0; i < len; i++) {
		ptr[i] = (unsigned char)i;
	}
	/* ...and verify every byte reads back as written. */
	for (i = 0; i < len; i++)
		if (ptr[i] != (unsigned char)i)
			printf("%s failed wrote %zu read %d from mapped "
			       "memory\n", __func__, i, ptr[i]);
	/* clean up properly */
	ret = ion_free(fd, handle);
	ion_close(fd);
	munmap(ptr, len);
	close(map_fd);

	/* Second allocation is deliberately leaked: only the device fd is
	   closed, the handle is never freed. */
	_ion_alloc_test(&fd, &handle);
	close(fd);

#if 0
	munmap(ptr, len);
	close(map_fd);
	ion_close(fd);

	_ion_alloc_test(len, align, flags, &fd, &handle);
	close(map_fd);
	ret = ion_map(fd, handle, len, prot, flags, 0, &ptr, &map_fd);
	/* don't clean up */
#endif
}
/*
 * Cross-process sharing test: forks, passes an ION buffer's shared fd
 * from parent to child over an AF_UNIX socketpair via SCM_RIGHTS, and
 * has both sides mmap the buffer and exchange strings through it to
 * prove they see the same memory.  Results are printed, not asserted.
 */
void ion_share_test()
{
	ion_user_handle_t handle;
	int sd[2];
	int num_fd = 1;
	struct iovec count_vec = {
		.iov_base = &num_fd,
		.iov_len = sizeof num_fd,
	};
	/* Ancillary-data buffer sized for exactly one passed fd. */
	char buf[CMSG_SPACE(sizeof(int))];
	socketpair(AF_UNIX, SOCK_STREAM, 0, sd);
	if (fork()) {
		struct msghdr msg = {
			.msg_control = buf,
			.msg_controllen = sizeof buf,
			.msg_iov = &count_vec,
			.msg_iovlen = 1,
		};
		struct cmsghdr *cmsg;
		int fd, share_fd, ret;
		char *ptr;
		/* parent */
		if(_ion_alloc_test(&fd, &handle))
			return;
		ret = ion_share(fd, handle, &share_fd);
		if (ret)
			printf("share failed %s\n", strerror(errno));
		ptr = mmap(NULL, len, prot, map_flags, share_fd, 0);
		if (ptr == MAP_FAILED) {
			return;
		}
		strcpy(ptr, "master");
		/* Attach share_fd as SCM_RIGHTS ancillary data. */
		cmsg = CMSG_FIRSTHDR(&msg);
		cmsg->cmsg_level = SOL_SOCKET;
		cmsg->cmsg_type = SCM_RIGHTS;
		cmsg->cmsg_len = CMSG_LEN(sizeof(int));
		*(int *)CMSG_DATA(cmsg) = share_fd;
		/* send the fd */
		printf("master? [%10s] should be [master]\n", ptr);
		printf("master sending msg 1\n");
		sendmsg(sd[0], &msg, 0);
		if (recvmsg(sd[0], &msg, 0) < 0)
			perror("master recv msg 2");
		/* Child wrote "child" into the shared buffer by now. */
		printf("master? [%10s] should be [child]\n", ptr);

		/* send ping */
		sendmsg(sd[0], &msg, 0);
		printf("master->master? [%10s]\n", ptr);
		if (recvmsg(sd[0], &msg, 0) < 0)
			perror("master recv 1");
		close(fd);
		_exit(0);
	} else {
		struct cmsghdr *cmsg;
		char* ptr;
		int fd, recv_fd;
		char* child_buf[100];
		/* child */
		struct iovec count_vec = {
			.iov_base = child_buf,
			.iov_len = sizeof child_buf,
		};
		struct msghdr child_msg = {
			.msg_control = buf,
			.msg_controllen = sizeof buf,
			.msg_iov = &count_vec,
			.msg_iovlen = 1,
		};
		if (recvmsg(sd[1], &child_msg, 0) < 0)
			perror("child recv msg 1");
		cmsg = CMSG_FIRSTHDR(&child_msg);
		if (cmsg == NULL) {
			printf("no cmsg rcvd in child");
			return;
		}
		/* The passed buffer fd arrives as ancillary data. */
		recv_fd = *(int*)CMSG_DATA(cmsg);
		if (recv_fd < 0) {
			printf("could not get recv_fd from socket");
			return;
		}
		printf("child %d\n", recv_fd);
		fd = ion_open();
		ptr = mmap(NULL, len, prot, map_flags, recv_fd, 0);
		if (ptr == MAP_FAILED) {
			return;
		}
		printf("child? [%10s] should be [master]\n", ptr);
		strcpy(ptr, "child");
		printf("child sending msg 2\n");
		sendmsg(sd[1], &child_msg, 0);
		close(fd);
	}
}
/*
 * Parse command-line options into the global test parameters and run the
 * selected test (alloc, map, or share).  Always returns 0; test results
 * are reported on stdout only.
 */
int main(int argc, char* argv[]) {
    int c;
    enum tests {
        ALLOC_TEST = 0, MAP_TEST, SHARE_TEST,
    };

    while (1) {
        static struct option opts[] = {
            {"alloc", no_argument, 0, 'a'},
            {"alloc_flags", required_argument, 0, 'f'},
            {"heap_mask", required_argument, 0, 'h'},
            {"map", no_argument, 0, 'm'},
            {"share", no_argument, 0, 's'},
            {"len", required_argument, 0, 'l'},
            {"align", required_argument, 0, 'g'},
            {"map_flags", required_argument, 0, 'z'},
            {"prot", required_argument, 0, 'p'},
            /* fix: getopt_long requires a zero-filled terminator entry */
            {0, 0, 0, 0},
        };
        int i = 0;
        /* fix: add g:/z:/p: so the short forms of --align, --map_flags
         * and --prot are accepted too */
        c = getopt_long(argc, argv, "af:g:h:l:mp:r:stz:", opts, &i);
        if (c == -1)
            break;

        switch (c) {
            case 'l':
                len = atol(optarg);
                break;
            case 'g':
                align = atol(optarg);
                break;
            case 'z':
                /* fix: --map_flags selects mmap MAP_* flags (the original
                 * code filled it with PROT_* bits — handlers were swapped) */
                map_flags = 0;
                map_flags |= strstr(optarg, "MAP_PRIVATE") ? MAP_PRIVATE : 0;
                map_flags |= strstr(optarg, "MAP_SHARED") ? MAP_SHARED : 0;
                break;
            case 'p':
                /* fix: --prot selects mmap PROT_* bits (was MAP_*) */
                prot = 0;
                prot |= strstr(optarg, "PROT_EXEC") ? PROT_EXEC : 0;
                prot |= strstr(optarg, "PROT_READ") ? PROT_READ : 0;
                prot |= strstr(optarg, "PROT_WRITE") ? PROT_WRITE : 0;
                prot |= strstr(optarg, "PROT_NONE") ? PROT_NONE : 0;
                break;
            case 'f':
                alloc_flags = atol(optarg);
                break;
            case 'h':
                heap_mask = atol(optarg);
                break;
            case 'a':
                test = ALLOC_TEST;
                break;
            case 'm':
                test = MAP_TEST;
                break;
            case 's':
                test = SHARE_TEST;
                break;
            default:
                break;
        }
    }
    printf("test %d, len %zu, align %zu, map_flags %d, prot %d, heap_mask %d,"
           " alloc_flags %d\n", test, len, align, map_flags, prot,
           heap_mask, alloc_flags);

    switch (test) {
        case ALLOC_TEST:
            ion_alloc_test();
            break;
        case MAP_TEST:
            ion_map_test();
            break;
        case SHARE_TEST:
            ion_share_test();
            break;
        default:
            printf("must specify a test (alloc, map, share)\n");
    }
    return 0;
}

View File

@ -1,78 +0,0 @@
/****************************************************************************
****************************************************************************
***
*** This header was automatically generated from a Linux kernel header
*** of the same name, to make information necessary for userspace to
*** call into the kernel available to libc. It contains only constants,
*** structures, and macros generated from the original header, and thus,
*** contains no copyrightable information.
***
*** To edit the content of this header, modify the corresponding
*** source file (e.g. under external/kernel-headers/original/) then
*** run bionic/libc/kernel/tools/update_all.py
***
*** Any manual change here will be lost the next time this script will
*** be run. You've been warned!
***
****************************************************************************
****************************************************************************/
#ifndef _UAPI_LINUX_ION_H
#define _UAPI_LINUX_ION_H
#include <linux/ioctl.h>
#include <linux/types.h>
/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
typedef int ion_user_handle_t;
enum ion_heap_type {
ION_HEAP_TYPE_SYSTEM,
ION_HEAP_TYPE_SYSTEM_CONTIG,
/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
ION_HEAP_TYPE_CARVEOUT,
ION_HEAP_TYPE_CHUNK,
ION_HEAP_TYPE_DMA,
ION_HEAP_TYPE_CUSTOM,
/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
ION_NUM_HEAPS = 16,
};
#define ION_HEAP_SYSTEM_MASK (1 << ION_HEAP_TYPE_SYSTEM)
#define ION_HEAP_SYSTEM_CONTIG_MASK (1 << ION_HEAP_TYPE_SYSTEM_CONTIG)
/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
#define ION_HEAP_CARVEOUT_MASK (1 << ION_HEAP_TYPE_CARVEOUT)
#define ION_HEAP_TYPE_DMA_MASK (1 << ION_HEAP_TYPE_DMA)
#define ION_NUM_HEAP_IDS (sizeof(unsigned int) * 8)
#define ION_FLAG_CACHED 1
/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
#define ION_FLAG_CACHED_NEEDS_SYNC 2
struct ion_allocation_data {
size_t len;
size_t align;
/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
unsigned int heap_id_mask;
unsigned int flags;
ion_user_handle_t handle;
};
/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
struct ion_fd_data {
ion_user_handle_t handle;
int fd;
};
/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
struct ion_handle_data {
ion_user_handle_t handle;
};
struct ion_custom_data {
/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
unsigned int cmd;
unsigned long arg;
};
#define ION_IOC_MAGIC 'I'
/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
#define ION_IOC_ALLOC _IOWR(ION_IOC_MAGIC, 0, struct ion_allocation_data)
#define ION_IOC_FREE _IOWR(ION_IOC_MAGIC, 1, struct ion_handle_data)
#define ION_IOC_MAP _IOWR(ION_IOC_MAGIC, 2, struct ion_fd_data)
#define ION_IOC_SHARE _IOWR(ION_IOC_MAGIC, 4, struct ion_fd_data)
/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
#define ION_IOC_IMPORT _IOWR(ION_IOC_MAGIC, 5, struct ion_fd_data)
#define ION_IOC_SYNC _IOWR(ION_IOC_MAGIC, 7, struct ion_fd_data)
#define ION_IOC_CUSTOM _IOWR(ION_IOC_MAGIC, 6, struct ion_custom_data)
#endif
/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */

View File

@ -1,50 +0,0 @@
/****************************************************************************
****************************************************************************
***
*** This header was automatically generated from a Linux kernel header
*** of the same name, to make information necessary for userspace to
*** call into the kernel available to libc. It contains only constants,
*** structures, and macros generated from the original header, and thus,
*** contains no copyrightable information.
***
*** To edit the content of this header, modify the corresponding
*** source file (e.g. under external/kernel-headers/original/) then
*** run bionic/libc/kernel/tools/update_all.py
***
*** Any manual change here will be lost the next time this script will
*** be run. You've been warned!
***
****************************************************************************
****************************************************************************/
#ifndef _UAPI_LINUX_ION_NEW_H
#define _UAPI_LINUX_ION_NEW_H
#include <linux/ioctl.h>
#include <linux/types.h>
#define ION_NUM_HEAP_IDS (sizeof(unsigned int) * 8)
struct ion_new_allocation_data {
__u64 len;
__u32 heap_id_mask;
__u32 flags;
__u32 fd;
__u32 unused;
};
#define MAX_HEAP_NAME 32
struct ion_heap_data {
char name[MAX_HEAP_NAME];
__u32 type;
__u32 heap_id;
__u32 reserved0;
__u32 reserved1;
__u32 reserved2;
};
struct ion_heap_query {
__u32 cnt;
__u32 reserved0;
__u64 heaps;
__u32 reserved1;
__u32 reserved2;
};
#define ION_IOC_MAGIC 'I'
#define ION_IOC_NEW_ALLOC _IOWR(ION_IOC_MAGIC, 0, struct ion_new_allocation_data)
#define ION_IOC_HEAP_QUERY _IOWR(ION_IOC_MAGIC, 8, struct ion_heap_query)
#endif

View File

@ -1,67 +0,0 @@
/****************************************************************************
****************************************************************************
***
*** This header was automatically generated from a Linux kernel header
*** of the same name, to make information necessary for userspace to
*** call into the kernel available to libc. It contains only constants,
*** structures, and macros generated from the original header, and thus,
*** contains no copyrightable information.
***
*** To edit the content of this header, modify the corresponding
*** source file (e.g. under external/kernel-headers/original/) then
*** run bionic/libc/kernel/tools/update_all.py
***
*** Any manual change here will be lost the next time this script will
*** be run. You've been warned!
***
****************************************************************************
****************************************************************************/
#ifndef _UAPI_LINUX_ION_NEW_H
#define _UAPI_LINUX_ION_NEW_H
#include <linux/ioctl.h>
#include <linux/types.h>
#define ION_NUM_HEAP_IDS (sizeof(unsigned int) * 8)
enum ion_heap_type_ext {
ION_HEAP_TYPE_CUSTOM_EXT = 16,
ION_HEAP_TYPE_MAX = 31,
};
enum ion_heap_id {
ION_HEAP_SYSTEM = (1 << ION_HEAP_TYPE_SYSTEM),
ION_HEAP_SYSTEM_CONTIG = (ION_HEAP_SYSTEM << 1),
ION_HEAP_CARVEOUT_START = (ION_HEAP_SYSTEM_CONTIG << 1),
ION_HEAP_CARVEOUT_END = (ION_HEAP_CARVEOUT_START << 4),
ION_HEAP_CHUNK = (ION_HEAP_CARVEOUT_END << 1),
ION_HEAP_DMA_START = (ION_HEAP_CHUNK << 1),
ION_HEAP_DMA_END = (ION_HEAP_DMA_START << 7),
ION_HEAP_CUSTOM_START = (ION_HEAP_DMA_END << 1),
ION_HEAP_CUSTOM_END = (ION_HEAP_CUSTOM_START << 15),
};
#define ION_NUM_MAX_HEAPS (32)
struct ion_new_allocation_data {
__u64 len;
__u32 heap_id_mask;
__u32 flags;
__u32 fd;
__u32 unused;
};
#define MAX_HEAP_NAME 32
struct ion_heap_data {
char name[MAX_HEAP_NAME];
__u32 type;
__u32 heap_id;
__u32 reserved0;
__u32 reserved1;
__u32 reserved2;
};
struct ion_heap_query {
__u32 cnt;
__u32 reserved0;
__u64 heaps;
__u32 reserved1;
__u32 reserved2;
};
#define ION_IOC_MAGIC 'I'
#define ION_IOC_NEW_ALLOC _IOWR(ION_IOC_MAGIC, 0, struct ion_new_allocation_data)
#define ION_IOC_HEAP_QUERY _IOWR(ION_IOC_MAGIC, 8, struct ion_heap_query)
#define ION_IOC_ABI_VERSION _IOR(ION_IOC_MAGIC, 9, __u32)
#endif

View File

@ -1,38 +0,0 @@
/****************************************************************************
****************************************************************************
***
*** This header was automatically generated from a Linux kernel header
*** of the same name, to make information necessary for userspace to
*** call into the kernel available to libc. It contains only constants,
*** structures, and macros generated from the original header, and thus,
*** contains no copyrightable information.
***
*** To edit the content of this header, modify the corresponding
*** source file (e.g. under external/kernel-headers/original/) then
*** run bionic/libc/kernel/tools/update_all.py
***
*** Any manual change here will be lost the next time this script will
*** be run. You've been warned!
***
****************************************************************************
****************************************************************************/
#ifndef _UAPI_LINUX_ION_TEST_H
#define _UAPI_LINUX_ION_TEST_H
#include <linux/ioctl.h>
#include <linux/types.h>
/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
struct ion_test_rw_data {
__u64 ptr;
__u64 offset;
__u64 size;
/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
int write;
int __padding;
};
#define ION_IOC_MAGIC 'I'
/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */
#define ION_IOC_TEST_SET_FD _IO(ION_IOC_MAGIC, 0xf0)
#define ION_IOC_TEST_DMA_MAPPING _IOW(ION_IOC_MAGIC, 0xf1, struct ion_test_rw_data)
#define ION_IOC_TEST_KERNEL_MAPPING _IOW(ION_IOC_MAGIC, 0xf2, struct ion_test_rw_data)
#endif
/* WARNING: DO NOT EDIT, AUTO-GENERATED CODE - SEE TOP FOR INSTRUCTIONS */

View File

@ -1,196 +0,0 @@
/*
* drivers/staging/android/uapi/ion.h
*
* Copyright (C) 2011 Google, Inc.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#ifndef _UAPI_LINUX_ION_H
#define _UAPI_LINUX_ION_H
#include <linux/ioctl.h>
#include <linux/types.h>
typedef int ion_user_handle_t;
/**
* enum ion_heap_types - list of all possible types of heaps
* @ION_HEAP_TYPE_SYSTEM: memory allocated via vmalloc
* @ION_HEAP_TYPE_SYSTEM_CONTIG: memory allocated via kmalloc
* @ION_HEAP_TYPE_CARVEOUT: memory allocated from a prereserved
* carveout heap, allocations are physically
* contiguous
* @ION_HEAP_TYPE_DMA: memory allocated via DMA API
* @ION_NUM_HEAPS: helper for iterating over heaps, a bit mask
* is used to identify the heaps, so only 32
* total heap types are supported
*/
enum ion_heap_type {
ION_HEAP_TYPE_SYSTEM,
ION_HEAP_TYPE_SYSTEM_CONTIG,
ION_HEAP_TYPE_CARVEOUT,
ION_HEAP_TYPE_CHUNK,
ION_HEAP_TYPE_DMA,
ION_HEAP_TYPE_CUSTOM, /* must be last so device specific heaps always
are at the end of this enum */
ION_NUM_HEAPS = 16,
};
#define ION_HEAP_SYSTEM_MASK (1 << ION_HEAP_TYPE_SYSTEM)
#define ION_HEAP_SYSTEM_CONTIG_MASK (1 << ION_HEAP_TYPE_SYSTEM_CONTIG)
#define ION_HEAP_CARVEOUT_MASK (1 << ION_HEAP_TYPE_CARVEOUT)
#define ION_HEAP_TYPE_DMA_MASK (1 << ION_HEAP_TYPE_DMA)
#define ION_NUM_HEAP_IDS sizeof(unsigned int) * 8
/**
* allocation flags - the lower 16 bits are used by core ion, the upper 16
* bits are reserved for use by the heaps themselves.
*/
#define ION_FLAG_CACHED 1 /* mappings of this buffer should be
cached, ion will do cache
maintenance when the buffer is
mapped for dma */
#define ION_FLAG_CACHED_NEEDS_SYNC 2 /* mappings of this buffer will created
at mmap time, if this is set
caches must be managed manually */
/**
* DOC: Ion Userspace API
*
* create a client by opening /dev/ion
* most operations handled via following ioctls
*
*/
/**
* struct ion_allocation_data - metadata passed from userspace for allocations
* @len: size of the allocation
* @align: required alignment of the allocation
* @heap_id_mask: mask of heap ids to allocate from
* @flags: flags passed to heap
* @handle: pointer that will be populated with a cookie to use to
* refer to this allocation
*
* Provided by userspace as an argument to the ioctl
*/
struct ion_allocation_data {
size_t len;
size_t align;
unsigned int heap_id_mask;
unsigned int flags;
ion_user_handle_t handle;
};
/**
* struct ion_fd_data - metadata passed to/from userspace for a handle/fd pair
* @handle: a handle
* @fd: a file descriptor representing that handle
*
* For ION_IOC_SHARE or ION_IOC_MAP userspace populates the handle field with
* the handle returned from ion alloc, and the kernel returns the file
* descriptor to share or map in the fd field. For ION_IOC_IMPORT, userspace
* provides the file descriptor and the kernel returns the handle.
*/
struct ion_fd_data {
ion_user_handle_t handle;
int fd;
};
/**
* struct ion_handle_data - a handle passed to/from the kernel
* @handle: a handle
*/
struct ion_handle_data {
ion_user_handle_t handle;
};
/**
* struct ion_custom_data - metadata passed to/from userspace for a custom ioctl
* @cmd: the custom ioctl function to call
* @arg: additional data to pass to the custom ioctl, typically a user
* pointer to a predefined structure
*
* This works just like the regular cmd and arg fields of an ioctl.
*/
struct ion_custom_data {
unsigned int cmd;
unsigned long arg;
};
#define ION_IOC_MAGIC 'I'
/**
* DOC: ION_IOC_ALLOC - allocate memory
*
* Takes an ion_allocation_data struct and returns it with the handle field
* populated with the opaque handle for the allocation.
*/
#define ION_IOC_ALLOC _IOWR(ION_IOC_MAGIC, 0, \
struct ion_allocation_data)
/**
* DOC: ION_IOC_FREE - free memory
*
* Takes an ion_handle_data struct and frees the handle.
*/
#define ION_IOC_FREE _IOWR(ION_IOC_MAGIC, 1, struct ion_handle_data)
/**
* DOC: ION_IOC_MAP - get a file descriptor to mmap
*
* Takes an ion_fd_data struct with the handle field populated with a valid
* opaque handle. Returns the struct with the fd field set to a file
* descriptor open in the current address space. This file descriptor
* can then be used as an argument to mmap.
*/
#define ION_IOC_MAP _IOWR(ION_IOC_MAGIC, 2, struct ion_fd_data)
/**
* DOC: ION_IOC_SHARE - creates a file descriptor to use to share an allocation
*
* Takes an ion_fd_data struct with the handle field populated with a valid
* opaque handle. Returns the struct with the fd field set to a file
* descriptor open in the current address space. This file descriptor
* can then be passed to another process. The corresponding opaque handle can
* be retrieved via ION_IOC_IMPORT.
*/
#define ION_IOC_SHARE _IOWR(ION_IOC_MAGIC, 4, struct ion_fd_data)
/**
* DOC: ION_IOC_IMPORT - imports a shared file descriptor
*
* Takes an ion_fd_data struct with the fd field populated with a valid file
* descriptor obtained from ION_IOC_SHARE and returns the struct with the handle
* filed set to the corresponding opaque handle.
*/
#define ION_IOC_IMPORT _IOWR(ION_IOC_MAGIC, 5, struct ion_fd_data)
/**
* DOC: ION_IOC_SYNC - syncs a shared file descriptors to memory
*
* Deprecated in favor of using the dma_buf api's correctly (syncing
* will happend automatically when the buffer is mapped to a device).
* If necessary should be used after touching a cached buffer from the cpu,
* this will make the buffer in memory coherent.
*/
#define ION_IOC_SYNC _IOWR(ION_IOC_MAGIC, 7, struct ion_fd_data)
/**
* DOC: ION_IOC_CUSTOM - call architecture specific ion ioctl
*
* Takes the argument of the architecture specific ioctl to call and
* passes appropriate userdata for that ioctl
*/
#define ION_IOC_CUSTOM _IOWR(ION_IOC_MAGIC, 6, struct ion_custom_data)
#endif /* _UAPI_LINUX_ION_H */

View File

@ -1,125 +0,0 @@
/*
* Adapted from drivers/staging/android/uapi/ion.h
*
* Copyright (C) 2011 Google, Inc.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#ifndef _UAPI_LINUX_ION_NEW_H
#define _UAPI_LINUX_ION_NEW_H
#include <linux/ioctl.h>
#include <linux/types.h>
#define ION_NUM_HEAP_IDS (sizeof(unsigned int) * 8)
/**
* DOC: Ion Userspace API
*
* create a client by opening /dev/ion
* most operations handled via following ioctls
*
*/
/**
* struct ion_new_allocation_data - metadata passed from userspace for allocations
* @len: size of the allocation
* @heap_id_mask: mask of heap ids to allocate from
* @flags: flags passed to heap
* @handle: pointer that will be populated with a cookie to use to
* refer to this allocation
*
* Provided by userspace as an argument to the ioctl - added _new to denote
* this belongs to the new ION interface.
*/
struct ion_new_allocation_data {
__u64 len;
__u32 heap_id_mask;
__u32 flags;
__u32 fd;
__u32 unused;
};
#define MAX_HEAP_NAME 32
/**
* struct ion_heap_data - data about a heap
* @name - first 32 characters of the heap name
* @type - heap type
* @heap_id - heap id for the heap
*/
struct ion_heap_data {
char name[MAX_HEAP_NAME];
__u32 type;
__u32 heap_id;
__u32 reserved0;
__u32 reserved1;
__u32 reserved2;
};
/**
* struct ion_heap_query - collection of data about all heaps
* @cnt - total number of heaps to be copied
* @heaps - buffer to copy heap data
*/
struct ion_heap_query {
__u32 cnt; /* Total number of heaps to be copied */
__u32 reserved0; /* align to 64bits */
__u64 heaps; /* buffer to be populated */
__u32 reserved1;
__u32 reserved2;
};
#define ION_IOC_MAGIC 'I'
/**
* DOC: ION_IOC_NEW_ALLOC - allocate memory
*
* Takes an ion_allocation_data struct and returns it with the handle field
* populated with the opaque handle for the allocation.
* TODO: This IOCTL will clash by design; however, only one of
* ION_IOC_ALLOC or ION_IOC_NEW_ALLOC paths will be exercised,
* so this should not conflict.
*/
#define ION_IOC_NEW_ALLOC _IOWR(ION_IOC_MAGIC, 0, struct ion_new_allocation_data)
/**
* DOC: ION_IOC_FREE - free memory
*
* Takes an ion_handle_data struct and frees the handle.
*
* #define ION_IOC_FREE _IOWR(ION_IOC_MAGIC, 1, struct ion_handle_data)
* This will come from the older kernels, so don't redefine here
*/
/**
* DOC: ION_IOC_SHARE - creates a file descriptor to use to share an allocation
*
* Takes an ion_fd_data struct with the handle field populated with a valid
* opaque handle. Returns the struct with the fd field set to a file
* descriptor open in the current address space. This file descriptor
* can then be passed to another process. The corresponding opaque handle can
* be retrieved via ION_IOC_IMPORT.
*
* #define ION_IOC_SHARE _IOWR(ION_IOC_MAGIC, 4, struct ion_fd_data)
* This will come from the older kernels, so don't redefine here
*/
/**
* DOC: ION_IOC_HEAP_QUERY - information about available heaps
*
* Takes an ion_heap_query structure and populates information about
* available Ion heaps.
*/
#define ION_IOC_HEAP_QUERY _IOWR(ION_IOC_MAGIC, 8, struct ion_heap_query)
#endif /* _UAPI_LINUX_ION_NEW_H */

View File

@ -1,170 +0,0 @@
/*
* Adapted from drivers/staging/android/uapi/ion.h
*
* Copyright (C) 2019 Google, Inc.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#ifndef _UAPI_LINUX_ION_NEW_H
#define _UAPI_LINUX_ION_NEW_H
#include <linux/ioctl.h>
#include <linux/types.h>
#define ION_NUM_HEAP_IDS (sizeof(unsigned int) * 8)
enum ion_heap_type_ext {
ION_HEAP_TYPE_CUSTOM_EXT = 16,
ION_HEAP_TYPE_MAX = 31,
};
/**
* ion_heap_id - list of standard heap ids that Android can use
*
* @ION_HEAP_SYSTEM Id for the ION_HEAP_TYPE_SYSTEM
* @ION_HEAP_SYSTEM_CONTIG Id for the ION_HEAP_TYPE_SYSTEM_CONTIG
* @ION_HEAP_CHUNK Id for the ION_HEAP_TYPE_CHUNK
* @ION_HEAP_CARVEOUT_START Start of reserved id range for heaps of type
* ION_HEAP_TYPE_CARVEOUT
* @ION_HEAP_CARVEOUT_END End of reserved id range for heaps of type
* ION_HEAP_TYPE_CARVEOUT
* @ION_HEAP_DMA_START Start of reserved id range for heaps of type
* ION_HEAP_TYPE_DMA
* @ION_HEAP_DMA_END End of reserved id range for heaps of type
* ION_HEAP_TYPE_DMA
* @ION_HEAP_CUSTOM_START Start of reserved id range for heaps of custom
* type
* @ION_HEAP_CUSTOM_END End of reserved id range for heaps of custom
* type
*/
enum ion_heap_id {
ION_HEAP_SYSTEM = (1 << ION_HEAP_TYPE_SYSTEM),
ION_HEAP_SYSTEM_CONTIG = (ION_HEAP_SYSTEM << 1),
ION_HEAP_CARVEOUT_START = (ION_HEAP_SYSTEM_CONTIG << 1),
ION_HEAP_CARVEOUT_END = (ION_HEAP_CARVEOUT_START << 4),
ION_HEAP_CHUNK = (ION_HEAP_CARVEOUT_END << 1),
ION_HEAP_DMA_START = (ION_HEAP_CHUNK << 1),
ION_HEAP_DMA_END = (ION_HEAP_DMA_START << 7),
ION_HEAP_CUSTOM_START = (ION_HEAP_DMA_END << 1),
ION_HEAP_CUSTOM_END = (ION_HEAP_CUSTOM_START << 15),
};
#define ION_NUM_MAX_HEAPS (32)
/**
* DOC: Ion Userspace API
*
* create a client by opening /dev/ion
* most operations handled via following ioctls
*
*/
/**
* struct ion_new_allocation_data - metadata passed from userspace for allocations
* @len: size of the allocation
* @heap_id_mask: mask of heap ids to allocate from
* @flags: flags passed to heap
* @handle: pointer that will be populated with a cookie to use to
* refer to this allocation
*
* Provided by userspace as an argument to the ioctl - added _new to denote
* this belongs to the new ION interface.
*/
struct ion_new_allocation_data {
__u64 len;
__u32 heap_id_mask;
__u32 flags;
__u32 fd;
__u32 unused;
};
#define MAX_HEAP_NAME 32
/**
* struct ion_heap_data - data about a heap
* @name - first 32 characters of the heap name
* @type - heap type
* @heap_id - heap id for the heap
*/
struct ion_heap_data {
char name[MAX_HEAP_NAME];
__u32 type;
__u32 heap_id;
__u32 reserved0;
__u32 reserved1;
__u32 reserved2;
};
/**
* struct ion_heap_query - collection of data about all heaps
* @cnt - total number of heaps to be copied
* @heaps - buffer to copy heap data
*/
struct ion_heap_query {
__u32 cnt; /* Total number of heaps to be copied */
__u32 reserved0; /* align to 64bits */
__u64 heaps; /* buffer to be populated */
__u32 reserved1;
__u32 reserved2;
};
#define ION_IOC_MAGIC 'I'
/**
* DOC: ION_IOC_NEW_ALLOC - allocate memory
*
* Takes an ion_allocation_data struct and returns it with the handle field
* populated with the opaque handle for the allocation.
* TODO: This IOCTL will clash by design; however, only one of
* ION_IOC_ALLOC or ION_IOC_NEW_ALLOC paths will be exercised,
* so this should not conflict.
*/
#define ION_IOC_NEW_ALLOC _IOWR(ION_IOC_MAGIC, 0, struct ion_new_allocation_data)
/**
* DOC: ION_IOC_FREE - free memory
*
* Takes an ion_handle_data struct and frees the handle.
*
* #define ION_IOC_FREE _IOWR(ION_IOC_MAGIC, 1, struct ion_handle_data)
* This will come from the older kernels, so don't redefine here
*/
/**
* DOC: ION_IOC_SHARE - creates a file descriptor to use to share an allocation
*
* Takes an ion_fd_data struct with the handle field populated with a valid
* opaque handle. Returns the struct with the fd field set to a file
* descriptor open in the current address space. This file descriptor
* can then be passed to another process. The corresponding opaque handle can
* be retrieved via ION_IOC_IMPORT.
*
* #define ION_IOC_SHARE _IOWR(ION_IOC_MAGIC, 4, struct ion_fd_data)
* This will come from the older kernels, so don't redefine here
*/
/**
* DOC: ION_IOC_HEAP_QUERY - information about available heaps
*
* Takes an ion_heap_query structure and populates information about
* available Ion heaps.
*/
#define ION_IOC_HEAP_QUERY _IOWR(ION_IOC_MAGIC, 8, struct ion_heap_query)
/**
* DOC: ION_IOC_HEAP_ABI_VERSION - return ABI version
*
* Returns ABI version for this driver
*/
#define ION_IOC_ABI_VERSION _IOR(ION_IOC_MAGIC, 9, __u32)
#endif /* _UAPI_LINUX_ION_NEW_H */

View File

@ -1,70 +0,0 @@
/*
* drivers/staging/android/uapi/ion.h
*
* Copyright (C) 2011 Google, Inc.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#ifndef _UAPI_LINUX_ION_TEST_H
#define _UAPI_LINUX_ION_TEST_H
#include <linux/ioctl.h>
#include <linux/types.h>
/**
* struct ion_test_rw_data - metadata passed to the kernel to read handle
* @ptr: a pointer to an area at least as large as size
* @offset: offset into the ion buffer to start reading
* @size: size to read or write
* @write: 1 to write, 0 to read
*/
struct ion_test_rw_data {
__u64 ptr;
__u64 offset;
__u64 size;
int write;
int __padding;
};
#define ION_IOC_MAGIC 'I'
/**
* DOC: ION_IOC_TEST_SET_DMA_BUF - attach a dma buf to the test driver
*
* Attaches a dma buf fd to the test driver. Passing a second fd or -1 will
* release the first fd.
*/
#define ION_IOC_TEST_SET_FD \
_IO(ION_IOC_MAGIC, 0xf0)
/**
* DOC: ION_IOC_TEST_DMA_MAPPING - read or write memory from a handle as DMA
*
* Reads or writes the memory from a handle using an uncached mapping. Can be
* used by unit tests to emulate a DMA engine as close as possible. Only
* expected to be used for debugging and testing, may not always be available.
*/
#define ION_IOC_TEST_DMA_MAPPING \
_IOW(ION_IOC_MAGIC, 0xf1, struct ion_test_rw_data)
/**
* DOC: ION_IOC_TEST_KERNEL_MAPPING - read or write memory from a handle
*
* Reads or writes the memory from a handle using a kernel mapping. Can be
* used by unit tests to test heap map_kernel functions. Only expected to be
* used for debugging and testing, may not always be available.
*/
#define ION_IOC_TEST_KERNEL_MAPPING \
_IOW(ION_IOC_MAGIC, 0xf2, struct ion_test_rw_data)
#endif /* _UAPI_LINUX_ION_H */

View File

@ -1,34 +0,0 @@
//
// Copyright (C) 2013 The Android Open Source Project
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
cc_test {
name: "ion-unit-tests",
cflags: [
"-g",
"-Wno-missing-field-initializers",
],
shared_libs: ["libion"],
srcs: [
"allocate_test.cpp",
"exit_test.cpp",
"heap_query.cpp",
"system_heap.cpp",
"invalid_values_test.cpp",
"ion_test_fixture.cpp",
"map_test.cpp",
"modular_heap_check.cpp",
],
}

View File

@ -1,146 +0,0 @@
/*
* Copyright (C) 2013 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <sys/mman.h>
#include <memory>
#include <gtest/gtest.h>
#include <ion/ion.h>
#include "ion_test_fixture.h"
class Allocate : public IonTest {};
TEST_F(Allocate, Allocate) {
static const size_t allocationSizes[] = {4 * 1024, 64 * 1024, 1024 * 1024, 2 * 1024 * 1024};
for (const auto& heap : ion_heaps) {
for (size_t size : allocationSizes) {
SCOPED_TRACE(::testing::Message()
<< "heap:" << heap.name << ":" << heap.type << ":" << heap.heap_id);
SCOPED_TRACE(::testing::Message() << "size " << size);
int fd;
ASSERT_EQ(0, ion_alloc_fd(ionfd, size, 0, (1 << heap.heap_id), 0, &fd));
ASSERT_TRUE(fd != 0);
ASSERT_EQ(close(fd), 0); // free the buffer
}
}
}
TEST_F(Allocate, AllocateCached) {
static const size_t allocationSizes[] = {4 * 1024, 64 * 1024, 1024 * 1024, 2 * 1024 * 1024};
for (const auto& heap : ion_heaps) {
for (size_t size : allocationSizes) {
SCOPED_TRACE(::testing::Message()
<< "heap:" << heap.name << ":" << heap.type << ":" << heap.heap_id);
SCOPED_TRACE(::testing::Message() << "size " << size);
int fd;
ASSERT_EQ(0, ion_alloc_fd(ionfd, size, 0, (1 << heap.heap_id), ION_FLAG_CACHED, &fd));
ASSERT_TRUE(fd != 0);
ASSERT_EQ(close(fd), 0); // free the buffer
}
}
}
TEST_F(Allocate, AllocateCachedNeedsSync) {
static const size_t allocationSizes[] = {4 * 1024, 64 * 1024, 1024 * 1024, 2 * 1024 * 1024};
for (const auto& heap : ion_heaps) {
for (size_t size : allocationSizes) {
SCOPED_TRACE(::testing::Message()
<< "heap:" << heap.name << ":" << heap.type << ":" << heap.heap_id);
SCOPED_TRACE(::testing::Message() << "size " << size);
int fd;
ASSERT_EQ(0, ion_alloc_fd(ionfd, size, 0, (1 << heap.heap_id),
ION_FLAG_CACHED_NEEDS_SYNC, &fd));
ASSERT_TRUE(fd != 0);
ASSERT_EQ(close(fd), 0); // free the buffer
}
}
}
TEST_F(Allocate, RepeatedAllocate) {
static const size_t allocationSizes[] = {4 * 1024, 64 * 1024, 1024 * 1024, 2 * 1024 * 1024};
for (const auto& heap : ion_heaps) {
for (size_t size : allocationSizes) {
SCOPED_TRACE(::testing::Message()
<< "heap:" << heap.name << ":" << heap.type << ":" << heap.heap_id);
SCOPED_TRACE(::testing::Message() << "size " << size);
int fd;
for (unsigned int i = 0; i < 1024; i++) {
SCOPED_TRACE(::testing::Message() << "iteration " << i);
ASSERT_EQ(0, ion_alloc_fd(ionfd, size, 0, (1 << heap.heap_id), 0, &fd));
ASSERT_TRUE(fd != 0);
ASSERT_EQ(close(fd), 0); // free the buffer
}
}
}
}
TEST_F(Allocate, Large) {
for (const auto& heap : ion_heaps) {
SCOPED_TRACE(::testing::Message()
<< "heap:" << heap.name << ":" << heap.type << ":" << heap.heap_id);
int fd;
ASSERT_EQ(-ENOMEM,
ion_alloc_fd(ionfd, 3UL * 1024 * 1024 * 1024, 0, (1 << heap.heap_id), 0, &fd));
}
}
// Make sure all heaps always return zeroed pages
TEST_F(Allocate, Zeroed) {
auto zeroes_ptr = std::make_unique<char[]>(4096);
for (const auto& heap : ion_heaps) {
SCOPED_TRACE(::testing::Message()
<< "heap:" << heap.name << ":" << heap.type << ":" << heap.heap_id);
int fds[16];
for (unsigned int i = 0; i < 16; i++) {
int map_fd = -1;
ASSERT_EQ(0, ion_alloc_fd(ionfd, 4096, 0, (1 << heap.heap_id), 0, &map_fd));
ASSERT_GE(map_fd, 0);
void* ptr = NULL;
ptr = mmap(NULL, 4096, PROT_WRITE, MAP_SHARED, map_fd, 0);
ASSERT_TRUE(ptr != NULL);
memset(ptr, 0xaa, 4096);
ASSERT_EQ(0, munmap(ptr, 4096));
fds[i] = map_fd;
}
for (unsigned int i = 0; i < 16; i++) {
ASSERT_EQ(0, close(fds[i]));
}
int new_ionfd = ion_open();
int map_fd = -1;
ASSERT_EQ(0, ion_alloc_fd(new_ionfd, 4096, 0, (1 << heap.heap_id), 0, &map_fd));
ASSERT_GE(map_fd, 0);
void* ptr = NULL;
ptr = mmap(NULL, 4096, PROT_READ, MAP_SHARED, map_fd, 0);
ASSERT_TRUE(ptr != NULL);
ASSERT_EQ(0, memcmp(ptr, zeroes_ptr.get(), 4096));
ASSERT_EQ(0, munmap(ptr, 4096));
ASSERT_EQ(0, close(map_fd));
ASSERT_EQ(0, ion_close(new_ionfd));
}
}

View File

@ -1,227 +0,0 @@
/*
* Copyright (C) 2013 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <sys/mman.h>
#include <gtest/gtest.h>
#include <ion/ion.h>
#include "ion_test_fixture.h"
class Exit : public IonTest {};
TEST_F(Exit, WithAllocFd) {
static const size_t allocationSizes[] = {4 * 1024, 64 * 1024, 1024 * 1024, 2 * 1024 * 1024};
for (const auto& heap : ion_heaps) {
for (size_t size : allocationSizes) {
SCOPED_TRACE(::testing::Message()
<< "heap:" << heap.name << ":" << heap.type << ":" << heap.heap_id);
SCOPED_TRACE(::testing::Message() << "size " << size);
EXPECT_EXIT(
{
int handle_fd = -1;
ASSERT_EQ(0,
ion_alloc_fd(ionfd, size, 0, (1 << heap.heap_id), 0, &handle_fd));
ASSERT_NE(-1, handle_fd);
exit(0);
},
::testing::ExitedWithCode(0), "");
}
}
}
TEST_F(Exit, WithRepeatedAllocFd) {
static const size_t allocationSizes[] = {4 * 1024, 64 * 1024, 1024 * 1024, 2 * 1024 * 1024};
for (const auto& heap : ion_heaps) {
for (size_t size : allocationSizes) {
for (unsigned int i = 0; i < 1024; i++) {
SCOPED_TRACE(::testing::Message()
<< "heap:" << heap.name << ":" << heap.type << ":" << heap.heap_id);
SCOPED_TRACE(::testing::Message() << "size " << size);
ASSERT_EXIT(
{
int handle_fd = -1;
ASSERT_EQ(0, ion_alloc_fd(ionfd, size, 0, (1 << heap.heap_id), 0,
&handle_fd));
ASSERT_NE(-1, handle_fd);
exit(0);
},
::testing::ExitedWithCode(0), "")
<< "failed on heap " << heap.name << ":" << heap.type << ":" << heap.heap_id
<< " and size " << size << " on iteration " << i;
}
}
}
}
TEST_F(Exit, WithMapping) {
static const size_t allocationSizes[] = {4 * 1024, 64 * 1024, 1024 * 1024, 2 * 1024 * 1024};
for (const auto& heap : ion_heaps) {
for (size_t size : allocationSizes) {
SCOPED_TRACE(::testing::Message()
<< "heap:" << heap.name << ":" << heap.type << ":" << heap.heap_id);
SCOPED_TRACE(::testing::Message() << "size " << size);
EXPECT_EXIT(
{
int map_fd = -1;
ASSERT_EQ(0, ion_alloc_fd(ionfd, size, 0, (1 << heap.heap_id), 0, &map_fd));
ASSERT_GE(map_fd, 0);
void* ptr;
ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, map_fd, 0);
ASSERT_TRUE(ptr != NULL);
exit(0);
},
::testing::ExitedWithCode(0), "");
}
}
}
TEST_F(Exit, WithPartialMapping) {
static const size_t allocationSizes[] = {64 * 1024, 1024 * 1024, 2 * 1024 * 1024};
for (const auto& heap : ion_heaps) {
for (size_t size : allocationSizes) {
SCOPED_TRACE(::testing::Message()
<< "heap:" << heap.name << ":" << heap.type << ":" << heap.heap_id);
SCOPED_TRACE(::testing::Message() << "size " << size);
EXPECT_EXIT(
{
int map_fd = -1;
ASSERT_EQ(0, ion_alloc_fd(ionfd, size, 0, (1 << heap.heap_id), 0, &map_fd));
ASSERT_GE(map_fd, 0);
void* ptr;
ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, map_fd, 0);
ASSERT_TRUE(ptr != NULL);
ASSERT_EQ(0, munmap(ptr, size / 2));
exit(0);
},
::testing::ExitedWithCode(0), "");
}
}
}
TEST_F(Exit, WithMappingCached) {
static const size_t allocationSizes[] = {4 * 1024, 64 * 1024, 1024 * 1024, 2 * 1024 * 1024};
for (const auto& heap : ion_heaps) {
for (size_t size : allocationSizes) {
SCOPED_TRACE(::testing::Message()
<< "heap:" << heap.name << ":" << heap.type << ":" << heap.heap_id);
SCOPED_TRACE(::testing::Message() << "size " << size);
EXPECT_EXIT(
{
int map_fd = -1;
ASSERT_EQ(0, ion_alloc_fd(ionfd, size, 0, (1 << heap.heap_id),
ION_FLAG_CACHED, &map_fd));
ASSERT_GE(map_fd, 0);
void* ptr;
ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, map_fd, 0);
ASSERT_TRUE(ptr != NULL);
exit(0);
},
::testing::ExitedWithCode(0), "");
}
}
}
TEST_F(Exit, WithPartialMappingCached) {
static const size_t allocationSizes[] = {64 * 1024, 1024 * 1024, 2 * 1024 * 1024};
for (const auto& heap : ion_heaps) {
for (size_t size : allocationSizes) {
SCOPED_TRACE(::testing::Message()
<< "heap:" << heap.name << ":" << heap.type << ":" << heap.heap_id);
SCOPED_TRACE(::testing::Message() << "size " << size);
EXPECT_EXIT(
{
int map_fd = -1;
ASSERT_EQ(0, ion_alloc_fd(ionfd, size, 0, (1 << heap.heap_id),
ION_FLAG_CACHED, &map_fd));
ASSERT_GE(map_fd, 0);
void* ptr;
ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, map_fd, 0);
ASSERT_TRUE(ptr != NULL);
ASSERT_EQ(0, munmap(ptr, size / 2));
exit(0);
},
::testing::ExitedWithCode(0), "");
}
}
}
TEST_F(Exit, WithMappingNeedsSync) {
static const size_t allocationSizes[] = {4 * 1024, 64 * 1024, 1024 * 1024, 2 * 1024 * 1024};
for (const auto& heap : ion_heaps) {
for (size_t size : allocationSizes) {
SCOPED_TRACE(::testing::Message()
<< "heap:" << heap.name << ":" << heap.type << ":" << heap.heap_id);
SCOPED_TRACE(::testing::Message() << "size " << size);
EXPECT_EXIT(
{
int map_fd = -1;
ASSERT_EQ(0, ion_alloc_fd(ionfd, size, 0, (1 << heap.heap_id),
ION_FLAG_CACHED | ION_FLAG_CACHED_NEEDS_SYNC,
&map_fd));
ASSERT_GE(map_fd, 0);
void* ptr;
ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, map_fd, 0);
ASSERT_TRUE(ptr != NULL);
exit(0);
},
::testing::ExitedWithCode(0), "");
}
}
}
TEST_F(Exit, WithPartialMappingNeedsSync) {
static const size_t allocationSizes[] = {64 * 1024, 1024 * 1024, 2 * 1024 * 1024};
for (const auto& heap : ion_heaps) {
for (size_t size : allocationSizes) {
SCOPED_TRACE(::testing::Message()
<< "heap:" << heap.name << ":" << heap.type << ":" << heap.heap_id);
SCOPED_TRACE(::testing::Message() << "size " << size);
EXPECT_EXIT(
{
int map_fd = -1;
ASSERT_EQ(0, ion_alloc_fd(ionfd, size, 0, (1 << heap.heap_id),
ION_FLAG_CACHED | ION_FLAG_CACHED_NEEDS_SYNC,
&map_fd));
ASSERT_GE(map_fd, 0);
void* ptr;
ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, map_fd, 0);
ASSERT_TRUE(ptr != NULL);
ASSERT_EQ(0, munmap(ptr, size / 2));
exit(0);
},
::testing::ExitedWithCode(0), "");
}
}
}

View File

@ -1,48 +0,0 @@
/*
* Copyright (C) 2019 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gtest/gtest.h>
#include <ion/ion.h>
#include "ion_test_fixture.h"
class HeapQuery : public IonTest {};
TEST_F(HeapQuery, AtleastOneHeap) {
ASSERT_GT(ion_heaps.size(), 0);
}
// TODO: Adjust this test to account for the range of valid carveout and DMA heap ids.
TEST_F(HeapQuery, HeapIdVerify) {
for (const auto& heap : ion_heaps) {
SCOPED_TRACE(::testing::Message() << "Invalid id for heap:" << heap.name << ":" << heap.type
<< ":" << heap.heap_id);
switch (heap.type) {
case ION_HEAP_TYPE_SYSTEM:
ASSERT_TRUE((1 << heap.heap_id) & ION_HEAP_SYSTEM_MASK);
break;
case ION_HEAP_TYPE_SYSTEM_CONTIG:
ASSERT_TRUE((1 << heap.heap_id) & ION_HEAP_SYSTEM_CONTIG_MASK);
break;
case ION_HEAP_TYPE_CARVEOUT:
ASSERT_TRUE((1 << heap.heap_id) & ION_HEAP_CARVEOUT_MASK);
break;
case ION_HEAP_TYPE_DMA:
ASSERT_TRUE((1 << heap.heap_id) & ION_HEAP_TYPE_DMA_MASK);
break;
}
}
}

View File

@ -1,86 +0,0 @@
/*
* Copyright (C) 2013 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <sys/mman.h>
#include <memory>
#include <vector>
#include <gtest/gtest.h>
#include <ion/ion.h>
#include "ion_test_fixture.h"
class InvalidValues : public IonTest {};
TEST_F(InvalidValues, ion_close) {
EXPECT_EQ(-EBADF, ion_close(-1));
}
TEST_F(InvalidValues, ion_alloc_fd) {
int fd;
// no heaps
EXPECT_EQ(-ENODEV, ion_alloc_fd(ionfd, 4096, 0, 0, 0, &fd));
for (const auto& heap : ion_heaps) {
// invalid ion_fd
int ret = ion_alloc_fd(0, 4096, 0, (1 << heap.heap_id), 0, &fd);
EXPECT_TRUE(ret == -EINVAL || ret == -ENOTTY);
// invalid ion_fd
EXPECT_EQ(-EBADF, ion_alloc_fd(-1, 4096, 0, (1 << heap.heap_id), 0, &fd));
SCOPED_TRACE(::testing::Message()
<< "heap:" << heap.name << ":" << heap.type << ":" << heap.heap_id);
// zero size
EXPECT_EQ(-EINVAL, ion_alloc_fd(ionfd, 0, 0, (1 << heap.heap_id), 0, &fd));
// too large size
EXPECT_EQ(-EINVAL, ion_alloc_fd(ionfd, -1, 0, (1 << heap.heap_id), 0, &fd));
// bad alignment
// TODO: Current userspace and kernel code completely ignores alignment. So this
// test is going to fail. We need to completely remove alignment from the API.
// All memory by default is always page aligned. OR actually pass the alignment
// down into the kernel and make kernel respect the alignment.
// EXPECT_EQ(-EINVAL, ion_alloc_fd(ionfd, 4096, -1, (1 << heap.heap_id), 0, &fd));
// NULL fd
EXPECT_EQ(-EINVAL, ion_alloc_fd(ionfd, 4096, 0, (1 << heap.heap_id), 0, nullptr));
}
}
TEST_F(InvalidValues, ion_query_heap_cnt) {
// NULL count
EXPECT_EQ(-EINVAL, ion_query_heap_cnt(ionfd, nullptr));
int heap_count;
// bad fd
EXPECT_EQ(-EBADF, ion_query_heap_cnt(-1, &heap_count));
}
TEST_F(InvalidValues, ion_query_get_heaps) {
int heap_count;
ASSERT_EQ(0, ion_query_heap_cnt(ionfd, &heap_count));
ASSERT_GT(heap_count, 0);
// nullptr buffers, still returns success but without
// the ion_heap_data.
EXPECT_EQ(0, ion_query_get_heaps(ionfd, heap_count, nullptr));
std::unique_ptr<struct ion_heap_data[]> heaps =
std::make_unique<struct ion_heap_data[]>(heap_count);
// bad fd
EXPECT_EQ(-EBADF, ion_query_get_heaps(-1, heap_count, heaps.get()));
// invalid heap data pointer
EXPECT_EQ(-EFAULT, ion_query_get_heaps(ionfd, heap_count, reinterpret_cast<void*>(0xdeadf00d)));
}

View File

@ -1,40 +0,0 @@
/*
* Copyright (C) 2013 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gtest/gtest.h>
#include <ion/ion.h>
#include "ion_test_fixture.h"
IonTest::IonTest() : ionfd(-1), ion_heaps() {}
void IonTest::SetUp() {
ionfd = ion_open();
ASSERT_GE(ionfd, 0);
int heap_count;
int ret = ion_query_heap_cnt(ionfd, &heap_count);
ASSERT_EQ(ret, 0);
ASSERT_GT(heap_count, 0);
ion_heaps.resize(heap_count, {});
ret = ion_query_get_heaps(ionfd, heap_count, ion_heaps.data());
ASSERT_EQ(ret, 0);
}
void IonTest::TearDown() {
ion_close(ionfd);
}

View File

@ -1,36 +0,0 @@
/*
* Copyright (C) 2013 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef ION_TEST_FIXTURE_H_
#define ION_TEST_FIXTURE_H_
#include <gtest/gtest.h>
#include <linux/ion_4.12.h>
#include <vector>
using ::testing::Test;
class IonTest : public virtual Test {
public:
IonTest();
virtual ~IonTest(){};
virtual void SetUp();
virtual void TearDown();
int ionfd;
std::vector<struct ion_heap_data> ion_heaps;
};
#endif /* ION_TEST_FIXTURE_H_ */

View File

@ -1,128 +0,0 @@
/*
* Copyright (C) 2013 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <sys/mman.h>
#include <unistd.h>
#include <gtest/gtest.h>
#include <ion/ion.h>
#include "ion_test_fixture.h"
class Map : public IonTest {};
TEST_F(Map, MapFd) {
static const size_t allocationSizes[] = {4 * 1024, 64 * 1024, 1024 * 1024, 2 * 1024 * 1024};
for (const auto& heap : ion_heaps) {
for (size_t size : allocationSizes) {
SCOPED_TRACE(::testing::Message()
<< "heap:" << heap.name << ":" << heap.type << ":" << heap.heap_id);
SCOPED_TRACE(::testing::Message() << "size " << size);
int map_fd = -1;
ASSERT_EQ(0, ion_alloc_fd(ionfd, size, 0, (1 << heap.heap_id), 0, &map_fd));
ASSERT_GE(map_fd, 0);
void* ptr;
ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, map_fd, 0);
ASSERT_TRUE(ptr != NULL);
ASSERT_EQ(0, close(map_fd));
memset(ptr, 0xaa, size);
ASSERT_EQ(0, munmap(ptr, size));
}
}
}
TEST_F(Map, MapOffset) {
for (const auto& heap : ion_heaps) {
SCOPED_TRACE(::testing::Message()
<< "heap:" << heap.name << ":" << heap.type << ":" << heap.heap_id);
int map_fd = -1;
ASSERT_EQ(0, ion_alloc_fd(ionfd, getpagesize() * 2, 0, (1 << heap.heap_id), 0, &map_fd));
ASSERT_GE(map_fd, 0);
unsigned char* ptr;
ptr = (unsigned char*)mmap(NULL, getpagesize() * 2, PROT_READ | PROT_WRITE, MAP_SHARED,
map_fd, 0);
ASSERT_TRUE(ptr != NULL);
memset(ptr, 0, getpagesize());
memset(ptr + getpagesize(), 0xaa, getpagesize());
ASSERT_EQ(0, munmap(ptr, getpagesize() * 2));
ptr = (unsigned char*)mmap(NULL, getpagesize(), PROT_READ | PROT_WRITE, MAP_SHARED, map_fd,
getpagesize());
ASSERT_TRUE(ptr != NULL);
ASSERT_EQ(ptr[0], 0xaa);
ASSERT_EQ(ptr[getpagesize() - 1], 0xaa);
ASSERT_EQ(0, munmap(ptr, getpagesize()));
ASSERT_EQ(0, close(map_fd));
}
}
TEST_F(Map, MapCached) {
static const size_t allocationSizes[] = {4 * 1024, 64 * 1024, 1024 * 1024, 2 * 1024 * 1024};
for (const auto& heap : ion_heaps) {
for (size_t size : allocationSizes) {
SCOPED_TRACE(::testing::Message()
<< "heap:" << heap.name << ":" << heap.type << ":" << heap.heap_id);
SCOPED_TRACE(::testing::Message() << "size " << size);
int map_fd = -1;
unsigned int flags = ION_FLAG_CACHED;
ASSERT_EQ(0, ion_alloc_fd(ionfd, size, 0, (1 << heap.heap_id), flags, &map_fd));
ASSERT_GE(map_fd, 0);
void* ptr;
ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, map_fd, 0);
ASSERT_TRUE(ptr != NULL);
ASSERT_EQ(0, close(map_fd));
memset(ptr, 0xaa, size);
ASSERT_EQ(0, munmap(ptr, size));
}
}
}
TEST_F(Map, MapCachedNeedsSync) {
static const size_t allocationSizes[] = {4 * 1024, 64 * 1024, 1024 * 1024, 2 * 1024 * 1024};
for (const auto& heap : ion_heaps) {
for (size_t size : allocationSizes) {
SCOPED_TRACE(::testing::Message()
<< "heap:" << heap.name << ":" << heap.type << ":" << heap.heap_id);
SCOPED_TRACE(::testing::Message() << "size " << size);
int map_fd = -1;
unsigned int flags = ION_FLAG_CACHED | ION_FLAG_CACHED_NEEDS_SYNC;
ASSERT_EQ(0, ion_alloc_fd(ionfd, size, 0, (1 << heap.heap_id), flags, &map_fd));
ASSERT_GE(map_fd, 0);
void* ptr;
ptr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, map_fd, 0);
ASSERT_TRUE(ptr != NULL);
ASSERT_EQ(0, close(map_fd));
memset(ptr, 0xaa, size);
ASSERT_EQ(0, munmap(ptr, size));
}
}
}

View File

@ -1,30 +0,0 @@
/*
* Copyright (C) 2019 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gtest/gtest.h>
#include <ion/ion.h>
#include "ion_test_fixture.h"
class ModularHeapCheck : public IonTest {};
TEST_F(ModularHeapCheck, ModularHeapCheckSimple) {
if (ion_is_using_modular_heaps(ionfd)) {
std::cout << "Heaps are modular." << std::endl;
} else {
std::cout << "Heaps are built-in." << std::endl;
}
}

View File

@ -1,44 +0,0 @@
/*
* Copyright (C) 2019 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <unistd.h>
#include <gtest/gtest.h>
#include <iostream>
#include <ion/ion.h>
#include "ion_test_fixture.h"
// Fixture alias: IonTest (ion_test_fixture.h) provides 'ionfd' and the
// enumerated 'ion_heaps' list used by these tests.
class SystemHeap : public IonTest {};

// Verifies that a system-type ION heap is present and that its heap id is
// covered by ION_HEAP_SYSTEM_MASK.
TEST_F(SystemHeap, Presence) {
    bool system_heap_found = false;
    for (const auto& heap : ion_heaps) {
        if (heap.type == ION_HEAP_TYPE_SYSTEM) {
            system_heap_found = true;
            EXPECT_TRUE((1 << heap.heap_id) & ION_HEAP_SYSTEM_MASK);
        }
    }
    // We now expect the system heap to exist from Android
    ASSERT_TRUE(system_heap_found);
}

// Allocates one page from the system heap and frees it by closing the fd.
TEST_F(SystemHeap, Allocate) {
    int fd;
    ASSERT_EQ(0, ion_alloc_fd(ionfd, getpagesize(), 0, ION_HEAP_SYSTEM_MASK, 0, &fd));
    // NOTE(review): this accepts fd == -1; 'fd >= 0' would be the stricter
    // validity check — confirm intended semantics of ion_alloc_fd's out-param.
    ASSERT_TRUE(fd != 0);
    ASSERT_EQ(close(fd), 0);  // free the buffer
}

View File

@ -1 +0,0 @@
../.clang-format-4

View File

@ -1,85 +0,0 @@
//
// Copyright (C) 2018 The Android Open Source Project
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Compiler flags and shared-library deps common to all libmeminfo targets.
cc_defaults {
    name: "libmeminfo_defaults",
    cflags: [
        "-Wall",
        "-Werror",
    ],
    shared_libs: [
        "libbase",
        "liblog",
        "libprocinfo",
    ],
    target: {
        // Host builds are supported on Linux only; disabled for darwin.
        darwin: {
            enabled: false,
        },
    },
}

// Core library: per-page (pageacct), per-process (procmeminfo) and
// system-wide (sysmeminfo) memory accounting.
cc_library {
    name: "libmeminfo",
    host_supported: true,
    defaults: ["libmeminfo_defaults"],
    export_include_dirs: ["include"],
    // Public headers expose android-base types, so re-export its headers.
    export_shared_lib_headers: ["libbase"],
    srcs: [
        "pageacct.cpp",
        "procmeminfo.cpp",
        "sysmeminfo.cpp",
    ],
}

// Unit tests; testdata1/testdata2 hold canned /proc-style input files.
cc_test {
    name: "libmeminfo_test",
    defaults: ["libmeminfo_defaults"],
    static_libs: [
        "libmeminfo",
        "libbase",
        "liblog",
    ],
    srcs: [
        "libmeminfo_test.cpp"
    ],
    data: [
        "testdata1/*",
        "testdata2/*"
    ],
}

// Microbenchmarks for the library's parsing paths.
cc_benchmark {
    name: "libmeminfo_benchmark",
    srcs: [
        "libmeminfo_benchmark.cpp",
    ],
    static_libs : [
        "libbase",
        "liblog",
        "libmeminfo",
        "libprocinfo",
    ],
    data: [
        "testdata1/*",
    ],
}

View File

@ -1 +0,0 @@
sspatil@google.com

View File

@ -1,82 +0,0 @@
/*
* Copyright (C) 2018 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <string.h>
#include <sys/types.h>
#include <unistd.h>
#include <string>
#include <vector>
namespace android {
namespace meminfo {
// Aggregated memory counters for a process or a single VMA: the standard
// Linux metrics (VSS/RSS/PSS/USS), swap usage, and the private/shared,
// clean/dirty page breakdowns. All fields default to zero.
struct MemUsage {
    uint64_t vss;
    uint64_t rss;
    uint64_t pss;
    uint64_t uss;
    uint64_t swap;
    uint64_t swap_pss;
    uint64_t private_clean;
    uint64_t private_dirty;
    uint64_t shared_clean;
    uint64_t shared_dirty;
    // Zero-initialize every counter.
    MemUsage()
        : vss(0),
          rss(0),
          pss(0),
          uss(0),
          swap(0),
          swap_pss(0),
          private_clean(0),
          private_dirty(0),
          shared_clean(0),
          shared_dirty(0) {}
    ~MemUsage() = default;
    // Reset all counters to zero so the object can be reused across reads.
    void clear() {
        vss = rss = pss = uss = swap = swap_pss = 0;
        private_clean = private_dirty = shared_clean = shared_dirty = 0;
    }
};
// One virtual memory area of a process, plus the memory usage attributed to
// it (filled in by the accounting code in this library).
struct Vma {
    uint64_t start;    // start address of the mapping
    uint64_t end;      // end address of the mapping
    uint64_t offset;   // offset of the mapping into its backing object
    uint16_t flags;    // mapping flag bits
    std::string name;  // backing file or pseudo-name of the mapping
    Vma() : start(0), end(0), offset(0), flags(0), name("") {}
    Vma(uint64_t s, uint64_t e, uint64_t off, uint16_t f, const char* n)
        : start(s), end(e), offset(off), flags(f), name(n) {}
    ~Vma() = default;
    // Zero only the usage statistics; the address range/name are preserved.
    void clear() { memset(&usage, 0, sizeof(usage)); }
    // Memory usage of this mapping.
    MemUsage usage;
};
} // namespace meminfo
} // namespace android

View File

@ -1,81 +0,0 @@
/*
* Copyright (C) 2018 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <sys/types.h>
#include <unistd.h>
#include <string>
#include <vector>
#include <android-base/unique_fd.h>
namespace android {
namespace meminfo {
class PageAcct final {
    // Class for per-page accounting by using kernel provided interfaces like
    // kpagecount, kpageflags etc.
  public:
    // True when the kernel exposes the page-idle tracking bitmap
    // with both read and write access (CONFIG_IDLE_PAGE_TRACKING).
    static bool KernelHasPageIdle() {
        return (access("/sys/kernel/mm/page_idle/bitmap", R_OK | W_OK) == 0);
    }

    // Opens the kernel accounting interfaces backing the fds below; with
    // 'pageidle_enable' also the page_idle bitmap. Call before the accessors.
    bool InitPageAcct(bool pageidle_enable = false);
    // Reads the kpageflags entry for physical frame 'pfn' into *flags.
    bool PageFlags(uint64_t pfn, uint64_t* flags);
    // Reads the kpagecount (mapping count) of physical frame 'pfn'.
    bool PageMapCount(uint64_t pfn, uint64_t* mapcount);
    // Queries the idle state of frame 'pfn'.
    // NOTE(review): exact return convention (idle/not-idle/error) is defined
    // in pageacct.cpp — confirm there before relying on specific values.
    int IsPageIdle(uint64_t pfn);

    // The only way to create PageAcct object
    static PageAcct& Instance() {
        static PageAcct instance;
        return instance;
    }
    ~PageAcct() = default;

  private:
    PageAcct() : kpagecount_fd_(-1), kpageflags_fd_(-1), pageidle_fd_(-1) {}
    int MarkPageIdle(uint64_t pfn) const;
    int GetPageIdle(uint64_t pfn) const;

    // Non-copyable & Non-movable
    PageAcct(const PageAcct&) = delete;
    PageAcct& operator=(const PageAcct&) = delete;
    PageAcct& operator=(PageAcct&&) = delete;
    PageAcct(PageAcct&&) = delete;

    // Owned fds for the kernel per-page accounting files; -1 until
    // InitPageAcct() succeeds.
    ::android::base::unique_fd kpagecount_fd_;
    ::android::base::unique_fd kpageflags_fd_;
    ::android::base::unique_fd pageidle_fd_;
};
// Returns if the page present bit is set in the value
// passed in.
bool page_present(uint64_t pagemap_val);
// Returns if the page swapped bit is set in the value
// passed in.
bool page_swapped(uint64_t pagemap_val);
// Returns the page frame number (physical page) from
// pagemap value
uint64_t page_pfn(uint64_t pagemap_val);
} // namespace meminfo
} // namespace android

View File

@ -1,133 +0,0 @@
/*
* Copyright (C) 2018 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <sys/types.h>
#include <string>
#include <vector>
#include "meminfo.h"
namespace android {
namespace meminfo {
using VmaCallback = std::function<void(const Vma&)>;
class ProcMemInfo final {
    // Per-process memory accounting
  public:
    // Reset the working set accounting of the process via /proc/<pid>/clear_refs
    static bool ResetWorkingSet(pid_t pid);

    // 'get_wss' selects working-set accounting; 'pgflags'/'pgflags_mask'
    // restrict accounting to pages whose kpageflags match the mask.
    // NOTE(review): construction appears cheap with reads done lazily by the
    // accessors — confirm in procmeminfo.cpp.
    ProcMemInfo(pid_t pid, bool get_wss = false, uint64_t pgflags = 0, uint64_t pgflags_mask = 0);

    // Collects (and caches) the process's VMAs with per-VMA usage stats.
    const std::vector<Vma>& Maps();
    // Total memory usage of the process.
    const MemUsage& Usage();
    // Working-set usage (valid when constructed with get_wss = true).
    const MemUsage& Wss();

    // Same as Maps() except, only valid for reading working set using CONFIG_IDLE_PAGE_TRACKING
    // support in kernel. If the kernel support doesn't exist, the function will return an empty
    // vector.
    const std::vector<Vma>& MapsWithPageIdle();

    // Same as Maps() except, do not read the usage stats for each map.
    const std::vector<Vma>& MapsWithoutUsageStats();

    // If MapsWithoutUsageStats was called, this function will fill in
    // usage stats for this single vma.
    bool FillInVmaStats(Vma& vma);

    // Collect all 'vma' or 'maps' from /proc/<pid>/smaps and store them in 'maps_'. Returns a
    // constant reference to the vma vector after the collection is done.
    //
    // Each 'struct Vma' is *fully* populated by this method (unlike SmapsOrRollup).
    const std::vector<Vma>& Smaps(const std::string& path = "");

    // This method reads /proc/<pid>/smaps and calls the callback() for each
    // vma or map that it finds. The map is converted to 'struct Vma' object which is then
    // passed to the callback.
    // Returns 'false' if the file is malformed.
    bool ForEachVma(const VmaCallback& callback);

    // Used to parse either of /proc/<pid>/{smaps, smaps_rollup} and record the process's
    // Pss and Private memory usage in 'stats'. In particular, the method only populates the fields
    // of the MemUsage structure that are intended to be used by Android's periodic Pss collection.
    //
    // The method populates the following statistics in order to be fast an efficient.
    //   Pss
    //   Rss
    //   Uss
    //   private_clean
    //   private_dirty
    //   SwapPss
    // All other fields of MemUsage are zeroed.
    bool SmapsOrRollup(MemUsage* stats) const;

    // Used to parse either of /proc/<pid>/{smaps, smaps_rollup} and record the process's
    // Pss.
    // Returns 'true' on success and the value of Pss in the out parameter.
    bool SmapsOrRollupPss(uint64_t* pss) const;

    // Swap offsets recorded while reading the process's pagemap entries.
    const std::vector<uint16_t>& SwapOffsets();

    // Reads /proc/<pid>/pagemap for this process for each page within
    // the 'vma' and stores that in 'pagemap'. It is assumed that the 'vma'
    // is obtained by calling Maps() or 'ForEachVma' for the same object. No special checks
    // are made to see if 'vma' is *valid*.
    // Returns false if anything goes wrong, 'true' otherwise.
    bool PageMap(const Vma& vma, std::vector<uint64_t>* pagemap);

    ~ProcMemInfo() = default;

  private:
    // Parses /proc/<pid>/maps into maps_; helpers used by the accessors above.
    bool ReadMaps(bool get_wss, bool use_pageidle = false, bool get_usage_stats = true);
    bool ReadVmaStats(int pagemap_fd, Vma& vma, bool get_wss, bool use_pageidle);

    pid_t pid_;                           // process being accounted
    bool get_wss_;                        // working-set mode flag
    uint64_t pgflags_;                    // kpageflags filter value
    uint64_t pgflags_mask_;               // kpageflags filter mask
    std::vector<Vma> maps_;               // cached VMA list
    MemUsage usage_;                      // cached totals
    std::vector<uint16_t> swap_offsets_;  // cached swap offsets
};
// Makes callback for each 'vma' or 'map' found in file provided. The file is expected to be in the
// same format as /proc/<pid>/smaps. Returns 'false' if the file is malformed.
bool ForEachVmaFromFile(const std::string& path, const VmaCallback& callback);
// Returns if the kernel supports /proc/<pid>/smaps_rollup. Assumes that the
// calling process has access to the /proc/<pid>/smaps_rollup.
// Returns 'false' if the calling process has no permission to read the file if it exists
// of if the file doesn't exist.
bool IsSmapsRollupSupported(pid_t pid);
// Same as ProcMemInfo::SmapsOrRollup but reads the statistics directly
// from a file. The file MUST be in the same format as /proc/<pid>/smaps
// or /proc/<pid>/smaps_rollup
bool SmapsOrRollupFromFile(const std::string& path, MemUsage* stats);
// Same as ProcMemInfo::SmapsOrRollupPss but reads the statistics directly
// from a file and returns total Pss in kB. The file MUST be in the same format
// as /proc/<pid>/smaps or /proc/<pid>/smaps_rollup
bool SmapsOrRollupPssFromFile(const std::string& path, uint64_t* pss);
} // namespace meminfo
} // namespace android

View File

@ -1,93 +0,0 @@
/*
* Copyright (C) 2018 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <sys/types.h>
#include <functional>
#include <map>
#include <string>
#include <vector>
namespace android {
namespace meminfo {
class SysMemInfo final {
    // System or Global memory accounting
  public:
    // Field labels exactly as they appear in /proc/meminfo (colon included).
    static constexpr const char* kMemTotal = "MemTotal:";
    static constexpr const char* kMemFree = "MemFree:";
    static constexpr const char* kMemBuffers = "Buffers:";
    static constexpr const char* kMemCached = "Cached:";
    static constexpr const char* kMemShmem = "Shmem:";
    static constexpr const char* kMemSlab = "Slab:";
    static constexpr const char* kMemSReclaim = "SReclaimable:";
    static constexpr const char* kMemSUnreclaim = "SUnreclaim:";
    static constexpr const char* kMemSwapTotal = "SwapTotal:";
    static constexpr const char* kMemSwapFree = "SwapFree:";
    static constexpr const char* kMemMapped = "Mapped:";
    static constexpr const char* kMemVmallocUsed = "VmallocUsed:";
    static constexpr const char* kMemPageTables = "PageTables:";
    static constexpr const char* kMemKernelStack = "KernelStack:";

    // Tag set used by the overloads that take no explicit tag list.
    static const std::vector<std::string> kDefaultSysMemInfoTags;

    SysMemInfo() = default;

    // Parse /proc/meminfo and read values that are needed
    bool ReadMemInfo(const std::string& path = "/proc/meminfo");
    // Parses only the requested 'tags', storing their values into 'out'.
    bool ReadMemInfo(const std::vector<std::string>& tags, std::vector<uint64_t>* out,
                     const std::string& path = "/proc/meminfo");
    // Parses the default tag set into 'out'.
    bool ReadMemInfo(std::vector<uint64_t>* out, const std::string& path = "/proc/meminfo");

    // Parse /proc/vmallocinfo and return total physical memory mapped
    // in vmalloc area by the kernel.
    // Note that this deliberately ignores binder buffers. They are _always_
    // mapped in a process and are counted for in each process.
    uint64_t ReadVmallocInfo();

    // Getters: values in kB from the last ReadMemInfo(); a tag that was not
    // read yields 0 (std::map operator[] default).
    uint64_t mem_total_kb() { return mem_in_kb_[kMemTotal]; }
    uint64_t mem_free_kb() { return mem_in_kb_[kMemFree]; }
    uint64_t mem_buffers_kb() { return mem_in_kb_[kMemBuffers]; }
    uint64_t mem_cached_kb() { return mem_in_kb_[kMemCached]; }
    uint64_t mem_shmem_kb() { return mem_in_kb_[kMemShmem]; }
    uint64_t mem_slab_kb() { return mem_in_kb_[kMemSlab]; }
    uint64_t mem_slab_reclaimable_kb() { return mem_in_kb_[kMemSReclaim]; }
    uint64_t mem_slab_unreclaimable_kb() { return mem_in_kb_[kMemSUnreclaim]; }
    uint64_t mem_swap_kb() { return mem_in_kb_[kMemSwapTotal]; }
    uint64_t mem_swap_free_kb() { return mem_in_kb_[kMemSwapFree]; }
    uint64_t mem_mapped_kb() { return mem_in_kb_[kMemMapped]; }
    uint64_t mem_vmalloc_used_kb() { return mem_in_kb_[kMemVmallocUsed]; }
    uint64_t mem_page_tables_kb() { return mem_in_kb_[kMemPageTables]; }
    uint64_t mem_kernel_stack_kb() { return mem_in_kb_[kMemKernelStack]; }
    // Memory used by a zram device. NOTE(review): semantics of an empty
    // 'zram_dev' (presumably "all devices") are defined in sysmeminfo.cpp —
    // confirm there.
    uint64_t mem_zram_kb(const std::string& zram_dev = "");

  private:
    std::map<std::string, uint64_t> mem_in_kb_;  // tag -> value (kB) cache
    bool MemZramDevice(const std::string& zram_dev, uint64_t* mem_zram_dev);
    // Shared parser: invokes 'store_val' for each matched tag.
    bool ReadMemInfo(const std::vector<std::string>& tags, const std::string& path,
                     std::function<void(const std::string&, uint64_t)> store_val);
};
// Parse /proc/vmallocinfo and return total physical memory mapped
// in vmalloc area by the kernel. Note that this deliberately ignores binder buffers. They are
// _always_ mapped in a process and are counted for in each process.
uint64_t ReadVmallocInfo(const std::string& path = "/proc/vmallocinfo");
} // namespace meminfo
} // namespace android

View File

@ -1,56 +0,0 @@
//
// Copyright (C) 2019 The Android Open Source Project
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Flags and dependencies shared by libdmabufinfo and its test.
cc_defaults {
    name: "dmabufinfo_defaults",
    static_libs: [
        "libbase",
        "libprocinfo",
    ],
    shared_libs: [
        "liblog",
    ],
    cflags: [
        "-Wall",
        "-Werror",
        "-Wextra",
    ],
}

// Static library that parses per-process and global dmabuf usage.
cc_library_static {
    name: "libdmabufinfo",
    vendor_available: true,
    defaults: ["dmabufinfo_defaults"],
    export_include_dirs: ["include"],
    srcs: [
        "dmabufinfo.cpp",
    ],
}

// Test binary; links libion to allocate real dmabufs at runtime.
cc_test {
    name: "dmabufinfo_test",
    defaults: ["dmabufinfo_defaults"],
    srcs: [
        "dmabufinfo_test.cpp"
    ],
    static_libs: [
        "libc++fs",
        "libdmabufinfo",
        "libion",
        "libmeminfo",
    ],
}

View File

@ -1,265 +0,0 @@
/*
* Copyright (C) 2019 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <dirent.h>
#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>
#include <filesystem>
#include <memory>
#include <string>
#include <vector>
#include <android-base/file.h>
#include <android-base/logging.h>
#include <android-base/parseint.h>
#include <android-base/stringprintf.h>
#include <android-base/strings.h>
#include <procinfo/process_map.h>
#include <dmabufinfo/dmabufinfo.h>
namespace android {
namespace dmabufinfo {
// True when 'path' (a resolved /proc fd symlink target) points at a dmabuf,
// i.e. begins with the kernel's synthetic "/dmabuf" prefix.
static bool FileIsDmaBuf(const std::string& path) {
    static constexpr char kDmaBufPrefix[] = "/dmabuf";
    // rfind with pos 0 succeeds only when the prefix starts at offset 0.
    return path.rfind(kDmaBufPrefix, 0) == 0;
}
// Parses /proc/<pid>/fdinfo/<fd> for a dmabuf fd, extracting the buffer's
// refcount ('count:'), exporter ('exp_name:') and name ('name:') when those
// lines are present. Out-parameters are left untouched for absent lines.
// Returns false only when the fdinfo file cannot be opened.
static bool ReadDmaBufFdInfo(pid_t pid, int fd, std::string* name, std::string* exporter,
                             uint64_t* count) {
    std::string fdinfo = ::android::base::StringPrintf("/proc/%d/fdinfo/%d", pid, fd);
    auto fp = std::unique_ptr<FILE, decltype(&fclose)>{fopen(fdinfo.c_str(), "re"), fclose};
    if (fp == nullptr) {
        LOG(ERROR) << "Failed to open dmabuf info from debugfs";
        return false;
    }
    char* line = nullptr;
    size_t len = 0;
    while (getline(&line, &len, fp.get()) > 0) {
        // Dispatch on the first character before the (more expensive)
        // full prefix comparison.
        switch (line[0]) {
            case 'c':
                if (strncmp(line, "count:", 6) == 0) {
                    char* c = line + 6;
                    *count = strtoull(c, nullptr, 10);
                }
                break;
            case 'e':
                if (strncmp(line, "exp_name:", 9) == 0) {
                    char* c = line + 9;
                    // Trim() strips the surrounding whitespace/newline.
                    *exporter = ::android::base::Trim(c);
                }
                break;
            case 'n':
                if (strncmp(line, "name:", 5) == 0) {
                    char* c = line + 5;
                    *name = ::android::base::Trim(std::string(c));
                }
                break;
        }
    }
    // Release the buffer getline() allocated.
    free(line);
    return true;
}
// TODO: std::filesystem::is_symlink fails to link on vendor code,
// forcing this workaround.
// Move back to libc++fs once it is vendor-available. See b/124012728
/*
 * Returns true when 'filename' is a symbolic link (the link itself, via
 * lstat(), not its target). Unstat-able paths report false.
 */
static bool is_symlink(const char *filename)
{
    struct stat p_statbuf;
    if (lstat(filename, &p_statbuf) < 0) {
        return false;
    }
    /*
     * POSIX only guarantees S_ISLNK() evaluates to non-zero for symlinks;
     * the previous "== 1" comparison relied on an implementation detail.
     */
    return S_ISLNK(p_statbuf.st_mode) != 0;
}
// Walks /proc/<pid>/fd, and for every fd whose symlink target is a dmabuf,
// records an fd reference in 'dmabufs'. Existing entries (matched by inode)
// get their unknown fields backfilled; new buffers are appended.
// Returns false on any unexpected /proc read failure.
static bool ReadDmaBufFdRefs(pid_t pid, std::vector<DmaBuffer>* dmabufs) {
    std::string fdpath = ::android::base::StringPrintf("/proc/%d/fd", pid);
    std::unique_ptr<DIR, int (*)(DIR*)> dir(opendir(fdpath.c_str()), closedir);
    if (!dir) {
        LOG(ERROR) << "Failed to open " << fdpath << " directory" << std::endl;
        return false;
    }
    struct dirent* dent;
    while ((dent = readdir(dir.get()))) {
        std::string path =
                ::android::base::StringPrintf("%s/%s", fdpath.c_str(), dent->d_name);
        // Skip '.'/'..' and anything that is not a symlink (fd entries are).
        if (!strcmp(dent->d_name, ".") || !strcmp(dent->d_name, "..") ||
            !is_symlink(path.c_str())) {
            continue;
        }
        std::string target;
        if (!::android::base::Readlink(path, &target)) {
            LOG(ERROR) << "Failed to find target for symlink: " << path;
            return false;
        }
        if (!FileIsDmaBuf(target)) {
            continue;
        }
        int fd;
        if (!::android::base::ParseInt(dent->d_name, &fd)) {
            LOG(ERROR) << "Dmabuf fd: " << path << " is invalid";
            return false;
        }
        // Set defaults in case the kernel doesn't give us the information
        // we need in fdinfo
        std::string name = "<unknown>";
        std::string exporter = "<unknown>";
        uint64_t count = 0;
        if (!ReadDmaBufFdInfo(pid, fd, &name, &exporter, &count)) {
            LOG(ERROR) << "Failed to read fdinfo for: " << path;
            return false;
        }
        struct stat sb;
        if (stat(path.c_str(), &sb) < 0) {
            PLOG(ERROR) << "Failed to stat: " << path;
            return false;
        }
        // The dmabuf inode uniquely identifies the buffer across processes.
        uint64_t inode = sb.st_ino;
        auto buf = std::find_if(dmabufs->begin(), dmabufs->end(),
                                [&inode](const DmaBuffer& dbuf) { return dbuf.inode() == inode; });
        if (buf != dmabufs->end()) {
            // Known buffer: backfill fields a previous source left unknown.
            if (buf->name() == "" || buf->name() == "<unknown>") buf->SetName(name);
            if (buf->exporter() == "" || buf->exporter() == "<unknown>") buf->SetExporter(exporter);
            if (buf->count() == 0) buf->SetCount(count);
            buf->AddFdRef(pid);
            continue;
        }
        // New buffer: size comes from the stat block count (512-byte units).
        DmaBuffer& db = dmabufs->emplace_back(sb.st_ino, sb.st_blocks * 512, count, exporter, name);
        db.AddFdRef(pid);
    }
    return true;
}
static bool ReadDmaBufMapRefs(pid_t pid, std::vector<DmaBuffer>* dmabufs) {
std::string mapspath = ::android::base::StringPrintf("/proc/%d/maps", pid);
auto fp = std::unique_ptr<FILE, decltype(&fclose)>{fopen(mapspath.c_str(), "re"), fclose};
if (fp == nullptr) {
LOG(ERROR) << "Failed to open maps for pid: " << pid;
return false;
}
char* line = nullptr;
size_t len = 0;
// Process the map if it is dmabuf. Add map reference to existing object in 'dmabufs'
// if it was already found. If it wasn't create a new one and append it to 'dmabufs'
auto account_dmabuf = [&](uint64_t start, uint64_t end, uint16_t /* flags */,
uint64_t /* pgoff */, ino_t inode, const char* name) {
// no need to look into this mapping if it is not dmabuf
if (!FileIsDmaBuf(std::string(name))) {
return;
}
auto buf = std::find_if(dmabufs->begin(), dmabufs->end(),
[&inode](const DmaBuffer& dbuf) { return dbuf.inode() == inode; });
if (buf != dmabufs->end()) {
buf->AddMapRef(pid);
return;
}
// We have a new buffer, but unknown count and name
DmaBuffer& dbuf = dmabufs->emplace_back(inode, end - start, 0, "<unknown>", "<unknown>");
dbuf.AddMapRef(pid);
};
while (getline(&line, &len, fp.get()) > 0) {
if (!::android::procinfo::ReadMapFileContent(line, account_dmabuf)) {
LOG(ERROR) << "Failed t parse maps for pid: " << pid;
return false;
}
}
free(line);
return true;
}
// Public methods
// Public methods

// Parses the global dmabuf bufinfo file at 'path' (debugfs format) into
// 'dmabufs', replacing any previous contents. Non-matching lines (e.g. the
// "Attached Devices" continuation lines) are skipped.
// Returns false only when the file cannot be opened.
bool ReadDmaBufInfo(std::vector<DmaBuffer>* dmabufs, const std::string& path) {
    auto fp = std::unique_ptr<FILE, decltype(&fclose)>{fopen(path.c_str(), "re"), fclose};
    if (fp == nullptr) {
        LOG(ERROR) << "Failed to open dmabuf info from debugfs";
        return false;
    }
    char* line = nullptr;
    size_t len = 0;
    dmabufs->clear();
    while (getline(&line, &len, fp.get()) > 0) {
        // The new dmabuf bufinfo format adds inode number and a name at the end
        // We are looking for lines as follows:
        // size     flags       mode        count  exp_name ino         name
        // 01048576 00000002    00000007    00000001    ion 00018758    CAMERA
        // 01048576 00000002    00000007    00000001    ion 00018758
        uint64_t size, count;
        char* exporter_name = nullptr;
        ino_t inode;
        char* name = nullptr;
        // %ms makes sscanf allocate the matched strings (freed below).
        int matched = sscanf(line, "%" SCNu64 "%*x %*x %" SCNu64 " %ms %lu %ms", &size, &count,
                             &exporter_name, &inode, &name);
        if (matched < 4) {
            continue;
        }
        // 'name' (field 5) is optional in the older format.
        dmabufs->emplace_back(inode, size, count, exporter_name, matched > 4 ? name : "");
        free(exporter_name);
        free(name);  // free(NULL) is a no-op when the name field was absent
    }
    free(line);
    return true;
}
// Replaces the contents of 'dmabufs' with all dmabuf references (fds and
// mappings) held by process 'pid'.
bool ReadDmaBufInfo(pid_t pid, std::vector<DmaBuffer>* dmabufs) {
    dmabufs->clear();
    return AppendDmaBufInfo(pid, dmabufs);
}
// Adds both the fd-held and map-held dmabuf references of process 'pid' to
// 'dmabufs'; entries already present are updated rather than duplicated.
bool AppendDmaBufInfo(pid_t pid, std::vector<DmaBuffer>* dmabufs) {
    if (!ReadDmaBufFdRefs(pid, dmabufs)) {
        LOG(ERROR) << "Failed to read dmabuf fd references";
        return false;
    }
    if (!ReadDmaBufMapRefs(pid, dmabufs)) {
        LOG(ERROR) << "Failed to read dmabuf map references";
        return false;
    }
    return true;
}
} // namespace dmabufinfo
} // namespace android

View File

@ -1,489 +0,0 @@
/* Copyright (C) 2019 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gtest/gtest.h>
#include <inttypes.h>
#include <linux/dma-buf.h>
#include <poll.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <unistd.h>
#include <fstream>
#include <string>
#include <unordered_map>
#include <vector>
#include <android-base/file.h>
#include <android-base/logging.h>
#include <android-base/stringprintf.h>
#include <android-base/unique_fd.h>
#include <ion/ion.h>
#include <dmabufinfo/dmabufinfo.h>
using namespace ::android::dmabufinfo;
using namespace ::android::base;
#define MAX_HEAP_NAME 32
#define ION_HEAP_ANY_MASK (0x7fffffff)
struct ion_heap_data {
char name[MAX_HEAP_NAME];
__u32 type;
__u32 heap_id;
__u32 reserved0;
__u32 reserved1;
__u32 reserved2;
};
#ifndef DMA_BUF_SET_NAME
#define DMA_BUF_SET_NAME _IOW(DMA_BUF_BASE, 5, const char*)
#endif
// Forks a helper process that receives file descriptors over a socketpair;
// the tests use it to create a second process holding references to a
// shared dmabuf.
class fd_sharer {
  public:
    fd_sharer();
    ~fd_sharer() { kill(); }
    // True when the helper child was forked successfully.
    bool ok() const { return child_pid > 0; }
    // Ships 'fd' to the child and waits for its acknowledgement.
    bool sendfd(int fd);
    // SIGKILLs and reaps the child.
    bool kill();
    pid_t pid() const { return child_pid; }

  private:
    unique_fd parent_fd, child_fd;  // two ends of the socketpair
    pid_t child_pid;                // -1 until fork() succeeds
    void run();                     // child-side receive loop
};
// Creates the socketpair and forks the helper; on any failure child_pid
// stays -1 and ok() reports false.
// NOTE(review): run() only returns once the socket errors out, after which
// the child falls through into the rest of the test binary — it is normally
// reaped by kill() before that can happen; confirm that assumption holds.
fd_sharer::fd_sharer() : parent_fd{}, child_fd{}, child_pid{-1} {
    bool sp_ok = android::base::Socketpair(SOCK_STREAM, &parent_fd, &child_fd);
    if (!sp_ok) return;
    child_pid = fork();
    if (child_pid < 0) return;
    if (child_pid == 0) run();  // child never leaves the receive loop
}
// Terminates and reaps the helper child. Returns true only when the child
// was successfully signalled and waited on.
bool fd_sharer::kill() {
    // Guard against failed construction: child_pid stays -1 when fork() or
    // Socketpair() failed, and ::kill(-1, SIGKILL) would signal *every*
    // process the caller has permission to signal.
    if (child_pid <= 0) return false;
    int err = ::kill(child_pid, SIGKILL);
    if (err < 0) return false;
    return ::waitpid(child_pid, nullptr, 0) == child_pid;
}
// Child-side loop: each recvmsg() installs the SCM_RIGHTS-passed fd into
// this process's fd table; a single ack byte is written back so the parent
// knows the fd has landed. Exits when the socket errors (e.g. parent gone).
void fd_sharer::run() {
    while (true) {
        int fd;
        char unused = 0;

        // One-byte payload; the fd itself travels in the control message.
        iovec iov{};
        iov.iov_base = &unused;
        iov.iov_len = sizeof(unused);

        msghdr msg{};
        msg.msg_iov = &iov;
        msg.msg_iovlen = 1;

        // Control buffer sized for exactly one fd.
        char cmsg_buf[CMSG_SPACE(sizeof(fd))];
        msg.msg_control = cmsg_buf;
        msg.msg_controllen = sizeof(cmsg_buf);

        cmsghdr* cmsg = CMSG_FIRSTHDR(&msg);
        cmsg->cmsg_level = SOL_SOCKET;
        cmsg->cmsg_type = SCM_RIGHTS;
        cmsg->cmsg_len = CMSG_LEN(sizeof(fd));

        ssize_t s = TEMP_FAILURE_RETRY(recvmsg(child_fd, &msg, 0));
        if (s == -1) break;

        // Ack so the parent's sendfd() can stop waiting.
        s = TEMP_FAILURE_RETRY(write(child_fd, &unused, sizeof(unused)));
        if (s == -1) break;
    }
}
// Ships 'fd' to the helper child via SCM_RIGHTS and waits (up to 1 s) for
// the child's ack byte, guaranteeing the fd is installed in the child's fd
// table before this returns. Returns false on any send/poll/read failure.
bool fd_sharer::sendfd(int fd) {
    char unused = 0;
    iovec iov{};
    iov.iov_base = &unused;
    iov.iov_len = sizeof(unused);

    msghdr msg{};
    msg.msg_iov = &iov;
    msg.msg_iovlen = 1;

    char cmsg_buf[CMSG_SPACE(sizeof(fd))];
    msg.msg_control = cmsg_buf;
    msg.msg_controllen = sizeof(cmsg_buf);

    cmsghdr* cmsg = CMSG_FIRSTHDR(&msg);
    cmsg->cmsg_level = SOL_SOCKET;
    cmsg->cmsg_type = SCM_RIGHTS;
    cmsg->cmsg_len = CMSG_LEN(sizeof(fd));

    int* fd_buf = reinterpret_cast<int*>(CMSG_DATA(cmsg));
    *fd_buf = fd;

    ssize_t s = TEMP_FAILURE_RETRY(sendmsg(parent_fd, &msg, 0));
    if (s == -1) return false;

    // The target process installs the fd into its fd table during recvmsg().
    // So if we return now, there's a brief window between sendfd() finishing
    // and libmemoryinfo actually seeing that the buffer has been shared. This
    // window is just large enough to break tests.
    //
    // To work around this, wait for the target process to respond with a dummy
    // byte, with a timeout of 1 s.
    pollfd p{};
    p.fd = parent_fd;
    // POLLIN is the constant defined for pollfd.events; the previous POLL_IN
    // is a <signal.h> si_code value that matched only by coincidence on Linux.
    p.events = POLLIN;
    int ready = poll(&p, 1, 1000);
    if (ready != 1) return false;

    s = TEMP_FAILURE_RETRY(read(parent_fd, &unused, sizeof(unused)));
    if (s == -1) return false;

    return true;
}
// Asserts every externally-visible property of a single DmaBuffer entry
// (iterator '_bufptr') in one shot.
#define EXPECT_ONE_BUF_EQ(_bufptr, _name, _fdrefs, _maprefs, _expname, _count, _size) \
    do {                                                                              \
        EXPECT_EQ(_bufptr->name(), _name);                                            \
        EXPECT_EQ(_bufptr->fdrefs().size(), _fdrefs);                                 \
        EXPECT_EQ(_bufptr->maprefs().size(), _maprefs);                               \
        EXPECT_EQ(_bufptr->exporter(), _expname);                                     \
        EXPECT_EQ(_bufptr->count(), _count);                                          \
        EXPECT_EQ(_bufptr->size(), _size);                                            \
    } while (0)

// Asserts whether process '_pid' does (or does not, per '_expect') hold an
// fd reference on the buffer at '_bufptr'.
#define EXPECT_PID_IN_FDREFS(_bufptr, _pid, _expect)                     \
    do {                                                                 \
        const std::unordered_map<pid_t, int>& _fdrefs = _bufptr->fdrefs(); \
        auto _ref = _fdrefs.find(_pid);                                  \
        EXPECT_EQ((_ref != _fdrefs.end()), _expect);                     \
    } while (0)

// Same as above but for mmap references.
#define EXPECT_PID_IN_MAPREFS(_bufptr, _pid, _expect)                     \
    do {                                                                  \
        const std::unordered_map<pid_t, int>& _maprefs = _bufptr->maprefs(); \
        auto _ref = _maprefs.find(_pid);                                  \
        EXPECT_EQ((_ref != _maprefs.end()), _expect);                     \
    } while (0)
// Feeds a canned two-buffer debugfs bufinfo dump (one old-format line with
// no name, one new-format line with a name) through ReadDmaBufInfo() and
// checks every parsed field; the "Attached Devices" lines must be skipped.
TEST(DmaBufInfoParser, TestReadDmaBufInfo) {
    std::string bufinfo = R"bufinfo(00045056    00000002    00000007    00000002    ion 00022069    
	Attached Devices:
Total 0 devices attached
01048576    00000002    00000007    00000001    ion 00019834    CAMERA
	Attached Devices:
	soc:qcom,cam_smmu:msm_cam_smmu_icp
Total 1 devices attached)bufinfo";
    TemporaryFile tf;
    ASSERT_TRUE(tf.fd != -1);
    ASSERT_TRUE(::android::base::WriteStringToFd(bufinfo, tf.fd));
    std::string path = std::string(tf.path);

    std::vector<DmaBuffer> dmabufs;
    EXPECT_TRUE(ReadDmaBufInfo(&dmabufs, path));

    EXPECT_EQ(dmabufs.size(), 2UL);

    // First entry: old format — no name, no per-process references yet.
    EXPECT_EQ(dmabufs[0].size(), 45056UL);
    EXPECT_EQ(dmabufs[0].inode(), 22069UL);
    EXPECT_EQ(dmabufs[0].count(), 2UL);
    EXPECT_EQ(dmabufs[0].exporter(), "ion");
    EXPECT_TRUE(dmabufs[0].name().empty());
    EXPECT_EQ(dmabufs[0].total_refs(), 0ULL);
    EXPECT_TRUE(dmabufs[0].fdrefs().empty());
    EXPECT_TRUE(dmabufs[0].maprefs().empty());

    // Second entry: new format — trailing name field present.
    EXPECT_EQ(dmabufs[1].size(), 1048576UL);
    EXPECT_EQ(dmabufs[1].inode(), 19834UL);
    EXPECT_EQ(dmabufs[1].count(), 1UL);
    EXPECT_EQ(dmabufs[1].exporter(), "ion");
    EXPECT_FALSE(dmabufs[1].name().empty());
    EXPECT_EQ(dmabufs[1].name(), "CAMERA");
    EXPECT_EQ(dmabufs[1].total_refs(), 0ULL);
    EXPECT_TRUE(dmabufs[1].fdrefs().empty());
    EXPECT_TRUE(dmabufs[1].maprefs().empty());
}
// Fixture that allocates real ION-backed dmabufs so libdmabufinfo's
// accounting can be validated against live /proc state.
class DmaBufTester : public ::testing::Test {
  public:
    DmaBufTester() : ion_fd(ion_open()), ion_heap_mask(get_ion_heap_mask()) {}

    ~DmaBufTester() {
        if (ion_fd >= 0) {
            ion_close(ion_fd);
        }
    }

    // True when the ION device opened and a usable heap mask was found;
    // tests bail out early when this is false.
    bool is_valid() { return (ion_fd >= 0 && ion_heap_mask > 0); }

    // Allocates a dmabuf of 'size' bytes and, when 'name' is non-empty,
    // labels it via DMA_BUF_SET_NAME. Returns an empty unique_fd on failure.
    unique_fd allocate(uint64_t size, const std::string& name) {
        int fd;
        int err = ion_alloc_fd(ion_fd, size, 0, ion_heap_mask, 0, &fd);
        if (err < 0) {
            printf("Failed ion_alloc_fd, return value: %d\n", err);
            return unique_fd{};
        }

        if (!name.empty()) {
            if (ioctl(fd, DMA_BUF_SET_NAME, name.c_str()) == -1) {
                printf("Failed ioctl(DMA_BUF_SET_NAME): %s\n", strerror(errno));
                close(fd);
                return unique_fd{};
            }
        }

        return unique_fd{fd};
    }

    // Re-reads process 'pid' and asserts the single resulting buffer matches
    // all the expected properties (see EXPECT_ONE_BUF_EQ and the PID macros).
    void readAndCheckDmaBuffer(std::vector<DmaBuffer>* dmabufs, pid_t pid, const std::string name,
                               size_t fdrefs_size, size_t maprefs_size, const std::string exporter,
                               size_t refcount, uint64_t buf_size, bool expectFdrefs,
                               bool expectMapRefs) {
        EXPECT_TRUE(ReadDmaBufInfo(pid, dmabufs));
        EXPECT_EQ(dmabufs->size(), 1UL);
        EXPECT_ONE_BUF_EQ(dmabufs->begin(), name, fdrefs_size, maprefs_size, exporter, refcount,
                          buf_size);
        // Make sure the buffer has the right pid too.
        EXPECT_PID_IN_FDREFS(dmabufs->begin(), pid, expectFdrefs);
        EXPECT_PID_IN_MAPREFS(dmabufs->begin(), pid, expectMapRefs);
    }

    // True when 'pid' holds exactly 'expectFdrefs' fd references on 'dmabuf'.
    bool checkPidRef(DmaBuffer& dmabuf, pid_t pid, int expectFdrefs) {
        int fdrefs = dmabuf.fdrefs().find(pid)->second;
        return fdrefs == expectFdrefs;
    }

  private:
    // Picks a heap mask to allocate from: any heap on legacy ION (heap ids
    // vary across staging kernels), otherwise the enumerated system heap.
    int get_ion_heap_mask() {
        if (ion_fd < 0) {
            return 0;
        }

        if (ion_is_legacy(ion_fd)) {
            // Since ION is still in staging, we've seen that the heap mask ids are also
            // changed across kernels for some reason. So, here we basically ask for a buffer
            // from _any_ heap.
            return ION_HEAP_ANY_MASK;
        }

        int cnt;
        int err = ion_query_heap_cnt(ion_fd, &cnt);
        if (err < 0) {
            return err;
        }

        std::vector<ion_heap_data> heaps;
        heaps.resize(cnt);
        err = ion_query_get_heaps(ion_fd, cnt, &heaps[0]);
        if (err < 0) {
            return err;
        }

        unsigned int ret = 0;
        for (auto& it : heaps) {
            if (!strcmp(it.name, "ion_system_heap")) {
                ret |= (1 << it.heap_id);
            }
        }

        return ret;
    }

    int ion_fd;               // open ION device, -1 on failure
    const int ion_heap_mask;  // heap mask chosen by get_ion_heap_mask()
};
// A dmabuf must be visible (with one fd reference) while its fd is open,
// and disappear from the process's accounting once the fd is closed.
TEST_F(DmaBufTester, TestFdRef) {
    // Test if a dma buffer is found while the corresponding file descriptor
    // is open
    ASSERT_TRUE(is_valid());
    pid_t pid = getpid();
    std::vector<DmaBuffer> dmabufs;
    {
        // Allocate one buffer and make sure the library can see it
        unique_fd buf = allocate(4096, "dmabuftester-4k");
        ASSERT_GT(buf, 0) << "Allocated buffer is invalid";
        ASSERT_TRUE(ReadDmaBufInfo(pid, &dmabufs));

        EXPECT_EQ(dmabufs.size(), 1UL);
        EXPECT_ONE_BUF_EQ(dmabufs.begin(), "dmabuftester-4k", 1UL, 0UL, "ion", 1UL, 4096ULL);

        // Make sure the buffer has the right pid too.
        EXPECT_PID_IN_FDREFS(dmabufs.begin(), pid, true);
    }  // scope exit closes the fd via unique_fd

    // Now make sure the buffer has disappeared
    ASSERT_TRUE(ReadDmaBufInfo(pid, &dmabufs));
    EXPECT_TRUE(dmabufs.empty());
}
// A dmabuf that is mmap'ed must remain visible (as a map reference with
// unknown name/exporter) even after its fd is closed, and disappear only
// once the mapping is removed as well.
TEST_F(DmaBufTester, TestMapRef) {
    // Test to make sure we can find a buffer if the fd is closed but the buffer
    // is mapped
    ASSERT_TRUE(is_valid());
    pid_t pid = getpid();
    std::vector<DmaBuffer> dmabufs;
    {
        // Allocate one buffer and make sure the library can see it
        unique_fd buf = allocate(4096, "dmabuftester-4k");
        ASSERT_GT(buf, 0) << "Allocated buffer is invalid";
        auto ptr = mmap(0, 4096, PROT_READ, MAP_SHARED, buf, 0);
        ASSERT_NE(ptr, MAP_FAILED);
        ASSERT_TRUE(ReadDmaBufInfo(pid, &dmabufs));

        // One fd ref and one map ref; count reflects both holders.
        EXPECT_EQ(dmabufs.size(), 1UL);
        EXPECT_ONE_BUF_EQ(dmabufs.begin(), "dmabuftester-4k", 1UL, 1UL, "ion", 2UL, 4096ULL);

        // Make sure the buffer has the right pid too.
        EXPECT_PID_IN_FDREFS(dmabufs.begin(), pid, true);
        EXPECT_PID_IN_MAPREFS(dmabufs.begin(), pid, true);

        // close the file descriptor and re-read the stats
        buf.reset(-1);
        ASSERT_TRUE(ReadDmaBufInfo(pid, &dmabufs));

        // Only the map ref remains; name/exporter can no longer be resolved.
        EXPECT_EQ(dmabufs.size(), 1UL);
        EXPECT_ONE_BUF_EQ(dmabufs.begin(), "<unknown>", 0UL, 1UL, "<unknown>", 0UL, 4096ULL);

        EXPECT_PID_IN_FDREFS(dmabufs.begin(), pid, false);
        EXPECT_PID_IN_MAPREFS(dmabufs.begin(), pid, true);

        // unmap the buffer and lose all references
        munmap(ptr, 4096);
    }

    // Now make sure the buffer has disappeared
    ASSERT_TRUE(ReadDmaBufInfo(pid, &dmabufs));
    EXPECT_TRUE(dmabufs.empty());
}
// Verify reference accounting when a dma-buf fd is passed over a socket:
// every receive adds one fd ref in the remote process.
TEST_F(DmaBufTester, TestSharedfd) {
    // Each time a shared buffer is received over a socket, the remote process
    // will take an extra reference on it.
    ASSERT_TRUE(is_valid());

    pid_t pid = getpid();
    std::vector<DmaBuffer> dmabufs;
    {
        fd_sharer sharer{};
        ASSERT_TRUE(sharer.ok());
        // Allocate one buffer and make sure the library can see it
        unique_fd buf = allocate(4096, "dmabuftester-4k");
        ASSERT_GT(buf, 0) << "Allocated buffer is invalid";
        readAndCheckDmaBuffer(&dmabufs, pid, "dmabuftester-4k", 1UL, 0UL, "ion", 1UL, 4096ULL, true,
                              false);

        // First send: total count 2, one ref in each process.
        ASSERT_TRUE(sharer.sendfd(buf));
        readAndCheckDmaBuffer(&dmabufs, pid, "dmabuftester-4k", 1UL, 0UL, "ion", 2UL, 4096ULL, true,
                              false);
        EXPECT_TRUE(checkPidRef(dmabufs[0], pid, 1));
        readAndCheckDmaBuffer(&dmabufs, sharer.pid(), "dmabuftester-4k", 1UL, 0UL, "ion", 2UL,
                              4096ULL, true, false);
        EXPECT_TRUE(checkPidRef(dmabufs[0], sharer.pid(), 1));

        // Second send: remote now holds two refs, local still one.
        ASSERT_TRUE(sharer.sendfd(buf));
        readAndCheckDmaBuffer(&dmabufs, pid, "dmabuftester-4k", 1UL, 0UL, "ion", 3UL, 4096ULL, true,
                              false);
        EXPECT_TRUE(checkPidRef(dmabufs[0], pid, 1));
        readAndCheckDmaBuffer(&dmabufs, sharer.pid(), "dmabuftester-4k", 1UL, 0UL, "ion", 3UL,
                              4096ULL, true, false);
        EXPECT_TRUE(checkPidRef(dmabufs[0], sharer.pid(), 2));

        // Killing the remote process drops its refs; only ours remains.
        ASSERT_TRUE(sharer.kill());
        readAndCheckDmaBuffer(&dmabufs, pid, "dmabuftester-4k", 1UL, 0UL, "ion", 1UL, 4096ULL, true,
                              false);
    }

    // Now make sure the buffer has disappeared
    ASSERT_TRUE(ReadDmaBufInfo(pid, &dmabufs));
    EXPECT_TRUE(dmabufs.empty());
}
// Verify that dup()ing a dma-buf fd adds a second fd ref for this process,
// and closing the duplicate drops it again.
TEST_F(DmaBufTester, DupFdTest) {
    // dup()ing an fd will make this process take an extra reference on the
    // shared buffer.
    ASSERT_TRUE(is_valid());

    pid_t pid = getpid();
    std::vector<DmaBuffer> dmabufs;
    {
        // Allocate one buffer and make sure the library can see it
        unique_fd buf = allocate(4096, "dmabuftester-4k");
        ASSERT_GT(buf, 0) << "Allocated buffer is invalid";
        readAndCheckDmaBuffer(&dmabufs, pid, "dmabuftester-4k", 1UL, 0UL, "ion", 1UL, 4096ULL, true,
                              false);

        // dup: same buffer, two fd refs in this process.
        unique_fd buf2{dup(buf)};
        readAndCheckDmaBuffer(&dmabufs, pid, "dmabuftester-4k", 1UL, 0UL, "ion", 2UL, 4096ULL, true,
                              false);
        EXPECT_TRUE(checkPidRef(dmabufs[0], pid, 2));

        // Close the duplicate; back down to a single ref.
        close(buf2.release());
        readAndCheckDmaBuffer(&dmabufs, pid, "dmabuftester-4k", 1UL, 0UL, "ion", 1UL, 4096ULL, true,
                              false);
        EXPECT_TRUE(checkPidRef(dmabufs[0], pid, 1));
    }

    // Now make sure the buffer has disappeared
    ASSERT_TRUE(ReadDmaBufInfo(pid, &dmabufs));
    EXPECT_TRUE(dmabufs.empty());
}
// Verify that a child created after allocation inherits a reference on the
// buffer, and that the reference goes away when the child dies.
TEST_F(DmaBufTester, ForkTest) {
    // fork()ing a child will cause the child to automatically take a reference
    // on any existing shared buffers.
    ASSERT_TRUE(is_valid());

    pid_t pid = getpid();
    std::vector<DmaBuffer> dmabufs;
    {
        // Allocate one buffer and make sure the library can see it
        unique_fd buf = allocate(4096, "dmabuftester-4k");
        ASSERT_GT(buf, 0) << "Allocated buffer is invalid";
        readAndCheckDmaBuffer(&dmabufs, pid, "dmabuftester-4k", 1UL, 0UL, "ion", 1UL, 4096ULL, true,
                              false);

        // fd_sharer forks a child, which inherits the open fd: count 2.
        fd_sharer sharer{};
        ASSERT_TRUE(sharer.ok());
        readAndCheckDmaBuffer(&dmabufs, pid, "dmabuftester-4k", 1UL, 0UL, "ion", 2UL, 4096ULL, true,
                              false);
        readAndCheckDmaBuffer(&dmabufs, sharer.pid(), "dmabuftester-4k", 1UL, 0UL, "ion", 2UL,
                              4096ULL, true, false);

        // Child exit drops the inherited ref.
        ASSERT_TRUE(sharer.kill());
        readAndCheckDmaBuffer(&dmabufs, pid, "dmabuftester-4k", 1UL, 0UL, "ion", 1UL, 4096ULL, true,
                              false);
    }

    // Now make sure the buffer has disappeared
    ASSERT_TRUE(ReadDmaBufInfo(pid, &dmabufs));
    EXPECT_TRUE(dmabufs.empty());
}
// Test-runner entry point: initialize gtest, route Android logging to
// stderr, then run every registered test.
int main(int argc, char** argv) {
    ::testing::InitGoogleTest(&argc, argv);
    ::android::base::InitLogging(argv, android::base::StderrLogger);
    return RUN_ALL_TESTS();
}

View File

@ -1,113 +0,0 @@
/*
* Copyright (C) 2019 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <sys/types.h>
#include <unistd.h>
#include <set>
#include <string>
#include <vector>
#include <unordered_map>
namespace android {
namespace dmabufinfo {
// Represents one dma-buf buffer in the system, keyed by its unique inode.
// Tracks the buffer's size, the kernel's reference count, and which
// processes reference it via open fds and/or memory mappings.
struct DmaBuffer {
  public:
    DmaBuffer(ino_t inode, uint64_t size, uint64_t count, const std::string& exporter,
              const std::string& name)
        : inode_(inode), size_(size), count_(count), exporter_(exporter), name_(name) {
        total_refs_ = 0;
    }
    DmaBuffer() = default;
    ~DmaBuffer() = default;

    // Adds one file descriptor reference for the given pid
    void AddFdRef(pid_t pid) {
        AddRefToPidMap(pid, &fdrefs_);
        total_refs_++;
    }

    // Adds one map reference for the given pid
    void AddMapRef(pid_t pid) {
        AddRefToPidMap(pid, &maprefs_);
        total_refs_++;
    }

    // Getters for each property
    uint64_t size() const { return size_; }
    const std::unordered_map<pid_t, int>& fdrefs() const { return fdrefs_; }
    const std::unordered_map<pid_t, int>& maprefs() const { return maprefs_; }
    ino_t inode() const { return inode_; }
    uint64_t total_refs() const { return total_refs_; }
    uint64_t count() const { return count_; }
    const std::set<pid_t>& pids() const { return pids_; }
    const std::string& name() const { return name_; }
    const std::string& exporter() const { return exporter_; }

    // Setters used when information about an already-seen inode is refined.
    void SetName(const std::string& name) { name_ = name; }
    void SetExporter(const std::string& exporter) { exporter_ = exporter; }
    void SetCount(uint64_t count) { count_ = count; }

    // Proportional set size: the buffer's size split evenly between all
    // processes that reference it. Returns 0 for an unreferenced buffer
    // instead of dividing by zero.
    uint64_t Pss() const { return pids_.empty() ? 0 : size_ / pids_.size(); }

    // Buffers compare equal when identity (inode) and the descriptive fields
    // match; reference counts are deliberately ignored. const-qualified so
    // it can be used on const references and in const contexts.
    bool operator==(const DmaBuffer& rhs) const {
        return (inode_ == rhs.inode()) && (size_ == rhs.size()) && (name_ == rhs.name()) &&
               (exporter_ == rhs.exporter());
    }

  private:
    // POD members are zero-initialized so a default-constructed DmaBuffer
    // does not expose indeterminate values.
    ino_t inode_ = 0;
    uint64_t size_ = 0;
    uint64_t count_ = 0;
    uint64_t total_refs_ = 0;
    std::set<pid_t> pids_;
    std::string exporter_;
    std::string name_;
    std::unordered_map<pid_t, int> fdrefs_;
    std::unordered_map<pid_t, int> maprefs_;

    // Record one reference for pid in *map: the first time we find a ref the
    // count is set to 1, afterwards the existing count is incremented. The
    // pid is also remembered in the overall pid set.
    void AddRefToPidMap(pid_t pid, std::unordered_map<pid_t, int>* map) {
        auto [it, inserted] = map->insert(std::make_pair(pid, 1));
        if (!inserted)
            it->second++;
        pids_.insert(pid);
    }
};
// Read and return current dma buf objects from
// DEBUGFS/dma_buf/bufinfo. The references to each dma buffer are not
// populated here and will return an empty vector.
// Returns false if something went wrong with the function, true otherwise.
bool ReadDmaBufInfo(std::vector<DmaBuffer>* dmabufs,
const std::string& path = "/sys/kernel/debug/dma_buf/bufinfo");
// Read and return dmabuf objects for a given process without the help
// of DEBUGFS
// Returns false if something went wrong with the function, true otherwise.
bool ReadDmaBufInfo(pid_t pid, std::vector<DmaBuffer>* dmabufs);
// Append new dmabuf objects from a given process to an existing vector.
// When the vector contains an existing element with a matching inode,
// the reference counts will be updated.
// Does not depend on DEBUGFS.
// Returns false if something went wrong with the function, true otherwise.
bool AppendDmaBufInfo(pid_t pid, std::vector<DmaBuffer>* dmabufs);
} // namespace dmabufinfo
} // namespace android

View File

@ -1,30 +0,0 @@
// Copyright (C) 2019 The Android Open Source Project
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Command-line tool that dumps per-buffer / per-process dma-buf usage,
// built on top of the static libdmabufinfo library.
cc_binary {
    name: "dmabuf_dump",
    cflags: [
        "-Wall",
        "-Werror",
    ],
    srcs: ["dmabuf_dump.cpp"],
    shared_libs: [
        "libbase",
    ],
    static_libs: [
        "libdmabufinfo",
    ],
    // Installed into the product partition rather than system.
    product_specific: true,
}

View File

@ -1,266 +0,0 @@
/*
* Copyright (C) 2019 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <dirent.h>
#include <errno.h>
#include <getopt.h>
#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <fstream>
#include <iostream>
#include <map>
#include <set>
#include <sstream>
#include <string>
#include <vector>
#include <android-base/stringprintf.h>
#include <dmabufinfo/dmabufinfo.h>
using DmaBuffer = ::android::dmabufinfo::DmaBuffer;
// Print the command-line usage text to stderr and terminate the process
// with the given exit status. Never returns.
[[noreturn]] static void usage(int exit_status) {
    fprintf(stderr,
            "Usage: %s [-ah] [PID] \n"
            "-a\t show all dma buffers (ion) in big table, [buffer x process] grid \n"
            "-h\t show this help\n"
            " \t If PID is supplied, the dmabuf information for that process is shown.\n",
            getprogname());
    exit(exit_status);
}
static std::string GetProcessComm(const pid_t pid) {
std::string pid_path = android::base::StringPrintf("/proc/%d/comm", pid);
std::ifstream in{pid_path};
if (!in) return std::string("N/A");
std::string line;
std::getline(in, line);
if (!in) return std::string("N/A");
return line;
}
// Print the "big table" view: one row per dma-buf, one column per process
// that references any buffer, with per-process ref counts in the cells and
// per-process size totals at the bottom.
static void PrintDmaBufTable(const std::vector<DmaBuffer>& bufs) {
    if (bufs.empty()) {
        printf("dmabuf info not found ¯\\_(ツ)_/¯\n");
        return;
    }

    // Find all unique pids in the input vector, create a set
    std::set<pid_t> pid_set;
    for (auto& buf : bufs) {
        pid_set.insert(buf.pids().begin(), buf.pids().end());
    }

    // Format the header string spaced and separated with '|'
    printf("    Dmabuf Inode |            Size |      Ref Counts |");
    for (auto pid : pid_set) {
        printf("%16s:%-5d |", GetProcessComm(pid).c_str(), pid);
    }
    printf("\n");

    // holds per-process dmabuf size in kB
    std::map<pid_t, uint64_t> per_pid_size = {};
    uint64_t dmabuf_total_size = 0;

    // Iterate through all dmabufs and collect per-process sizes, refs
    for (auto& buf : bufs) {
        printf("%16ju |%13" PRIu64 " kB |%16" PRIu64 " |", static_cast<uintmax_t>(buf.inode()),
               buf.size() / 1024, buf.total_refs());
        // Iterate through each process to find out per-process references for each buffer,
        // gather total size used by each process etc.
        for (pid_t pid : pid_set) {
            int pid_refs = 0;
            if (buf.fdrefs().count(pid) == 1) {
                // Get the total number of ref counts the process is holding
                // on this buffer. We don't differentiate between mmap or fd.
                pid_refs += buf.fdrefs().at(pid);
                if (buf.maprefs().count(pid) == 1) {
                    pid_refs += buf.maprefs().at(pid);
                }
            }
            if (pid_refs) {
                // Add up the per-pid total size. Note that if a buffer is mapped
                // in 2 different processes, the size will be shown as mapped or opened
                // in both processes. This is intended for visibility.
                //
                // If one wants to get the total *unique* dma buffers, they can simply
                // sum the size of all dma bufs shown by the tool
                per_pid_size[pid] += buf.size() / 1024;
                printf("%17d refs |", pid_refs);
            } else {
                printf("%22s |", "--");
            }
        }
        dmabuf_total_size += buf.size() / 1024;
        printf("\n");
    }

    printf("------------------------------------\n");
    printf("%-16s  %13" PRIu64 " kB |%16s |", "TOTALS", dmabuf_total_size, "n/a");
    for (auto pid : pid_set) {
        printf("%19" PRIu64 " kB |", per_pid_size[pid]);
    }
    printf("\n");

    return;
}
// Print the per-process view: for each referencing process list every
// buffer with its RSS (full size) and PSS (size / nr referencing procs),
// then print system-wide totals including kernel-only buffers.
static void PrintDmaBufPerProcess(const std::vector<DmaBuffer>& bufs) {
    if (bufs.empty()) {
        printf("dmabuf info not found ¯\\_(ツ)_/¯\n");
        return;
    }

    // Create a reverse map from pid to dmabufs
    std::unordered_map<pid_t, std::set<ino_t>> pid_to_inodes = {};
    uint64_t total_size = 0;  // Total size of dmabufs in the system
    uint64_t kernel_rss = 0;  // Total size of dmabufs NOT mapped or opened by a process
    for (auto& buf : bufs) {
        for (auto pid : buf.pids()) {
            pid_to_inodes[pid].insert(buf.inode());
        }
        total_size += buf.size();
        if (buf.fdrefs().empty() && buf.maprefs().empty()) {
            kernel_rss += buf.size();
        }
    }
    // Create an inode to dmabuf map. We know inodes are unique..
    std::unordered_map<ino_t, DmaBuffer> inode_to_dmabuf;
    for (auto buf : bufs) {
        inode_to_dmabuf[buf.inode()] = buf;
    }

    uint64_t total_rss = 0, total_pss = 0;
    for (auto& [pid, inodes] : pid_to_inodes) {
        uint64_t pss = 0;
        uint64_t rss = 0;

        printf("%16s:%-5d\n", GetProcessComm(pid).c_str(), pid);
        printf("%22s %16s %16s %16s %16s\n", "Name", "Rss", "Pss", "nr_procs", "Inode");
        for (auto& inode : inodes) {
            DmaBuffer& buf = inode_to_dmabuf[inode];
            printf("%22s %13" PRIu64 " kB %13" PRIu64 " kB %16zu %16" PRIuMAX "\n",
                   buf.name().empty() ? "<unknown>" : buf.name().c_str(), buf.size() / 1024,
                   buf.Pss() / 1024, buf.pids().size(), static_cast<uintmax_t>(buf.inode()));
            rss += buf.size();
            pss += buf.Pss();
        }
        printf("%22s %13" PRIu64 " kB %13" PRIu64 " kB %16s\n", "PROCESS TOTAL", rss / 1024,
               pss / 1024, "");
        printf("----------------------\n");
        total_rss += rss;
        total_pss += pss;
    }
    // NOTE(review): the format string below ends in "\n " — the trailing
    // space after the newline looks unintentional; confirm before changing
    // since consumers may parse this output.
    printf("dmabuf total: %" PRIu64 " kB kernel_rss: %" PRIu64 " kB userspace_rss: %" PRIu64
           " kB userspace_pss: %" PRIu64 " kB\n ",
           total_size / 1024, kernel_rss / 1024, total_rss / 1024, total_pss / 1024);
}
// Collect dma-buf information for the whole system: read the debugfs buffer
// list first, then fold in the per-process references for every numeric
// entry under /proc. On any failure *bufs is cleared and false is returned.
static bool ReadDmaBufs(std::vector<DmaBuffer>* bufs) {
    bufs->clear();

    if (!ReadDmaBufInfo(bufs)) {
        fprintf(stderr, "debugfs entry for dmabuf not available, skipping\n");
        return false;
    }

    std::unique_ptr<DIR, int (*)(DIR*)> procdir(opendir("/proc"), closedir);
    if (!procdir) {
        fprintf(stderr, "Failed to open /proc directory\n");
        bufs->clear();
        return false;
    }

    for (struct dirent* entry = readdir(procdir.get()); entry != nullptr;
         entry = readdir(procdir.get())) {
        if (entry->d_type != DT_DIR) continue;

        // Only numeric directory names correspond to processes.
        int pid = atoi(entry->d_name);
        if (pid == 0) continue;

        if (!AppendDmaBufInfo(pid, bufs)) {
            fprintf(stderr, "Unable to read dmabuf info for pid %d\n", pid);
            bufs->clear();
            return false;
        }
    }

    return true;
}
// Entry point: parse options, then dump dmabuf information either for a
// single process (PID argument) or for every process in the system.
// -a selects the legacy [buffer x process] table view.
int main(int argc, char* argv[]) {
    struct option longopts[] = {{"all", no_argument, nullptr, 'a'},
                                {"help", no_argument, nullptr, 'h'},
                                {0, 0, nullptr, 0}};
    int opt;
    bool show_table = false;
    while ((opt = getopt_long(argc, argv, "ah", longopts, nullptr)) != -1) {
        switch (opt) {
            case 'a':
                show_table = true;
                break;
            case 'h':
                usage(EXIT_SUCCESS);
            default:
                usage(EXIT_FAILURE);
        }
    }

    pid_t pid = -1;
    if (optind < argc) {
        if (show_table) {
            fprintf(stderr, "Invalid arguments: -a does not need arguments\n");
            usage(EXIT_FAILURE);
        }
        if (optind != (argc - 1)) {
            fprintf(stderr, "Invalid arguments - only one [PID] argument is allowed\n");
            usage(EXIT_FAILURE);
        }
        // Parse the PID with strtol() rather than atoi() so that inputs with
        // trailing garbage (e.g. "123abc"), negative values, and conversion
        // errors are rejected instead of silently accepted (CERT ERR34-C).
        char* endptr = nullptr;
        long parsed = strtol(argv[optind], &endptr, 10);
        if (endptr == argv[optind] || *endptr != '\0' || parsed <= 0) {
            fprintf(stderr, "Invalid process id %s\n", argv[optind]);
            usage(EXIT_FAILURE);
        }
        pid = static_cast<pid_t>(parsed);
    }

    std::vector<DmaBuffer> bufs;
    if (pid != -1) {
        // Single-process mode: read only this pid's buffers.
        if (!ReadDmaBufInfo(pid, &bufs)) {
            fprintf(stderr, "Unable to read dmabuf info for %d\n", pid);
            exit(EXIT_FAILURE);
        }
    } else {
        // System-wide mode.
        if (!ReadDmaBufs(&bufs)) exit(EXIT_FAILURE);
    }

    // Show the old dmabuf table, inode x process
    if (show_table) {
        PrintDmaBufTable(bufs);
        return 0;
    }

    PrintDmaBufPerProcess(bufs);
    return 0;
}

View File

@ -1,544 +0,0 @@
/*
* Copyright (C) 2018 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <meminfo/procmeminfo.h>
#include <meminfo/sysmeminfo.h>
#include <fcntl.h>
#include <inttypes.h>
#include <stdio.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <string>
#include <android-base/file.h>
#include <android-base/logging.h>
#include <android-base/stringprintf.h>
#include <android-base/unique_fd.h>
#include <benchmark/benchmark.h>
using ::android::meminfo::MemUsage;
using ::android::meminfo::ProcMemInfo;
using ::android::meminfo::SmapsOrRollupFromFile;
using ::android::meminfo::SysMemInfo;
// Indices into the meminfo value arrays used by the benchmarks below.
// The order must stay in sync with the tag tables passed to the parsers
// (see get_mem_info() and the tag vectors in the BM_* functions).
enum {
    MEMINFO_TOTAL,
    MEMINFO_FREE,
    MEMINFO_BUFFERS,
    MEMINFO_CACHED,
    MEMINFO_SHMEM,
    MEMINFO_SLAB,
    MEMINFO_SLAB_RECLAIMABLE,
    MEMINFO_SLAB_UNRECLAIMABLE,
    MEMINFO_SWAP_TOTAL,
    MEMINFO_SWAP_FREE,
    MEMINFO_ZRAM_TOTAL,
    MEMINFO_MAPPED,
    MEMINFO_VMALLOC_USED,
    MEMINFO_PAGE_TABLES,
    MEMINFO_KERNEL_STACK,
    MEMINFO_COUNT
};
// Legacy /proc/meminfo parser kept verbatim as the baseline for the *_old
// benchmarks; do not "fix" or restyle it, its exact behavior/performance is
// what is being measured. Reads the file in one gulp and matches each line
// against a fixed tag table, storing the parsed kB values into mem[].
static void get_mem_info(uint64_t mem[], const char* file) {
    char buffer[4096];
    unsigned int numFound = 0;

    int fd = open(file, O_RDONLY);
    if (fd < 0) {
        printf("Unable to open %s: %s\n", file, strerror(errno));
        return;
    }

    const int len = read(fd, buffer, sizeof(buffer) - 1);
    close(fd);

    if (len < 0) {
        printf("Empty %s\n", file);
        return;
    }
    buffer[len] = 0;

    // tagsLen[i] is the strlen() of tags[i]; matching is by prefix.
    static const char* const tags[] = {
            "MemTotal:", "MemFree:", "Buffers:", "Cached:", "Shmem:", "Slab:",
            "SReclaimable:", "SUnreclaim:", "SwapTotal:", "SwapFree:", "ZRam:", "Mapped:",
            "VmallocUsed:", "PageTables:", "KernelStack:", NULL};
    static const int tagsLen[] = {9, 8, 8, 7, 6, 5, 13, 11, 10, 9, 5, 7, 12, 11, 12, 0};

    memset(mem, 0, sizeof(uint64_t) * 15);
    char* p = buffer;
    while (*p && (numFound < (sizeof(tagsLen) / sizeof(tagsLen[0])))) {
        int i = 0;
        while (tags[i]) {
            // std::cout << "tag =" << tags[i] << " p = " << std::string(p, tagsLen[i]) <<
            // std::endl;
            if (strncmp(p, tags[i], tagsLen[i]) == 0) {
                p += tagsLen[i];
                while (*p == ' ') p++;
                char* num = p;
                while (*p >= '0' && *p <= '9') p++;
                if (*p != 0) {
                    *p = 0;
                    p++;
                }
                mem[i] = atoll(num);
                numFound++;
                break;
            }
            i++;
        }

        // Advance to the start of the next line.
        while (*p && *p != '\n') {
            p++;
        }
        if (*p) p++;
    }
}
// Benchmark: parse a fixed meminfo snapshot with the legacy get_mem_info()
// implementation (baseline for comparison against BM_ReadMemInfo_new).
static void BM_ReadMemInfo_old(benchmark::State& state) {
    std::string meminfo = R"meminfo(MemTotal: 3019740 kB
MemFree: 1809728 kB
MemAvailable: 2546560 kB
Buffers: 54736 kB
Cached: 776052 kB
SwapCached: 0 kB
Active: 445856 kB
Inactive: 459092 kB
Active(anon): 78492 kB
Inactive(anon): 2240 kB
Active(file): 367364 kB
Inactive(file): 456852 kB
Unevictable: 3096 kB
Mlocked: 3096 kB
SwapTotal: 0 kB
SwapFree: 0 kB
Dirty: 32 kB
Writeback: 0 kB
AnonPages: 74988 kB
Mapped: 62624 kB
Shmem: 4020 kB
Slab: 86464 kB
SReclaimable: 44432 kB
SUnreclaim: 42032 kB
KernelStack: 4880 kB
PageTables: 2900 kB
NFS_Unstable: 0 kB
Bounce: 0 kB
WritebackTmp: 0 kB
CommitLimit: 1509868 kB
Committed_AS: 80296 kB
VmallocTotal: 263061440 kB
VmallocUsed: 0 kB
VmallocChunk: 0 kB
AnonHugePages: 6144 kB
ShmemHugePages: 0 kB
ShmemPmdMapped: 0 kB
CmaTotal: 131072 kB
CmaFree: 130380 kB
HugePages_Total: 0
HugePages_Free: 0
HugePages_Rsvd: 0
HugePages_Surp: 0
Hugepagesize: 2048 kB)meminfo";

    TemporaryFile tf;
    ::android::base::WriteStringToFd(meminfo, tf.fd);

    uint64_t mem[MEMINFO_COUNT];
    for (auto _ : state) {
        get_mem_info(mem, tf.path);
    }
}
BENCHMARK(BM_ReadMemInfo_old);
// Benchmark: parse the same meminfo snapshot with the libmeminfo
// SysMemInfo::ReadMemInfo() implementation.
static void BM_ReadMemInfo_new(benchmark::State& state) {
    std::string meminfo = R"meminfo(MemTotal: 3019740 kB
MemFree: 1809728 kB
MemAvailable: 2546560 kB
Buffers: 54736 kB
Cached: 776052 kB
SwapCached: 0 kB
Active: 445856 kB
Inactive: 459092 kB
Active(anon): 78492 kB
Inactive(anon): 2240 kB
Active(file): 367364 kB
Inactive(file): 456852 kB
Unevictable: 3096 kB
Mlocked: 3096 kB
SwapTotal: 0 kB
SwapFree: 0 kB
Dirty: 32 kB
Writeback: 0 kB
AnonPages: 74988 kB
Mapped: 62624 kB
Shmem: 4020 kB
Slab: 86464 kB
SReclaimable: 44432 kB
SUnreclaim: 42032 kB
KernelStack: 4880 kB
PageTables: 2900 kB
NFS_Unstable: 0 kB
Bounce: 0 kB
WritebackTmp: 0 kB
CommitLimit: 1509868 kB
Committed_AS: 80296 kB
VmallocTotal: 263061440 kB
VmallocUsed: 0 kB
VmallocChunk: 0 kB
AnonHugePages: 6144 kB
ShmemHugePages: 0 kB
ShmemPmdMapped: 0 kB
CmaTotal: 131072 kB
CmaFree: 130380 kB
HugePages_Total: 0
HugePages_Free: 0
HugePages_Rsvd: 0
HugePages_Surp: 0
Hugepagesize: 2048 kB)meminfo";

    TemporaryFile tf;
    android::base::WriteStringToFd(meminfo, tf.fd);

    std::string file = std::string(tf.path);
    std::vector<uint64_t> mem(MEMINFO_COUNT);
    // Tag list mirrors the MEMINFO_* enum order (minus the zram entry,
    // which this parser does not read).
    const std::vector<std::string> tags = {
            SysMemInfo::kMemTotal,      SysMemInfo::kMemFree,        SysMemInfo::kMemBuffers,
            SysMemInfo::kMemCached,     SysMemInfo::kMemShmem,       SysMemInfo::kMemSlab,
            SysMemInfo::kMemSReclaim,   SysMemInfo::kMemSUnreclaim,  SysMemInfo::kMemSwapTotal,
            SysMemInfo::kMemSwapFree,   SysMemInfo::kMemMapped,      SysMemInfo::kMemVmallocUsed,
            SysMemInfo::kMemPageTables, SysMemInfo::kMemKernelStack,
    };
    SysMemInfo smi;

    for (auto _ : state) {
        smi.ReadMemInfo(tags, &mem, file);
    }
}
BENCHMARK(BM_ReadMemInfo_new);
// Legacy zram-usage reader, kept verbatim as the baseline for the *_old
// benchmarks. Tries the newer mm_stat file first (third field is
// mem_used_total), then falls back to the older mem_used_total file.
// Returns 0 if neither file can be opened.
static uint64_t get_zram_mem_used(const std::string& zram_dir) {
    FILE* f = fopen((zram_dir + "mm_stat").c_str(), "r");
    if (f) {
        uint64_t mem_used_total = 0;

        int matched = fscanf(f, "%*d %*d %" SCNu64 " %*d %*d %*d %*d", &mem_used_total);
        if (matched != 1)
            fprintf(stderr, "warning: failed to parse %s\n", (zram_dir + "mm_stat").c_str());

        fclose(f);
        return mem_used_total;
    }

    f = fopen((zram_dir + "mem_used_total").c_str(), "r");
    if (f) {
        uint64_t mem_used_total = 0;

        int matched = fscanf(f, "%" SCNu64, &mem_used_total);
        if (matched != 1)
            fprintf(stderr, "warning: failed to parse %s\n", (zram_dir + "mem_used_total").c_str());

        fclose(f);
        return mem_used_total;
    }

    return 0;
}
// Benchmark: read zram usage from a test fixture with the legacy reader.
static void BM_ZramTotal_old(benchmark::State& state) {
    std::string exec_dir = ::android::base::GetExecutableDirectory();
    std::string zram_mmstat_dir = exec_dir + "/testdata1/";

    for (auto _ : state) {
        uint64_t zram_total __attribute__((unused)) = get_zram_mem_used(zram_mmstat_dir) / 1024;
    }
}
BENCHMARK(BM_ZramTotal_old);
// Benchmark: read zram usage from the same fixture via SysMemInfo.
static void BM_ZramTotal_new(benchmark::State& state) {
    std::string exec_dir = ::android::base::GetExecutableDirectory();
    std::string zram_mmstat_dir = exec_dir + "/testdata1/";

    SysMemInfo smi;
    for (auto _ : state) {
        uint64_t zram_total __attribute__((unused)) = smi.mem_zram_kb(zram_mmstat_dir);
    }
}
BENCHMARK(BM_ZramTotal_new);
// Benchmark: legacy meminfo parse plus legacy zram read in the same
// iteration, sanity-checked against a known KernelStack value.
static void BM_MemInfoWithZram_old(benchmark::State& state) {
    std::string meminfo = R"meminfo(MemTotal: 3019740 kB
MemFree: 1809728 kB
MemAvailable: 2546560 kB
Buffers: 54736 kB
Cached: 776052 kB
SwapCached: 0 kB
Active: 445856 kB
Inactive: 459092 kB
Active(anon): 78492 kB
Inactive(anon): 2240 kB
Active(file): 367364 kB
Inactive(file): 456852 kB
Unevictable: 3096 kB
Mlocked: 3096 kB
SwapTotal: 0 kB
SwapFree: 0 kB
Dirty: 32 kB
Writeback: 0 kB
AnonPages: 74988 kB
Mapped: 62624 kB
Shmem: 4020 kB
Slab: 86464 kB
SReclaimable: 44432 kB
SUnreclaim: 42032 kB
KernelStack: 4880 kB
PageTables: 2900 kB
NFS_Unstable: 0 kB
Bounce: 0 kB
WritebackTmp: 0 kB
CommitLimit: 1509868 kB
Committed_AS: 80296 kB
VmallocTotal: 263061440 kB
VmallocUsed: 0 kB
VmallocChunk: 0 kB
AnonHugePages: 6144 kB
ShmemHugePages: 0 kB
ShmemPmdMapped: 0 kB
CmaTotal: 131072 kB
CmaFree: 130380 kB
HugePages_Total: 0
HugePages_Free: 0
HugePages_Rsvd: 0
HugePages_Surp: 0
Hugepagesize: 2048 kB)meminfo";

    TemporaryFile tf;
    ::android::base::WriteStringToFd(meminfo, tf.fd);

    std::string exec_dir = ::android::base::GetExecutableDirectory();
    std::string zram_mmstat_dir = exec_dir + "/testdata1/";
    uint64_t mem[MEMINFO_COUNT];

    for (auto _ : state) {
        get_mem_info(mem, tf.path);
        mem[MEMINFO_ZRAM_TOTAL] = get_zram_mem_used("/sys/block/zram0/") / 1024;
        CHECK_EQ(mem[MEMINFO_KERNEL_STACK], 4880u);
    }
}
BENCHMARK(BM_MemInfoWithZram_old);
// Benchmark: libmeminfo parse with a "Zram:" tag spliced into the default
// tag list at the MEMINFO_ZRAM_TOTAL slot, same sanity check as the _old
// variant.
static void BM_MemInfoWithZram_new(benchmark::State& state) {
    std::string meminfo = R"meminfo(MemTotal: 3019740 kB
MemFree: 1809728 kB
MemAvailable: 2546560 kB
Buffers: 54736 kB
Cached: 776052 kB
SwapCached: 0 kB
Active: 445856 kB
Inactive: 459092 kB
Active(anon): 78492 kB
Inactive(anon): 2240 kB
Active(file): 367364 kB
Inactive(file): 456852 kB
Unevictable: 3096 kB
Mlocked: 3096 kB
SwapTotal: 0 kB
SwapFree: 0 kB
Dirty: 32 kB
Writeback: 0 kB
AnonPages: 74988 kB
Mapped: 62624 kB
Shmem: 4020 kB
Slab: 86464 kB
SReclaimable: 44432 kB
SUnreclaim: 42032 kB
KernelStack: 4880 kB
PageTables: 2900 kB
NFS_Unstable: 0 kB
Bounce: 0 kB
WritebackTmp: 0 kB
CommitLimit: 1509868 kB
Committed_AS: 80296 kB
VmallocTotal: 263061440 kB
VmallocUsed: 0 kB
VmallocChunk: 0 kB
AnonHugePages: 6144 kB
ShmemHugePages: 0 kB
ShmemPmdMapped: 0 kB
CmaTotal: 131072 kB
CmaFree: 130380 kB
HugePages_Total: 0
HugePages_Free: 0
HugePages_Rsvd: 0
HugePages_Surp: 0
Hugepagesize: 2048 kB)meminfo";

    TemporaryFile tf;
    android::base::WriteStringToFd(meminfo, tf.fd);

    std::string file = std::string(tf.path);
    std::vector<uint64_t> mem(MEMINFO_COUNT);
    std::vector<std::string> tags(SysMemInfo::kDefaultSysMemInfoTags);
    auto it = tags.begin();
    tags.insert(it + MEMINFO_ZRAM_TOTAL, "Zram:");
    SysMemInfo smi;

    for (auto _ : state) {
        smi.ReadMemInfo(tags, &mem, file);
        CHECK_EQ(mem[MEMINFO_KERNEL_STACK], 4880u);
    }
}
BENCHMARK(BM_MemInfoWithZram_new);
// Current implementation is in frameworks/base/core/jni/android_os_Debug.cpp.
// That implementation is still buggy and it skips over vmalloc allocated memory by kernel modules.
// This is the *fixed* version of the same implementation intended for benchmarking against the new
// one. Sums up the page-backed allocations listed in /proc/vmallocinfo and
// returns the total in bytes (0 if the file cannot be opened).
static uint64_t get_allocated_vmalloc_memory(const std::string& vm_file) {
    char line[1024];

    uint64_t vmalloc_allocated_size = 0;
    auto fp = std::unique_ptr<FILE, decltype(&fclose)>{fopen(vm_file.c_str(), "re"), fclose};
    if (fp == nullptr) {
        return 0;
    }

    while (true) {
        if (fgets(line, 1024, fp.get()) == NULL) {
            break;
        }

        // check to see if there are pages mapped in vmalloc area
        if (!strstr(line, "pages=")) {
            continue;
        }

        long nr_pages;
        if (sscanf(line, "%*x-%*x %*ld %*s pages=%ld", &nr_pages) == 1) {
            vmalloc_allocated_size += (nr_pages * getpagesize());
        } else if (sscanf(line, "%*x-%*x %*ld %*s %*s pages=%ld", &nr_pages) == 1) {
            // The second case is for kernel modules. If allocation comes from the module,
            // kernel puts an extra string containing the module name before "pages=" in
            // the line.
            // See: https://elixir.bootlin.com/linux/latest/source/kernel/kallsyms.c#L373
            vmalloc_allocated_size += (nr_pages * getpagesize());
        }
    }
    return vmalloc_allocated_size;
}
// Benchmark: parse the vmallocinfo fixture with the fixed legacy parser and
// verify the known expected total.
static void BM_VmallocInfo_old_fixed(benchmark::State& state) {
    std::string exec_dir = ::android::base::GetExecutableDirectory();
    std::string vmallocinfo =
            ::android::base::StringPrintf("%s/testdata1/vmallocinfo", exec_dir.c_str());
    for (auto _ : state) {
        CHECK_EQ(get_allocated_vmalloc_memory(vmallocinfo), 29884416);
    }
}
BENCHMARK(BM_VmallocInfo_old_fixed);
// Benchmark: parse the same vmallocinfo fixture with libmeminfo's
// ReadVmallocInfo() and verify the same expected total.
static void BM_VmallocInfo_new(benchmark::State& state) {
    std::string exec_dir = ::android::base::GetExecutableDirectory();
    std::string vmallocinfo =
            ::android::base::StringPrintf("%s/testdata1/vmallocinfo", exec_dir.c_str());
    for (auto _ : state) {
        CHECK_EQ(::android::meminfo::ReadVmallocInfo(vmallocinfo), 29884416);
    }
}
BENCHMARK(BM_VmallocInfo_new);
// This implementation is picked up as-is from frameworks/base/core/jni/android_os_Debug.cpp
// and only slightly modified to use std:unique_ptr. Kept verbatim as the
// baseline for BM_SmapsRollup_old; do not restyle. Accumulates Pss, Uss
// (Private_Clean + Private_Dirty), Rss and SwapPss (all in kB) from a
// smaps/smaps_rollup-format file into *rollup. Returns false only when the
// file cannot be opened.
static bool get_smaps_rollup(const std::string path, MemUsage* rollup) {
    char lineBuffer[1024];
    auto fp = std::unique_ptr<FILE, decltype(&fclose)>{fopen(path.c_str(), "re"), fclose};
    if (fp != nullptr) {
        char* line;
        while (true) {
            if (fgets(lineBuffer, sizeof(lineBuffer), fp.get()) == NULL) {
                break;
            }
            line = lineBuffer;
            // Dispatch on the first character, then match the full field name.
            switch (line[0]) {
                case 'P':
                    if (strncmp(line, "Pss:", 4) == 0) {
                        char* c = line + 4;
                        while (*c != 0 && (*c < '0' || *c > '9')) {
                            c++;
                        }
                        rollup->pss += atoi(c);
                    } else if (strncmp(line, "Private_Clean:", 14) == 0 ||
                               strncmp(line, "Private_Dirty:", 14) == 0) {
                        char* c = line + 14;
                        while (*c != 0 && (*c < '0' || *c > '9')) {
                            c++;
                        }
                        rollup->uss += atoi(c);
                    }
                    break;
                case 'R':
                    if (strncmp(line, "Rss:", 4) == 0) {
                        char* c = line + 4;
                        while (*c != 0 && (*c < '0' || *c > '9')) {
                            c++;
                        }
                        rollup->rss += atoi(c);
                    }
                    break;
                case 'S':
                    if (strncmp(line, "SwapPss:", 8) == 0) {
                        char* c = line + 8;
                        long lSwapPss;
                        while (*c != 0 && (*c < '0' || *c > '9')) {
                            c++;
                        }
                        lSwapPss = atoi(c);
                        rollup->swap_pss += lSwapPss;
                    }
                    break;
            }
        }
    } else {
        return false;
    }

    return true;
}
// Benchmark: parse the smaps fixture with the legacy rollup implementation
// and verify the known expected Pss.
static void BM_SmapsRollup_old(benchmark::State& state) {
    std::string exec_dir = ::android::base::GetExecutableDirectory();
    std::string path = ::android::base::StringPrintf("%s/testdata1/smaps", exec_dir.c_str());

    for (auto _ : state) {
        MemUsage stats;
        CHECK_EQ(get_smaps_rollup(path, &stats), true);
        CHECK_EQ(stats.pss, 108384);
    }
}
BENCHMARK(BM_SmapsRollup_old);
// Benchmark: parse the same smaps fixture with libmeminfo's
// SmapsOrRollupFromFile() and verify the same expected Pss.
static void BM_SmapsRollup_new(benchmark::State& state) {
    std::string exec_dir = ::android::base::GetExecutableDirectory();
    std::string path = ::android::base::StringPrintf("%s/testdata1/smaps", exec_dir.c_str());

    for (auto _ : state) {
        MemUsage stats;
        CHECK_EQ(SmapsOrRollupFromFile(path, &stats), true);
        CHECK_EQ(stats.pss, 108384);
    }
}
BENCHMARK(BM_SmapsRollup_new);

BENCHMARK_MAIN();

View File

@ -1,781 +0,0 @@
/*
* Copyright (C) 2018 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <sys/mman.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>
#include <gtest/gtest.h>
#include <string>
#include <vector>
#include <meminfo/pageacct.h>
#include <meminfo/procmeminfo.h>
#include <meminfo/sysmeminfo.h>
#include <android-base/file.h>
#include <android-base/logging.h>
#include <android-base/stringprintf.h>
#include <android-base/strings.h>
using namespace std;
using namespace android::meminfo;
pid_t pid = -1;
// Resetting the working set for the target pid must succeed.
// NOTE(review): the test name "TestWorkingTestReset" looks like it was
// meant to be "TestWorkingSetReset" — renaming would change reported
// test ids, so it is left as-is.
TEST(ProcMemInfo, TestWorkingTestReset) {
    // Expect reset to succeed
    EXPECT_TRUE(ProcMemInfo::ResetWorkingSet(pid));
}
// A ProcMemInfo constructed in working-set mode must report all-zero
// cumulative usage stats.
TEST(ProcMemInfo, UsageEmpty) {
    // If we created the object for getting working set,
    // the usage must be empty
    ProcMemInfo proc_mem(pid, true);
    const MemUsage& usage = proc_mem.Usage();
    EXPECT_EQ(usage.rss, 0);
    EXPECT_EQ(usage.vss, 0);
    EXPECT_EQ(usage.pss, 0);
    EXPECT_EQ(usage.uss, 0);
    EXPECT_EQ(usage.swap, 0);
}
// A live process always has at least one VMA, so Maps() must be non-empty.
TEST(ProcMemInfo, MapsNotEmpty) {
    // Make sure the process maps are never empty
    ProcMemInfo proc_mem(pid);
    const std::vector<Vma>& maps = proc_mem.Maps();
    EXPECT_FALSE(maps.empty());
}
// Maps() must come with populated usage stats: every VMA has a non-zero
// VSS, and the process-wide RSS/PSS/USS totals are all non-zero.
TEST(ProcMemInfo, MapsUsageNotEmpty) {
    ProcMemInfo proc_mem(pid);
    const std::vector<Vma>& maps = proc_mem.Maps();
    EXPECT_FALSE(maps.empty());
    uint64_t total_pss = 0;
    uint64_t total_rss = 0;
    uint64_t total_uss = 0;
    for (auto& map : maps) {
        ASSERT_NE(0, map.usage.vss);
        total_rss += map.usage.rss;
        total_pss += map.usage.pss;
        total_uss += map.usage.uss;
    }

    // Crude check that stats are actually being read.
    EXPECT_NE(0, total_rss) << "RSS zero for all maps, that is not possible.";
    EXPECT_NE(0, total_pss) << "PSS zero for all maps, that is not possible.";
    EXPECT_NE(0, total_uss) << "USS zero for all maps, that is not possible.";
}
// MapsWithoutUsageStats() must return the VMAs with every usage field
// left at zero (stats are deliberately not collected).
TEST(ProcMemInfo, MapsUsageEmpty) {
    ProcMemInfo proc_mem(pid);
    const std::vector<Vma>& maps = proc_mem.MapsWithoutUsageStats();
    EXPECT_FALSE(maps.empty());
    // Verify that all usage stats are zero in every map.
    for (auto& map : maps) {
        ASSERT_EQ(0, map.usage.vss);
        ASSERT_EQ(0, map.usage.rss);
        ASSERT_EQ(0, map.usage.pss);
        ASSERT_EQ(0, map.usage.uss);
        ASSERT_EQ(0, map.usage.swap);
        ASSERT_EQ(0, map.usage.swap_pss);
        ASSERT_EQ(0, map.usage.private_clean);
        ASSERT_EQ(0, map.usage.private_dirty);
        ASSERT_EQ(0, map.usage.shared_clean);
        ASSERT_EQ(0, map.usage.shared_dirty);
    }
}
// A Vma copied from MapsWithoutUsageStats() starts with zeroed usage and can
// be populated on demand via FillInVmaStats() without re-reading all maps.
TEST(ProcMemInfo, MapsUsageFillInLater) {
    ProcMemInfo proc_mem(pid);
    const std::vector<Vma>& maps = proc_mem.MapsWithoutUsageStats();
    EXPECT_FALSE(maps.empty());
    for (auto& map : maps) {
        Vma update_map(map);
        // The copy must preserve the identity fields of the mapping...
        ASSERT_EQ(map.start, update_map.start);
        ASSERT_EQ(map.end, update_map.end);
        ASSERT_EQ(map.offset, update_map.offset);
        ASSERT_EQ(map.flags, update_map.flags);
        ASSERT_EQ(map.name, update_map.name);
        // ...while every usage counter starts out zero.
        ASSERT_EQ(0, update_map.usage.vss);
        ASSERT_EQ(0, update_map.usage.rss);
        ASSERT_EQ(0, update_map.usage.pss);
        ASSERT_EQ(0, update_map.usage.uss);
        ASSERT_EQ(0, update_map.usage.swap);
        ASSERT_EQ(0, update_map.usage.swap_pss);
        ASSERT_EQ(0, update_map.usage.private_clean);
        ASSERT_EQ(0, update_map.usage.private_dirty);
        ASSERT_EQ(0, update_map.usage.shared_clean);
        ASSERT_EQ(0, update_map.usage.shared_dirty);
        ASSERT_TRUE(proc_mem.FillInVmaStats(update_map));
        // Check that at least one usage stat was updated.
        ASSERT_NE(0, update_map.usage.vss);
    }
}
// Creates an isolated anonymous mapping, then uses PageMap() to verify that
// pages read as not-present until touched and present afterwards.
TEST(ProcMemInfo, PageMapPresent) {
    static constexpr size_t kNumPages = 20;
    size_t pagesize = getpagesize();
    // Map two extra pages so the first and last can be unmapped below.
    void* ptr = mmap(nullptr, pagesize * (kNumPages + 2), PROT_READ | PROT_WRITE,
                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    ASSERT_NE(MAP_FAILED, ptr);
    // Unmap the first page and the last page so that we guarantee this
    // map is in a map by itself.
    ASSERT_EQ(0, munmap(ptr, pagesize));
    uintptr_t addr = reinterpret_cast<uintptr_t>(ptr) + pagesize;
    ASSERT_EQ(0, munmap(reinterpret_cast<void*>(addr + kNumPages * pagesize), pagesize));
    ProcMemInfo proc_mem(getpid());
    const std::vector<Vma>& maps = proc_mem.MapsWithoutUsageStats();
    ASSERT_FALSE(maps.empty());
    // Find the vma associated with our previously created map.
    const Vma* test_vma = nullptr;
    for (const Vma& vma : maps) {
        if (vma.start == addr) {
            test_vma = &vma;
            break;
        }
    }
    ASSERT_TRUE(test_vma != nullptr) << "Cannot find test map.";
    // Verify that none of the pages are listed as present: the mapping has
    // been created but never touched.
    std::vector<uint64_t> pagemap;
    ASSERT_TRUE(proc_mem.PageMap(*test_vma, &pagemap));
    ASSERT_EQ(kNumPages, pagemap.size());
    for (size_t i = 0; i < pagemap.size(); i++) {
        EXPECT_FALSE(android::meminfo::page_present(pagemap[i]))
                << "Page " << i << " is present and it should not be.";
    }
    // Make some of the pages present and verify that we see them
    // as present.
    uint8_t* data = reinterpret_cast<uint8_t*>(addr);
    data[0] = 1;
    data[pagesize * 5] = 1;
    data[pagesize * 11] = 1;
    ASSERT_TRUE(proc_mem.PageMap(*test_vma, &pagemap));
    ASSERT_EQ(kNumPages, pagemap.size());
    for (size_t i = 0; i < pagemap.size(); i++) {
        if (i == 0 || i == 5 || i == 11) {
            EXPECT_TRUE(android::meminfo::page_present(pagemap[i]))
                    << "Page " << i << " is not present and it should be.";
        } else {
            EXPECT_FALSE(android::meminfo::page_present(pagemap[i]))
                    << "Page " << i << " is present and it should not be.";
        }
    }
    ASSERT_EQ(0, munmap(reinterpret_cast<void*>(addr), kNumPages * pagesize));
}
TEST(ProcMemInfo, WssEmpty) {
    // An object constructed in usage mode (get_wss == false) must report an
    // all-zero working set.
    ProcMemInfo info(pid, false);
    const MemUsage& ws = info.Wss();
    EXPECT_EQ(ws.rss, 0);
    EXPECT_EQ(ws.vss, 0);
    EXPECT_EQ(ws.pss, 0);
    EXPECT_EQ(ws.uss, 0);
    EXPECT_EQ(ws.swap, 0);
}
TEST(ProcMemInfo, SwapOffsetsEmpty) {
    // Working-set mode objects never populate swap offsets.
    ProcMemInfo info(pid, true);
    const std::vector<uint16_t>& offsets = info.SwapOffsets();
    EXPECT_EQ(offsets.size(), 0);
}
TEST(ProcMemInfo, IsSmapsSupportedTest) {
    // Get any pid and check if /proc/<pid>/smaps_rollup exists using the API.
    // Once the API has determined support, it must keep returning that same
    // answer regardless of the pid argument in later calls.
    std::string path = ::android::base::StringPrintf("/proc/%d/smaps_rollup", pid);
    bool supported = IsSmapsRollupSupported(pid);
    EXPECT_EQ(!access(path.c_str(), F_OK | R_OK), supported);
    // Second call must return what the first one returned regardless of the pid parameter.
    // So, deliberately pass invalid pid.
    EXPECT_EQ(supported, IsSmapsRollupSupported(-1));
}
TEST(ProcMemInfo, SmapsOrRollupTest) {
    // Make sure we can parse 'smaps_rollup' correctly
    std::string rollup =
            R"rollup(12c00000-7fe859e000 ---p 00000000 00:00 0 [rollup]
Rss: 331908 kB
Pss: 202052 kB
Shared_Clean: 158492 kB
Shared_Dirty: 18928 kB
Private_Clean: 90472 kB
Private_Dirty: 64016 kB
Referenced: 318700 kB
Anonymous: 81984 kB
AnonHugePages: 0 kB
Shared_Hugetlb: 0 kB
Private_Hugetlb: 0 kB
Swap: 5344 kB
SwapPss: 442 kB
Locked: 1523537 kB)rollup";
    TemporaryFile tf;
    ASSERT_TRUE(tf.fd != -1);
    ASSERT_TRUE(::android::base::WriteStringToFd(rollup, tf.fd));
    MemUsage stats;
    ASSERT_EQ(SmapsOrRollupFromFile(tf.path, &stats), true);
    EXPECT_EQ(stats.rss, 331908);
    EXPECT_EQ(stats.pss, 202052);
    // uss is derived: Private_Clean (90472) + Private_Dirty (64016).
    EXPECT_EQ(stats.uss, 154488);
    EXPECT_EQ(stats.private_clean, 90472);
    EXPECT_EQ(stats.private_dirty, 64016);
    EXPECT_EQ(stats.swap_pss, 442);
}
TEST(ProcMemInfo, SmapsOrRollupSmapsTest) {
    // Make sure /proc/<pid>/smaps is parsed correctly
    std::string smaps =
            R"smaps(12c00000-13440000 rw-p 00000000 00:00 0 [anon:dalvik-main space (region space)]
Name: [anon:dalvik-main space (region space)]
Size: 8448 kB
KernelPageSize: 4 kB
MMUPageSize: 4 kB
Rss: 2652 kB
Pss: 2652 kB
Shared_Clean: 840 kB
Shared_Dirty: 40 kB
Private_Clean: 84 kB
Private_Dirty: 2652 kB
Referenced: 2652 kB
Anonymous: 2652 kB
AnonHugePages: 0 kB
ShmemPmdMapped: 0 kB
Shared_Hugetlb: 0 kB
Private_Hugetlb: 0 kB
Swap: 102 kB
SwapPss: 70 kB
Locked: 2652 kB
VmFlags: rd wr mr mw me ac
)smaps";
    TemporaryFile tf;
    ASSERT_TRUE(tf.fd != -1);
    ASSERT_TRUE(::android::base::WriteStringToFd(smaps, tf.fd));
    MemUsage stats;
    ASSERT_EQ(SmapsOrRollupFromFile(tf.path, &stats), true);
    EXPECT_EQ(stats.rss, 2652);
    EXPECT_EQ(stats.pss, 2652);
    // uss is derived: Private_Clean (84) + Private_Dirty (2652).
    EXPECT_EQ(stats.uss, 2736);
    EXPECT_EQ(stats.private_clean, 84);
    EXPECT_EQ(stats.private_dirty, 2652);
    EXPECT_EQ(stats.swap_pss, 70);
}
TEST(ProcMemInfo, SmapsOrRollupPssRollupTest) {
    // Make sure /proc/<pid>/smaps is parsed correctly
    // to get the PSS
    std::string smaps =
            R"smaps(12c00000-13440000 rw-p 00000000 00:00 0 [anon:dalvik-main space (region space)]
Name: [anon:dalvik-main space (region space)]
Size: 8448 kB
KernelPageSize: 4 kB
MMUPageSize: 4 kB
Rss: 2652 kB
Pss: 2652 kB
Shared_Clean: 840 kB
Shared_Dirty: 40 kB
Private_Clean: 84 kB
Private_Dirty: 2652 kB
Referenced: 2652 kB
Anonymous: 2652 kB
AnonHugePages: 0 kB
ShmemPmdMapped: 0 kB
Shared_Hugetlb: 0 kB
Private_Hugetlb: 0 kB
Swap: 102 kB
SwapPss: 70 kB
Locked: 2652 kB
VmFlags: rd wr mr mw me ac
)smaps";
    TemporaryFile tf;
    ASSERT_TRUE(tf.fd != -1);
    ASSERT_TRUE(::android::base::WriteStringToFd(smaps, tf.fd));
    uint64_t pss;
    // Only the Pss: field should be extracted.
    ASSERT_EQ(SmapsOrRollupPssFromFile(tf.path, &pss), true);
    EXPECT_EQ(pss, 2652);
}
TEST(ProcMemInfo, SmapsOrRollupPssSmapsTest) {
    // Parse the checked-in smaps fixture and verify the aggregated PSS.
    std::string exec_dir = ::android::base::GetExecutableDirectory();
    std::string path = ::android::base::StringPrintf("%s/testdata1/smaps_short", exec_dir.c_str());
    uint64_t total_pss;
    ASSERT_EQ(SmapsOrRollupPssFromFile(path, &total_pss), true);
    EXPECT_EQ(total_pss, 19119);
}
TEST(ProcMemInfo, ForEachVmaFromFileTest) {
    // Parse smaps file correctly to make callbacks for each virtual memory area (vma)
    std::string exec_dir = ::android::base::GetExecutableDirectory();
    std::string path = ::android::base::StringPrintf("%s/testdata1/smaps_short", exec_dir.c_str());
    ProcMemInfo proc_mem(pid);
    std::vector<Vma> vmas;
    auto collect_vmas = [&](const Vma& v) { vmas.push_back(v); };
    ASSERT_TRUE(ForEachVmaFromFile(path, collect_vmas));
    // We should get a total of 6 vmas
    ASSERT_EQ(vmas.size(), 6);
    // A wrong size for the very first vma means the fixture itself changed.
    ASSERT_EQ(vmas[0].usage.vss, 32768);
    // Check for names
    EXPECT_EQ(vmas[0].name, "[anon:dalvik-zygote-jit-code-cache]");
    EXPECT_EQ(vmas[1].name, "/system/framework/x86_64/boot-framework.art");
    EXPECT_TRUE(vmas[2].name == "[anon:libc_malloc]" ||
                android::base::StartsWith(vmas[2].name, "[anon:scudo:"))
            << "Unknown map name " << vmas[2].name;
    EXPECT_EQ(vmas[3].name, "/system/priv-app/SettingsProvider/oat/x86_64/SettingsProvider.odex");
    EXPECT_EQ(vmas[4].name, "/system/lib64/libhwui.so");
    EXPECT_EQ(vmas[5].name, "[vsyscall]");
    // Expected usage values per vma, taken from testdata1/smaps_short.
    struct ExpectedUsage {
        uint64_t vss, rss, pss, uss;
        uint64_t private_clean, private_dirty, shared_clean, shared_dirty;
        uint64_t swap, swap_pss;
    };
    const ExpectedUsage want[6] = {
            {32768, 2048, 113, 0, 0, 0, 0, 2048, 0, 0},
            {11204, 11188, 2200, 1660, 0, 1660, 80, 9448, 0, 0},
            {16896, 15272, 15272, 15272, 0, 15272, 0, 0, 0, 0},
            {260, 260, 260, 260, 260, 0, 0, 0, 0, 0},
            {6060, 4132, 1274, 0, 0, 0, 4132, 0, 0, 0},
            {4, 0, 0, 0, 0, 0, 0, 0, 0, 0},
    };
    for (size_t i = 0; i < vmas.size(); i++) {
        const MemUsage& u = vmas[i].usage;
        EXPECT_EQ(u.vss, want[i].vss) << "vma " << i;
        EXPECT_EQ(u.rss, want[i].rss) << "vma " << i;
        EXPECT_EQ(u.pss, want[i].pss) << "vma " << i;
        EXPECT_EQ(u.uss, want[i].uss) << "vma " << i;
        EXPECT_EQ(u.private_clean, want[i].private_clean) << "vma " << i;
        EXPECT_EQ(u.private_dirty, want[i].private_dirty) << "vma " << i;
        EXPECT_EQ(u.shared_clean, want[i].shared_clean) << "vma " << i;
        EXPECT_EQ(u.shared_dirty, want[i].shared_dirty) << "vma " << i;
        EXPECT_EQ(u.swap, want[i].swap) << "vma " << i;
        EXPECT_EQ(u.swap_pss, want[i].swap_pss) << "vma " << i;
    }
}
TEST(ProcMemInfo, SmapsReturnTest) {
    // Smaps() with the default path must never be empty for a live process.
    ProcMemInfo info(pid);
    auto vmas = info.Smaps();
    EXPECT_FALSE(vmas.empty());
}
TEST(ProcMemInfo, SmapsTest) {
    // Smaps() with an explicit file path must parse that file rather than
    // the live /proc/<pid>/smaps.
    std::string exec_dir = ::android::base::GetExecutableDirectory();
    std::string path = ::android::base::StringPrintf("%s/testdata1/smaps_short", exec_dir.c_str());
    ProcMemInfo proc_mem(pid);
    auto vmas = proc_mem.Smaps(path);
    ASSERT_FALSE(vmas.empty());
    // We should get a total of 6 vmas
    ASSERT_EQ(vmas.size(), 6);
    // A wrong size for the very first vma means the fixture itself changed.
    ASSERT_EQ(vmas[0].usage.vss, 32768);
    // Check for names
    EXPECT_EQ(vmas[0].name, "[anon:dalvik-zygote-jit-code-cache]");
    EXPECT_EQ(vmas[1].name, "/system/framework/x86_64/boot-framework.art");
    EXPECT_TRUE(vmas[2].name == "[anon:libc_malloc]" ||
                android::base::StartsWith(vmas[2].name, "[anon:scudo:"))
            << "Unknown map name " << vmas[2].name;
    EXPECT_EQ(vmas[3].name, "/system/priv-app/SettingsProvider/oat/x86_64/SettingsProvider.odex");
    EXPECT_EQ(vmas[4].name, "/system/lib64/libhwui.so");
    EXPECT_EQ(vmas[5].name, "[vsyscall]");
    // Expected usage values per vma, taken from testdata1/smaps_short.
    struct ExpectedUsage {
        uint64_t vss, rss, pss, uss;
        uint64_t private_clean, private_dirty, shared_clean, shared_dirty;
        uint64_t swap, swap_pss;
    };
    const ExpectedUsage want[6] = {
            {32768, 2048, 113, 0, 0, 0, 0, 2048, 0, 0},
            {11204, 11188, 2200, 1660, 0, 1660, 80, 9448, 0, 0},
            {16896, 15272, 15272, 15272, 0, 15272, 0, 0, 0, 0},
            {260, 260, 260, 260, 260, 0, 0, 0, 0, 0},
            {6060, 4132, 1274, 0, 0, 0, 4132, 0, 0, 0},
            {4, 0, 0, 0, 0, 0, 0, 0, 0, 0},
    };
    for (size_t i = 0; i < vmas.size(); i++) {
        const MemUsage& u = vmas[i].usage;
        EXPECT_EQ(u.vss, want[i].vss) << "vma " << i;
        EXPECT_EQ(u.rss, want[i].rss) << "vma " << i;
        EXPECT_EQ(u.pss, want[i].pss) << "vma " << i;
        EXPECT_EQ(u.uss, want[i].uss) << "vma " << i;
        EXPECT_EQ(u.private_clean, want[i].private_clean) << "vma " << i;
        EXPECT_EQ(u.private_dirty, want[i].private_dirty) << "vma " << i;
        EXPECT_EQ(u.shared_clean, want[i].shared_clean) << "vma " << i;
        EXPECT_EQ(u.shared_dirty, want[i].shared_dirty) << "vma " << i;
        EXPECT_EQ(u.swap, want[i].swap) << "vma " << i;
        EXPECT_EQ(u.swap_pss, want[i].swap_pss) << "vma " << i;
    }
}
// Verifies that every accessor of SysMemInfo reflects the corresponding
// field of a synthetic /proc/meminfo file.
TEST(SysMemInfo, TestSysMemInfoFile) {
    std::string meminfo = R"meminfo(MemTotal: 3019740 kB
MemFree: 1809728 kB
MemAvailable: 2546560 kB
Buffers: 54736 kB
Cached: 776052 kB
SwapCached: 0 kB
Active: 445856 kB
Inactive: 459092 kB
Active(anon): 78492 kB
Inactive(anon): 2240 kB
Active(file): 367364 kB
Inactive(file): 456852 kB
Unevictable: 3096 kB
Mlocked: 3096 kB
SwapTotal: 32768 kB
SwapFree: 4096 kB
Dirty: 32 kB
Writeback: 0 kB
AnonPages: 74988 kB
Mapped: 62624 kB
Shmem: 4020 kB
Slab: 86464 kB
SReclaimable: 44432 kB
SUnreclaim: 42032 kB
KernelStack: 4880 kB
PageTables: 2900 kB
NFS_Unstable: 0 kB
Bounce: 0 kB
WritebackTmp: 0 kB
CommitLimit: 1509868 kB
Committed_AS: 80296 kB
VmallocTotal: 263061440 kB
VmallocUsed: 65536 kB
VmallocChunk: 0 kB
AnonHugePages: 6144 kB
ShmemHugePages: 0 kB
ShmemPmdMapped: 0 kB
CmaTotal: 131072 kB
CmaFree: 130380 kB
HugePages_Total: 0
HugePages_Free: 0
HugePages_Rsvd: 0
HugePages_Surp: 0
Hugepagesize: 2048 kB)meminfo";
    TemporaryFile tf;
    ASSERT_TRUE(tf.fd != -1);
    ASSERT_TRUE(::android::base::WriteStringToFd(meminfo, tf.fd));
    SysMemInfo mi;
    ASSERT_TRUE(mi.ReadMemInfo(tf.path));
    // Each accessor must match the value written above.
    EXPECT_EQ(mi.mem_total_kb(), 3019740);
    EXPECT_EQ(mi.mem_free_kb(), 1809728);
    EXPECT_EQ(mi.mem_buffers_kb(), 54736);
    EXPECT_EQ(mi.mem_cached_kb(), 776052);
    EXPECT_EQ(mi.mem_shmem_kb(), 4020);
    EXPECT_EQ(mi.mem_slab_kb(), 86464);
    EXPECT_EQ(mi.mem_slab_reclaimable_kb(), 44432);
    EXPECT_EQ(mi.mem_slab_unreclaimable_kb(), 42032);
    EXPECT_EQ(mi.mem_swap_kb(), 32768);
    EXPECT_EQ(mi.mem_swap_free_kb(), 4096);
    EXPECT_EQ(mi.mem_mapped_kb(), 62624);
    EXPECT_EQ(mi.mem_vmalloc_used_kb(), 65536);
    EXPECT_EQ(mi.mem_page_tables_kb(), 2900);
    EXPECT_EQ(mi.mem_kernel_stack_kb(), 4880);
}
TEST(SysMemInfo, TestEmptyFile) {
    // Parsing an empty meminfo file must still succeed and leave the
    // fields at zero.
    TemporaryFile tf;
    ASSERT_TRUE(tf.fd != -1);
    std::string empty_string = "";
    ASSERT_TRUE(::android::base::WriteStringToFd(empty_string, tf.fd));
    SysMemInfo mi;
    EXPECT_TRUE(mi.ReadMemInfo(tf.path));
    EXPECT_EQ(mi.mem_total_kb(), 0);
}
TEST(SysMemInfo, TestZramTotal) {
    // Both fixture directory layouts (testdata1 and testdata2) encode a
    // zram total of 30504 kB and must be read identically.
    std::string exec_dir = ::android::base::GetExecutableDirectory();
    SysMemInfo mi;
    std::string mmstat_dir = exec_dir + "/testdata1/";
    EXPECT_EQ(mi.mem_zram_kb(mmstat_dir), 30504);
    std::string memused_dir = exec_dir + "/testdata2/";
    EXPECT_EQ(mi.mem_zram_kb(memused_dir), 30504);
}
// Indices into the 'mem' vector used by TestZramWithTags below. The order
// must match the tag list handed to SysMemInfo::ReadMemInfo().
enum {
    MEMINFO_TOTAL,
    MEMINFO_FREE,
    MEMINFO_BUFFERS,
    MEMINFO_CACHED,
    MEMINFO_SHMEM,
    MEMINFO_SLAB,
    MEMINFO_SLAB_RECLAIMABLE,
    MEMINFO_SLAB_UNRECLAIMABLE,
    MEMINFO_SWAP_TOTAL,
    MEMINFO_SWAP_FREE,
    MEMINFO_ZRAM_TOTAL,
    MEMINFO_MAPPED,
    MEMINFO_VMALLOC_USED,
    MEMINFO_PAGE_TABLES,
    MEMINFO_KERNEL_STACK,
    MEMINFO_COUNT  // number of entries; also sizes the result vector
};
// Verifies the tag-list overload of ReadMemInfo: a caller-supplied tag
// ("Zram:") is spliced into the default tag list and each requested field is
// returned positionally in the output vector.
TEST(SysMemInfo, TestZramWithTags) {
    std::string meminfo = R"meminfo(MemTotal: 3019740 kB
MemFree: 1809728 kB
MemAvailable: 2546560 kB
Buffers: 54736 kB
Cached: 776052 kB
SwapCached: 0 kB
Active: 445856 kB
Inactive: 459092 kB
Active(anon): 78492 kB
Inactive(anon): 2240 kB
Active(file): 367364 kB
Inactive(file): 456852 kB
Unevictable: 3096 kB
Mlocked: 3096 kB
SwapTotal: 32768 kB
SwapFree: 4096 kB
Dirty: 32 kB
Writeback: 0 kB
AnonPages: 74988 kB
Mapped: 62624 kB
Shmem: 4020 kB
Slab: 86464 kB
SReclaimable: 44432 kB
SUnreclaim: 42032 kB
KernelStack: 4880 kB
PageTables: 2900 kB
NFS_Unstable: 0 kB
Bounce: 0 kB
WritebackTmp: 0 kB
CommitLimit: 1509868 kB
Committed_AS: 80296 kB
VmallocTotal: 263061440 kB
VmallocUsed: 65536 kB
VmallocChunk: 0 kB
AnonHugePages: 6144 kB
ShmemHugePages: 0 kB
ShmemPmdMapped: 0 kB
CmaTotal: 131072 kB
CmaFree: 130380 kB
HugePages_Total: 0
HugePages_Free: 0
HugePages_Rsvd: 0
HugePages_Surp: 0
Hugepagesize: 2048 kB)meminfo";
    TemporaryFile tf;
    ASSERT_TRUE(tf.fd != -1);
    ASSERT_TRUE(::android::base::WriteStringToFd(meminfo, tf.fd));
    std::string file = std::string(tf.path);
    std::vector<uint64_t> mem(MEMINFO_COUNT);
    std::vector<std::string> tags(SysMemInfo::kDefaultSysMemInfoTags);
    // Insert the extra "Zram:" tag at the position matching
    // MEMINFO_ZRAM_TOTAL so indices stay aligned with the enum above.
    auto it = tags.begin();
    tags.insert(it + MEMINFO_ZRAM_TOTAL, "Zram:");
    SysMemInfo mi;
    // Read system memory info
    EXPECT_TRUE(mi.ReadMemInfo(tags, &mem, file));
    EXPECT_EQ(mem[MEMINFO_TOTAL], 3019740);
    EXPECT_EQ(mem[MEMINFO_FREE], 1809728);
    EXPECT_EQ(mem[MEMINFO_BUFFERS], 54736);
    EXPECT_EQ(mem[MEMINFO_CACHED], 776052);
    EXPECT_EQ(mem[MEMINFO_SHMEM], 4020);
    EXPECT_EQ(mem[MEMINFO_SLAB], 86464);
    EXPECT_EQ(mem[MEMINFO_SLAB_RECLAIMABLE], 44432);
    EXPECT_EQ(mem[MEMINFO_SLAB_UNRECLAIMABLE], 42032);
    EXPECT_EQ(mem[MEMINFO_SWAP_TOTAL], 32768);
    EXPECT_EQ(mem[MEMINFO_SWAP_FREE], 4096);
    EXPECT_EQ(mem[MEMINFO_MAPPED], 62624);
    EXPECT_EQ(mem[MEMINFO_VMALLOC_USED], 65536);
    EXPECT_EQ(mem[MEMINFO_PAGE_TABLES], 2900);
    EXPECT_EQ(mem[MEMINFO_KERNEL_STACK], 4880);
}
// ioremap entries carry no "pages=" field, so they must not be counted as
// vmalloc'ed memory; the total here must therefore be 0.
TEST(SysMemInfo, TestVmallocInfoNoMemory) {
    std::string vmallocinfo =
            R"vmallocinfo(0x0000000000000000-0x0000000000000000 69632 of_iomap+0x78/0xb0 phys=17a00000 ioremap
0x0000000000000000-0x0000000000000000 8192 of_iomap+0x78/0xb0 phys=b220000 ioremap
0x0000000000000000-0x0000000000000000 8192 of_iomap+0x78/0xb0 phys=17c90000 ioremap
0x0000000000000000-0x0000000000000000 8192 of_iomap+0x78/0xb0 phys=17ca0000 ioremap)vmallocinfo";
    TemporaryFile tf;
    ASSERT_TRUE(tf.fd != -1);
    ASSERT_TRUE(::android::base::WriteStringToFd(vmallocinfo, tf.fd));
    std::string file = std::string(tf.path);
    EXPECT_EQ(ReadVmallocInfo(file), 0);
}
TEST(SysMemInfo, TestVmallocInfoKernel) {
    // A single kernel vmalloc entry with pages=1 must account for exactly
    // one page.
    std::string vmallocinfo =
            R"vmallocinfo(0x0000000000000000-0x0000000000000000 8192 drm_property_create_blob+0x44/0xec pages=1 vmalloc)vmallocinfo";
    TemporaryFile tf;
    ASSERT_TRUE(tf.fd != -1);
    ASSERT_TRUE(::android::base::WriteStringToFd(vmallocinfo, tf.fd));
    std::string path = std::string(tf.path);
    EXPECT_EQ(ReadVmallocInfo(path), getpagesize());
}
TEST(SysMemInfo, TestVmallocInfoModule) {
    // A module ([wlan]) vmalloc entry with pages=6 must account for exactly
    // six pages.
    std::string vmallocinfo =
            R"vmallocinfo(0x0000000000000000-0x0000000000000000 28672 pktlog_alloc_buf+0xc4/0x15c [wlan] pages=6 vmalloc)vmallocinfo";
    TemporaryFile tf;
    ASSERT_TRUE(tf.fd != -1);
    ASSERT_TRUE(::android::base::WriteStringToFd(vmallocinfo, tf.fd));
    std::string path = std::string(tf.path);
    EXPECT_EQ(ReadVmallocInfo(path), 6 * getpagesize());
}
// Mixed input: only the entries carrying a "pages=" field contribute
// (1 + 6 = 7 pages); the ioremap lines are ignored.
TEST(SysMemInfo, TestVmallocInfoAll) {
    std::string vmallocinfo =
            R"vmallocinfo(0x0000000000000000-0x0000000000000000 69632 of_iomap+0x78/0xb0 phys=17a00000 ioremap
0x0000000000000000-0x0000000000000000 8192 of_iomap+0x78/0xb0 phys=b220000 ioremap
0x0000000000000000-0x0000000000000000 8192 of_iomap+0x78/0xb0 phys=17c90000 ioremap
0x0000000000000000-0x0000000000000000 8192 of_iomap+0x78/0xb0 phys=17ca0000 ioremap
0x0000000000000000-0x0000000000000000 8192 drm_property_create_blob+0x44/0xec pages=1 vmalloc
0x0000000000000000-0x0000000000000000 28672 pktlog_alloc_buf+0xc4/0x15c [wlan] pages=6 vmalloc)vmallocinfo";
    TemporaryFile tf;
    ASSERT_TRUE(tf.fd != -1);
    ASSERT_TRUE(::android::base::WriteStringToFd(vmallocinfo, tf.fd));
    std::string file = std::string(tf.path);
    EXPECT_EQ(ReadVmallocInfo(file), 7 * getpagesize());
}
// Test entry point: initializes gtest and logging, caches our own pid for
// the ProcMemInfo tests above, then runs the whole suite.
int main(int argc, char** argv) {
    ::testing::InitGoogleTest(&argc, argv);
    ::android::base::InitLogging(argv, android::base::StderrLogger);
    pid = getpid();
    return RUN_ALL_TESTS();
}

View File

@ -1,34 +0,0 @@
/*
* Copyright (C) 2018 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include <sys/types.h>
#include <unistd.h>
#include <meminfo/meminfo.h>
#include <meminfo/pageacct.h>
#include <meminfo/procmeminfo.h>
#include <meminfo/sysmeminfo.h>
// Macros to do per-page flag manipulation
// _BITS extracts 'bits' bits starting at 'offset' from a 64-bit
// /proc/<pid>/pagemap entry.
// NOTE(review): _BITS is a reserved identifier (leading underscore +
// uppercase); renaming would touch all users of this header, so it is
// only flagged here.
#define _BITS(x, offset, bits) (((x) >> (offset)) & ((1LL << (bits)) - 1))
// Bit 63: page is present in RAM.
#define PAGE_PRESENT(x) (_BITS(x, 63, 1))
// Bit 62: page is swapped out.
#define PAGE_SWAPPED(x) (_BITS(x, 62, 1))
// Bits 55-60 of the entry (see the kernel pagemap documentation for their
// exact meaning on the running kernel).
#define PAGE_SHIFT(x) (_BITS(x, 55, 6))
// Bits 0-54: page frame number, valid only when the page is present.
#define PAGE_PFN(x) (_BITS(x, 0, 55))
// For swapped pages: bits 5-54 hold the swap offset...
#define PAGE_SWAP_OFFSET(x) (_BITS(x, 5, 50))
// ...and bits 0-4 the swap type.
#define PAGE_SWAP_TYPE(x) (_BITS(x, 0, 5))

View File

@ -1,157 +0,0 @@
/*
* Copyright (C) 2018 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <errno.h>
#include <fcntl.h>
#include <unistd.h>
#include <android-base/logging.h>
#include <android-base/unique_fd.h>
#include "meminfo_private.h"
using unique_fd = ::android::base::unique_fd;
namespace android {
namespace meminfo {
// Byte offset into /sys/kernel/mm/page_idle/bitmap of the 64-bit word that
// holds the bit for 'pfn' (one bit per page, eight bytes per 64 pages).
static inline off64_t pfn_to_idle_bitmap_offset(uint64_t pfn) {
    return static_cast<off64_t>((pfn / 64) * sizeof(uint64_t));
}
// Returns the system page size; the sysconf() result is cached because the
// page size never changes at runtime.
uint64_t pagesize(void) {
    static const uint64_t kPageSize = sysconf(_SC_PAGE_SIZE);
    return kPageSize;
}
// Lazily opens the /proc page accounting files. kpagecount and kpageflags
// are always opened; the page_idle bitmap only when 'pageidle_enable' is
// set. Each fd is opened at most once and kept for the object's lifetime.
// Returns false (with the error logged) if any required file cannot be
// opened or idle tracking is requested but unsupported.
bool PageAcct::InitPageAcct(bool pageidle_enable) {
    if (pageidle_enable && !PageAcct::KernelHasPageIdle()) {
        LOG(ERROR) << "Idle page tracking is not supported by the kernel";
        return false;
    }
    if (kpagecount_fd_ < 0) {
        unique_fd count_fd(TEMP_FAILURE_RETRY(open("/proc/kpagecount", O_RDONLY | O_CLOEXEC)));
        if (count_fd < 0) {
            PLOG(ERROR) << "Failed to open /proc/kpagecount";
            return false;
        }
        kpagecount_fd_ = std::move(count_fd);
    }
    if (kpageflags_fd_ < 0) {
        unique_fd flags_fd(TEMP_FAILURE_RETRY(open("/proc/kpageflags", O_RDONLY | O_CLOEXEC)));
        if (flags_fd < 0) {
            PLOG(ERROR) << "Failed to open /proc/kpageflags";
            return false;
        }
        kpageflags_fd_ = std::move(flags_fd);
    }
    // The idle bitmap needs O_RDWR: marking a page idle is a write.
    if (pageidle_enable && pageidle_fd_ < 0) {
        unique_fd idle_fd(
                TEMP_FAILURE_RETRY(open("/sys/kernel/mm/page_idle/bitmap", O_RDWR | O_CLOEXEC)));
        if (idle_fd < 0) {
            PLOG(ERROR) << "Failed to open page idle bitmap";
            return false;
        }
        pageidle_fd_ = std::move(idle_fd);
    }
    return true;
}
// Reads the 64-bit kernel flags word for page frame 'pfn' from
// /proc/kpageflags into *flags. Initializes the fd lazily. Returns false on
// a null output pointer, init failure, or a short/failed read.
bool PageAcct::PageFlags(uint64_t pfn, uint64_t* flags) {
    if (!flags) return false;
    if (kpageflags_fd_ < 0) {
        if (!InitPageAcct()) return false;
    }
    // kpageflags is an array of uint64_t indexed by pfn.
    if (pread64(kpageflags_fd_, flags, sizeof(uint64_t), pfn * sizeof(uint64_t)) !=
        sizeof(uint64_t)) {
        PLOG(ERROR) << "Failed to read page flags for page " << pfn;
        return false;
    }
    return true;
}
// Reads the map count (number of mappings) for page frame 'pfn' from
// /proc/kpagecount into *mapcount. Initializes the fd lazily. Returns false
// on a null output pointer, init failure, or a short/failed read.
bool PageAcct::PageMapCount(uint64_t pfn, uint64_t* mapcount) {
    if (!mapcount) return false;
    if (kpagecount_fd_ < 0) {
        if (!InitPageAcct()) return false;
    }
    // kpagecount is an array of uint64_t indexed by pfn.
    if (pread64(kpagecount_fd_, mapcount, sizeof(uint64_t), pfn * sizeof(uint64_t)) !=
        sizeof(uint64_t)) {
        PLOG(ERROR) << "Failed to read map count for page " << pfn;
        return false;
    }
    return true;
}
// Marks page 'pfn' idle and then reads the bit back; returns 1/0 for
// idle/not-idle, a negative errno on I/O failure, or -EOPNOTSUPP when idle
// page tracking is unavailable.
int PageAcct::IsPageIdle(uint64_t pfn) {
    if (pageidle_fd_ < 0 && !InitPageAcct(true)) {
        return -EOPNOTSUPP;
    }
    const int mark_status = MarkPageIdle(pfn);
    return mark_status ? mark_status : GetPageIdle(pfn);
}
// Sets the idle bit for page frame 'pfn' in the page_idle bitmap. Returns 0
// on success or a negative errno on write failure. Requires pageidle_fd_ to
// already be open (see InitPageAcct(true)).
int PageAcct::MarkPageIdle(uint64_t pfn) const {
    off64_t offset = pfn_to_idle_bitmap_offset(pfn);
    // set the bit corresponding to page frame
    uint64_t idle_bits = 1ULL << (pfn % 64);
    if (pwrite64(pageidle_fd_, &idle_bits, sizeof(uint64_t), offset) < 0) {
        PLOG(ERROR) << "Failed to write page idle bitmap for page " << pfn;
        return -errno;
    }
    return 0;
}
// Reads the idle bit for page frame 'pfn' from the page_idle bitmap.
// Returns 1 if the page is idle, 0 if not, or a negative errno on read
// failure. Requires pageidle_fd_ to already be open.
int PageAcct::GetPageIdle(uint64_t pfn) const {
    off64_t offset = pfn_to_idle_bitmap_offset(pfn);
    uint64_t idle_bits;
    if (pread64(pageidle_fd_, &idle_bits, sizeof(uint64_t), offset) != sizeof(uint64_t)) {
        PLOG(ERROR) << "Failed to read page idle bitmap for page " << pfn;
        return -errno;
    }
    // Normalize the selected bit to 0/1.
    return !!(idle_bits & (1ULL << (pfn % 64)));
}
// Public methods
// Thin, fd-free helpers that decode a raw /proc/<pid>/pagemap entry using
// the bit-extraction macros from the header.
// True if the entry describes a page resident in RAM (bit 63).
bool page_present(uint64_t pagemap_val) {
    return PAGE_PRESENT(pagemap_val);
}
// True if the entry describes a swapped-out page (bit 62).
bool page_swapped(uint64_t pagemap_val) {
    return PAGE_SWAPPED(pagemap_val);
}
// Page frame number of the entry; meaningful only when the page is present.
uint64_t page_pfn(uint64_t pagemap_val) {
    return PAGE_PFN(pagemap_val);
}
} // namespace meminfo
} // namespace android

View File

@ -1,569 +0,0 @@
/*
* Copyright (C) 2018 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <errno.h>
#include <fcntl.h>
#include <inttypes.h>
#include <linux/kernel-page-flags.h>
#include <stdio.h>
#include <unistd.h>
#include <atomic>
#include <fstream>
#include <iostream>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include <android-base/file.h>
#include <android-base/logging.h>
#include <android-base/stringprintf.h>
#include <android-base/strings.h>
#include <android-base/unique_fd.h>
#include <procinfo/process_map.h>
#include "meminfo_private.h"
namespace android {
namespace meminfo {
// Accumulates every per-field counter of 'from' into 'to'; used to fold
// per-vma usage into process-wide totals.
static void add_mem_usage(MemUsage* to, const MemUsage& from) {
    to->vss += from.vss;
    to->rss += from.rss;
    to->pss += from.pss;
    to->uss += from.uss;
    to->swap += from.swap;
    // Fix: swap_pss was previously dropped during aggregation, so totals
    // always reported 0 even when per-vma stats (parsed from smaps by
    // parse_smaps_field) carried a SwapPss value.
    to->swap_pss += from.swap_pss;
    to->private_clean += from.private_clean;
    to->private_dirty += from.private_dirty;
    to->shared_clean += from.shared_clean;
    to->shared_dirty += from.shared_dirty;
}
// Returns true if the line was a valid smaps stats line, false otherwise.
// Recognized fields are parsed into 'stats'; uss is accumulated as the sum
// of Private_Clean and Private_Dirty, so 'stats' must start zeroed.
static bool parse_smaps_field(const char* line, MemUsage* stats) {
    char field[64];
    int len;
    // A stats line looks like "Field:   <value> kB"; %n records where the
    // numeric part begins.
    if (sscanf(line, "%63s %n", field, &len) == 1 && *field && field[strlen(field) - 1] == ':') {
        const char* c = line + len;
        // Dispatch on the first letter to keep the strncmp chains short.
        switch (field[0]) {
            case 'P':
                if (strncmp(field, "Pss:", 4) == 0) {
                    stats->pss = strtoull(c, nullptr, 10);
                } else if (strncmp(field, "Private_Clean:", 14) == 0) {
                    uint64_t prcl = strtoull(c, nullptr, 10);
                    stats->private_clean = prcl;
                    stats->uss += prcl;
                } else if (strncmp(field, "Private_Dirty:", 14) == 0) {
                    uint64_t prdi = strtoull(c, nullptr, 10);
                    stats->private_dirty = prdi;
                    stats->uss += prdi;
                }
                break;
            case 'S':
                if (strncmp(field, "Size:", 5) == 0) {
                    stats->vss = strtoull(c, nullptr, 10);
                } else if (strncmp(field, "Shared_Clean:", 13) == 0) {
                    stats->shared_clean = strtoull(c, nullptr, 10);
                } else if (strncmp(field, "Shared_Dirty:", 13) == 0) {
                    stats->shared_dirty = strtoull(c, nullptr, 10);
                } else if (strncmp(field, "Swap:", 5) == 0) {
                    stats->swap = strtoull(c, nullptr, 10);
                } else if (strncmp(field, "SwapPss:", 8) == 0) {
                    stats->swap_pss = strtoull(c, nullptr, 10);
                }
                break;
            case 'R':
                if (strncmp(field, "Rss:", 4) == 0) {
                    stats->rss = strtoull(c, nullptr, 10);
                }
                break;
        }
        return true;
    }
    return false;
}
// Resets the kernel's working-set accounting for 'pid' by writing "1" to
// its /proc/<pid>/clear_refs file. Returns false (with the error logged) if
// the write fails.
bool ProcMemInfo::ResetWorkingSet(pid_t pid) {
    std::string clear_refs_path = ::android::base::StringPrintf("/proc/%d/clear_refs", pid);
    if (::android::base::WriteStringToFile("1\n", clear_refs_path)) {
        return true;
    }
    PLOG(ERROR) << "Failed to write to " << clear_refs_path;
    return false;
}
// get_wss selects working-set mode (Wss()) vs. usage mode (Usage()).
// pgflags/pgflags_mask are stored for later page accounting — presumably to
// filter counted pages by kernel page flags; confirm against ReadMaps().
ProcMemInfo::ProcMemInfo(pid_t pid, bool get_wss, uint64_t pgflags, uint64_t pgflags_mask)
    : pid_(pid), get_wss_(get_wss), pgflags_(pgflags), pgflags_mask_(pgflags_mask) {}
// Returns the process' vmas with usage stats, reading them on first call
// and caching the result for the lifetime of this object.
const std::vector<Vma>& ProcMemInfo::Maps() {
    if (maps_.empty()) {
        if (!ReadMaps(get_wss_)) {
            LOG(ERROR) << "Failed to read maps for Process " << pid_;
        }
    }
    return maps_;
}
// Like Maps(), but enables idle page tracking while reading (second
// argument to ReadMaps). Result is cached on first call.
const std::vector<Vma>& ProcMemInfo::MapsWithPageIdle() {
    if (maps_.empty() && !ReadMaps(get_wss_, true)) {
        LOG(ERROR) << "Failed to read maps with page idle for Process " << pid_;
    }
    return maps_;
}
// Like Maps(), but skips usage-stat collection (third argument to
// ReadMaps), leaving every vma's usage counters zeroed; callers can fill
// individual vmas later via FillInVmaStats(). Result is cached.
const std::vector<Vma>& ProcMemInfo::MapsWithoutUsageStats() {
    if (maps_.empty() && !ReadMaps(get_wss_, false, false)) {
        LOG(ERROR) << "Failed to read maps for Process " << pid_;
    }
    return maps_;
}
// Returns the process' vmas parsed from smaps. An explicit 'path' reads
// that file; an empty path reads /proc/<pid_>/smaps. The result is cached,
// and cleared again if parsing fails.
const std::vector<Vma>& ProcMemInfo::Smaps(const std::string& path) {
    if (!maps_.empty()) {
        return maps_;
    }
    auto collect = [&](const Vma& vma) { maps_.emplace_back(vma); };
    if (path.empty()) {
        if (!ForEachVma(collect)) {
            LOG(ERROR) << "Failed to read smaps for Process " << pid_;
            maps_.clear();
        }
    } else if (!ForEachVmaFromFile(path, collect)) {
        LOG(ERROR) << "Failed to read smaps from file " << path;
        maps_.clear();
    }
    return maps_;
}
// Returns the process-wide usage totals. Only valid on objects constructed
// with get_wss == false; working-set objects get a warning and the (empty)
// cached value. Maps are read lazily on first call.
const MemUsage& ProcMemInfo::Usage() {
    if (get_wss_) {
        LOG(WARNING) << "Trying to read process memory usage for " << pid_
                     << " using invalid object";
        return usage_;
    }
    if (maps_.empty() && !ReadMaps(get_wss_)) {
        LOG(ERROR) << "Failed to get memory usage for Process " << pid_;
    }
    return usage_;
}
// Returns the process' working set. Only valid on objects constructed with
// get_wss == true; usage-mode objects get a warning and the (empty) cached
// value. Maps are read lazily on first call.
const MemUsage& ProcMemInfo::Wss() {
    if (!get_wss_) {
        LOG(WARNING) << "Trying to read process working set for " << pid_
                     << " using invalid object";
        return usage_;
    }
    if (maps_.empty() && !ReadMaps(get_wss_)) {
        LOG(ERROR) << "Failed to get working set for Process " << pid_;
    }
    return usage_;
}
bool ProcMemInfo::ForEachVma(const VmaCallback& callback) {
std::string path = ::android::base::StringPrintf("/proc/%d/smaps", pid_);
return ForEachVmaFromFile(path, callback);
}
// Fills 'stats' with process totals, preferring the cheap smaps_rollup file
// when the kernel supports it and falling back to full smaps otherwise.
bool ProcMemInfo::SmapsOrRollup(MemUsage* stats) const {
    std::string path = ::android::base::StringPrintf(
            "/proc/%d/%s", pid_, IsSmapsRollupSupported(pid_) ? "smaps_rollup" : "smaps");
    return SmapsOrRollupFromFile(path, stats);
}
// Like SmapsOrRollup(), but extracts only the total PSS into *pss.
bool ProcMemInfo::SmapsOrRollupPss(uint64_t* pss) const {
    std::string path = ::android::base::StringPrintf(
            "/proc/%d/%s", pid_, IsSmapsRollupSupported(pid_) ? "smaps_rollup" : "smaps");
    return SmapsOrRollupPssFromFile(path, pss);
}
// Returns the swap offsets collected while walking this process's pages.
// Like Usage(), this is only meaningful when the object was not created for
// working-set accounting.
const std::vector<uint16_t>& ProcMemInfo::SwapOffsets() {
    if (get_wss_) {
        LOG(WARNING) << "Trying to read process swap offsets for " << pid_
                     << " using invalid object";
        return swap_offsets_;
    }

    if (maps_.empty()) {
        if (!ReadMaps(get_wss_)) {
            LOG(ERROR) << "Failed to get swap offsets for Process " << pid_;
        }
    }
    return swap_offsets_;
}
// Fills |pagemap| with one raw /proc/<pid>/pagemap entry (a packed uint64_t)
// per page spanned by |vma|. Returns false on open failure, read failure, or
// a short read.
bool ProcMemInfo::PageMap(const Vma& vma, std::vector<uint64_t>* pagemap) {
    pagemap->clear();
    std::string pagemap_file = ::android::base::StringPrintf("/proc/%d/pagemap", pid_);
    ::android::base::unique_fd pagemap_fd(
            TEMP_FAILURE_RETRY(open(pagemap_file.c_str(), O_RDONLY | O_CLOEXEC)));
    if (pagemap_fd == -1) {
        PLOG(ERROR) << "Failed to open " << pagemap_file;
        return false;
    }

    uint64_t nr_pages = (vma.end - vma.start) / getpagesize();
    pagemap->resize(nr_pages);

    size_t bytes_to_read = sizeof(uint64_t) * nr_pages;
    // pagemap is an array of 8-byte entries indexed by virtual page number,
    // so the file offset for the VMA's first page is vpn * sizeof(uint64_t).
    off64_t start_addr = (vma.start / getpagesize()) * sizeof(uint64_t);
    ssize_t bytes_read = pread64(pagemap_fd, pagemap->data(), bytes_to_read, start_addr);
    if (bytes_read == -1) {
        PLOG(ERROR) << "Failed to read page frames from page map for pid: " << pid_;
        return false;
    } else if (static_cast<size_t>(bytes_read) != bytes_to_read) {
        // Short read: fewer entries than the VMA spans — treat as an error
        // rather than handing back partial data.
        LOG(ERROR) << "Failed to read page frames from page map for pid: " << pid_
                   << ": read bytes " << bytes_read << " expected bytes " << bytes_to_read;
        return false;
    }

    return true;
}
static int GetPagemapFd(pid_t pid) {
std::string pagemap_file = ::android::base::StringPrintf("/proc/%d/pagemap", pid);
int fd = TEMP_FAILURE_RETRY(open(pagemap_file.c_str(), O_RDONLY | O_CLOEXEC));
if (fd == -1) {
PLOG(ERROR) << "Failed to open " << pagemap_file;
}
return fd;
}
// Parses /proc/<pid>/maps into maps_ and, unless |get_usage_stats| is false,
// walks every VMA's pages to populate per-VMA and aggregate usage. Returns
// true on success (including the already-parsed case); on failure maps_ is
// cleared so a later call can retry.
bool ProcMemInfo::ReadMaps(bool get_wss, bool use_pageidle, bool get_usage_stats) {
    // Each object reads /proc/<pid>/maps only once. This is done to make sure programs that are
    // running for the lifetime of the system can recycle the objects and don't have to
    // unnecessarily retain and update this object in memory (which can get significantly large).
    // E.g. A program that only needs to reset the working set will never call ->Maps(), ->Usage().
    // E.g. A program that is monitoring smaps_rollup, may never call ->maps(), Usage(), so it
    // doesn't make sense for us to parse and retain unnecessary memory accounting stats by default.
    if (!maps_.empty()) return true;

    // parse and read /proc/<pid>/maps
    std::string maps_file = ::android::base::StringPrintf("/proc/%d/maps", pid_);
    if (!::android::procinfo::ReadMapFile(
                maps_file, [&](uint64_t start, uint64_t end, uint16_t flags, uint64_t pgoff, ino_t,
                               const char* name) {
                    maps_.emplace_back(Vma(start, end, pgoff, flags, name));
                })) {
        LOG(ERROR) << "Failed to parse " << maps_file;
        maps_.clear();
        return false;
    }

    if (!get_usage_stats) {
        return true;
    }

    ::android::base::unique_fd pagemap_fd(GetPagemapFd(pid_));
    if (pagemap_fd == -1) {
        return false;
    }

    for (auto& vma : maps_) {
        if (!ReadVmaStats(pagemap_fd.get(), vma, get_wss, use_pageidle)) {
            LOG(ERROR) << "Failed to read page map for vma " << vma.name << "[" << vma.start << "-"
                       << vma.end << "]";
            maps_.clear();
            return false;
        }
        // Fold this VMA's stats into the process-wide totals.
        add_mem_usage(&usage_, vma.usage);
    }

    return true;
}
// Computes per-page usage stats for a single caller-supplied VMA, without
// page-idle accounting.
bool ProcMemInfo::FillInVmaStats(Vma& vma) {
    ::android::base::unique_fd fd(GetPagemapFd(pid_));
    if (fd == -1) {
        return false;
    }

    const bool ok = ReadVmaStats(fd.get(), vma, get_wss_, /*use_pageidle=*/false);
    if (!ok) {
        LOG(ERROR) << "Failed to read page map for vma " << vma.name << "[" << vma.start << "-"
                   << vma.end << "]";
    }
    return ok;
}
// Core page walk for one VMA: reads each page's pagemap entry (batched
// through |pagemap_fd|), then its kpageflags/kpagecount, and accumulates
// vss/rss/pss/uss, swap, and clean/dirty breakdowns into vma.usage.
// When |get_wss| is set only referenced pages (or non-idle ones, if
// |use_pageidle|) are counted. Swap offsets are appended to swap_offsets_.
bool ProcMemInfo::ReadVmaStats(int pagemap_fd, Vma& vma, bool get_wss, bool use_pageidle) {
    PageAcct& pinfo = PageAcct::Instance();
    if (get_wss && use_pageidle && !pinfo.InitPageAcct(true)) {
        LOG(ERROR) << "Failed to init idle page accounting";
        return false;
    }

    uint64_t pagesz = getpagesize();
    size_t num_pages = (vma.end - vma.start) / pagesz;
    size_t first_page = vma.start / pagesz;

    std::vector<uint64_t> page_cache;
    size_t cur_page_cache_index = 0;
    size_t num_in_page_cache = 0;
    size_t num_leftover_pages = num_pages;
    for (size_t cur_page = first_page; cur_page < first_page + num_pages; ++cur_page) {
        if (!get_wss) {
            vma.usage.vss += pagesz;
        }

        // Cache page map data: refill the batch buffer (up to kMaxPages
        // entries per pread) whenever it has been consumed.
        if (cur_page_cache_index == num_in_page_cache) {
            static constexpr size_t kMaxPages = 2048;
            num_leftover_pages -= num_in_page_cache;
            if (num_leftover_pages > kMaxPages) {
                num_in_page_cache = kMaxPages;
            } else {
                num_in_page_cache = num_leftover_pages;
            }

            page_cache.resize(num_in_page_cache);
            size_t total_bytes = page_cache.size() * sizeof(uint64_t);
            ssize_t bytes = pread64(pagemap_fd, page_cache.data(), total_bytes,
                                    cur_page * sizeof(uint64_t));
            if (bytes != total_bytes) {
                if (bytes == -1) {
                    PLOG(ERROR) << "Failed to read page data at offset 0x" << std::hex
                                << cur_page * sizeof(uint64_t);
                } else {
                    LOG(ERROR) << "Failed to read page data at offset 0x" << std::hex
                               << cur_page * sizeof(uint64_t) << std::dec << " read bytes " << bytes
                               << " expected bytes " << total_bytes;
                }
                return false;
            }

            cur_page_cache_index = 0;
        }

        uint64_t page_info = page_cache[cur_page_cache_index++];
        // Pages neither present nor swapped contribute nothing beyond vss.
        if (!PAGE_PRESENT(page_info) && !PAGE_SWAPPED(page_info)) continue;

        if (PAGE_SWAPPED(page_info)) {
            vma.usage.swap += pagesz;
            swap_offsets_.emplace_back(PAGE_SWAP_OFFSET(page_info));
            continue;
        }

        uint64_t page_frame = PAGE_PFN(page_info);
        uint64_t cur_page_flags;
        if (!pinfo.PageFlags(page_frame, &cur_page_flags)) {
            LOG(ERROR) << "Failed to get page flags for " << page_frame << " in process " << pid_;
            swap_offsets_.clear();
            return false;
        }

        // skip unwanted pages from the count (pgflags_/pgflags_mask_ filter)
        if ((cur_page_flags & pgflags_mask_) != pgflags_) continue;

        uint64_t cur_page_counts;
        if (!pinfo.PageMapCount(page_frame, &cur_page_counts)) {
            LOG(ERROR) << "Failed to get page count for " << page_frame << " in process " << pid_;
            swap_offsets_.clear();
            return false;
        }

        // Page was unmapped between the presence check at the beginning of the loop and here.
        if (cur_page_counts == 0) {
            continue;
        }

        bool is_dirty = !!(cur_page_flags & (1 << KPF_DIRTY));
        bool is_private = (cur_page_counts == 1);
        // Working set
        if (get_wss) {
            bool is_referenced = use_pageidle ? (pinfo.IsPageIdle(page_frame) == 1)
                                              : !!(cur_page_flags & (1 << KPF_REFERENCED));
            if (!is_referenced) {
                continue;
            }
            // This effectively makes vss = rss when the working set is requested.
            // The libpagemap implementation returns vss > rss for
            // working set, which doesn't make sense.
            vma.usage.vss += pagesz;
        }

        vma.usage.rss += pagesz;
        vma.usage.uss += is_private ? pagesz : 0;
        // PSS: each mapper is charged an equal share of a shared page.
        vma.usage.pss += pagesz / cur_page_counts;
        if (is_private) {
            vma.usage.private_dirty += is_dirty ? pagesz : 0;
            vma.usage.private_clean += is_dirty ? 0 : pagesz;
        } else {
            vma.usage.shared_dirty += is_dirty ? pagesz : 0;
            vma.usage.shared_clean += is_dirty ? 0 : pagesz;
        }
    }

    return true;
}
// Public APIs
// Parses an smaps-formatted file at |path| and invokes |callback| once per
// VMA, after that VMA's stats fields have been accumulated into vma.usage.
// Returns false if the file can't be opened or a map line can't be parsed.
bool ForEachVmaFromFile(const std::string& path, const VmaCallback& callback) {
    auto fp = std::unique_ptr<FILE, decltype(&fclose)>{fopen(path.c_str(), "re"), fclose};
    if (fp == nullptr) {
        return false;
    }

    char* line = nullptr;
    bool parsing_vma = false;
    ssize_t line_len;
    size_t line_alloc = 0;
    Vma vma;
    while ((line_len = getline(&line, &line_alloc, fp.get())) > 0) {
        // Make sure the line buffer terminates like a C string for ReadMapFile
        line[line_len] = '\0';

        if (parsing_vma) {
            if (parse_smaps_field(line, &vma.usage)) {
                // This was a stats field
                continue;
            }

            // Done collecting stats, make the call back
            callback(vma);
            parsing_vma = false;
        }

        vma.clear();
        // If it has, we are looking for the vma stats
        // 00400000-00409000 r-xp 00000000 fc:00 426998  /usr/lib/gvfs/gvfsd-http
        if (!::android::procinfo::ReadMapFileContent(
                    line, [&](uint64_t start, uint64_t end, uint16_t flags, uint64_t pgoff, ino_t,
                              const char* name) {
                        vma.start = start;
                        vma.end = end;
                        vma.flags = flags;
                        vma.offset = pgoff;
                        vma.name = name;
                    })) {
            LOG(ERROR) << "Failed to parse " << path;
            // Fix: release the getline()-managed buffer on the error path
            // too; it previously leaked here.
            free(line);
            return false;
        }
        parsing_vma = true;
    }

    // free getline() managed buffer
    free(line);
    if (parsing_vma) {
        callback(vma);
    }
    return true;
}
enum smaps_rollup_support { UNTRIED, SUPPORTED, UNSUPPORTED };

// Probe result cache, shared across threads. Relaxed ordering is sufficient:
// the stored value is write-once and idempotent.
static std::atomic<smaps_rollup_support> g_rollup_support = UNTRIED;

// Returns true if the kernel exposes /proc/<pid>/smaps_rollup. The result of
// the first probe is cached process-wide.
bool IsSmapsRollupSupported(pid_t pid) {
    // Similar to OpenSmapsOrRollup checks from android_os_Debug.cpp, except
    // the method only checks if rollup is supported and returns the status
    // right away.
    enum smaps_rollup_support rollup_support = g_rollup_support.load(std::memory_order_relaxed);
    if (rollup_support != UNTRIED) {
        return rollup_support == SUPPORTED;
    }

    std::string rollup_file = ::android::base::StringPrintf("/proc/%d/smaps_rollup", pid);
    if (access(rollup_file.c_str(), F_OK | R_OK)) {
        // No check for errno = ENOENT necessary here. The caller MUST fallback to
        // using /proc/<pid>/smaps instead anyway.
        g_rollup_support.store(UNSUPPORTED, std::memory_order_relaxed);
        return false;
    }

    g_rollup_support.store(SUPPORTED, std::memory_order_relaxed);
    LOG(INFO) << "Using smaps_rollup for pss collection";
    return true;
}
// Parses a smaps or smaps_rollup file and accumulates Pss, Rss, SwapPss, and
// the private clean/dirty fields (and therefore USS) into |stats|. Values are
// in kB, as printed by the kernel.
bool SmapsOrRollupFromFile(const std::string& path, MemUsage* stats) {
    auto fp = std::unique_ptr<FILE, decltype(&fclose)>{fopen(path.c_str(), "re"), fclose};
    if (fp == nullptr) {
        return false;
    }

    char* line = nullptr;
    size_t line_alloc = 0;
    stats->clear();
    while (getline(&line, &line_alloc, fp.get()) > 0) {
        // Dispatch on the first character to avoid a strncmp per field name.
        switch (line[0]) {
            case 'P':
                if (strncmp(line, "Pss:", 4) == 0) {
                    char* c = line + 4;
                    stats->pss += strtoull(c, nullptr, 10);
                } else if (strncmp(line, "Private_Clean:", 14) == 0) {
                    char* c = line + 14;
                    uint64_t prcl = strtoull(c, nullptr, 10);
                    stats->private_clean += prcl;
                    // USS = private clean + private dirty.
                    stats->uss += prcl;
                } else if (strncmp(line, "Private_Dirty:", 14) == 0) {
                    char* c = line + 14;
                    uint64_t prdi = strtoull(c, nullptr, 10);
                    stats->private_dirty += prdi;
                    stats->uss += prdi;
                }
                break;
            case 'R':
                if (strncmp(line, "Rss:", 4) == 0) {
                    char* c = line + 4;
                    stats->rss += strtoull(c, nullptr, 10);
                }
                break;
            case 'S':
                if (strncmp(line, "SwapPss:", 8) == 0) {
                    char* c = line + 8;
                    stats->swap_pss += strtoull(c, nullptr, 10);
                }
                break;
        }
    }

    // free getline() managed buffer
    free(line);
    return true;
}
// Sums every "Pss:" field (in kB) found in a smaps or smaps_rollup file at
// |path| into |*pss|. Returns false only if the file can't be opened.
bool SmapsOrRollupPssFromFile(const std::string& path, uint64_t* pss) {
    std::unique_ptr<FILE, decltype(&fclose)> fp{fopen(path.c_str(), "re"), fclose};
    if (!fp) {
        return false;
    }

    *pss = 0;
    char* buf = nullptr;
    size_t cap = 0;
    while (getline(&buf, &cap, fp.get()) > 0) {
        uint64_t kb;
        if (sscanf(buf, "Pss: %" SCNu64 " kB", &kb) == 1) {
            *pss += kb;
        }
    }
    // release the getline()-managed buffer
    free(buf);
    return true;
}
} // namespace meminfo
} // namespace android

View File

@ -1,275 +0,0 @@
/*
* Copyright (C) 2018 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <ctype.h>
#include <errno.h>
#include <fcntl.h>
#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <algorithm>
#include <cctype>
#include <cstdio>
#include <fstream>
#include <iterator>
#include <sstream>
#include <string>
#include <utility>
#include <vector>
#include <android-base/file.h>
#include <android-base/logging.h>
#include <android-base/parseint.h>
#include <android-base/stringprintf.h>
#include <android-base/strings.h>
#include <android-base/unique_fd.h>
#include "meminfo_private.h"
namespace android {
namespace meminfo {
// Tags parsed from /proc/meminfo by default; the positional order here is the
// order used by the vector-returning ReadMemInfo() overloads.
const std::vector<std::string> SysMemInfo::kDefaultSysMemInfoTags = {
        SysMemInfo::kMemTotal,      SysMemInfo::kMemFree,        SysMemInfo::kMemBuffers,
        SysMemInfo::kMemCached,     SysMemInfo::kMemShmem,       SysMemInfo::kMemSlab,
        SysMemInfo::kMemSReclaim,   SysMemInfo::kMemSUnreclaim,  SysMemInfo::kMemSwapTotal,
        SysMemInfo::kMemSwapFree,   SysMemInfo::kMemMapped,      SysMemInfo::kMemVmallocUsed,
        SysMemInfo::kMemPageTables, SysMemInfo::kMemKernelStack,
};
// Parses the default meminfo tag set from |path| and caches each value in the
// mem_in_kb_ map, keyed by tag.
bool SysMemInfo::ReadMemInfo(const std::string& path) {
    auto store = [&](const std::string& tag, uint64_t val) { mem_in_kb_[tag] = val; };
    return ReadMemInfo(SysMemInfo::kDefaultSysMemInfoTags, path, store);
}
// Convenience overload: reads the default tag set, writing values
// positionally into |out| (same order as kDefaultSysMemInfoTags).
bool SysMemInfo::ReadMemInfo(std::vector<uint64_t>* out, const std::string& path) {
    return ReadMemInfo(SysMemInfo::kDefaultSysMemInfoTags, out, path);
}
// Reads the given |tags| from |path| and stores each parsed value in |out| at
// the index the tag occupies in |tags|. |out| is resized to tags.size();
// tags missing from the file leave a 0 in their slot.
bool SysMemInfo::ReadMemInfo(const std::vector<std::string>& tags, std::vector<uint64_t>* out,
                             const std::string& path) {
    out->clear();
    out->resize(tags.size());

    return ReadMemInfo(tags, path, [&]([[maybe_unused]] const std::string& tag, uint64_t val) {
        // Map the parsed tag back to its index in the caller's tag list.
        auto it = std::find(tags.begin(), tags.end(), tag);
        if (it == tags.end()) {
            LOG(ERROR) << "Tried to store invalid tag: " << tag;
            return;
        }
        auto index = std::distance(tags.begin(), it);
        // store the values in the same order as the tags
        out->at(index) = val;
    });
}
// Thin member wrapper over the free function of the same name, which reads
// /proc/vmallocinfo by default.
uint64_t SysMemInfo::ReadVmallocInfo() {
    return ::android::meminfo::ReadVmallocInfo();
}
// TODO: Delete this disabled C++ implementation unless it can be made to match the
// performance of the C-style implementation below; currently it adds about 50% extra
// overhead on hikey.
#if 0
bool SysMemInfo::ReadMemInfo(const std::vector<std::string>& tags, const std::string& path) {
std::string buffer;
if (!::android::base::ReadFileToString(path, &buffer)) {
PLOG(ERROR) << "Failed to read : " << path;
return false;
}
uint32_t total_found = 0;
for (auto s = buffer.begin(); s < buffer.end() && total_found < tags.size();) {
for (auto& tag : tags) {
if (tag == std::string(s, s + tag.size())) {
s += tag.size();
while (isspace(*s)) s++;
auto num_start = s;
while (std::isdigit(*s)) s++;
std::string number(num_start, num_start + (s - num_start));
if (!::android::base::ParseUint(number, &mem_in_kb_[tag])) {
LOG(ERROR) << "Failed to parse uint";
return false;
}
total_found++;
break;
}
}
while (s < buffer.end() && *s != '\n') s++;
if (s < buffer.end()) s++;
}
return true;
}
#else
// Scans |path| (meminfo-style "Tag:   value kB" lines) and invokes
// |store_val| once for each requested tag found. The pseudo-tag "Zram:" is
// synthesized from sysfs rather than read from the file.
// NOTE(review): the file is consumed with a single 4 KiB read — this assumes
// the whole input (e.g. /proc/meminfo) fits in one buffer; confirm before
// pointing this at larger files.
bool SysMemInfo::ReadMemInfo(const std::vector<std::string>& tags, const std::string& path,
                             std::function<void(const std::string&, uint64_t)> store_val) {
    char buffer[4096];
    int fd = open(path.c_str(), O_RDONLY | O_CLOEXEC);
    if (fd < 0) {
        PLOG(ERROR) << "Failed to open file :" << path;
        return false;
    }

    const int len = read(fd, buffer, sizeof(buffer) - 1);
    close(fd);
    if (len < 0) {
        return false;
    }

    buffer[len] = '\0';
    char* p = buffer;
    uint32_t found = 0;
    uint32_t lineno = 0;
    bool zram_tag_found = false;
    while (*p && found < tags.size()) {
        for (auto& tag : tags) {
            // Special case for "Zram:" tag that android_os_Debug and friends look
            // up along with the rest of the numbers from /proc/meminfo
            if (!zram_tag_found && tag == "Zram:") {
                store_val(tag, mem_zram_kb());
                zram_tag_found = true;
                found++;
                continue;
            }

            if (strncmp(p, tag.c_str(), tag.size()) == 0) {
                p += tag.size();
                while (*p == ' ') p++;
                char* endptr = nullptr;
                uint64_t val = strtoull(p, &endptr, 10);
                if (p == endptr) {
                    PLOG(ERROR) << "Failed to parse line:" << lineno + 1 << " in file: " << path;
                    return false;
                }
                store_val(tag, val);
                p = endptr;
                found++;
                break;
            }
        }
        // Advance to the start of the next line.
        while (*p && *p != '\n') {
            p++;
        }
        if (*p) p++;
        lineno++;
    }

    return true;
}
#endif
// Returns the total compressed memory used by zram, in kB. If |zram_dev| is
// non-empty only that device's sysfs directory is read; otherwise zram0..255
// are scanned until the first missing device. Returns 0 on any read error.
uint64_t SysMemInfo::mem_zram_kb(const std::string& zram_dev) {
    uint64_t mem_zram_total = 0;
    if (!zram_dev.empty()) {
        if (!MemZramDevice(zram_dev, &mem_zram_total)) {
            return 0;
        }
        return mem_zram_total / 1024;
    }

    constexpr uint32_t kMaxZramDevices = 256;
    for (uint32_t i = 0; i < kMaxZramDevices; i++) {
        // Fix: renamed from 'zram_dev', which shadowed the parameter.
        std::string zram_dev_path = ::android::base::StringPrintf("/sys/block/zram%u/", i);
        if (access(zram_dev_path.c_str(), F_OK)) {
            // We assume zram devices appear in range 0-255 and appear always in sequence
            // under /sys/block. So, stop looking for them once we find one is missing.
            break;
        }

        uint64_t mem_zram_dev;
        if (!MemZramDevice(zram_dev_path, &mem_zram_dev)) {
            return 0;
        }

        mem_zram_total += mem_zram_dev;
    }

    return mem_zram_total / 1024;
}
// Reads one zram device's compressed-memory footprint (bytes) from sysfs into
// |mem_zram_dev|. Prefers mm_stat (third field is mem_used_total); falls back
// to the legacy mem_used_total file on older kernels.
bool SysMemInfo::MemZramDevice(const std::string& zram_dev, uint64_t* mem_zram_dev) {
    std::string mmstat = ::android::base::StringPrintf("%s/%s", zram_dev.c_str(), "mm_stat");
    auto mmstat_fp = std::unique_ptr<FILE, decltype(&fclose)>{fopen(mmstat.c_str(), "re"), fclose};
    if (mmstat_fp != nullptr) {
        // only if we do have mmstat, use it. Otherwise, fall through to trying out the old
        // 'mem_used_total'
        if (fscanf(mmstat_fp.get(), "%*" SCNu64 " %*" SCNu64 " %" SCNu64, mem_zram_dev) != 1) {
            PLOG(ERROR) << "Malformed mm_stat file in: " << zram_dev;
            return false;
        }
        return true;
    }

    std::string content;
    if (::android::base::ReadFileToString(zram_dev + "mem_used_total", &content)) {
        *mem_zram_dev = strtoull(content.c_str(), NULL, 10);
        // strtoull returns ULLONG_MAX (with errno=ERANGE) on overflow.
        if (*mem_zram_dev == ULLONG_MAX) {
            PLOG(ERROR) << "Malformed mem_used_total file for zram dev: " << zram_dev
                        << " content: " << content;
            return false;
        }

        return true;
    }

    LOG(ERROR) << "Can't find memory status under: " << zram_dev;
    return false;
}
// Public methods
// Parses a /proc/vmallocinfo-formatted file and returns the total number of
// bytes backed by vmalloc'ed pages (sum of "pages=N" entries * page size).
// Returns 0 if the file cannot be opened.
uint64_t ReadVmallocInfo(const std::string& path) {
    uint64_t total_bytes = 0;
    std::unique_ptr<FILE, decltype(&fclose)> fp{fopen(path.c_str(), "re"), fclose};
    if (!fp) {
        return total_bytes;
    }

    char* buf = nullptr;
    size_t cap = 0;
    while (getline(&buf, &cap, fp.get()) > 0) {
        // Lines look like:
        //
        // 0x0000000000000000-0x0000000000000000   12288 drm_property_create_blob+0x44/0xec pages=2 vmalloc
        // 0x0000000000000000-0x0000000000000000    8192 wlan_logging_sock_init_svc+0xf8/0x4f0 [wlan] pages=1 vmalloc
        //
        // A caller inside a kernel module gets an extra "[module_name]" token
        // before "pages=", so locate the "pages=" token directly instead of
        // using positional sscanf on the whole line.
        char* pages_token = strstr(buf, "pages=");
        if (pages_token == nullptr) {
            continue;
        }

        uint64_t nr_pages;
        if (sscanf(pages_token, "pages=%" SCNu64 "", &nr_pages) == 1) {
            total_bytes += (nr_pages * getpagesize());
        }
    }

    free(buf);
    return total_bytes;
}
} // namespace meminfo
} // namespace android

View File

@ -1 +0,0 @@
145674240 26801454 31236096 0 45772800 3042 1887 517

View File

@ -1,86 +0,0 @@
#! /system/bin/sh

# Compare the output of the legacy 'showmap' and the new 'showmap2' for a
# canned smaps file, one flag combination at a time.
TESTDATA_PATH=/data/nativetest64/libmeminfo_test/testdata1
SMAPS=$TESTDATA_PATH/smaps
OUT1=$TMPDIR/1.txt
OUT2=$TMPDIR/2.txt

# run_test [flags...]: run both tools with the given flags against $SMAPS and
# report pass/fail depending on whether their outputs match.
run_test() {
    if [ $# -eq 0 ]; then
        label="showmap -f <smaps>"
    else
        label="showmap $* -f <smaps>"
    fi
    showmap "$@" -f $SMAPS > $OUT1
    showmap2 "$@" -f $SMAPS > $OUT2
    if diff $OUT1 $OUT2 > /dev/null; then
        echo "pass: $label"
    else
        echo "fail: $label"
    fi
}

run_test
run_test -q
run_test -v
run_test -a

# Note that all tests from here down that have the option
# '-a' added to the command are expected to fail as
# 'showmap2' actually fixes the 64-bit address truncating
# that was already happening with showmap
run_test -a -t
run_test -a -t -v

# Note: This test again is expected to fail as the new
# showmap fixes an issue with -t where the tool was only
# showing maps with private dirty pages. The '-t' option was however
# supposed to show all maps that have 'private' pages, clean or dirty.
run_test -t

File diff suppressed because it is too large Load Diff

View File

@ -1,122 +0,0 @@
54c00000-56c00000 r-xp 00000000 00:00 0 [anon:dalvik-zygote-jit-code-cache]
Name: [anon:dalvik-zygote-jit-code-cache]
Size: 32768 kB
KernelPageSize: 4 kB
MMUPageSize: 4 kB
Rss: 2048 kB
Pss: 113 kB
Shared_Clean: 0 kB
Shared_Dirty: 2048 kB
Private_Clean: 0 kB
Private_Dirty: 0 kB
Referenced: 2048 kB
Anonymous: 2048 kB
AnonHugePages: 2048 kB
ShmemPmdMapped: 0 kB
Shared_Hugetlb: 0 kB
Private_Hugetlb: 0 kB
Swap: 0 kB
SwapPss: 0 kB
Locked: 113 kB
VmFlags: rd ex mr mw me ac
701ea000-70cdb000 rw-p 00000000 fe:00 3165 /system/framework/x86_64/boot-framework.art
Size: 11204 kB
KernelPageSize: 4 kB
MMUPageSize: 4 kB
Rss: 11188 kB
Pss: 2200 kB
Shared_Clean: 80 kB
Shared_Dirty: 9448 kB
Private_Clean: 0 kB
Private_Dirty: 1660 kB
Referenced: 9892 kB
Anonymous: 11108 kB
AnonHugePages: 0 kB
ShmemPmdMapped: 0 kB
Shared_Hugetlb: 0 kB
Private_Hugetlb: 0 kB
Swap: 0 kB
SwapPss: 0 kB
Locked: 2200 kB
VmFlags: rd wr mr mw me ac
70074dd8d000-70074ee0d000 rw-p 00000000 00:00 0 [anon:libc_malloc]
Name: [anon:libc_malloc]
Size: 16896 kB
KernelPageSize: 4 kB
MMUPageSize: 4 kB
Rss: 15272 kB
Pss: 15272 kB
Shared_Clean: 0 kB
Shared_Dirty: 0 kB
Private_Clean: 0 kB
Private_Dirty: 15272 kB
Referenced: 11156 kB
Anonymous: 15272 kB
AnonHugePages: 6144 kB
ShmemPmdMapped: 0 kB
Shared_Hugetlb: 0 kB
Private_Hugetlb: 0 kB
Swap: 0 kB
SwapPss: 0 kB
Locked: 15272 kB
VmFlags: rd wr mr mw me ac
700755a2d000-700755a6e000 r-xp 00016000 fe:00 1947 /system/priv-app/SettingsProvider/oat/x86_64/SettingsProvider.odex
Size: 260 kB
KernelPageSize: 4 kB
MMUPageSize: 4 kB
Rss: 260 kB
Pss: 260 kB
Shared_Clean: 0 kB
Shared_Dirty: 0 kB
Private_Clean: 260 kB
Private_Dirty: 0 kB
Referenced: 260 kB
Anonymous: 0 kB
AnonHugePages: 0 kB
ShmemPmdMapped: 0 kB
Shared_Hugetlb: 0 kB
Private_Hugetlb: 0 kB
Swap: 0 kB
SwapPss: 0 kB
Locked: 260 kB
VmFlags: rd ex mr mw me
7007f85b0000-7007f8b9b000 r-xp 001ee000 fe:00 1537 /system/lib64/libhwui.so
Size: 6060 kB
KernelPageSize: 4 kB
MMUPageSize: 4 kB
Rss: 4132 kB
Pss: 1274 kB
Shared_Clean: 4132 kB
Shared_Dirty: 0 kB
Private_Clean: 0 kB
Private_Dirty: 0 kB
Referenced: 4132 kB
Anonymous: 0 kB
AnonHugePages: 0 kB
ShmemPmdMapped: 0 kB
Shared_Hugetlb: 0 kB
Private_Hugetlb: 0 kB
Swap: 0 kB
SwapPss: 0 kB
Locked: 1274 kB
VmFlags: rd ex mr mw me
ffffffffff600000-ffffffffff601000 r-xp 00000000 00:00 0 [vsyscall]
Size: 4 kB
KernelPageSize: 4 kB
MMUPageSize: 4 kB
Rss: 0 kB
Pss: 0 kB
Shared_Clean: 0 kB
Shared_Dirty: 0 kB
Private_Clean: 0 kB
Private_Dirty: 0 kB
Referenced: 0 kB
Anonymous: 0 kB
AnonHugePages: 0 kB
ShmemPmdMapped: 0 kB
Shared_Hugetlb: 0 kB
Private_Hugetlb: 0 kB
Swap: 0 kB
SwapPss: 0 kB
Locked: 0 kB
VmFlags: rd ex

File diff suppressed because it is too large Load Diff

View File

@ -1 +0,0 @@
31236096

View File

@ -1,90 +0,0 @@
// Copyright (C) 2018 The Android Open Source Project
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Ranks libraries/maps by memory attributed to them across all processes.
cc_binary {
    name: "librank",
    cflags: [
        "-Wall",
        "-Werror",
    ],
    srcs: ["librank.cpp"],
    shared_libs: [
        "libbase",
        "libmeminfo",
    ],
}

// Dumps memory usage of a single process.
cc_binary {
    name: "procmem",
    cflags: [
        "-Wall",
        "-Werror",
    ],
    srcs: ["procmem.cpp"],
    shared_libs: [
        "libbase",
        "libmeminfo",
    ],
}

// Ranks all processes by memory usage (VSS/RSS/PSS/USS).
cc_binary {
    name: "procrank",
    cflags: [
        "-Wall",
        "-Werror",
    ],
    srcs: ["procrank.cpp"],
    shared_libs: [
        "libbase",
        "libmeminfo",
    ],
}

// Prints per-mapping memory stats from an smaps file; also builds for the
// host (except darwin) so it can be run against captured smaps files.
cc_binary {
    name: "showmap",
    host_supported: true,
    cflags: [
        "-Wall",
        "-Werror",
    ],
    srcs: ["showmap.cpp"],
    shared_libs: [
        "libbase",
        "libmeminfo",
    ],
    target: {
        darwin: {
            enabled: false,
        },
    },
}

// Periodically samples a process's working set.
cc_binary {
    name: "wsstop",
    cflags: [
        "-Wall",
        "-Werror",
    ],
    srcs: ["wsstop.cpp"],
    shared_libs: [
        "libbase",
        "liblog",
        "libmeminfo",
    ],
}

View File

@ -1,351 +0,0 @@
/*
* Copyright (C) 2018 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <dirent.h>
#include <errno.h>
#include <error.h>
#include <inttypes.h>
#include <linux/kernel-page-flags.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <unistd.h>
#include <algorithm>
#include <map>
#include <memory>
#include <vector>
#include <android-base/file.h>
#include <android-base/parseint.h>
#include <android-base/stringprintf.h>
#include <android-base/strings.h>
#include <meminfo/procmeminfo.h>
using ::android::meminfo::MemUsage;
using ::android::meminfo::ProcMemInfo;
using ::android::meminfo::Vma;
// Prints the command-line help for librank to stderr and exits with
// |exit_status|. Never returns.
[[noreturn]] static void usage(int exit_status) {
    fprintf(stderr,
            "Usage: %s [ -P | -L ] [ -v | -r | -p | -u | -s | -h ]\n"
            "\n"
            "Sort options:\n"
            "    -v  Sort processes by VSS.\n"
            "    -r  Sort processes by RSS.\n"
            "    -p  Sort processes by PSS.\n"
            "    -u  Sort processes by USS.\n"
            "    -s  Sort processes by swap.\n"
            "        (Default sort order is PSS.)\n"
            "    -a  Show all mappings, including stack, heap and anon.\n"
            "    -P /path  Limit libraries displayed to those in path.\n"
            "    -R  Reverse sort order (default is descending).\n"
            "    -m [r][w][x] Only list pages that exactly match permissions\n"
            "    -c  Only show cached (storage backed) pages\n"
            "    -C  Only show non-cached (ram/swap backed) pages\n"
            "    -k  Only show pages collapsed by KSM\n"
            "    -h  Display this help screen.\n",
            getprogname());
    exit(exit_status);
}
// Accumulates every counter of |from| into |to|, field by field.
static void add_mem_usage(MemUsage* to, const MemUsage& from) {
    to->vss += from.vss;
    to->rss += from.rss;
    to->pss += from.pss;
    to->uss += from.uss;
    to->swap += from.swap;
    to->private_clean += from.private_clean;
    to->private_dirty += from.private_dirty;
    to->shared_clean += from.shared_clean;
    to->shared_dirty += from.shared_dirty;
}
// Per-process record: pid, trimmed cmdline and the memory usage attributed to
// it so far. valid() is false when /proc/<pid>/cmdline could not be read.
struct ProcessRecord {
  public:
    ProcessRecord(pid_t pid) : pid_(-1), cmdline_("") {
        std::string fname = ::android::base::StringPrintf("/proc/%d/cmdline", pid);
        std::string cmdline;
        if (!::android::base::ReadFileToString(fname, &cmdline)) {
            fprintf(stderr, "Failed to read cmdline from: %s\n", fname.c_str());
            return;
        }
        // We deliberately don't read the proc/<pid>cmdline file directly into 'cmdline_'
        // because of some processes showing up cmdlines that end with "0x00 0x0A 0x00"
        // e.g. xtra-daemon, lowi-server
        // The .c_str() assignment below then takes care of trimming the cmdline at the first
        // 0x00. This is how original procrank worked (luckily)
        cmdline_ = cmdline.c_str();
        pid_ = pid;
        usage_.clear();
    }

    ~ProcessRecord() = default;

    // True only when the constructor successfully read the cmdline.
    bool valid() const { return pid_ != -1; }

    // Getters
    pid_t pid() const { return pid_; }
    const std::string& cmdline() const { return cmdline_; }
    const MemUsage& usage() const { return usage_; }

    // Add to the usage
    void AddUsage(const MemUsage& mem_usage) { add_mem_usage(&usage_, mem_usage); }

  private:
    pid_t pid_;          // -1 marks an invalid record
    std::string cmdline_;
    MemUsage usage_;
};
// Per-library (per map name) record: the aggregate usage attributed to the
// library plus a breakdown by the processes that map it.
struct LibRecord {
  public:
    LibRecord(const std::string& name) : name_(name) {}
    ~LibRecord() = default;

    const std::string& name() const { return name_; }
    const MemUsage& usage() const { return usage_; }
    const std::map<pid_t, ProcessRecord>& processes() const { return procs_; }
    uint64_t pss() const { return usage_.pss; }

    // Records |mem_usage| against |proc| (inserting the process record on
    // first sight) and folds it into this library's totals.
    void AddUsage(const ProcessRecord& proc, const MemUsage& mem_usage) {
        auto [it, inserted] = procs_.insert(std::pair<pid_t, ProcessRecord>(proc.pid(), proc));
        it->second.AddUsage(mem_usage);
        add_mem_usage(&usage_, mem_usage);
    }

  private:
    std::string name_;
    MemUsage usage_;
    std::map<pid_t, ProcessRecord> procs_;
};
// List of every library / map
static std::map<std::string, LibRecord> g_libs;

// List of library/map names that we don't want to show by default
static const std::vector<std::string> g_blacklisted_libs = {"[heap]", "[stack]"};

// Global flags affected by command line.
// Page-flag filter: a page is counted only when (flags & mask) == value.
static uint64_t g_pgflags = 0;
static uint64_t g_pgflags_mask = 0;
static uint16_t g_mapflags_mask = 0;      // -m: exact PROT_* permission match
static bool g_all_libs = false;           // -a: also show [heap]/[stack]
static bool g_has_swap = false;           // set when any map has swapped pages
static bool g_reverse_sort = false;       // -R: ascending instead of descending
static std::string g_prefix_filter = "";  // -P: only maps with this path prefix
// Invokes |for_each_pid| for every numeric entry under /proc. Returns false
// if /proc can't be opened or the callback returns false (which stops the
// walk early).
static bool read_all_pids(std::function<bool(pid_t pid)> for_each_pid) {
    std::unique_ptr<DIR, int (*)(DIR*)> procdir(opendir("/proc"), closedir);
    if (!procdir) return false;

    pid_t pid;
    for (struct dirent* entry = readdir(procdir.get()); entry != nullptr;
         entry = readdir(procdir.get())) {
        // Non-numeric entries (self, sys, ...) are skipped.
        if (!::android::base::ParseInt(entry->d_name, &pid)) continue;
        if (!for_each_pid(pid)) return false;
    }
    return true;
}
// Attributes every mapping of process |pid| to its LibRecord in g_libs,
// applying the prefix (-P), permission (-m) and blacklist (-a) filters.
// Returns false only when the process record can't be created.
static bool scan_libs_per_process(pid_t pid) {
    ProcMemInfo pmem(pid, false, g_pgflags, g_pgflags_mask);
    const std::vector<Vma> maps = pmem.Maps();
    if (maps.size() == 0) {
        // nothing to do here, continue
        return true;
    }

    ProcessRecord proc(pid);
    if (!proc.valid()) {
        fprintf(stderr, "Failed to create process record for process: %d\n", pid);
        return false;
    }

    for (auto& map : maps) {
        // skip library / map if prefix for the path doesn't match
        if (!g_prefix_filter.empty() && !::android::base::StartsWith(map.name, g_prefix_filter)) {
            continue;
        }
        // Skip maps based on map permissions
        if (g_mapflags_mask &&
            ((map.flags & (PROT_READ | PROT_WRITE | PROT_EXEC)) != g_mapflags_mask)) {
            continue;
        }

        // skip blacklisted library / map names
        if (!g_all_libs && (std::find(g_blacklisted_libs.begin(), g_blacklisted_libs.end(),
                                      map.name) != g_blacklisted_libs.end())) {
            continue;
        }

        auto [it, inserted] =
                g_libs.insert(std::pair<std::string, LibRecord>(map.name, LibRecord(map.name)));
        it->second.AddUsage(proc, map.usage);

        // Remember whether a Swap column is needed in the final report.
        if (!g_has_swap && map.usage.swap) {
            g_has_swap = true;
        }
    }

    return true;
}
// Translates an "rwx" permission string (any subset, any order) into a
// PROT_READ|PROT_WRITE|PROT_EXEC bitmask. Exits the program on any other
// character.
static uint16_t parse_mapflags(const char* mapflags) {
    uint16_t flags = 0;
    for (const char* c = mapflags; *c != '\0'; ++c) {
        if (*c == 'r') {
            flags |= PROT_READ;
        } else if (*c == 'w') {
            flags |= PROT_WRITE;
        } else if (*c == 'x') {
            flags |= PROT_EXEC;
        } else {
            error(EXIT_FAILURE, 0, "Invalid permissions string: %s, %s", mapflags, c);
        }
    }
    return flags;
}
// librank entry point: scans every process, attributes each mapping's memory
// to the library (or map name) backing it, then prints libraries ranked by
// total PSS with a per-process breakdown underneath each one.
int main(int argc, char* argv[]) {
    int opt;
    // Comparators used to order the processes listed under each library.
    // Each one honors g_reverse_sort (ascending when set, else descending).
    auto pss_sort = [](const ProcessRecord& a, const ProcessRecord& b) {
        return g_reverse_sort ? a.usage().pss < b.usage().pss : a.usage().pss > b.usage().pss;
    };
    auto uss_sort = [](const ProcessRecord& a, const ProcessRecord& b) {
        return g_reverse_sort ? a.usage().uss < b.usage().uss : a.usage().uss > b.usage().uss;
    };
    auto vss_sort = [](const ProcessRecord& a, const ProcessRecord& b) {
        return g_reverse_sort ? a.usage().vss < b.usage().vss : a.usage().vss > b.usage().vss;
    };
    auto rss_sort = [](const ProcessRecord& a, const ProcessRecord& b) {
        return g_reverse_sort ? a.usage().rss < b.usage().rss : a.usage().rss > b.usage().rss;
    };
    auto swap_sort = [](const ProcessRecord& a, const ProcessRecord& b) {
        return g_reverse_sort ? a.usage().swap < b.usage().swap : a.usage().swap > b.usage().swap;
    };
    // Default per-process ordering is by PSS.
    std::function<bool(const ProcessRecord&, const ProcessRecord&)> sort_func = pss_sort;
    while ((opt = getopt(argc, argv, "acChkm:pP:uvrsR")) != -1) {
        switch (opt) {
            case 'a':
                g_all_libs = true;
                break;
            case 'c':
                // Only count pages that are NOT swap-backed (storage backed).
                g_pgflags = 0;
                g_pgflags_mask = (1 << KPF_SWAPBACKED);
                break;
            case 'C':
                // Only count ram/swap-backed pages.
                g_pgflags = g_pgflags_mask = (1 << KPF_SWAPBACKED);
                break;
            case 'h':
                usage(EXIT_SUCCESS);
            case 'k':
                // Only count pages collapsed by KSM.
                g_pgflags = g_pgflags_mask = (1 << KPF_KSM);
                break;
            case 'm':
                g_mapflags_mask = parse_mapflags(optarg);
                break;
            case 'p':
                sort_func = pss_sort;
                break;
            case 'P':
                g_prefix_filter = optarg;
                break;
            case 'u':
                sort_func = uss_sort;
                break;
            case 'v':
                sort_func = vss_sort;
                break;
            case 'r':
                sort_func = rss_sort;
                break;
            case 's':
                sort_func = swap_sort;
                break;
            case 'R':
                g_reverse_sort = true;
                break;
            default:
                usage(EXIT_FAILURE);
        }
    }
    // Single pass over /proc fills g_libs (and g_has_swap) as a side effect.
    if (!read_all_pids(scan_libs_per_process)) {
        error(EXIT_FAILURE, 0, "Failed to read all pids from the system");
    }
    printf(" %6s %7s %6s %6s %6s ", "RSStot", "VSS", "RSS", "PSS", "USS");
    if (g_has_swap) {
        printf(" %6s ", "Swap");
    }
    printf("Name/PID\n");
    // Flatten the library map for sorting.
    std::vector<LibRecord> v_libs;
    v_libs.reserve(g_libs.size());
    std::transform(g_libs.begin(), g_libs.end(), std::back_inserter(v_libs),
                   [] (std::pair<std::string, LibRecord> const& pair) { return pair.second; });
    // sort the libraries by their pss
    std::sort(v_libs.begin(), v_libs.end(),
              [](const LibRecord& l1, const LibRecord& l2) { return l1.pss() > l2.pss(); });
    for (auto& lib : v_libs) {
        // Library summary row: only aggregate PSS is meaningful here.
        printf("%6" PRIu64 "K %7s %6s %6s %6s ", lib.pss() / 1024, "", "", "", "");
        if (g_has_swap) {
            printf(" %6s ", "");
        }
        printf("%s\n", lib.name().c_str());
        // sort all mappings first
        std::vector<ProcessRecord> procs;
        procs.reserve(lib.processes().size());
        std::transform(lib.processes().begin(), lib.processes().end(), std::back_inserter(procs),
                       [] (std::pair<pid_t, ProcessRecord> const& pair) { return pair.second; });
        std::sort(procs.begin(), procs.end(), sort_func);
        // One detail row per process that maps this library.
        for (auto& p : procs) {
            const MemUsage& usage = p.usage();
            printf(" %6s %7" PRIu64 "K %6" PRIu64 "K %6" PRIu64 "K %6" PRIu64 "K ", "",
                   usage.vss / 1024, usage.rss / 1024, usage.pss / 1024, usage.uss / 1024);
            if (g_has_swap) {
                printf("%6" PRIu64 "K ", usage.swap / 1024);
            }
            printf(" %s [%d]\n", p.cmdline().c_str(), p.pid());
        }
    }
    return 0;
}

View File

@ -1,201 +0,0 @@
/*
* Copyright (C) 2018 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <errno.h>
#include <inttypes.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <unistd.h>
#include <iostream>
#include <sstream>
#include <string>
#include <vector>
#include <android-base/stringprintf.h>
#include <meminfo/procmeminfo.h>
using Vma = ::android::meminfo::Vma;
using ProcMemInfo = ::android::meminfo::ProcMemInfo;
using MemUsage = ::android::meminfo::MemUsage;
// Global flags to control procmem output
// Set to use page idle bits for working set detection.
// NOTE: currently unused — see the TODO on the '-i' case in main(); the
// library defaults to /proc/<pid>/clear_refs.
bool use_pageidle = false;
// hides map entries with zero rss (-h)
bool hide_zeroes = false;
// Reset working set and exit (-W)
bool reset_wss = false;
// Show working set, mutually exclusive with reset_wss (-w)
bool show_wss = false;
// Prints procmem's usage text to stderr and terminates with |exit_status|.
// Fix: the synopsis line previously omitted -u even though it is documented
// below and accepted by the getopt string ("himpuWw").
[[noreturn]] static void usage(int exit_status) {
    fprintf(stderr,
            "Usage: %s [-i] [ -w | -W ] [ -p | -u | -m ] [ -h ] pid\n"
            " -i Uses idle page tracking for working set statistics.\n"
            " -w Displays statistics for the working set only.\n"
            " -W Resets the working set of the process.\n"
            " -p Sort by PSS.\n"
            " -u Sort by USS.\n"
            " -m Sort by mapping order (as read from /proc).\n"
            " -h Hide maps with no RSS.\n",
            getprogname());
    exit(exit_status);
}
// Appends a row of column-width dashes matching the header layout produced
// by print_header() (8 numeric columns in working-set mode, 9 otherwise).
static void print_separator(std::stringstream& ss) {
    static const char* kDash = "-------";
    if (show_wss) {
        ss << ::android::base::StringPrintf("%7s %7s %7s %7s %7s %7s %7s %7s %s\n", kDash, kDash,
                                            kDash, kDash, kDash, kDash, kDash, kDash, "");
        return;
    }
    ss << ::android::base::StringPrintf("%7s %7s %7s %7s %7s %7s %7s %7s %7s %s\n", kDash, kDash,
                                        kDash, kDash, kDash, kDash, kDash, kDash, kDash, "");
}
// Appends the column header row (followed by a separator). Working-set mode
// drops the Vss column and prefixes the others with 'W'.
static void print_header(std::stringstream& ss) {
    const char* wss_fmt = "%7s %7s %7s %7s %7s %7s %7s %7s %s\n";
    const char* all_fmt = "%7s %7s %7s %7s %7s %7s %7s %7s %7s %s\n";
    if (show_wss) {
        ss << ::android::base::StringPrintf(wss_fmt, "WRss", "WPss", "WUss", "WShCl", "WShDi",
                                            "WPrCl", "WPrDi", "Flags", "Name");
    } else {
        ss << ::android::base::StringPrintf(all_fmt, "Vss", "Rss", "Pss", "Uss", "ShCl", "ShDi",
                                            "PrCl", "PrDi", "Flags", "Name");
    }
    print_separator(ss);
}
// Appends one row of memory statistics, converted to kilobytes. Vss is
// omitted in working-set mode to line up with print_header().
static void print_stats(std::stringstream& ss, const MemUsage& stats) {
    auto kb = [](uint64_t bytes) { return bytes / 1024; };
    if (!show_wss) {
        ss << ::android::base::StringPrintf("%6" PRIu64 "K ", kb(stats.vss));
    }
    ss << ::android::base::StringPrintf("%6" PRIu64 "K %6" PRIu64 "K %6" PRIu64 "K %6" PRIu64
                                        "K %6" PRIu64 "K %6" PRIu64 "K %6" PRIu64 "K ",
                                        kb(stats.rss), kb(stats.pss), kb(stats.uss),
                                        kb(stats.shared_clean), kb(stats.shared_dirty),
                                        kb(stats.private_clean), kb(stats.private_dirty));
}
static int show(const MemUsage& proc_stats, const std::vector<Vma>& maps) {
std::stringstream ss;
print_header(ss);
for (auto& vma : maps) {
const MemUsage& vma_stats = vma.usage;
if (hide_zeroes && vma_stats.rss == 0) {
continue;
}
print_stats(ss, vma_stats);
// TODO: b/141711064 fix libprocinfo to record (p)rivate or (s)hared flag
// for now always report as private
std::string flags_str("---p");
if (vma.flags & PROT_READ) flags_str[0] = 'r';
if (vma.flags & PROT_WRITE) flags_str[1] = 'w';
if (vma.flags & PROT_EXEC) flags_str[2] = 'x';
ss << ::android::base::StringPrintf("%7s ", flags_str.c_str()) << vma.name << std::endl;
}
print_separator(ss);
print_stats(ss, proc_stats);
ss << "TOTAL" << std::endl;
std::cout << ss.str();
return 0;
}
// procmem entry point: shows (or resets, with -W) the memory statistics of a
// single process identified by the trailing pid argument.
int main(int argc, char* argv[]) {
    int opt;
    // Descending-PSS and descending-USS comparators for -p / -u.
    auto pss_sort = [](const Vma& a, const Vma& b) {
        uint64_t pss_a = a.usage.pss;
        uint64_t pss_b = b.usage.pss;
        return pss_a > pss_b;
    };
    auto uss_sort = [](const Vma& a, const Vma& b) {
        uint64_t uss_a = a.usage.uss;
        uint64_t uss_b = b.usage.uss;
        return uss_a > uss_b;
    };
    // nullptr means "mapping order as read from /proc" (the -m default).
    std::function<bool(const Vma& a, const Vma& b)> sort_func = nullptr;
    while ((opt = getopt(argc, argv, "himpuWw")) != -1) {
        switch (opt) {
            case 'h':
                hide_zeroes = true;
                break;
            case 'i':
                // TODO: libmeminfo doesn't support the flag to chose
                // between idle page tracking vs clear_refs. So for now,
                // this flag is unused and the library defaults to using
                // /proc/<pid>/clear_refs for finding the working set.
                use_pageidle = true;
                break;
            case 'm':
                // this is the default
                break;
            case 'p':
                sort_func = pss_sort;
                break;
            case 'u':
                sort_func = uss_sort;
                break;
            case 'W':
                reset_wss = true;
                break;
            case 'w':
                show_wss = true;
                break;
            case '?':
                // NOTE(review): unrecognized options exit with SUCCESS. Since
                // '-h' is taken by "hide zeroes", this appears to be the
                // de-facto help path (e.g. "procmem -?") — confirm intent
                // before changing to EXIT_FAILURE.
                usage(EXIT_SUCCESS);
            default:
                usage(EXIT_FAILURE);
        }
    }
    if (optind != (argc - 1)) {
        fprintf(stderr, "Need exactly one pid at the end\n");
        usage(EXIT_FAILURE);
    }
    // atoi() returns 0 on garbage, which the check below rejects.
    pid_t pid = atoi(argv[optind]);
    if (pid == 0) {
        std::cerr << "Invalid process id" << std::endl;
        exit(EXIT_FAILURE);
    }
    // -W: reset the working set and exit; all display options are ignored.
    if (reset_wss) {
        if (!ProcMemInfo::ResetWorkingSet(pid)) {
            std::cerr << "Failed to reset working set of pid : " << pid << std::endl;
            exit(EXIT_FAILURE);
        }
        return 0;
    }
    ProcMemInfo proc(pid, show_wss);
    const MemUsage& proc_stats = proc.Usage();
    std::vector<Vma> maps(proc.Maps());
    if (sort_func != nullptr) {
        std::sort(maps.begin(), maps.end(), sort_func);
    }
    return show(proc_stats, maps);
}

View File

@ -1,536 +0,0 @@
/*
* Copyright (C) 2018 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <android-base/file.h>
#include <android-base/parseint.h>
#include <android-base/stringprintf.h>
#include <android-base/strings.h>
#include <dirent.h>
#include <errno.h>
#include <inttypes.h>
#include <linux/kernel-page-flags.h>
#include <linux/oom.h>
#include <meminfo/procmeminfo.h>
#include <meminfo/sysmeminfo.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <unistd.h>
#include <iostream>
#include <memory>
#include <sstream>
#include <vector>
using ::android::meminfo::MemUsage;
using ::android::meminfo::ProcMemInfo;
// Snapshot of one process's memory statistics for procrank: oom_score_adj,
// cmdline, memory usage (or working set), and the swap offsets used to
// compute proportional/unique/zram swap later via CalculateSwap().
// A record whose construction failed reports valid() == false.
struct ProcessRecord {
  public:
    // Reads /proc/<pid>/* eagerly; pid_ stays -1 (invalid) if any required
    // file cannot be read. |pgflags|/|pgflags_mask| filter counted pages.
    ProcessRecord(pid_t pid, bool get_wss = false, uint64_t pgflags = 0, uint64_t pgflags_mask = 0)
        : pid_(-1),
          oomadj_(OOM_SCORE_ADJ_MAX + 1),
          cmdline_(""),
          proportional_swap_(0),
          unique_swap_(0),
          zswap_(0) {
        std::unique_ptr<ProcMemInfo> procmem =
                std::make_unique<ProcMemInfo>(pid, get_wss, pgflags, pgflags_mask);
        if (procmem == nullptr) {
            std::cerr << "Failed to create ProcMemInfo for: " << pid << std::endl;
            return;
        }
        std::string fname = ::android::base::StringPrintf("/proc/%d/oom_score_adj", pid);
        auto oomscore_fp =
                std::unique_ptr<FILE, decltype(&fclose)>{fopen(fname.c_str(), "re"), fclose};
        if (oomscore_fp == nullptr) {
            std::cerr << "Failed to open oom_score_adj file: " << fname << std::endl;
            return;
        }
        if (fscanf(oomscore_fp.get(), "%d\n", &oomadj_) != 1) {
            std::cerr << "Failed to read oomadj from: " << fname << std::endl;
            return;
        }
        fname = ::android::base::StringPrintf("/proc/%d/cmdline", pid);
        if (!::android::base::ReadFileToString(fname, &cmdline_)) {
            std::cerr << "Failed to read cmdline from: " << fname << std::endl;
            cmdline_ = "<unknown>";
        }
        // We deliberately don't use /proc/<pid>/cmdline as read, because some
        // processes show up with cmdlines that end with "0x00 0x0A 0x00",
        // e.g. xtra-daemon, lowi-server.
        // The resize() below trims cmdline_ at the first 0x00. This is how
        // the original procrank worked (luckily).
        cmdline_.resize(strlen(cmdline_.c_str()));
        usage_or_wss_ = get_wss ? procmem->Wss() : procmem->Usage();
        swap_offsets_ = procmem->SwapOffsets();
        pid_ = pid;
    }

    // True only when the constructor read everything it needed.
    bool valid() const { return pid_ != -1; }

    // Derives proportional/unique/zram swap from the global slot-refcount
    // array (see count_swap_offsets()) and the zram compression ratio.
    void CalculateSwap(const uint16_t* swap_offset_array, float zram_compression_ratio) {
        for (auto& off : swap_offsets_) {
            // A slot shared by N processes contributes 1/N of a page each.
            proportional_swap_ += getpagesize() / swap_offset_array[off];
            unique_swap_ += swap_offset_array[off] == 1 ? getpagesize() : 0;
            zswap_ = proportional_swap_ * zram_compression_ratio;
        }
    }

    // Getters
    pid_t pid() const { return pid_; }
    const std::string& cmdline() const { return cmdline_; }
    int32_t oomadj() const { return oomadj_; }
    uint64_t proportional_swap() const { return proportional_swap_; }
    uint64_t unique_swap() const { return unique_swap_; }
    uint64_t zswap() const { return zswap_; }

    // Wrappers to ProcMemInfo. Both return the same member: it holds the
    // working set when the record was built with get_wss, usage otherwise.
    const std::vector<uint16_t>& SwapOffsets() const { return swap_offsets_; }
    const MemUsage& Usage() const { return usage_or_wss_; }
    const MemUsage& Wss() const { return usage_or_wss_; }

  private:
    pid_t pid_;
    int32_t oomadj_;
    std::string cmdline_;
    uint64_t proportional_swap_;
    uint64_t unique_swap_;
    uint64_t zswap_;
    MemUsage usage_or_wss_;
    std::vector<uint16_t> swap_offsets_;
};
// Show working set instead of memory consumption (-w)
bool show_wss = false;
// Reset working set of each process and exit (-W)
bool reset_wss = false;
// Show per-process oom_score_adj column (-o)
bool show_oomadj = false;
// True if the device has swap enabled
bool has_swap = false;
// True, if device has zram enabled
bool has_zram = false;
// If zram is enabled, the compression ratio is zram used / swap used.
float zram_compression_ratio = 0.0;
// Sort processes in reverse (-R); default sort order is descending
bool reverse_sort = false;

// Calculated total memory usage across all processes in the system,
// accumulated by print_processes().
uint64_t total_pss = 0;
uint64_t total_uss = 0;
uint64_t total_swap = 0;
uint64_t total_pswap = 0;
uint64_t total_uswap = 0;
uint64_t total_zswap = 0;
// Prints procrank's usage text and terminates with |exit_status|.
// Fix: the synopsis line previously listed only -W/-v/-r/-p/-u/-s/-h even
// though -c/-C/-k/-w/-o/-R are documented below and accepted by the getopt
// string ("cChkoprRsuvwW").
[[noreturn]] static void usage(int exit_status) {
    std::cerr << "Usage: " << getprogname()
              << " [ -W ] [ -c | -C | -k ] [ -w ] [ -v | -r | -p | -u | -s | -o ] [ -R ] [ -h ]"
              << std::endl
              << " -v Sort by VSS." << std::endl
              << " -r Sort by RSS." << std::endl
              << " -p Sort by PSS." << std::endl
              << " -u Sort by USS." << std::endl
              << " -s Sort by swap." << std::endl
              << " (Default sort order is PSS.)" << std::endl
              << " -R Reverse sort order (default is descending)." << std::endl
              << " -c Only show cached (storage backed) pages" << std::endl
              << " -C Only show non-cached (ram/swap backed) pages" << std::endl
              << " -k Only show pages collapsed by KSM" << std::endl
              << " -w Display statistics for working set only." << std::endl
              << " -W Reset working set of all processes." << std::endl
              << " -o Show and sort by oom score against lowmemorykiller thresholds."
              << std::endl
              << " -h Display this help screen." << std::endl;
    exit(exit_status);
}
// Scans /proc, invoking |for_each_pid| for every numeric entry and appending
// the pid to |pids| (which is cleared first). Returns false if /proc cannot
// be opened or on the first callback that returns false.
static bool read_all_pids(std::vector<pid_t>* pids, std::function<bool(pid_t pid)> for_each_pid) {
    pids->clear();
    std::unique_ptr<DIR, int (*)(DIR*)> proc_dir(opendir("/proc"), closedir);
    if (proc_dir == nullptr) {
        return false;
    }
    for (struct dirent* entry = readdir(proc_dir.get()); entry != nullptr;
         entry = readdir(proc_dir.get())) {
        pid_t pid;
        // Non-numeric entries (e.g. "self", "meminfo") are not processes.
        if (!::android::base::ParseInt(entry->d_name, &pid)) {
            continue;
        }
        if (!for_each_pid(pid)) {
            return false;
        }
        pids->emplace_back(pid);
    }
    return true;
}
static bool count_swap_offsets(const ProcessRecord& proc, uint16_t* swap_offset_array,
uint32_t size) {
const std::vector<uint16_t>& swp_offs = proc.SwapOffsets();
for (auto& off : swp_offs) {
if (off >= size) {
std::cerr << "swap offset " << off << " is out of bounds for process: " << proc.pid()
<< std::endl;
return false;
}
if (swap_offset_array[off] == USHRT_MAX) {
std::cerr << "swap offset " << off << " ref count overflow in process: " << proc.pid()
<< std::endl;
return false;
}
swap_offset_array[off]++;
}
return true;
}
// Resets |ss| and appends the column header row. The set of columns depends
// on the show_oomadj/show_wss/has_swap/has_zram globals and must stay in
// sync with print_process_record().
static void print_header(std::stringstream& ss) {
    ss.str("");
    ss << ::android::base::StringPrintf("%5s ", "PID");
    if (show_oomadj) {
        ss << ::android::base::StringPrintf("%5s ", "oom");
    }
    if (!show_wss) {
        ss << ::android::base::StringPrintf("%8s %7s %7s %7s ", "Vss", "Rss", "Pss", "Uss");
        if (has_swap) {
            ss << ::android::base::StringPrintf("%7s %7s %7s ", "Swap", "PSwap", "USwap");
            if (has_zram) {
                ss << ::android::base::StringPrintf("%7s ", "ZSwap");
            }
        }
    } else {
        // No swap statistics here: working set pages by definition should
        // not end up in swap.
        ss << ::android::base::StringPrintf("%7s %7s %7s ", "WRss", "WPss", "WUss");
    }
    ss << "cmdline";
}
// Appends one formatted statistics row for |proc| (values in kilobytes).
// The cmdline and trailing newline are appended by the caller.
static void print_process_record(std::stringstream& ss, ProcessRecord& proc) {
    ss << ::android::base::StringPrintf("%5d ", proc.pid());
    if (show_oomadj) {
        ss << ::android::base::StringPrintf("%5d ", proc.oomadj());
    }
    if (show_wss) {
        ss << ::android::base::StringPrintf("%6" PRIu64 "K %6" PRIu64 "K %6" PRIu64 "K ",
                                            proc.Wss().rss / 1024, proc.Wss().pss / 1024,
                                            proc.Wss().uss / 1024);
    } else {
        ss << ::android::base::StringPrintf("%7" PRIu64 "K %6" PRIu64 "K %6" PRIu64 "K %6" PRIu64
                                            "K ",
                                            proc.Usage().vss / 1024, proc.Usage().rss / 1024,
                                            proc.Usage().pss / 1024, proc.Usage().uss / 1024);
        // Swap columns only exist when the device has swap (see print_header).
        if (has_swap) {
            ss << ::android::base::StringPrintf("%6" PRIu64 "K ", proc.Usage().swap / 1024);
            ss << ::android::base::StringPrintf("%6" PRIu64 "K ", proc.proportional_swap() / 1024);
            ss << ::android::base::StringPrintf("%6" PRIu64 "K ", proc.unique_swap() / 1024);
            if (has_zram) {
                ss << ::android::base::StringPrintf("%6" PRIu64 "K ", (proc.zswap() / 1024));
            }
        }
    }
}
// Appends one row per process to |ss|. Side effects: accumulates the global
// total_* counters and, when swap is present, finalizes each record's
// proportional/unique/zram swap via CalculateSwap().
static void print_processes(std::stringstream& ss, std::vector<ProcessRecord>& procs,
                            uint16_t* swap_offset_array) {
    for (auto& proc : procs) {
        total_pss += show_wss ? proc.Wss().pss : proc.Usage().pss;
        total_uss += show_wss ? proc.Wss().uss : proc.Usage().uss;
        if (!show_wss && has_swap) {
            proc.CalculateSwap(swap_offset_array, zram_compression_ratio);
            total_swap += proc.Usage().swap;
            total_pswap += proc.proportional_swap();
            total_uswap += proc.unique_swap();
            if (has_zram) {
                total_zswap += proc.zswap();
            }
        }
        print_process_record(ss, proc);
        ss << proc.cmdline() << std::endl;
    }
}
// Appends a separator row of dashes under the columns that get totals
// (PSS/USS and the swap family); layout mirrors print_header().
static void print_separator(std::stringstream& ss) {
    static const char* kDashes = "------";
    ss << ::android::base::StringPrintf("%5s ", "");
    if (show_oomadj) {
        ss << ::android::base::StringPrintf("%5s ", "");
    }
    if (show_wss) {
        ss << ::android::base::StringPrintf("%7s %7s %7s ", "", kDashes, kDashes);
    } else {
        ss << ::android::base::StringPrintf("%8s %7s %7s %7s ", "", "", kDashes, kDashes);
        if (has_swap) {
            ss << ::android::base::StringPrintf("%7s %7s %7s ", kDashes, kDashes, kDashes);
            if (has_zram) {
                ss << ::android::base::StringPrintf("%7s ", kDashes);
            }
        }
    }
    ss << ::android::base::StringPrintf("%s", kDashes);
}
// Appends the TOTAL row built from the global total_* counters that
// print_processes() accumulated. Column layout mirrors print_header().
static void print_totals(std::stringstream& ss) {
    ss << ::android::base::StringPrintf("%5s ", "");
    if (show_oomadj) {
        ss << ::android::base::StringPrintf("%5s ", "");
    }
    if (show_wss) {
        ss << ::android::base::StringPrintf("%7s %6" PRIu64 "K %6" PRIu64 "K ", "",
                                            total_pss / 1024, total_uss / 1024);
    } else {
        ss << ::android::base::StringPrintf("%8s %7s %6" PRIu64 "K %6" PRIu64 "K ", "", "",
                                            total_pss / 1024, total_uss / 1024);
        if (has_swap) {
            ss << ::android::base::StringPrintf("%6" PRIu64 "K ", total_swap / 1024);
            ss << ::android::base::StringPrintf("%6" PRIu64 "K ", total_pswap / 1024);
            ss << ::android::base::StringPrintf("%6" PRIu64 "K ", total_uswap / 1024);
            if (has_zram) {
                ss << ::android::base::StringPrintf("%6" PRIu64 "K ", total_zswap / 1024);
            }
        }
    }
    ss << "TOTAL";
}
// Appends the system-wide summary: a ZRAM line (only when swap exists) and a
// RAM line with the /proc/meminfo derived totals, all in kilobytes.
static void print_sysmeminfo(std::stringstream& ss, ::android::meminfo::SysMemInfo& smi) {
    if (has_swap) {
        ss << ::android::base::StringPrintf("ZRAM: %" PRIu64 "K physical used for %" PRIu64
                                            "K in swap "
                                            "(%" PRIu64 "K total swap)",
                                            smi.mem_zram_kb(),
                                            (smi.mem_swap_kb() - smi.mem_swap_free_kb()),
                                            smi.mem_swap_kb())
           << std::endl;
    }
    ss << ::android::base::StringPrintf(" RAM: %" PRIu64 "K total, %" PRIu64 "K free, %" PRIu64
                                        "K buffers, "
                                        "%" PRIu64 "K cached, %" PRIu64 "K shmem, %" PRIu64
                                        "K slab",
                                        smi.mem_total_kb(), smi.mem_free_kb(), smi.mem_buffers_kb(),
                                        smi.mem_cached_kb(), smi.mem_shmem_kb(), smi.mem_slab_kb());
}
// procrank entry point. Two-pass algorithm:
//   pass 1: build a ProcessRecord per pid and count how many processes
//           reference each swap slot (for proportional swap);
//   pass 2: sort, then compute per-process swap and print the ranked table
//           followed by totals and system memory info.
int main(int argc, char* argv[]) {
    // Sort comparators; each uses working-set or usage stats depending on
    // show_wss, and honors reverse_sort (ascending when set).
    auto pss_sort = [](ProcessRecord& a, ProcessRecord& b) {
        MemUsage stats_a = show_wss ? a.Wss() : a.Usage();
        MemUsage stats_b = show_wss ? b.Wss() : b.Usage();
        return reverse_sort ? stats_a.pss < stats_b.pss : stats_a.pss > stats_b.pss;
    };
    auto uss_sort = [](ProcessRecord& a, ProcessRecord& b) {
        MemUsage stats_a = show_wss ? a.Wss() : a.Usage();
        MemUsage stats_b = show_wss ? b.Wss() : b.Usage();
        return reverse_sort ? stats_a.uss < stats_b.uss : stats_a.uss > stats_b.uss;
    };
    auto rss_sort = [](ProcessRecord& a, ProcessRecord& b) {
        MemUsage stats_a = show_wss ? a.Wss() : a.Usage();
        MemUsage stats_b = show_wss ? b.Wss() : b.Usage();
        return reverse_sort ? stats_a.rss < stats_b.rss : stats_a.rss > stats_b.rss;
    };
    auto vss_sort = [](ProcessRecord& a, ProcessRecord& b) {
        MemUsage stats_a = show_wss ? a.Wss() : a.Usage();
        MemUsage stats_b = show_wss ? b.Wss() : b.Usage();
        return reverse_sort ? stats_a.vss < stats_b.vss : stats_a.vss > stats_b.vss;
    };
    auto swap_sort = [](ProcessRecord& a, ProcessRecord& b) {
        MemUsage stats_a = show_wss ? a.Wss() : a.Usage();
        MemUsage stats_b = show_wss ? b.Wss() : b.Usage();
        return reverse_sort ? stats_a.swap < stats_b.swap : stats_a.swap > stats_b.swap;
    };
    auto oomadj_sort = [](ProcessRecord& a, ProcessRecord& b) {
        return reverse_sort ? a.oomadj() < b.oomadj() : a.oomadj() > b.oomadj();
    };

    // default PSS sort
    std::function<bool(ProcessRecord & a, ProcessRecord & b)> proc_sort = pss_sort;

    // count all pages by default
    uint64_t pgflags = 0;
    uint64_t pgflags_mask = 0;

    int opt;
    while ((opt = getopt(argc, argv, "cChkoprRsuvwW")) != -1) {
        switch (opt) {
            case 'c':
                // Only storage-backed pages.
                pgflags = 0;
                pgflags_mask = (1 << KPF_SWAPBACKED);
                break;
            case 'C':
                // Only ram/swap-backed pages.
                pgflags = (1 << KPF_SWAPBACKED);
                pgflags_mask = (1 << KPF_SWAPBACKED);
                break;
            case 'h':
                usage(EXIT_SUCCESS);
            case 'k':
                // Only KSM-collapsed pages.
                pgflags = (1 << KPF_KSM);
                pgflags_mask = (1 << KPF_KSM);
                break;
            case 'o':
                proc_sort = oomadj_sort;
                show_oomadj = true;
                break;
            case 'p':
                proc_sort = pss_sort;
                break;
            case 'r':
                proc_sort = rss_sort;
                break;
            case 'R':
                reverse_sort = true;
                break;
            case 's':
                proc_sort = swap_sort;
                break;
            case 'u':
                proc_sort = uss_sort;
                break;
            case 'v':
                proc_sort = vss_sort;
                break;
            case 'w':
                show_wss = true;
                break;
            case 'W':
                reset_wss = true;
                break;
            default:
                usage(EXIT_FAILURE);
        }
    }

    std::vector<pid_t> pids;
    std::vector<ProcessRecord> procs;
    if (reset_wss) {
        if (!read_all_pids(&pids,
                           [&](pid_t pid) -> bool { return ProcMemInfo::ResetWorkingSet(pid); })) {
            std::cerr << "Failed to reset working set of all processes" << std::endl;
            exit(EXIT_FAILURE);
        }
        // we are done, all other options passed to procrank are ignored in the presence of '-W'
        return 0;
    }

    ::android::meminfo::SysMemInfo smi;
    if (!smi.ReadMemInfo()) {
        std::cerr << "Failed to get system memory info" << std::endl;
        exit(EXIT_FAILURE);
    }

    // Figure out swap and zram
    uint64_t swap_total = smi.mem_swap_kb() * 1024;
    has_swap = swap_total > 0;
    // Allocate the swap array (one refcount per swap slot; zero-length and
    // unused when there is no swap).
    auto swap_offset_array = std::make_unique<uint16_t[]>(swap_total / getpagesize());
    if (has_swap) {
        has_zram = smi.mem_zram_kb() > 0;
        if (has_zram) {
            zram_compression_ratio = static_cast<float>(smi.mem_zram_kb()) /
                                     (smi.mem_swap_kb() - smi.mem_swap_free_kb());
        }
    }

    auto mark_swap_usage = [&](pid_t pid) -> bool {
        ProcessRecord proc(pid, show_wss, pgflags, pgflags_mask);
        if (!proc.valid()) {
            // Check to see if the process is still around, skip the process if the proc
            // directory is inaccessible. It was most likely killed while creating the process
            // record
            std::string procdir = ::android::base::StringPrintf("/proc/%d", pid);
            if (access(procdir.c_str(), F_OK | R_OK)) return true;

            // Warn if we failed to gather process stats even while it is still alive.
            // Return success here, so we continue to print stats for other processes.
            std::cerr << "warning: failed to create process record for: " << pid << std::endl;
            return true;
        }

        // Skip processes with no memory mappings
        uint64_t vss = show_wss ? proc.Wss().vss : proc.Usage().vss;
        if (vss == 0) return true;

        // collect swap_offset counts from all processes in 1st pass
        if (!show_wss && has_swap &&
            !count_swap_offsets(proc, swap_offset_array.get(), swap_total / getpagesize())) {
            std::cerr << "Failed to count swap offsets for process: " << pid << std::endl;
            return false;
        }

        procs.emplace_back(std::move(proc));
        return true;
    };

    // Get a list of all pids currently running in the system in 1st pass through all processes.
    // Mark each swap offset used by the process as we find them for calculating proportional
    // swap usage later.
    if (!read_all_pids(&pids, mark_swap_usage)) {
        std::cerr << "Failed to read all pids from the system" << std::endl;
        exit(EXIT_FAILURE);
    }

    std::stringstream ss;
    if (procs.empty()) {
        // This would happen in corner cases where procrank is being run to find KSM usage on a
        // system with no KSM and combined with working set determination as follows
        //   procrank -w -u -k
        //   procrank -w -s -k
        //   procrank -w -o -k
        ss << "<empty>" << std::endl << std::endl;
        print_sysmeminfo(ss, smi);
        ss << std::endl;
        std::cout << ss.str();
        return 0;
    }

    // Sort all process records, default is PSS descending
    std::sort(procs.begin(), procs.end(), proc_sort);

    // start dumping output in string stream
    print_header(ss);
    ss << std::endl;

    // 2nd pass to calculate and get per process stats to add them up
    print_processes(ss, procs, swap_offset_array.get());

    // Add separator to output
    print_separator(ss);
    ss << std::endl;

    // Add totals to output
    print_totals(ss);
    ss << std::endl << std::endl;

    // Add system information at the end
    print_sysmeminfo(ss, smi);
    ss << std::endl;

    // dump on the screen
    std::cout << ss.str();

    return 0;
}

View File

@ -1,285 +0,0 @@
/*
* Copyright (C) 2019 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <getopt.h>
#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <sys/signal.h>
#include <sys/types.h>
#include <unistd.h>
#include <memory>
#include <string>
#include <vector>
#include <android-base/stringprintf.h>
#include <android-base/strings.h>
#include <meminfo/procmeminfo.h>
using ::android::meminfo::Vma;
// A Vma plus the bookkeeping showmap needs while aggregating smaps entries:
// whether the mapping was attributed to a library's bss, and how many raw
// mappings were merged into this record (printed in the '#' column).
struct VmaInfo {
    Vma vma;
    bool is_bss;     // anonymous mapping attributed to the preceding .so
    uint32_t count;  // number of raw mappings coalesced into this entry

    VmaInfo() = default;
    VmaInfo(const Vma& v) : vma(v), is_bss(false), count(1) {}
    VmaInfo(const Vma& v, bool bss) : vma(v), is_bss(bss), count(1) {}
    // Same as above but overrides the mapping's displayed name.
    VmaInfo(const Vma& v, const std::string& name, bool bss) : vma(v), is_bss(bss), count(1) {
        vma.name = name;
    }
};
// Global options
static std::string g_filename = "";       // smaps file to parse (-f, or /proc/<pid>/smaps)
static bool g_merge_by_names = false;     // coalesce same-named maps (derived in main)
static bool g_terse = false;              // -t: only rows with private pages
static bool g_verbose = false;            // -v: show flags, no coalescing
static bool g_show_addr = false;          // -a: show start/end addresses
static bool g_quiet = false;              // -q: suppress parse-failure message
static pid_t g_pid = -1;                  // target process (unused with -f)

static VmaInfo g_total;                   // running totals across all rows
static std::vector<VmaInfo> g_vmas;       // aggregated mappings, kept sorted
// Prints showmap's command-line help to stderr and exits with |exit_status|.
[[noreturn]] static void usage(const char* progname, int exit_status) {
    static const char* const kHelpText =
            "%s [-aqtv] [-f FILE] PID\n"
            "-a\taddresses (show virtual memory map)\n"
            "-q\tquiet (don't show error if map could not be read)\n"
            "-t\tterse (show only items with private pages)\n"
            "-v\tverbose (don't coalesce maps with the same name)\n"
            "-f\tFILE (read from input from FILE instead of PID)\n";
    fprintf(stderr, kHelpText, progname);
    exit(exit_status);
}
// Returns true iff |name| looks like an absolute shared-library path: it
// must start with '/', end in ".so", and be longer than the bare "/.so".
static bool is_library(const std::string& name) {
    if (name.size() <= 4 || name[0] != '/') {
        return false;
    }
    // Equivalent to checking that the path ends with ".so".
    return name.compare(name.size() - 3, 3, ".so") == 0;
}
// Ordering predicate for g_vmas: by (start, end) address when addresses are
// displayed (-a), otherwise lexicographically by mapping name.
static bool insert_before(const VmaInfo& a, const VmaInfo& b) {
    if (!g_show_addr) {
        return strcmp(a.vma.name.c_str(), b.vma.name.c_str()) < 0;
    }
    if (a.vma.start != b.vma.start) {
        return a.vma.start < b.vma.start;
    }
    return a.vma.end < b.vma.end;
}
// Folds one mapping parsed from the smaps file into g_vmas. Anonymous
// mappings that directly follow a library mapping are attributed to that
// library as its bss; other anonymous mappings are named "[anon]". The entry
// is then either merged into an existing same-named record (when
// g_merge_by_names) or inserted in insert_before() order.
static void collect_vma(const Vma& vma) {
    if (g_vmas.empty()) {
        // NOTE(review): the very first mapping bypasses the bss/"[anon]"
        // naming logic below; behavior preserved from the original.
        g_vmas.emplace_back(vma);
        return;
    }

    VmaInfo current(vma);
    VmaInfo& last = g_vmas.back();
    // Determine whether this nameless mapping is a library's bss: it must be
    // contiguous with (immediately follow) the library mapping.
    if (vma.name.empty()) {
        if (last.vma.end == current.vma.start && is_library(last.vma.name)) {
            current.vma.name = last.vma.name;
            current.is_bss = true;
        } else {
            current.vma.name = "[anon]";
        }
    }

    // Walk the list once: merge into a same-named record, or insert at the
    // first position where |current| sorts before the existing entry.
    // Fix: the original compared the loop iterator against end() after a
    // possible std::vector::insert, which invalidates iterators (UB); track
    // completion with a flag instead.
    bool handled = false;
    for (auto it = g_vmas.begin(); it != g_vmas.end(); ++it) {
        if (g_merge_by_names && (it->vma.name == current.vma.name)) {
            it->vma.usage.vss += current.vma.usage.vss;
            it->vma.usage.rss += current.vma.usage.rss;
            it->vma.usage.pss += current.vma.usage.pss;
            it->vma.usage.shared_clean += current.vma.usage.shared_clean;
            it->vma.usage.shared_dirty += current.vma.usage.shared_dirty;
            it->vma.usage.private_clean += current.vma.usage.private_clean;
            it->vma.usage.private_dirty += current.vma.usage.private_dirty;
            it->vma.usage.swap += current.vma.usage.swap;
            it->vma.usage.swap_pss += current.vma.usage.swap_pss;
            // The merged record is bss only if every constituent was bss.
            it->is_bss &= current.is_bss;
            it->count++;
            handled = true;
            break;
        }

        if (insert_before(current, *it)) {
            g_vmas.insert(it, current);
            handled = true;
            break;
        }
    }

    if (!handled) {
        g_vmas.emplace_back(current);
    }
}
// Prints the two-line column header. Optional columns: addresses (-a),
// merge count '#' (only in the default coalescing mode), flags (-v).
// Must stay in sync with print_vmainfo().
static void print_header() {
    const char* addr1 = g_show_addr ? " start end " : "";
    const char* addr2 = g_show_addr ? " addr addr " : "";

    printf("%s virtual shared shared private private\n", addr1);
    printf("%s size RSS PSS clean dirty clean dirty swap swapPSS",
           addr2);
    if (!g_verbose && !g_show_addr) {
        printf(" # ");
    }
    if (g_verbose) {
        printf(" flags ");
    }
    printf(" object\n");
}
// Prints a dashed divider whose segments match the header/row layout
// (address, numeric, count, flags and name columns).
static void print_divider() {
    if (g_show_addr) {
        printf("-------- -------- ");
    }
    printf("-------- -------- -------- -------- -------- -------- -------- -------- -------- ");
    if (!g_verbose && !g_show_addr) {
        printf("---- ");
    }
    if (g_verbose) {
        printf("------ ");
    }
    printf("------------------------------\n");
}
// Prints one row of statistics for |v| (without the trailing name, which the
// caller appends). When |total| is set, the address and flags cells are
// blanked since they are meaningless for the TOTAL row.
static void print_vmainfo(const VmaInfo& v, bool total) {
    if (g_show_addr) {
        if (total) {
            printf(" ");
        } else {
            printf("%16" PRIx64 " %16" PRIx64 " ", v.vma.start, v.vma.end);
        }
    }
    printf("%8" PRIu64 " %8" PRIu64 " %8" PRIu64 " %8" PRIu64 " %8" PRIu64 " %8" PRIu64 " %8" PRIu64
           " %8" PRIu64 " %8" PRIu64 " ",
           v.vma.usage.vss, v.vma.usage.rss, v.vma.usage.pss, v.vma.usage.shared_clean,
           v.vma.usage.shared_dirty, v.vma.usage.private_clean, v.vma.usage.private_dirty,
           v.vma.usage.swap, v.vma.usage.swap_pss);
    if (!g_verbose && !g_show_addr) {
        printf("%4" PRIu32 " ", v.count);
    }
    if (g_verbose) {
        if (total) {
            printf(" ");
        } else {
            // Render rwx protection bits as an ls-style string.
            std::string flags_str("---");
            if (v.vma.flags & PROT_READ) flags_str[0] = 'r';
            if (v.vma.flags & PROT_WRITE) flags_str[1] = 'w';
            if (v.vma.flags & PROT_EXEC) flags_str[2] = 'x';
            printf("%6s ", flags_str.c_str());
        }
    }
}
// Parses g_filename via collect_vma() and prints the full table: per-mapping
// rows, then a repeated header and a TOTAL row. Returns 1 on parse failure
// (silently when -q), 0 otherwise.
static int showmap(void) {
    if (!::android::meminfo::ForEachVmaFromFile(g_filename, collect_vma)) {
        if (!g_quiet) {
            fprintf(stderr, "Failed to parse file %s\n", g_filename.c_str());
        }
        return 1;
    }

    print_header();
    print_divider();

    for (const auto& v : g_vmas) {
        // Totals are accumulated for every entry, including the ones the
        // terse filter below skips, so TOTAL always covers all mappings.
        g_total.vma.usage.vss += v.vma.usage.vss;
        g_total.vma.usage.rss += v.vma.usage.rss;
        g_total.vma.usage.pss += v.vma.usage.pss;

        g_total.vma.usage.private_clean += v.vma.usage.private_clean;
        g_total.vma.usage.private_dirty += v.vma.usage.private_dirty;

        g_total.vma.usage.shared_clean += v.vma.usage.shared_clean;
        g_total.vma.usage.shared_dirty += v.vma.usage.shared_dirty;

        g_total.vma.usage.swap += v.vma.usage.swap;
        g_total.vma.usage.swap_pss += v.vma.usage.swap_pss;
        g_total.count += v.count;

        // -t: only print rows that contain private pages.
        if (g_terse && !(v.vma.usage.private_dirty || v.vma.usage.private_clean)) {
            continue;
        }

        print_vmainfo(v, false);
        printf("%s%s\n", v.vma.name.c_str(), v.is_bss ? " [bss]" : "");
    }

    print_divider();
    print_header();
    print_divider();

    print_vmainfo(g_total, true);
    printf("TOTAL\n");

    return 0;
}
// showmap entry point: parses options, then dumps the per-mapping memory
// table for /proc/<pid>/smaps or a caller-supplied smaps file (-f).
int main(int argc, char* argv[]) {
    // Output is commonly piped into head/less; dying on SIGPIPE is unhelpful.
    signal(SIGPIPE, SIG_IGN);
    struct option longopts[] = {
            {"help", no_argument, nullptr, 'h'},
            {0, 0, nullptr, 0},
    };
    int opt;
    while ((opt = getopt_long(argc, argv, "tvaqf:h", longopts, nullptr)) != -1) {
        switch (opt) {
            case 't':
                g_terse = true;
                break;
            case 'a':
                g_show_addr = true;
                break;
            case 'v':
                g_verbose = true;
                break;
            case 'q':
                g_quiet = true;
                break;
            case 'f':
                g_filename = optarg;
                break;
            case 'h':
                usage(argv[0], EXIT_SUCCESS);
            default:
                usage(argv[0], EXIT_FAILURE);
        }
    }

    if (g_filename.empty()) {
        if ((argc - 1) < optind) {
            fprintf(stderr, "Invalid arguments: Must provide <pid> at the end\n");
            usage(argv[0], EXIT_FAILURE);
        }

        // Parse the pid strictly with strtol: unlike the previous atoi(),
        // this rejects trailing junk such as "123abc" instead of silently
        // truncating it, while still rejecting zero and negative values.
        char* endptr = nullptr;
        long pid_arg = strtol(argv[optind], &endptr, 10);
        if (endptr == argv[optind] || *endptr != '\0' || pid_arg <= 0) {
            fprintf(stderr, "Invalid process id %s\n", argv[optind]);
            usage(argv[0], EXIT_FAILURE);
        }
        g_pid = static_cast<pid_t>(pid_arg);

        g_filename = ::android::base::StringPrintf("/proc/%d/smaps", g_pid);
    }

    // Coalescing by name only makes sense when neither addresses nor
    // per-mapping flags are displayed.
    g_merge_by_names = !g_verbose && !g_show_addr;

    return showmap();
}

View File

@ -1,219 +0,0 @@
/*
* Copyright (C) 2019 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <getopt.h>
#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <unistd.h>
#include <algorithm>
#include <memory>
#include <string>
#include <vector>
#include <meminfo/pageacct.h>
#include <meminfo/procmeminfo.h>
using ::android::meminfo::ProcMemInfo;
using ::android::meminfo::Vma;
// Global options
static int32_t g_delay = 0;  // seconds to sleep between samples (-d)
static int32_t g_total = 2;  // number of refreshes before exiting (-n)
static pid_t g_pid = -1;     // target process, from the positional argument
// Print invocation help to stderr and terminate with the given status.
// getprogname() supplies the program name registered by the C runtime.
[[noreturn]] static void usage(int exit_status) {
    fprintf(stderr,
            "%s [-d DELAY_BETWEEN_EACH_SAMPLE] [-n REFRESH_TOTAL] PID\n"
            "-d\tdelay between each working set sample (default 0)\n"
            "-n\ttotal number of refreshes before we exit (default 2)\n",
            getprogname());
    exit(exit_status);
}
// Emit the two-row column header; addr1/addr2 carry the address-column
// captions for each row.
static void print_header() {
    const char* addr1 = " start end ";
    const char* addr2 = " addr addr ";

    printf("%s virtual shared shared private private\n", addr1);
    printf("%s size RSS PSS clean dirty clean dirty swap "
           "swapPSS",
           addr2);
    printf(" object\n");
}

// Horizontal rule matching the column layout produced by print_header().
static void print_divider() {
    printf("---------------- ---------------- ");
    printf("--------- --------- --------- --------- --------- --------- --------- --------- "
           "--------- ");
    printf("------------------------------\n");
}

// One row per VMA: hex start/end addresses, the usage counters converted
// from bytes to KiB, then the mapping name.
static void print_vma(const Vma& v) {
    printf("%16" PRIx64 " %16" PRIx64 " ", v.start, v.end);
    printf("%8" PRIu64 "K %8" PRIu64 "K %8" PRIu64 "K %8" PRIu64 "K %8" PRIu64 "K %8" PRIu64
           "K %8" PRIu64 "K %8" PRIu64 "K %8" PRIu64 "K ",
           v.usage.vss / 1024, v.usage.rss / 1024, v.usage.pss / 1024, v.usage.shared_clean / 1024,
           v.usage.shared_dirty / 1024, v.usage.private_clean / 1024, v.usage.private_dirty / 1024,
           v.usage.swap / 1024, v.usage.swap_pss / 1024);
    printf("%s\n", v.name.c_str());
}
// Two snapshots refer to the same mapping iff every identifying field
// (address range, name, flags, file offset) matches.
static bool same_vma(const Vma& cur, const Vma& last) {
    if (cur.start != last.start) return false;
    if (cur.end != last.end) return false;
    if (cur.offset != last.offset) return false;
    if (cur.flags != last.flags) return false;
    return cur.name == last.name;
}

// Build a Vma whose usage counters are the per-field deltas between two
// snapshots of the same mapping, clamped at zero (counters can shrink
// between samples; don't let unsigned subtraction wrap).
static Vma diff_vma_params(const Vma& cur, const Vma& last) {
    // Saturating difference for one counter (fields print via PRIu64, see
    // print_vma, so they are 64-bit unsigned).
    auto delta = [](uint64_t now, uint64_t before) -> uint64_t {
        return now > before ? now - before : 0;
    };

    Vma res;
    res.usage.shared_clean = delta(cur.usage.shared_clean, last.usage.shared_clean);
    res.usage.shared_dirty = delta(cur.usage.shared_dirty, last.usage.shared_dirty);
    res.usage.private_clean = delta(cur.usage.private_clean, last.usage.private_clean);
    res.usage.private_dirty = delta(cur.usage.private_dirty, last.usage.private_dirty);
    res.usage.rss = delta(cur.usage.rss, last.usage.rss);
    res.usage.pss = delta(cur.usage.pss, last.usage.pss);
    res.usage.uss = delta(cur.usage.uss, last.usage.uss);
    res.usage.swap = delta(cur.usage.swap, last.usage.swap);
    res.usage.swap_pss = delta(cur.usage.swap_pss, last.usage.swap_pss);

    // Identity fields are carried over from the current snapshot.
    res.start = cur.start;
    res.end = cur.end;
    res.offset = cur.offset;
    res.flags = cur.flags;
    res.name = cur.name;
    return res;
}
// Fill *res with the delta of `wss` against the previous sample `old`.
// Both inputs are sorted in place by start address; entries present in both
// samples contribute their saturating per-field difference, new entries are
// copied verbatim. *res comes back sorted by start address.
static void diff_workingset(std::vector<Vma>& wss, std::vector<Vma>& old, std::vector<Vma>* res) {
    res->clear();
    auto vma_sorter = [](const Vma& a, const Vma& b) { return a.start < b.start; };
    std::sort(wss.begin(), wss.end(), vma_sorter);
    std::sort(old.begin(), old.end(), vma_sorter);
    if (old.empty()) {
        *res = wss;
        return;
    }

    for (auto& cur : wss) {
        bool found_same_vma = false;
        // `old` is sorted by start and same_vma() requires equal start
        // addresses, so binary-search the candidates instead of the previous
        // O(n^2) full scan (this resolves the old TODO about system_server).
        auto range = std::equal_range(old.begin(), old.end(), cur, vma_sorter);
        for (auto it = range.first; it != range.second; ++it) {
            if (same_vma(cur, *it)) {
                res->emplace_back(diff_vma_params(cur, *it));
                found_same_vma = true;
                break;
            }
        }

        if (!found_same_vma) {
            res->emplace_back(cur);
        }
    }

    std::sort(res->begin(), res->end(), vma_sorter);
    return;
}
// Sampling loop: read the page-idle based working set of g_pid, print the
// per-VMA delta against the previous sample, and repeat g_total times with
// g_delay seconds between samples.
static int workingset() {
    std::vector<Vma> last_wss = {};
    std::vector<Vma> diff_wss = {};
    uint32_t nr_refresh = 0;

    while (true) {
        // Fresh reader each round; the `true` argument selects working-set
        // accounting (see MapsWithPageIdle below).
        std::unique_ptr<ProcMemInfo> proc_mem = std::make_unique<ProcMemInfo>(g_pid, true);
        std::vector<Vma> wss = proc_mem->MapsWithPageIdle();

        diff_workingset(wss, last_wss, &diff_wss);
        // Drop mappings whose delta has no resident pages.
        diff_wss.erase(std::remove_if(diff_wss.begin(), diff_wss.end(),
                                      [](const auto& v) { return v.usage.rss == 0; }),
                       diff_wss.end());

        // Reprint the header every 5 refreshes so it stays visible.
        if ((nr_refresh % 5) == 0) {
            print_header();
            print_divider();
        }

        for (const auto& v : diff_wss) {
            print_vma(v);
        }

        nr_refresh++;
        // NOTE(review): unsigned nr_refresh vs signed g_total -- a
        // non-positive g_total never matches and the loop would not
        // terminate; confirm main() validates -n before calling this.
        if (nr_refresh == g_total) {
            break;
        }

        last_wss = wss;
        sleep(g_delay);
        print_divider();
    }

    return 0;
}
// Entry point: parse -d/-n flags and the mandatory pid, verify kernel
// support for idle-page tracking, then run the sampling loop.
int main(int argc, char* argv[]) {
    struct option longopts[] = {
            {"help", no_argument, nullptr, 'h'},
            {0, 0, nullptr, 0},
    };

    int opt;
    while ((opt = getopt_long(argc, argv, "d:n:h", longopts, nullptr)) != -1) {
        switch (opt) {
            case 'd':
                g_delay = atoi(optarg);
                break;
            case 'n':
                g_total = atoi(optarg);
                break;
            case 'h':
                usage(EXIT_SUCCESS);
            default:
                usage(EXIT_FAILURE);
        }
    }

    // Validate the parsed values: a negative delay would be passed to
    // sleep() as a huge unsigned count, and a non-positive total would make
    // the refresh loop in workingset() never terminate (nr_refresh starts at
    // 0 and only increments).
    if (g_delay < 0 || g_total < 1) {
        fprintf(stderr, "Invalid arguments: delay must be >= 0 and total must be >= 1\n");
        usage(EXIT_FAILURE);
    }

    if ((argc - 1) < optind) {
        fprintf(stderr, "Invalid arguments: Must provide <pid> at the end\n");
        usage(EXIT_FAILURE);
    }

    g_pid = atoi(argv[optind]);
    if (g_pid <= 0) {
        fprintf(stderr, "Invalid process id %s\n", argv[optind]);
        usage(EXIT_FAILURE);
    }

    if (!::android::meminfo::PageAcct::KernelHasPageIdle()) {
        fprintf(stderr, "Missing support for Idle page tracking in the kernel\n");
        return 0;
    }

    return workingset();
}

View File

@ -1,33 +0,0 @@
// Copyright (C) 2019 The Android Open Source Project
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Shared settings for the meminfo VTS tests: reuse the libmeminfo build
// defaults and statically link the library under test.
cc_defaults {
    name: "vts_meminfo_defaults",
    defaults: ["libmeminfo_defaults"],
    srcs: ["vts_meminfo_test.cpp"],
    static_libs: ["libmeminfo"],
}

// Legacy VTS packaging of the test binary.
cc_test {
    name: "vts_meminfo_test",
    defaults: ["vts_meminfo_defaults"],
}

// vts-core variant with an auto-generated TradeFed config; only runs on
// devices at API level 29 or newer.
cc_test {
    name: "vts_core_meminfo_test",
    defaults: ["vts_meminfo_defaults"],
    test_suites: ["vts-core"],
    auto_gen_config: true,
    test_min_api_level: 29,
}

View File

@ -1,22 +0,0 @@
#
# Copyright (C) 2019 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Register the VtsKernelMemInfoTest module with the VTS harness.
LOCAL_PATH := $(call my-dir)

include $(CLEAR_VARS)

LOCAL_MODULE := VtsKernelMemInfoTest
# "-include" tolerates a missing host config file instead of failing the build.
-include test/vts/tools/build/Android.host_config.mk

View File

@ -1,30 +0,0 @@
<?xml version="1.0" encoding="utf-8"?>
<!-- Copyright (C) 2019 The Android Open Source Project
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<configuration description="Config for VTS VtsKernelMemInfoTest.">
    <option name="config-descriptor:metadata" key="plan" value="vts-kernel" />
    <!-- Push the host-driven test payload to the device; a failed push does
         not abort the run (abort-on-push-failure=false). -->
    <target_preparer class="com.android.compatibility.common.tradefed.targetprep.VtsFilePusher">
        <option name="abort-on-push-failure" value="false"/>
        <option name="push-group" value="HostDrivenTest.push"/>
    </target_preparer>
    <test class="com.android.tradefed.testtype.VtsMultiDeviceTest">
        <option name="test-module-name" value="VtsKernelMemInfoTest"/>
        <!-- Run both the 32-bit and 64-bit gtest binaries. -->
        <option name="binary-test-source" value="_32bit::DATA/nativetest/vts_meminfo_test/vts_meminfo_test" />
        <option name="binary-test-source" value="_64bit::DATA/nativetest64/vts_meminfo_test/vts_meminfo_test" />
        <option name="binary-test-type" value="gtest"/>
        <!-- Only enforced on devices first shipping with API level 29+. -->
        <option name="precondition-first-api-level" value="29" />
        <option name="test-timeout" value="10m"/>
    </test>
</configuration>

View File

@ -1,31 +0,0 @@
/*
* Copyright (C) 2019 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gtest/gtest.h>
#include <meminfo/procmeminfo.h>
namespace android {
namespace meminfo {
// /proc/<pid>/smaps_rollup support is required; this asserts the kernel
// exposes it for a process that always exists.
TEST(SmapsRollup, IsSupported) {
    // Use init's pid for this test since it's the only known pid.
    ASSERT_TRUE(IsSmapsRollupSupported(1));
}
} // namespace meminfo
} // namespace android

View File

@ -1 +0,0 @@
../.clang-format-4

View File

@ -1,39 +0,0 @@
// Copyright 2013 The Android Open Source Project
// libmemtrack: thin C wrapper over the memtrack HAL
// (android.hardware.memtrack@1.0). vendor_available + vndk so both system
// and vendor processes can link against it.
cc_library_shared {
    name: "libmemtrack",
    vendor_available: true,
    vndk: {
        enabled: true,
    },
    srcs: ["memtrack.cpp"],
    export_include_dirs: ["include"],
    local_include_dirs: ["include"],
    include_dirs: ["hardware/libhardware/include"],
    shared_libs: [
        "libhardware",
        "liblog",
        "libbase",
        "libhidlbase",
        "libutils",
        "android.hardware.memtrack@1.0",
    ],
    cflags: [
        "-Wall",
        "-Werror",
    ],
}
// Command-line tool that prints per-process memtrack counters; statically
// links libc++fs for the /proc directory scan.
cc_binary {
    name: "memtrack_test",
    srcs: ["memtrack_test.cpp"],
    static_libs: ["libc++fs"],
    shared_libs: [
        "libbase",
        "libmemtrack",
    ],
    cflags: [
        "-Wall",
        "-Werror",
    ],
}

View File

@ -1 +0,0 @@
ccross@google.com

View File

@ -1,136 +0,0 @@
/*
* Copyright (C) 2013 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef _LIBMEMTRACK_MEMTRACK_H_
#define _LIBMEMTRACK_MEMTRACK_H_
#include <sys/types.h>
#include <stddef.h>
#ifdef __cplusplus
extern "C" {
#endif
/**
* struct memtrack_proc
*
* an opaque handle to the memory stats on a process.
* Created with memtrack_proc_new, destroyed by
* memtrack_proc_destroy. Can be reused multiple times with
* memtrack_proc_get.
*/
struct memtrack_proc;
/**
* memtrack_proc_new
*
* Return a new handle to hold process memory stats.
*
* Returns NULL on error.
*/
struct memtrack_proc *memtrack_proc_new(void);
/**
* memtrack_proc_destroy
*
* Free all memory associated with a process memory stats handle.
*/
void memtrack_proc_destroy(struct memtrack_proc *p);
/**
* memtrack_proc_get
*
* Fill a process memory stats handle with data about the given pid. Can be
* called on a handle that was just allocated with memtrack_proc_new,
* or on a handle that has been previously passed to memtrack_proc_get
* to replace the data with new data on the same or another process. It is
* expected that the second call on the same handle should not require
* allocating any new memory.
*
* Returns 0 on success, -errno on error.
*/
int memtrack_proc_get(struct memtrack_proc *p, pid_t pid);
/**
* memtrack_proc_graphics_total
*
* Return total amount of memory that has been allocated for use as window
* buffers. Does not differentiate between memory that has already been
* accounted for by reading /proc/pid/smaps and memory that has not been
* accounted for.
*
* Returns non-negative size in bytes on success, -errno on error.
*/
ssize_t memtrack_proc_graphics_total(struct memtrack_proc *p);
/**
* memtrack_proc_graphics_pss
*
* Return total amount of memory that has been allocated for use as window
* buffers, but has not already been accounted for by reading /proc/pid/smaps.
* Memory that is shared across processes may already be divided by the
* number of processes that share it (preferred), or may be charged in full to
* every process that shares it, depending on the capabilities of the driver.
*
* Returns non-negative size in bytes on success, -errno on error.
*/
ssize_t memtrack_proc_graphics_pss(struct memtrack_proc *p);
/**
* memtrack_proc_gl_total
*
* Same as memtrack_proc_graphics_total, but counts GL memory (which
* should not overlap with graphics memory) instead of graphics memory.
*
* Returns non-negative size in bytes on success, -errno on error.
*/
ssize_t memtrack_proc_gl_total(struct memtrack_proc *p);
/**
 * memtrack_proc_gl_pss
 *
 * Same as memtrack_proc_graphics_pss, but counts GL memory (which
 * should not overlap with graphics memory) instead of graphics memory.
 *
 * Returns non-negative size in bytes on success, -errno on error.
 */
ssize_t memtrack_proc_gl_pss(struct memtrack_proc *p);
/**
* memtrack_proc_other_total
*
* Same as memtrack_proc_graphics_total, but counts miscellaneous memory
* not tracked by gl or graphics calls above.
*
* Returns non-negative size in bytes on success, -errno on error.
*/
ssize_t memtrack_proc_other_total(struct memtrack_proc *p);
/**
 * memtrack_proc_other_pss
 *
 * Same as memtrack_proc_graphics_pss, but counts miscellaneous memory
 * not tracked by gl or graphics calls above.
 *
 * Returns non-negative size in bytes on success, -errno on error.
 */
ssize_t memtrack_proc_other_pss(struct memtrack_proc *p);
#ifdef __cplusplus
}
#endif
#endif

View File

@ -1,173 +0,0 @@
/*
* Copyright (C) 2013 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#define LOG_TAG "memtrack"
#include <android/hardware/memtrack/1.0/IMemtrack.h>
#include <memtrack/memtrack.h>
#include <errno.h>
#include <malloc.h>
#include <vector>
#include <string.h>
#include <mutex>
#include <log/log.h>
using android::hardware::memtrack::V1_0::IMemtrack;
using android::hardware::memtrack::V1_0::MemtrackType;
using android::hardware::memtrack::V1_0::MemtrackRecord;
using android::hardware::memtrack::V1_0::MemtrackFlag;
using android::hardware::memtrack::V1_0::MemtrackStatus;
using android::hardware::hidl_vec;
using android::hardware::Return;
// Records reported by the HAL for one MemtrackType.
struct memtrack_proc_type {
    MemtrackType type;
    std::vector<MemtrackRecord> records;
};

// The opaque handle exposed through memtrack.h: cached per-type records for
// one process, refreshed by memtrack_proc_get().
struct memtrack_proc {
    pid_t pid;
    memtrack_proc_type types[static_cast<int>(MemtrackType::NUM_TYPES)];
};
//TODO(b/31632518)
// Resolve the memtrack HAL service once and cache it for the process
// lifetime; logs (every call) when the service is unavailable.
static android::sp<IMemtrack> get_instance() {
    static android::sp<IMemtrack> service = IMemtrack::getService();
    if (service == nullptr) {
        ALOGE("Couldn't load memtrack module");
    }
    return service;
}
// Allocate an empty stats handle; the caller owns it and must release it
// with memtrack_proc_destroy().
memtrack_proc* memtrack_proc_new(void) {
    return new memtrack_proc();
}

// Release a handle obtained from memtrack_proc_new().
void memtrack_proc_destroy(memtrack_proc* p) {
    delete p;
}
// Query the HAL for one MemtrackType of one pid and copy the returned
// records into *t. Returns 0 on success, -1 if the service is unavailable,
// the transaction fails, or the HAL reports a non-SUCCESS status.
static int memtrack_proc_get_type(memtrack_proc_type *t,
                                  pid_t pid, MemtrackType type)
{
    int err = 0;
    android::sp<IMemtrack> memtrack = get_instance();
    if (memtrack == nullptr) {
        return -1;
    }

    Return<void> ret = memtrack->getMemory(pid, type,
        [&t, &err](MemtrackStatus status, hidl_vec<MemtrackRecord> records) {
            if (status != MemtrackStatus::SUCCESS) {
                err = -1;
                t->records.resize(0);
                // Bug fix: the original fell through here and copied the
                // records of a failed call over the just-cleared vector.
                return;
            }
            t->records.resize(records.size());
            for (size_t i = 0; i < records.size(); i++) {
                t->records[i].sizeInBytes = records[i].sizeInBytes;
                t->records[i].flags = records[i].flags;
            }
        });
    return ret.isOk() ? err : -1;
}
/* TODO: sanity checks on return values from HALs:
 *   make sure no records have invalid flags set
 *    - unknown flags
 *    - too many flags of a single category
 *    - missing ACCOUNTED/UNACCOUNTED
 *   make sure there are not overlapping SHARED and SHARED_PSS records
 */
static int memtrack_proc_sanity_check(memtrack_proc* /*p*/)
{
    // Validation is not implemented yet; always reports success.
    return 0;
}
// Refresh *p with the memtrack counters of `pid`: query the HAL once per
// MemtrackType, stopping at the first failure. Returns 0 on success,
// -EINVAL for a null handle, or the first per-type error.
int memtrack_proc_get(memtrack_proc *p, pid_t pid)
{
    if (p == nullptr) {
        return -EINVAL;
    }

    p->pid = pid;
    for (uint32_t i = 0; i < (uint32_t)MemtrackType::NUM_TYPES; i++) {
        int err = memtrack_proc_get_type(&p->types[i], pid, (MemtrackType)i);
        if (err != 0) {
            return err;
        }
    }

    return memtrack_proc_sanity_check(p);
}
// Sum sizeInBytes over every record of the given types whose flags contain
// all bits in `flags` (flags == 0 matches everything).
static ssize_t memtrack_proc_sum(memtrack_proc *p,
        const std::vector<MemtrackType>& types, uint32_t flags)
{
    ssize_t sum = 0;

    for (MemtrackType type : types) {
        // Const references: the original copied the whole per-type struct
        // and then its record vector again on every iteration.
        const std::vector<MemtrackRecord>& records =
                p->types[static_cast<int>(type)].records;
        for (const MemtrackRecord& record : records) {
            if ((record.flags & flags) == flags) {
                sum += record.sizeInBytes;
            }
        }
    }

    return sum;
}
// All GRAPHICS records, regardless of accounting flags.
ssize_t memtrack_proc_graphics_total(memtrack_proc *p) {
    return memtrack_proc_sum(p, {MemtrackType::GRAPHICS}, 0);
}

// GRAPHICS records the HAL marked as not already counted in smaps.
ssize_t memtrack_proc_graphics_pss(memtrack_proc *p) {
    return memtrack_proc_sum(p, {MemtrackType::GRAPHICS},
                             static_cast<uint32_t>(MemtrackFlag::SMAPS_UNACCOUNTED));
}

// All GL records, regardless of accounting flags.
ssize_t memtrack_proc_gl_total(memtrack_proc *p) {
    return memtrack_proc_sum(p, {MemtrackType::GL}, 0);
}

// GL records the HAL marked as not already counted in smaps.
ssize_t memtrack_proc_gl_pss(memtrack_proc *p) {
    return memtrack_proc_sum(p, {MemtrackType::GL},
                             static_cast<uint32_t>(MemtrackFlag::SMAPS_UNACCOUNTED));
}

// Remaining categories (multimedia, camera, other), all flags.
ssize_t memtrack_proc_other_total(memtrack_proc *p) {
    return memtrack_proc_sum(p, {MemtrackType::MULTIMEDIA, MemtrackType::CAMERA,
                                 MemtrackType::OTHER}, 0);
}

// Remaining categories, only records not already counted in smaps.
ssize_t memtrack_proc_other_pss(memtrack_proc *p) {
    return memtrack_proc_sum(p, {MemtrackType::MULTIMEDIA, MemtrackType::CAMERA,
                                 MemtrackType::OTHER},
                             static_cast<uint32_t>(MemtrackFlag::SMAPS_UNACCOUNTED));
}

View File

@ -1,97 +0,0 @@
/*
* Copyright (C) 2013 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>
#include <filesystem>
#include <vector>
#include <android-base/file.h>
#include <android-base/parseint.h>
#include <android-base/stringprintf.h>
#include <memtrack/memtrack.h>
#define DIV_ROUND_UP(x, y) (((x) + (y)-1) / (y))
// Read /proc/<pid>/cmdline into *name; on failure report to stderr and
// substitute a placeholder.
static void getprocname(pid_t pid, std::string* name) {
    const std::string path = ::android::base::StringPrintf("/proc/%d/cmdline", pid);
    if (::android::base::ReadFileToString(path, name)) {
        return;
    }
    fprintf(stderr, "Failed to read cmdline from: %s\n", path.c_str());
    *name = "<unknown>";
}
// Scan /proc for numeric entries (pids), query the memtrack HAL for each,
// and print one line per process that has any non-zero counters (in KiB).
int main(int /* argc */, char** /* argv */) {
    // Initialized to 0: the original left `ret` uninitialized and returned
    // it, which is undefined behavior if the pid loop body never executes.
    int ret = 0;
    struct memtrack_proc* p;
    std::vector<pid_t> pids;

    p = memtrack_proc_new();
    if (p == nullptr) {
        fprintf(stderr, "failed to create memtrack process handle\n");
        exit(EXIT_FAILURE);
    }

    // Every /proc subdirectory with a purely numeric name is a pid.
    for (auto& de : std::filesystem::directory_iterator("/proc")) {
        if (!std::filesystem::is_directory(de.status())) {
            continue;
        }

        pid_t pid;
        if (!::android::base::ParseInt(de.path().filename().string(), &pid)) {
            continue;
        }
        pids.emplace_back(pid);
    }

    for (auto& pid : pids) {
        size_t v1;
        size_t v2;
        size_t v3;
        size_t v4;
        size_t v5;
        size_t v6;
        std::string cmdline;

        getprocname(pid, &cmdline);

        ret = memtrack_proc_get(p, pid);
        if (ret) {
            // Processes can exit mid-scan; report and keep going.
            fprintf(stderr, "failed to get memory info for pid %d: %s (%d)\n", pid, strerror(-ret),
                    ret);
            continue;
        }

        // Convert byte counts to KiB, rounding up.
        v1 = DIV_ROUND_UP(memtrack_proc_graphics_total(p), 1024);
        v2 = DIV_ROUND_UP(memtrack_proc_graphics_pss(p), 1024);
        v3 = DIV_ROUND_UP(memtrack_proc_gl_total(p), 1024);
        v4 = DIV_ROUND_UP(memtrack_proc_gl_pss(p), 1024);
        v5 = DIV_ROUND_UP(memtrack_proc_other_total(p), 1024);
        v6 = DIV_ROUND_UP(memtrack_proc_other_pss(p), 1024);

        // Skip processes with nothing to report.
        if (v1 | v2 | v3 | v4 | v5 | v6) {
            fprintf(stdout, "%5d %6zu %6zu %6zu %6zu %6zu %6zu %s\n", pid, v1, v2, v3, v4, v5, v6,
                    cmdline.c_str());
        }
    }

    memtrack_proc_destroy(p);
    // Note: preserves the original convention of returning the status of the
    // last memtrack_proc_get() call when the scan was non-empty.
    return ret;
}

View File

@ -1 +0,0 @@
../.clang-format-2

View File

@ -1,468 +0,0 @@
/*
* Copyright (C) 2016 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// Header page:
//
// For minimum allocation size (8 bytes), bitmap can store used allocations for
// up to 4032*8*8=258048, which is 256KiB minus the header page
#include <assert.h>
#include <stdlib.h>
#include <sys/cdefs.h>
#include <sys/mman.h>
#include <sys/prctl.h>
#include <cmath>
#include <cstddef>
#include <cstdint>
#include <memory>
#include <mutex>
#include "android-base/macros.h"
#include "Allocator.h"
#include "LinkedList.h"
namespace android {
// runtime interfaces used:
// abort
// assert - fprintf + mmap
// mmap
// munmap
// prctl
// Compile-time floor(log2(value)); `exponent` accumulates the answer across
// the tail recursion (single-return form keeps this C++11-constexpr legal).
constexpr size_t const_log2(size_t value, size_t exponent = 0) {
    return value <= 1 ? exponent : const_log2(value >> 1, exponent + 1);
}
// Integer ceiling division: smallest k with k * denominator >= numerator.
constexpr unsigned int div_round_up(unsigned int numerator, unsigned int denominator) {
    return (numerator + denominator - 1) / denominator;
}
// Chunk geometry: a chunk is a 256KiB aligned slab whose first page holds
// the Chunk header; the remaining kUsableChunkSize bytes serve allocations.
// Buckets are power-of-two sizes from 8 bytes up to kChunkSize/4.
static constexpr size_t kPageSize = 4096;
static constexpr size_t kChunkSize = 256 * 1024;
static constexpr size_t kUsableChunkSize = kChunkSize - kPageSize;
static constexpr size_t kMaxBucketAllocationSize = kChunkSize / 4;
static constexpr size_t kMinBucketAllocationSize = 8;
static constexpr unsigned int kNumBuckets =
        const_log2(kMaxBucketAllocationSize) - const_log2(kMinBucketAllocationSize) + 1;
static constexpr unsigned int kUsablePagesPerChunk = kUsableChunkSize / kPageSize;

// Count of HeapImpl instances ever created (incremented in
// HeapImpl::operator new; not decremented on delete).
std::atomic<int> heap_count;

class Chunk;
// Allocator core: per-bucket lists of chunks that still have free slots and
// of full chunks, plus a linked list of large mmap-backed allocations.
// Public entry points serialize on the single mutex m_.
class HeapImpl {
  public:
    HeapImpl();
    ~HeapImpl();
    // HeapImpl itself is placed with mmap, not the normal heap (see the
    // operator new definition below).
    void* operator new(std::size_t count) noexcept;
    void operator delete(void* ptr);

    void* Alloc(size_t size);
    void Free(void* ptr);
    bool Empty();

    // Called back by Chunk when it fills up / regains a free slot.
    void MoveToFullList(Chunk* chunk, int bucket_);
    void MoveToFreeList(Chunk* chunk, int bucket_);

  private:
    DISALLOW_COPY_AND_ASSIGN(HeapImpl);

    LinkedList<Chunk*> free_chunks_[kNumBuckets];
    LinkedList<Chunk*> full_chunks_[kNumBuckets];

    void MoveToList(Chunk* chunk, LinkedList<Chunk*>* head);
    // Large-allocation path (size > kMaxBucketAllocationSize): direct mmap.
    void* MapAlloc(size_t size);
    void MapFree(void* ptr);
    // Unlocked variants; the caller must hold m_.
    void* AllocLocked(size_t size);
    void FreeLocked(void* ptr);

    // Bookkeeping node for one mmap'd allocation, kept on a singly linked
    // list so MapFree can recover the region's size.
    struct MapAllocation {
        void* ptr;
        size_t size;
        MapAllocation* next;
    };
    MapAllocation* map_allocation_list_;
    std::mutex m_;
};
// Integer log 2, rounds down: index of the highest set bit, computed as the
// total bit width minus the leading-zero count minus one.
// NOTE(review): __builtin_clzll(0) is undefined; callers must pass n > 0.
static inline unsigned int log2(size_t n) {
    const unsigned int total_bits = 8 * sizeof(unsigned long long);
    return total_bits - static_cast<unsigned int>(__builtin_clzll(n)) - 1;
}
// Map an allocation size to its bucket index: bucket 0 holds
// kMinBucketAllocationSize-byte slots, bucket k holds (8 << k)-byte slots.
static inline unsigned int size_to_bucket(size_t size) {
    // Bug fix: the original returned kMinBucketAllocationSize (the *size*, 8)
    // for tiny requests, which indexed bucket 8 (2KiB slots) instead of
    // bucket 0. Requests at or below the minimum belong in bucket 0.
    if (size <= kMinBucketAllocationSize) return 0;
    return log2(size - 1) + 1 - const_log2(kMinBucketAllocationSize);
}
// Inverse of size_to_bucket: the slot size served by a bucket (8 << bucket).
static inline size_t bucket_to_size(unsigned int bucket) {
    return kMinBucketAllocationSize << bucket;
}
// mmap an anonymous region of `size` bytes whose base is aligned to `align`.
// Over-allocates by align - kPageSize, then munmaps the misaligned head and
// the excess tail so exactly `size` bytes remain mapped. Returns nullptr on
// arithmetic overflow or mmap failure.
static void* MapAligned(size_t size, size_t align) {
    const int prot = PROT_READ | PROT_WRITE;
    const int flags = MAP_ANONYMOUS | MAP_PRIVATE;

    // Round the request up to whole pages.
    size = (size + kPageSize - 1) & ~(kPageSize - 1);

    // Over-allocate enough to align
    size_t map_size = size + align - kPageSize;
    if (map_size < size) {
        // size + align wrapped around.
        return nullptr;
    }

    void* ptr = mmap(NULL, map_size, prot, flags, -1, 0);
    if (ptr == MAP_FAILED) {
        return nullptr;
    }

    size_t aligned_size = map_size;
    void* aligned_ptr = ptr;

    // Locate the first aligned address inside the over-sized mapping.
    std::align(align, size, aligned_ptr, aligned_size);

    // Trim beginning
    if (aligned_ptr != ptr) {
        ptrdiff_t extra = reinterpret_cast<uintptr_t>(aligned_ptr) - reinterpret_cast<uintptr_t>(ptr);
        munmap(ptr, extra);
        map_size -= extra;
        ptr = aligned_ptr;
    }

    // Trim end
    if (map_size != size) {
        assert(map_size > size);
        assert(ptr != NULL);
        munmap(reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(ptr) + size), map_size - size);
    }

#if defined(PR_SET_VMA)
    // Name the mapping so it is identifiable in /proc/<pid>/maps.
    prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, reinterpret_cast<uintptr_t>(ptr), size,
          "leak_detector_malloc");
#endif

    return ptr;
}
// One kChunkSize-aligned slab serving equal-sized allocations for a single
// bucket. The header (this object) occupies the slab's first page; user data
// starts at data_. Chunk alignment lets ptr_to_chunk() recover the header
// from any interior pointer by masking the low bits.
class Chunk {
  public:
    static void* operator new(std::size_t count) noexcept;
    static void operator delete(void* ptr);
    Chunk(HeapImpl* heap, int bucket);
    ~Chunk() {}

    void* Alloc();
    void Free(void* ptr);
    void Purge();
    bool Empty();

    // Mask an interior pointer down to the enclosing chunk's base address.
    static Chunk* ptr_to_chunk(void* ptr) {
        return reinterpret_cast<Chunk*>(reinterpret_cast<uintptr_t>(ptr) & ~(kChunkSize - 1));
    }
    // True when ptr points *inside* a chunk (not chunk-aligned); exactly
    // chunk-aligned pointers are large MapAlloc regions instead.
    static bool is_chunk(void* ptr) {
        return (reinterpret_cast<uintptr_t>(ptr) & (kChunkSize - 1)) != 0;
    }

    unsigned int free_count() { return free_count_; }
    HeapImpl* heap() { return heap_; }
    LinkedList<Chunk*> node_;  // linked list sorted by minimum free count

  private:
    DISALLOW_COPY_AND_ASSIGN(Chunk);
    HeapImpl* heap_;
    unsigned int bucket_;
    unsigned int allocation_size_;    // size of allocations in chunk, min 8 bytes
    unsigned int max_allocations_;    // maximum number of allocations in the chunk
    unsigned int first_free_bitmap_;  // index into bitmap for first non-full entry
    unsigned int free_count_;         // number of available allocations
    unsigned int frees_since_purge_;  // number of calls to Free since last Purge

    // bitmap of pages that have been dirtied
    uint32_t dirty_pages_[div_round_up(kUsablePagesPerChunk, 32)];

    // bitmap of free allocations.
    uint32_t free_bitmap_[kUsableChunkSize / kMinBucketAllocationSize / 32];

    // Flexible data area filling the rest of the slab.
    char data_[0];

    // Translate between a slot index n and the user pointer within data_.
    unsigned int ptr_to_n(void* ptr) {
        ptrdiff_t offset = reinterpret_cast<uintptr_t>(ptr) - reinterpret_cast<uintptr_t>(data_);
        return offset / allocation_size_;
    }
    void* n_to_ptr(unsigned int n) { return data_ + n * allocation_size_; }
};
static_assert(sizeof(Chunk) <= kPageSize, "header must fit in page");
// Override new operator on chunk to use mmap to allocate kChunkSize,
// aligned to kChunkSize so ptr_to_chunk() masking works. Aborts on OOM
// (this allocator cannot throw).
void* Chunk::operator new(std::size_t count __attribute__((unused))) noexcept {
    assert(count == sizeof(Chunk));
    void* mem = MapAligned(kChunkSize, kChunkSize);
    if (!mem) {
        abort();  // throw std::bad_alloc;
    }

    return mem;
}

// Matching delete: the chunk was mmap'd, so munmap the whole slab.
void Chunk::operator delete(void* ptr) {
    assert(reinterpret_cast<Chunk*>(ptr) == ptr_to_chunk(ptr));
    munmap(ptr, kChunkSize);
}
// Initialize an empty chunk for `bucket`: every slot free, no dirty pages.
// free_bitmap_ is set to all-ones even past max_allocations_; Alloc()
// guards the tail with its n < max_allocations_ assert rather than trimming
// the extra bits here.
Chunk::Chunk(HeapImpl* heap, int bucket)
    : node_(this),
      heap_(heap),
      bucket_(bucket),
      allocation_size_(bucket_to_size(bucket)),
      max_allocations_(kUsableChunkSize / allocation_size_),
      first_free_bitmap_(0),
      free_count_(max_allocations_),
      frees_since_purge_(0) {
    memset(dirty_pages_, 0, sizeof(dirty_pages_));
    memset(free_bitmap_, 0xff, sizeof(free_bitmap_));
}
// A chunk is empty when every slot has been returned.
bool Chunk::Empty() {
    return free_count_ == max_allocations_;
}

// Claim the lowest free slot: scan free_bitmap_ from first_free_bitmap_ for
// a word with a set bit, clear that bit, mark the containing page dirty,
// and hand the chunk to the heap's full list when it just filled up.
// Precondition: at least one free slot (free_count_ > 0).
void* Chunk::Alloc() {
    assert(free_count_ > 0);

    unsigned int i = first_free_bitmap_;
    while (free_bitmap_[i] == 0) i++;
    assert(i < arraysize(free_bitmap_));
    // __builtin_ffs returns the 1-based index of the lowest set bit.
    unsigned int bit = __builtin_ffs(free_bitmap_[i]) - 1;
    assert(free_bitmap_[i] & (1U << bit));
    free_bitmap_[i] &= ~(1U << bit);
    unsigned int n = i * 32 + bit;
    assert(n < max_allocations_);

    // Record which page of the slab this slot touches.
    unsigned int page = n * allocation_size_ / kPageSize;
    assert(page / 32 < arraysize(dirty_pages_));
    dirty_pages_[page / 32] |= 1U << (page % 32);

    free_count_--;
    if (free_count_ == 0) {
        heap_->MoveToFullList(this, bucket_);
    }

    return n_to_ptr(n);
}
// Return slot `ptr` to this chunk: set its free bit, pull
// first_free_bitmap_ back if an earlier word regained a bit, and notify the
// heap when the chunk transitions from full back to having a free slot.
// Triggers Purge() periodically once enough bytes have been freed.
void Chunk::Free(void* ptr) {
    assert(is_chunk(ptr));
    assert(ptr_to_chunk(ptr) == this);

    unsigned int n = ptr_to_n(ptr);
    unsigned int i = n / 32;
    unsigned int bit = n % 32;

    assert(i < arraysize(free_bitmap_));
    // Double-free check: the bit must currently be cleared (allocated).
    assert(!(free_bitmap_[i] & (1U << bit)));
    free_bitmap_[i] |= 1U << bit;
    free_count_++;

    if (i < first_free_bitmap_) {
        first_free_bitmap_ = i;
    }

    if (free_count_ == 1) {
        heap_->MoveToFreeList(this, bucket_);
    } else {
        // TODO(ccross): move down free list if necessary
    }

    if (frees_since_purge_++ * allocation_size_ > 16 * kPageSize) {
        Purge();
    }
}

// Placeholder: resets the purge counter but does not yet return clean pages
// to the kernel (the per-page accounting below is still commented out).
void Chunk::Purge() {
    frees_since_purge_ = 0;

    // unsigned int allocsPerPage = kPageSize / allocation_size_;
}
// Override new operator on HeapImpl to use mmap to allocate a page; aborts
// on OOM and bumps the global heap_count diagnostic.
void* HeapImpl::operator new(std::size_t count __attribute__((unused))) noexcept {
    assert(count == sizeof(HeapImpl));

    void* mem = MapAligned(kPageSize, kPageSize);
    if (!mem) {
        abort();  // throw std::bad_alloc;
    }

    heap_count++;
    return mem;
}

void HeapImpl::operator delete(void* ptr) {
    // NOTE(review): heap_count is not decremented here -- confirm that is
    // intentional (the counter tracks heaps ever created).
    munmap(ptr, kPageSize);
}

// All bucket lists start empty; chunks are created lazily on first Alloc.
HeapImpl::HeapImpl() : free_chunks_(), full_chunks_(), map_allocation_list_(NULL) {}
bool HeapImpl::Empty() {
for (unsigned int i = 0; i < kNumBuckets; i++) {
for (LinkedList<Chunk*>* it = free_chunks_[i].next(); it->data() != NULL; it = it->next()) {
if (!it->data()->Empty()) {
return false;
}
}
for (LinkedList<Chunk*>* it = full_chunks_[i].next(); it->data() != NULL; it = it->next()) {
if (!it->data()->Empty()) {
return false;
}
}
}
return true;
}
// Unlink and munmap every chunk on both lists for every bucket.
// NOTE(review): map_allocation_list_ is not walked here, so outstanding
// large MapAlloc regions would leak -- confirm callers free them first.
HeapImpl::~HeapImpl() {
    for (unsigned int i = 0; i < kNumBuckets; i++) {
        while (!free_chunks_[i].empty()) {
            Chunk* chunk = free_chunks_[i].next()->data();
            chunk->node_.remove();
            delete chunk;
        }
        while (!full_chunks_[i].empty()) {
            Chunk* chunk = full_chunks_[i].next()->data();
            chunk->node_.remove();
            delete chunk;
        }
    }
}
// Public entry point: serialize on m_ and delegate.
void* HeapImpl::Alloc(size_t size) {
    std::lock_guard<std::mutex> lk(m_);
    return AllocLocked(size);
}

// Requires m_. Large requests go straight to mmap; small requests come from
// the first chunk on the bucket's free list (the fullest one, per
// MoveToList's ordering), creating a new chunk if the list is empty.
void* HeapImpl::AllocLocked(size_t size) {
    if (size > kMaxBucketAllocationSize) {
        return MapAlloc(size);
    }
    int bucket = size_to_bucket(size);
    if (free_chunks_[bucket].empty()) {
        Chunk* chunk = new Chunk(this, bucket);
        free_chunks_[bucket].insert(chunk->node_);
    }
    return free_chunks_[bucket].next()->data()->Alloc();
}

// Public entry point: serialize on m_ and delegate.
void HeapImpl::Free(void* ptr) {
    std::lock_guard<std::mutex> lk(m_);
    FreeLocked(ptr);
}

// Requires m_. Chunk-aligned pointers belong to the MapAlloc path; interior
// pointers are masked back to their chunk and returned to it.
void HeapImpl::FreeLocked(void* ptr) {
    if (!Chunk::is_chunk(ptr)) {
        HeapImpl::MapFree(ptr);
    } else {
        Chunk* chunk = Chunk::ptr_to_chunk(ptr);
        assert(chunk->heap() == this);
        chunk->Free(ptr);
    }
}
// Requires m_. Large-allocation path: page-align the request, allocate a
// small tracking node through the bucket allocator, then map a
// kChunkSize-aligned region (so FreeLocked can tell it apart from chunk
// interiors). Aborts on mapping failure (OOM policy). The node is pushed on
// map_allocation_list_ so MapFree can recover the region's size.
void* HeapImpl::MapAlloc(size_t size) {
    size = (size + kPageSize - 1) & ~(kPageSize - 1);

    MapAllocation* allocation = reinterpret_cast<MapAllocation*>(AllocLocked(sizeof(MapAllocation)));
    void* ptr = MapAligned(size, kChunkSize);
    if (!ptr) {
        FreeLocked(allocation);
        abort();  // throw std::bad_alloc;
    }

    allocation->ptr = ptr;
    allocation->size = size;
    allocation->next = map_allocation_list_;
    map_allocation_list_ = allocation;

    return ptr;
}
void HeapImpl::MapFree(void* ptr) {
MapAllocation** allocation = &map_allocation_list_;
while (*allocation && (*allocation)->ptr != ptr) allocation = &(*allocation)->next;
assert(*allocation != nullptr);
munmap((*allocation)->ptr, (*allocation)->size);
FreeLocked(*allocation);
*allocation = (*allocation)->next;
}
// Relink a chunk onto the bucket's free list (it regained a slot).
void HeapImpl::MoveToFreeList(Chunk* chunk, int bucket) {
    MoveToList(chunk, &free_chunks_[bucket]);
}

// Relink a chunk onto the bucket's full list (its last slot was taken).
void HeapImpl::MoveToFullList(Chunk* chunk, int bucket) {
    MoveToList(chunk, &full_chunks_[bucket]);
}

// Relink `chunk` onto the list at `head`, keeping the list ordered by
// ascending free_count() so AllocLocked always takes from the fullest
// chunk (densest packing first).
void HeapImpl::MoveToList(Chunk* chunk, LinkedList<Chunk*>* head) {
    // Remove from old list
    chunk->node_.remove();

    LinkedList<Chunk*>* node = head;
    // Insert into new list, sorted by lowest free count
    while (node->next() != head && node->data() != nullptr &&
           node->data()->free_count() < chunk->free_count())
        node = node->next();

    node->insert(chunk->node_);
}
// Heap is a copyable, value-semantic handle around a HeapImpl.  Only the
// originally constructed Heap owns its impl; copies share it without
// ownership (see the copy constructor in the header).
Heap::Heap() {
  // HeapImpl overloads operator new so that its own storage comes from
  // mmap rather than the global heap.  The result cannot be held in a
  // shared_ptr: shared_ptr itself needs to allocate, and Allocator<T>
  // is still in the middle of being constructed at this point.
  impl_ = new HeapImpl();
  owns_impl_ = true;
}
Heap::~Heap() {
  // Non-owning copies must not tear down the shared implementation.
  if (!owns_impl_) {
    return;
  }
  delete impl_;
}
// The remaining members simply forward to the shared implementation.
void* Heap::allocate(size_t size) {
  return impl_->Alloc(size);
}
void Heap::deallocate(void* ptr) {
  impl_->Free(ptr);
}
// Static variant used by deleters that captured the impl pointer.
void Heap::deallocate(HeapImpl* impl, void* ptr) {
  impl->Free(ptr);
}
// True when the implementation currently holds no live allocations.
bool Heap::empty() {
  return impl_->Empty();
}
} // namespace android

View File

@ -1,218 +0,0 @@
/*
* Copyright (C) 2016 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef LIBMEMUNREACHABLE_ALLOCATOR_H_
#define LIBMEMUNREACHABLE_ALLOCATOR_H_
#include <atomic>
#include <cstddef>
#include <functional>
#include <list>
#include <map>
#include <memory>
#include <set>
#include <unordered_map>
#include <unordered_set>
#include <vector>
namespace android {
extern std::atomic<int> heap_count;
class HeapImpl;
template <typename T>
class Allocator;
// Non-templated class that wraps HeapImpl to keep the
// implementation out of the header file.
class Heap {
 public:
  Heap();
  ~Heap();
  // Copy constructor that does not take ownership of impl_
  Heap(const Heap& other) : impl_(other.impl_), owns_impl_(false) {}
  // Assignment disabled
  Heap& operator=(const Heap&) = delete;
  // Allocate size bytes
  void* allocate(size_t size);
  // Deallocate allocation returned by allocate
  void deallocate(void*);
  // True when the underlying HeapImpl holds no live allocations.
  bool empty();
  // Deallocate against an explicit impl; used by deleters that captured
  // the impl pointer rather than a Heap handle.
  static void deallocate(HeapImpl* impl, void* ptr);
  // Allocate a class of type T
  template <class T>
  T* allocate() {
    return reinterpret_cast<T*>(allocate(sizeof(T)));
  }
  // Comparators, copied objects will be equal
  bool operator==(const Heap& other) const { return impl_ == other.impl_; }
  bool operator!=(const Heap& other) const { return !(*this == other); }
  // std::unique_ptr wrapper that allocates using allocate and deletes using
  // deallocate
  template <class T>
  using unique_ptr = std::unique_ptr<T, std::function<void(void*)>>;
  template <class T, class... Args>
  unique_ptr<T> make_unique(Args&&... args) {
    // The deleter captures the raw impl (not `this`) so it stays valid
    // even if this Heap handle is destroyed first.
    HeapImpl* impl = impl_;
    return unique_ptr<T>(new (allocate<T>()) T(std::forward<Args>(args)...), [impl](void* ptr) {
      reinterpret_cast<T*>(ptr)->~T();
      deallocate(impl, ptr);
    });
  }
  // std::shared_ptr wrapper that allocates using allocate and deletes
  // using deallocate (defined out of line, after Allocator<T>).
  template <class T>
  using shared_ptr = std::shared_ptr<T>;
  template <class T, class... Args>
  shared_ptr<T> make_shared(Args&&... args);
 protected:
  // Shared implementation; owned only when owns_impl_ is true.
  HeapImpl* impl_;
  bool owns_impl_;
};
// STLAllocator implements the std allocator interface on top of a Heap.
template <typename T>
class STLAllocator {
 public:
  using value_type = T;
  ~STLAllocator() {}
  // Construct an STLAllocator on top of a Heap
  STLAllocator(const Heap& heap)
      :  // NOLINT, implicit
        heap_(heap) {}
  // Rebind an STLAllocator from an another STLAllocator
  template <typename U>
  STLAllocator(const STLAllocator<U>& other)
      :  // NOLINT, implicit
        heap_(other.heap_) {}
  STLAllocator(const STLAllocator&) = default;
  STLAllocator<T>& operator=(const STLAllocator<T>&) = default;
  T* allocate(std::size_t n) { return reinterpret_cast<T*>(heap_.allocate(n * sizeof(T))); }
  void deallocate(T* ptr, std::size_t) { heap_.deallocate(ptr); }
  // Allocators compare equal iff they draw from the same Heap.
  template <typename U>
  bool operator==(const STLAllocator<U>& other) const {
    return heap_ == other.heap_;
  }
  template <typename U>
  inline bool operator!=(const STLAllocator<U>& other) const {
    // Fixed: previously `!(this == other)`, which compares the `this`
    // pointer against an allocator reference — ill-formed when the
    // template is instantiated.  The Allocator requirements define
    // operator!= as the negation of operator==.
    return !(*this == other);
  }
  template <typename U>
  friend class STLAllocator;
 protected:
  Heap heap_;
};
// Allocator extends STLAllocator with some convenience methods for allocating
// a single object and for constructing unique_ptr and shared_ptr objects with
// appropriate deleters.
template <class T>
class Allocator : public STLAllocator<T> {
 public:
  ~Allocator() {}
  // Implicitly convert a Heap into an allocator on that heap.
  Allocator(const Heap& other)
      :  // NOLINT, implicit
        STLAllocator<T>(other) {}
  // Rebind from an allocator of a different element type (same heap).
  template <typename U>
  Allocator(const STLAllocator<U>& other)
      :  // NOLINT, implicit
        STLAllocator<T>(other) {}
  Allocator(const Allocator&) = default;
  Allocator<T>& operator=(const Allocator<T>&) = default;
  using STLAllocator<T>::allocate;
  using STLAllocator<T>::deallocate;
  using STLAllocator<T>::heap_;
  // Single-object convenience overloads (raw, uninitialized memory).
  T* allocate() { return STLAllocator<T>::allocate(1); }
  void deallocate(void* ptr) { heap_.deallocate(ptr); }
  // Forward smart-pointer construction to the underlying Heap so the
  // deleters release memory back to the same heap.
  using shared_ptr = Heap::shared_ptr<T>;
  template <class... Args>
  shared_ptr make_shared(Args&&... args) {
    return heap_.template make_shared<T>(std::forward<Args>(args)...);
  }
  using unique_ptr = Heap::unique_ptr<T>;
  template <class... Args>
  unique_ptr make_unique(Args&&... args) {
    return heap_.template make_unique<T>(std::forward<Args>(args)...);
  }
};
// std::shared_ptr factory that allocates using allocate and deletes using
// deallocate.  Implemented outside the class definition in order to pass
// Allocator<T> to allocate_shared.
template <class T, class... Args>
inline Heap::shared_ptr<T> Heap::make_shared(Args&&... args) {
  return std::allocate_shared<T, Allocator<T>, Args...>(Allocator<T>(*this),
                                                        std::forward<Args>(args)...);
}
// Aliases for the standard containers, rebound to draw their storage
// from an android::Heap via Allocator<T>.
namespace allocator {
template <class T>
using vector = std::vector<T, Allocator<T>>;
template <class T>
using list = std::list<T, Allocator<T>>;
template <class Key, class T, class Compare = std::less<Key>>
using map = std::map<Key, T, Compare, Allocator<std::pair<const Key, T>>>;
template <class Key, class T, class Hash = std::hash<Key>, class KeyEqual = std::equal_to<Key>>
using unordered_map =
    std::unordered_map<Key, T, Hash, KeyEqual, Allocator<std::pair<const Key, T>>>;
template <class Key, class Hash = std::hash<Key>, class KeyEqual = std::equal_to<Key>>
using unordered_set = std::unordered_set<Key, Hash, KeyEqual, Allocator<Key>>;
template <class Key, class Compare = std::less<Key>>
using set = std::set<Key, Compare, Allocator<Key>>;
using string = std::basic_string<char, std::char_traits<char>, Allocator<char>>;
}
} // namespace android
#endif

View File

@ -1,120 +0,0 @@
// Common compiler flags and dependencies shared by every
// libmemunreachable target below.
cc_defaults {
    name: "libmemunreachable_defaults",
    cflags: [
        "-Wall",
        "-Wextra",
        "-Werror",
    ],
    shared_libs: [
        "libbase",
    ],
    target: {
        // On device, async-signal-safe logging; on host, plain liblog.
        android: {
            static_libs: ["libasync_safe"],
        },
        host: {
            shared_libs: ["liblog"],
        },
    },
}
// The library itself: walks a process heap and reports unreachable
// (leaked) allocations.
cc_library {
    name: "libmemunreachable",
    vendor_available: true,
    defaults: ["libmemunreachable_defaults"],
    srcs: [
        "Allocator.cpp",
        "Binder.cpp",
        "HeapWalker.cpp",
        "LeakFolding.cpp",
        "LeakPipe.cpp",
        "MemUnreachable.cpp",
        "ProcessMappings.cpp",
        "PtracerThread.cpp",
        "ThreadCapture.cpp",
    ],
    static_libs: [
        "libc_malloc_debug_backtrace",
        "libprocinfo",
    ],
    // Only need this for arm since libc++ uses its own unwind code that
    // doesn't mix with the other default unwind code.
    arch: {
        arm: {
            static_libs: ["libunwind_llvm"],
        },
    },
    export_include_dirs: ["include"],
    local_include_dirs: ["include"],
    version_script: "libmemunreachable.map",
}
// Integration test that runs against the public API of libmemunreachable
cc_test {
    name: "memunreachable_test",
    defaults: ["libmemunreachable_defaults"],
    srcs: [
        "tests/MemUnreachable_test.cpp",
    ],
    shared_libs: ["libmemunreachable"],
    test_suites: ["device-tests"],
}
// Unit tests for the internal pieces; buildable for host (minus the
// device-only sources) except on darwin.
cc_test {
    name: "memunreachable_unit_test",
    defaults: ["libmemunreachable_defaults"],
    host_supported: true,
    srcs: [
        "tests/Allocator_test.cpp",
        "tests/HeapWalker_test.cpp",
        "tests/LeakFolding_test.cpp",
    ],
    target: {
        android: {
            srcs: [
                "tests/DisableMalloc_test.cpp",
                "tests/MemUnreachable_test.cpp",
                "tests/ThreadCapture_test.cpp",
            ],
            static_libs: [
                "libmemunreachable",
                "libc_malloc_debug_backtrace",
            ],
        },
        host: {
            // Host builds compile the core sources directly with a
            // malloc-hook stub instead of linking the device library.
            srcs: [
                "Allocator.cpp",
                "HeapWalker.cpp",
                "LeakFolding.cpp",
                "tests/HostMallocStub.cpp",
            ],
        },
        darwin: {
            enabled: false,
        },
    },
    test_suites: ["device-tests"],
}
// Device test for collecting binder references; needs root.
cc_test {
    name: "memunreachable_binder_test",
    defaults: ["libmemunreachable_defaults"],
    require_root: true,
    srcs: [
        "tests/Binder_test.cpp",
    ],
    static_libs: ["libmemunreachable"],
    shared_libs: [
        "libbinder",
        "libhidlbase",
        "libutils",
    ],
    test_suites: ["device-tests"],
}

View File

@ -1,80 +0,0 @@
/*
* Copyright (C) 2017 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <sys/cdefs.h>
#include <unistd.h>
#include <functional>
#include "Binder.h"
#include "log.h"
__BEGIN_DECLS
// Weak undefined references to the symbols in libbinder and libhwbinder
// so that libmemunreachable can call them in processes that have them
// loaded without requiring libmemunreachable to have dependencies on them.
ssize_t __attribute__((weak)) getBinderKernelReferences(size_t, uintptr_t*);
ssize_t __attribute__((weak)) getHWBinderKernelReferences(size_t, uintptr_t*);
__END_DECLS
namespace android {
// Fills |refs| by repeatedly calling |fn|, growing the vector until it
// is large enough.  |fn| takes (capacity, buffer) and returns the total
// number of references available — which may exceed the capacity just
// passed — or a negative value on error.  A null |fn| (the weak symbol
// is absent in this process) counts as success with no references.
static bool BinderReferencesToVector(allocator::vector<uintptr_t>& refs,
                                     std::function<ssize_t(size_t, uintptr_t*)> fn) {
  if (fn == nullptr) {
    return true;
  }
  size_t size = refs.size();
  do {
    // Retry with the size reported by the previous call until the
    // buffer was big enough to hold everything.
    refs.resize(size);
    ssize_t ret = fn(refs.size(), refs.data());
    if (ret < 0) {
      return false;
    }
    size = ret;
  } while (size > refs.size());
  // Shrink to the number of entries actually written.
  refs.resize(size);
  return true;
}
// Collects the binder and hwbinder kernel references held by this
// process into |refs|.  Each collector is best-effort: a failure is
// logged and the other collector still runs, so the function always
// returns true.
bool BinderReferences(allocator::vector<uintptr_t>& refs) {
  refs.clear();
  allocator::vector<uintptr_t> binder_refs{refs.get_allocator()};
  // Fixed: this call previously passed |refs| instead of |binder_refs|,
  // which appended |refs| to itself and left |binder_refs| empty and
  // unused, so framework binder references were never collected.  Now
  // symmetric with the hwbinder branch below.
  if (BinderReferencesToVector(binder_refs, getBinderKernelReferences)) {
    refs.insert(refs.end(), binder_refs.begin(), binder_refs.end());
  } else {
    MEM_ALOGE("getBinderKernelReferences failed");
  }
  allocator::vector<uintptr_t> hwbinder_refs{refs.get_allocator()};
  if (BinderReferencesToVector(hwbinder_refs, getHWBinderKernelReferences)) {
    refs.insert(refs.end(), hwbinder_refs.begin(), hwbinder_refs.end());
  } else {
    MEM_ALOGE("getHWBinderKernelReferences failed");
  }
  return true;
}
} // namespace android

View File

@ -1,28 +0,0 @@
/*
* Copyright (C) 2017 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef LIBMEMUNREACHABLE_BINDER_H_
#define LIBMEMUNREACHABLE_BINDER_H_
#include "Allocator.h"
namespace android {
bool BinderReferences(allocator::vector<uintptr_t>& refs);
} // namespace android
#endif // LIBMEMUNREACHABLE_BINDER_H_

View File

@ -1,212 +0,0 @@
/*
* Copyright (C) 2016 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <errno.h>
#include <inttypes.h>
#include <sys/mman.h>
#include <unistd.h>
#include <map>
#include <utility>
#include "Allocator.h"
#include "HeapWalker.h"
#include "LeakFolding.h"
#include "ScopedSignalHandler.h"
#include "log.h"
namespace android {
// Registers a live allocation [begin, end).  Returns true if it was
// newly recorded, false if an identical or overlapping range already
// exists.  Aborts if the allocation lies outside every registered
// mapping (indicates corrupt input from the allocator introspection).
bool HeapWalker::Allocation(uintptr_t begin, uintptr_t end) {
  // Zero-sized allocations are widened to one byte so they are
  // representable in the half-open range map.
  if (end == begin) {
    end = begin + 1;
  }
  Range range{begin, end};
  if (valid_mappings_range_.end != 0 &&
      (begin < valid_mappings_range_.begin || end > valid_mappings_range_.end)) {
    MEM_LOG_ALWAYS_FATAL("allocation %p-%p is outside mapping range %p-%p",
                         reinterpret_cast<void*>(begin), reinterpret_cast<void*>(end),
                         reinterpret_cast<void*>(valid_mappings_range_.begin),
                         reinterpret_cast<void*>(valid_mappings_range_.end));
  }
  auto inserted = allocations_.insert(std::pair<Range, AllocationInfo>(range, AllocationInfo{}));
  if (inserted.second) {
    // Grow the bounding box used by WordContainsAllocationPtr to
    // cheaply reject values that cannot be heap pointers.
    valid_allocations_range_.begin = std::min(valid_allocations_range_.begin, begin);
    valid_allocations_range_.end = std::max(valid_allocations_range_.end, end);
    allocation_bytes_ += range.size();
    return true;
  } else {
    // compare_range treats overlapping ranges as equivalent, so the
    // insert failing on a non-identical range means a partial overlap.
    Range overlap = inserted.first->first;
    if (overlap != range) {
      MEM_ALOGE("range %p-%p overlaps with existing range %p-%p", reinterpret_cast<void*>(begin),
                reinterpret_cast<void*>(end), reinterpret_cast<void*>(overlap.begin),
                reinterpret_cast<void*>(overlap.end));
    }
    return false;
  }
}
// Sanitizers may consider certain memory inaccessible through certain pointers.
// With MTE this will need to use unchecked instructions or disable tag checking globally.
// Dereferences an arbitrary word-aligned address; the caller is expected
// to handle any resulting SIGSEGV/SIGBUS (see HandleSegFault).
static uintptr_t ReadWordAtAddressUnsafe(uintptr_t word_ptr)
    __attribute__((no_sanitize("address", "hwaddress"))) {
  return *reinterpret_cast<uintptr_t*>(word_ptr);
}
// Reads the word at |word_ptr| and, if its value points into a recorded
// allocation, returns that allocation's range and info.
bool HeapWalker::WordContainsAllocationPtr(uintptr_t word_ptr, Range* range, AllocationInfo** info) {
  // Publish the address being read so the fault handler can tell our
  // intentional probe apart from an unrelated crash.
  walking_ptr_ = word_ptr;
  // This access may segfault if the process under test has done something strange,
  // for example mprotect(PROT_NONE) on a native heap page. If so, it will be
  // caught and handled by mmaping a zero page over the faulting page.
  uintptr_t value = ReadWordAtAddressUnsafe(word_ptr);
  walking_ptr_ = 0;
  // Cheap bounding-box rejection before the map lookup.
  if (value >= valid_allocations_range_.begin && value < valid_allocations_range_.end) {
    // compare_range makes this find the allocation containing |value|,
    // not just one starting at it (interior pointers count).
    AllocationMap::iterator it = allocations_.find(Range{value, value + 1});
    if (it != allocations_.end()) {
      *range = it->first;
      *info = &it->second;
      return true;
    }
  }
  return false;
}
void HeapWalker::RecurseRoot(const Range& root) {
allocator::vector<Range> to_do(1, root, allocator_);
while (!to_do.empty()) {
Range range = to_do.back();
to_do.pop_back();
walking_range_ = range;
ForEachPtrInRange(range, [&](Range& ref_range, AllocationInfo* ref_info) {
if (!ref_info->referenced_from_root) {
ref_info->referenced_from_root = true;
to_do.push_back(ref_range);
}
});
walking_range_ = Range{0, 0};
}
}
// Widen the bounding box of readable mappings; allocations outside it
// are treated as fatal input errors (see Allocation()).
void HeapWalker::Mapping(uintptr_t begin, uintptr_t end) {
  valid_mappings_range_.begin = std::min(valid_mappings_range_.begin, begin);
  valid_mappings_range_.end = std::max(valid_mappings_range_.end, end);
}
// Register a memory range (stack, globals, ...) to be scanned as a root.
void HeapWalker::Root(uintptr_t begin, uintptr_t end) {
  roots_.emplace_back(Range{begin, end});
}
// Register individual root values (e.g. register contents, binder refs).
void HeapWalker::Root(const allocator::vector<uintptr_t>& vals) {
  root_vals_.insert(root_vals_.end(), vals.begin(), vals.end());
}
// Number of live allocations recorded so far.
size_t HeapWalker::Allocations() {
  return allocations_.size();
}
// Total bytes across all recorded allocations.
size_t HeapWalker::AllocationBytes() {
  return allocation_bytes_;
}
bool HeapWalker::DetectLeaks() {
// Recursively walk pointers from roots to mark referenced allocations
for (auto it = roots_.begin(); it != roots_.end(); it++) {
RecurseRoot(*it);
}
Range vals;
vals.begin = reinterpret_cast<uintptr_t>(root_vals_.data());
vals.end = vals.begin + root_vals_.size() * sizeof(uintptr_t);
RecurseRoot(vals);
if (segv_page_count_ > 0) {
MEM_ALOGE("%zu pages skipped due to segfaults", segv_page_count_);
}
return true;
}
// Report phase: count every unreferenced allocation and copy at most
// |limit| of them (in map iteration order) into |leaked|.  The full
// totals are returned through the optional out-parameters.
bool HeapWalker::Leaked(allocator::vector<Range>& leaked, size_t limit, size_t* num_leaks_out,
                        size_t* leak_bytes_out) {
  leaked.clear();
  size_t num_leaks = 0;
  size_t leak_bytes = 0;
  // Single pass: tally all leaks, recording the first |limit| ranges.
  for (const auto& entry : allocations_) {
    if (entry.second.referenced_from_root) {
      continue;
    }
    num_leaks++;
    leak_bytes += entry.first.end - entry.first.begin;
    if (leaked.size() < limit) {
      leaked.push_back(entry.first);
    }
  }
  if (num_leaks_out) {
    *num_leaks_out = num_leaks;
  }
  if (leak_bytes_out) {
    *leak_bytes_out = leak_bytes;
  }
  return true;
}
// Replace the page containing |addr| with a fresh read-only zero page
// (MAP_FIXED over the faulting page), so the walker can keep scanning
// past memory it cannot read.
static bool MapOverPage(void* addr) {
  const size_t page_size = sysconf(_SC_PAGE_SIZE);
  uintptr_t page_addr = reinterpret_cast<uintptr_t>(addr) & ~(page_size - 1);
  void* page = reinterpret_cast<void*>(page_addr);
  void* ret = mmap(page, page_size, PROT_READ, MAP_ANONYMOUS | MAP_PRIVATE | MAP_FIXED, -1, 0);
  if (ret == MAP_FAILED) {
    MEM_ALOGE("failed to map page at %p: %s", page, strerror(errno));
    return false;
  }
  return true;
}
// SIGSEGV/SIGBUS handler.  If the fault is at the address the walker is
// currently probing (walking_ptr_), log once, count the page, and map a
// zero page over it so the read can be retried; otherwise reset the
// handler so the default crash behavior takes over.
// NOTE(review): runs in signal context — MEM_ALOGW/MapOverPage must be
// async-signal-safe here; confirm against log.h's device implementation.
void HeapWalker::HandleSegFault(ScopedSignalHandler& handler, int signal, siginfo_t* si,
                                void* /*uctx*/) {
  uintptr_t addr = reinterpret_cast<uintptr_t>(si->si_addr);
  if (addr != walking_ptr_) {
    // Not our probe: restore default handling and re-raise naturally.
    handler.reset();
    return;
  }
  if (!segv_logged_) {
    // Log only the first fault to avoid flooding; later faults are just
    // counted in segv_page_count_.
    MEM_ALOGW("failed to read page at %p, signal %d", si->si_addr, signal);
    if (walking_range_.begin != 0U) {
      MEM_ALOGW("while walking range %p-%p", reinterpret_cast<void*>(walking_range_.begin),
                reinterpret_cast<void*>(walking_range_.end));
    }
    segv_logged_ = true;
  }
  segv_page_count_++;
  if (!MapOverPage(si->si_addr)) {
    handler.reset();
  }
}
// Storage for ScopedSignalHandler's process-wide signal->handler map.
Allocator<ScopedSignalHandler::SignalFnMap>::unique_ptr ScopedSignalHandler::handler_map_;
} // namespace android

View File

@ -1,148 +0,0 @@
/*
* Copyright (C) 2016 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef LIBMEMUNREACHABLE_HEAP_WALKER_H_
#define LIBMEMUNREACHABLE_HEAP_WALKER_H_
#include <signal.h>
#include "android-base/macros.h"
#include "Allocator.h"
#include "ScopedSignalHandler.h"
#include "Tarjan.h"
namespace android {
// A half-open address range [begin, end).
struct Range {
  uintptr_t begin;
  uintptr_t end;
  // Number of bytes covered by the range.
  size_t size() const { return end - begin; }
  bool operator==(const Range& other) const {
    return begin == other.begin && end == other.end;
  }
  bool operator!=(const Range& other) const { return !(*this == other); }
};
// Comparator giving a strict weak ordering in which any two overlapping
// ranges compare equivalent, so map lookups with a one-byte probe range
// find the allocation containing that address.
struct compare_range {
  bool operator()(const Range& a, const Range& b) const { return a.end <= b.begin; }
};
// Walks a snapshot of a process heap: callers register mappings,
// allocations, and roots; DetectLeaks() marks everything reachable and
// Leaked() reports the rest.  All internal storage draws from the
// supplied Allocator so the walker itself does not disturb the heap
// under inspection.
class HeapWalker {
 public:
  explicit HeapWalker(Allocator<HeapWalker> allocator)
      : allocator_(allocator),
        allocations_(allocator),
        allocation_bytes_(0),
        roots_(allocator),
        root_vals_(allocator),
        sigsegv_handler_(allocator),
        sigbus_handler_(allocator),
        walking_ptr_(0),
        walking_range_{0, 0},
        segv_logged_(false),
        segv_page_count_(0) {
    // Start with empty (inverted) bounding boxes: end = 0, begin = max.
    valid_allocations_range_.end = 0;
    valid_allocations_range_.begin = ~valid_allocations_range_.end;
    valid_mappings_range_.end = 0;
    valid_mappings_range_.begin = ~valid_allocations_range_.end;
    // Install fault handlers so probing unreadable pages doesn't crash;
    // see HandleSegFault for the recovery strategy.
    sigsegv_handler_.install(
        SIGSEGV, [=](ScopedSignalHandler& handler, int signal, siginfo_t* siginfo, void* uctx) {
          this->HandleSegFault(handler, signal, siginfo, uctx);
        });
    sigbus_handler_.install(
        SIGBUS, [=](ScopedSignalHandler& handler, int signal, siginfo_t* siginfo, void* uctx) {
          this->HandleSegFault(handler, signal, siginfo, uctx);
        });
  }
  ~HeapWalker() {}
  // Record a live allocation; returns false on duplicate/overlap.
  bool Allocation(uintptr_t begin, uintptr_t end);
  // Record a readable mapping containing allocations.
  void Mapping(uintptr_t begin, uintptr_t end);
  // Record a root range to scan, or individual root values.
  void Root(uintptr_t begin, uintptr_t end);
  void Root(const allocator::vector<uintptr_t>& vals);
  // Mark phase; must run before Leaked().
  bool DetectLeaks();
  // Report up to |limit| unreferenced allocations plus full totals.
  bool Leaked(allocator::vector<Range>&, size_t limit, size_t* num_leaks, size_t* leak_bytes);
  size_t Allocations();
  size_t AllocationBytes();
  // Invoke |f| for each word in |range| that points into an allocation.
  template <class F>
  void ForEachPtrInRange(const Range& range, F&& f);
  template <class F>
  void ForEachAllocation(F&& f);
  // Per-allocation mark bit set during DetectLeaks().
  struct AllocationInfo {
    bool referenced_from_root;
  };
 private:
  void RecurseRoot(const Range& root);
  bool WordContainsAllocationPtr(uintptr_t ptr, Range* range, AllocationInfo** info);
  void HandleSegFault(ScopedSignalHandler&, int, siginfo_t*, void*);
  DISALLOW_COPY_AND_ASSIGN(HeapWalker);
  Allocator<HeapWalker> allocator_;
  // Allocations keyed by range; compare_range makes overlapping ranges
  // equivalent so interior-pointer lookups succeed.
  using AllocationMap = allocator::map<Range, AllocationInfo, compare_range>;
  AllocationMap allocations_;
  size_t allocation_bytes_;
  // Bounding boxes for quick rejection / sanity checks.
  Range valid_allocations_range_;
  Range valid_mappings_range_;
  allocator::vector<Range> roots_;
  allocator::vector<uintptr_t> root_vals_;
  ScopedSignalHandler sigsegv_handler_;
  ScopedSignalHandler sigbus_handler_;
  // Address currently being probed; volatile because it is read from
  // the signal handler to identify expected faults.
  volatile uintptr_t walking_ptr_;
  Range walking_range_;
  bool segv_logged_;
  size_t segv_page_count_;
};
// Scans |range| one word at a time (aligned up to word size) and calls
// |f(ref_range, ref_info)| for each word whose value points into a
// recorded allocation.
template <class F>
inline void HeapWalker::ForEachPtrInRange(const Range& range, F&& f) {
  // Align the start up to a word boundary; unaligned heap pointers are
  // not considered.
  uintptr_t begin = (range.begin + (sizeof(uintptr_t) - 1)) & ~(sizeof(uintptr_t) - 1);
  // TODO(ccross): we might need to consider a pointer to the end of a buffer
  // to be inside the buffer, which means the common case of a pointer to the
  // beginning of a buffer may keep two ranges live.
  for (uintptr_t i = begin; i < range.end; i += sizeof(uintptr_t)) {
    Range ref_range;
    AllocationInfo* ref_info;
    if (WordContainsAllocationPtr(i, &ref_range, &ref_info)) {
      f(ref_range, ref_info);
    }
  }
}
// Calls |f(range, info)| for every recorded allocation.
template <class F>
inline void HeapWalker::ForEachAllocation(F&& f) {
  for (auto& it : allocations_) {
    const Range& range = it.first;
    HeapWalker::AllocationInfo& allocation = it.second;
    f(range, allocation);
  }
}
} // namespace android
#endif

View File

@ -1,60 +0,0 @@
/*
* Copyright (C) 2016 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef LIBMEMUNREACHABLE_LEAK_H_
#define LIBMEMUNREACHABLE_LEAK_H_
#include <functional>
#include <vector>
#include "memunreachable/memunreachable.h"
// Custom std::hash specialization so that Leak::Backtrace can be used
// as a key in std::unordered_map.
namespace std {
// Hashes a backtrace by folding the frame count and each frame address
// together, so identical stacks bucket together in unordered containers.
template <>
struct hash<android::Leak::Backtrace> {
  std::size_t operator()(const android::Leak::Backtrace& key) const {
    std::size_t seed = 0;
    hash_combine(seed, key.num_frames);
    for (size_t i = 0; i < key.num_frames; i++) {
      hash_combine(seed, key.frames[i]);
    }
    return seed;
  }
 private:
  // Boost-style hash_combine; 0x9e3779b9 is the 32-bit golden-ratio
  // constant used to decorrelate successive values.
  template <typename T>
  inline void hash_combine(std::size_t& seed, const T& v) const {
    std::hash<T> hasher;
    seed ^= hasher(v) + 0x9e3779b9 + (seed << 6) + (seed >> 2);
  }
};
} // namespace std
namespace android {
// Two backtraces are equal when they have the same depth and identical
// frame addresses.  Matches the hash specialization above.
// NOTE(review): uses memcmp but this header does not include <string.h>
// directly — presumably pulled in transitively; confirm before reuse.
static bool operator==(const Leak::Backtrace& lhs, const Leak::Backtrace& rhs) {
  return (lhs.num_frames == rhs.num_frames) &&
         memcmp(lhs.frames, rhs.frames, lhs.num_frames * sizeof(lhs.frames[0])) == 0;
}
}
#endif

View File

@ -1,138 +0,0 @@
/*
* Copyright (C) 2016 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <inttypes.h>
#include "Allocator.h"
#include "HeapWalker.h"
#include "LeakFolding.h"
#include "Tarjan.h"
#include "log.h"
namespace android {
// Converts the possibly cyclic graph of leaks to a DAG by combining each
// strongly-connected component into a single SCCInfo object, stored in
// the scc pointer of every node in that component.
void LeakFolding::ComputeDAG() {
  SCCList<LeakInfo> scc_list{allocator_};
  Tarjan(leak_graph_, scc_list);
  Allocator<SCCInfo> scc_allocator = allocator_;
  // One SCCInfo per component, accumulating member count and byte size.
  for (auto& scc_nodes : scc_list) {
    Allocator<SCCInfo>::unique_ptr leak_scc;
    leak_scc = scc_allocator.make_unique(scc_allocator);
    for (auto& node : scc_nodes) {
      node->ptr->scc = leak_scc.get();
      leak_scc->count++;
      leak_scc->size += node->ptr->range.size();
    }
    leak_scc_.emplace_back(std::move(leak_scc));
  }
  // Project leak-to-leak edges onto SCC-to-SCC edges, skipping edges
  // internal to a component.
  for (auto& it : leak_map_) {
    LeakInfo& leak = it.second;
    for (auto& ref : leak.node.references_out) {
      if (leak.scc != ref->ptr->scc) {
        leak.scc->node.Edge(&ref->ptr->scc->node);
      }
    }
  }
}
// Depth-first walk of the SCC DAG from |dominator|, adding every
// component reachable from it into the dominator's cuumulative totals
// (member name spelling is historical).  The |accumulator| field marks
// components already counted for this dominator, so shared subtrees are
// only added once per dominator.
void LeakFolding::AccumulateLeaks(SCCInfo* dominator) {
  std::function<void(SCCInfo*)> walk([&](SCCInfo* scc) {
    if (scc->accumulator != dominator) {
      scc->accumulator = dominator;
      dominator->cuumulative_size += scc->size;
      dominator->cuumulative_count += scc->count;
      // Recurse into the components this one references.
      scc->node.Foreach([&](SCCInfo* ref) { walk(ref); });
    }
  });
  walk(dominator);
}
// Builds the leak graph from the heap walker's unreferenced
// allocations, collapses it into a DAG of strongly connected
// components, and accumulates sizes under each dominator component
// (one with no incoming references).  Always returns true.
bool LeakFolding::FoldLeaks() {
  Allocator<LeakInfo> leak_allocator = allocator_;
  // Find all leaked allocations, insert them into leak_map_ and leak_graph_
  heap_walker_.ForEachAllocation([&](const Range& range, HeapWalker::AllocationInfo& allocation) {
    if (!allocation.referenced_from_root) {
      auto it = leak_map_.emplace(std::piecewise_construct, std::forward_as_tuple(range),
                                  std::forward_as_tuple(range, allocator_));
      LeakInfo& leak = it.first->second;
      leak_graph_.push_back(&leak.node);
    }
  });
  // Find references between leaked allocations and connect them in leak_graph_
  for (auto& it : leak_map_) {
    LeakInfo& leak = it.second;
    heap_walker_.ForEachPtrInRange(leak.range,
                                   [&](Range& ptr_range, HeapWalker::AllocationInfo* ptr_info) {
                                     if (!ptr_info->referenced_from_root) {
                                       // Target must be in leak_map_: it was
                                       // inserted by the pass above.
                                       LeakInfo* ptr_leak = &leak_map_.at(ptr_range);
                                       leak.node.Edge(&ptr_leak->node);
                                     }
                                   });
  }
  // Convert the cyclic graph to a DAG by grouping strongly connected components
  ComputeDAG();
  // Compute dominators and cuumulative sizes
  for (auto& scc : leak_scc_) {
    if (scc->node.references_in.size() == 0) {
      scc->dominator = true;
      AccumulateLeaks(scc.get());
    }
  }
  return true;
}
// Reports folded leaks: every leak whose SCC is a dominator is emitted
// once, with referenced_count/size covering the other allocations it
// keeps alive (minus itself).  Totals over ALL leaks are returned via
// the optional out-parameters.
// NOTE(review): unlike HeapWalker::Leaked, this appends to |leaked|
// without clearing it — callers appear to pass a fresh vector; confirm
// before reusing one.
bool LeakFolding::Leaked(allocator::vector<LeakFolding::Leak>& leaked, size_t* num_leaks_out,
                         size_t* leak_bytes_out) {
  size_t num_leaks = 0;
  size_t leak_bytes = 0;
  // First pass: totals across every leaked allocation.
  for (auto& it : leak_map_) {
    const LeakInfo& leak = it.second;
    num_leaks++;
    leak_bytes += leak.range.size();
  }
  // Second pass: emit one entry per dominator leak.
  for (auto& it : leak_map_) {
    const LeakInfo& leak = it.second;
    if (leak.scc->dominator) {
      leaked.emplace_back(Leak{leak.range, leak.scc->cuumulative_count - 1,
                               leak.scc->cuumulative_size - leak.range.size()});
    }
  }
  if (num_leaks_out) {
    *num_leaks_out = num_leaks;
  }
  if (leak_bytes_out) {
    *leak_bytes_out = leak_bytes;
  }
  return true;
}
} // namespace android

View File

@ -1,100 +0,0 @@
/*
* Copyright (C) 2016 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef LIBMEMUNREACHABLE_LEAK_FOLDING_H_
#define LIBMEMUNREACHABLE_LEAK_FOLDING_H_
#include "HeapWalker.h"
namespace android {
// Folds the raw list of leaked allocations produced by HeapWalker into
// dominator groups: call FoldLeaks() once, then Leaked() to read the
// grouped results.
class LeakFolding {
 public:
  LeakFolding(Allocator<void> allocator, HeapWalker& heap_walker)
      : allocator_(allocator),
        heap_walker_(heap_walker),
        leak_map_(allocator),
        leak_graph_(allocator),
        leak_scc_(allocator) {}
  // Build and collapse the leak graph; see the .cpp for the algorithm.
  bool FoldLeaks();
  // One reported leak: the dominating allocation plus how many other
  // allocations (and bytes) it transitively keeps alive.
  struct Leak {
    const Range range;
    size_t referenced_count;
    size_t referenced_size;
  };
  // Append folded leaks to |leaked|; totals via the out-parameters.
  bool Leaked(allocator::vector<Leak>& leaked, size_t* num_leaks_out, size_t* leak_bytes_out);
 private:
  DISALLOW_COPY_AND_ASSIGN(LeakFolding);
  Allocator<void> allocator_;
  HeapWalker& heap_walker_;
  // Per strongly-connected component bookkeeping ("cuumulative" is a
  // long-standing spelling kept as-is; renaming would touch the .cpp).
  struct SCCInfo {
   public:
    Node<SCCInfo> node;
    size_t count;
    size_t size;
    size_t cuumulative_count;
    size_t cuumulative_size;
    bool dominator;
    SCCInfo* accumulator;
    explicit SCCInfo(Allocator<SCCInfo> allocator)
        : node(this, allocator),
          count(0),
          size(0),
          cuumulative_count(0),
          cuumulative_size(0),
          dominator(false),
          accumulator(nullptr) {}
   private:
    SCCInfo(SCCInfo&&) = delete;
    DISALLOW_COPY_AND_ASSIGN(SCCInfo);
  };
  // Per leaked-allocation graph node.
  struct LeakInfo {
   public:
    Node<LeakInfo> node;
    const Range range;
    SCCInfo* scc;
    LeakInfo(const Range& range, Allocator<LeakInfo> allocator)
        : node(this, allocator), range(range), scc(nullptr) {}
   private:
    DISALLOW_COPY_AND_ASSIGN(LeakInfo);
  };
  void ComputeDAG();
  void AccumulateLeaks(SCCInfo* dominator);
  allocator::map<Range, LeakInfo, compare_range> leak_map_;
  Graph<LeakInfo> leak_graph_;
  allocator::vector<Allocator<SCCInfo>::unique_ptr> leak_scc_;
};
} // namespace android
#endif // LIBMEMUNREACHABLE_LEAK_FOLDING_H_

View File

@ -1,93 +0,0 @@
/*
* Copyright (C) 2016 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <errno.h>
#include <string.h>
#include "LeakPipe.h"
#include "log.h"
namespace android {
// Passes file descriptor |fd| over the connected Unix socket |sock|
// using SCM_RIGHTS ancillary data.  A dummy data word is sent alongside
// because a message must carry at least one byte of payload.
bool LeakPipe::SendFd(int sock, int fd) {
  struct msghdr hdr {};
  struct iovec iov {};
  unsigned int data = 0xfdfdfdfd;
  // Control buffer sized and aligned for exactly one int-sized cmsg.
  alignas(struct cmsghdr) char cmsgbuf[CMSG_SPACE(sizeof(int))];
  hdr.msg_iov = &iov;
  hdr.msg_iovlen = 1;
  iov.iov_base = &data;
  iov.iov_len = sizeof(data);
  hdr.msg_control = cmsgbuf;
  hdr.msg_controllen = CMSG_LEN(sizeof(int));
  struct cmsghdr* cmsg = CMSG_FIRSTHDR(&hdr);
  cmsg->cmsg_len = CMSG_LEN(sizeof(int));
  cmsg->cmsg_level = SOL_SOCKET;
  cmsg->cmsg_type = SCM_RIGHTS;
  // The kernel duplicates this descriptor into the receiving process.
  *(int*)CMSG_DATA(cmsg) = fd;
  int ret = sendmsg(sock, &hdr, 0);
  if (ret < 0) {
    MEM_ALOGE("failed to send fd: %s", strerror(errno));
    return false;
  }
  if (ret == 0) {
    MEM_ALOGE("eof when sending fd");
    return false;
  }
  return true;
}
// Receives a file descriptor sent by SendFd over |sock|.  Returns the
// new descriptor (owned by the caller), or -1 on error/EOF/missing
// SCM_RIGHTS control message.
int LeakPipe::ReceiveFd(int sock) {
  struct msghdr hdr {};
  struct iovec iov {};
  // Landing area for the dummy payload word sent by SendFd.
  unsigned int data;
  alignas(struct cmsghdr) char cmsgbuf[CMSG_SPACE(sizeof(int))];
  hdr.msg_iov = &iov;
  hdr.msg_iovlen = 1;
  iov.iov_base = &data;
  iov.iov_len = sizeof(data);
  hdr.msg_control = cmsgbuf;
  hdr.msg_controllen = CMSG_LEN(sizeof(int));
  int ret = recvmsg(sock, &hdr, 0);
  if (ret < 0) {
    MEM_ALOGE("failed to receive fd: %s", strerror(errno));
    return -1;
  }
  if (ret == 0) {
    MEM_ALOGE("eof when receiving fd");
    return -1;
  }
  // Validate that the message actually carried an fd.
  struct cmsghdr* cmsg = CMSG_FIRSTHDR(&hdr);
  if (cmsg == NULL || cmsg->cmsg_level != SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS) {
    MEM_ALOGE("missing fd while receiving fd");
    return -1;
  }
  return *(int*)CMSG_DATA(cmsg);
}
} // namespace android

View File

@ -1,194 +0,0 @@
/*
* Copyright (C) 2016 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef LIBMEMUNREACHABLE_LEAK_PIPE_H_
#define LIBMEMUNREACHABLE_LEAK_PIPE_H_
#include <sys/socket.h>
#include <vector>
#include "android-base/macros.h"
#include "ScopedPipe.h"
#include "log.h"
namespace android {
// LeakPipe implements a pipe that can transfer vectors of simple objects
// between processes. The pipe is created in the sending process and
// transferred over a socketpair that was created before forking. This ensures
// that only the sending process can have the send side of the pipe open, so if
// the sending process dies the pipe will close.
class LeakPipe {
 public:
  LeakPipe() {
    int ret = socketpair(AF_UNIX, SOCK_STREAM | SOCK_CLOEXEC, 0, sv_);
    if (ret < 0) {
      MEM_LOG_ALWAYS_FATAL("failed to create socketpair: %s", strerror(errno));
    }
  }

  ~LeakPipe() { Close(); }

  void Close() {
    close(sv_[0]);
    close(sv_[1]);
    sv_[0] = -1;
    sv_[1] = -1;
  }

  // Called in the receiving process: blocks until the sending process
  // transfers the read end of the pipe over the socketpair.
  bool OpenReceiver() {
    int fd = ReceiveFd(sv_[0]);
    if (fd < 0) {
      return false;
    }
    receiver_.SetFd(fd);
    return true;
  }

  // Called in the sending process: creates the pipe, passes its read end to
  // the peer over the socketpair, and keeps the write end locally.
  bool OpenSender() {
    ScopedPipe pipe;
    if (!SendFd(sv_[1], pipe.Receiver())) {
      return false;
    }
    // The read end now lives in the peer process; drop our copy so the pipe
    // closes when the sender dies.
    pipe.ReleaseReceiver();
    sender_.SetFd(pipe.ReleaseSender());
    return true;
  }

  // Owns a single pipe fd and closes it on destruction.
  class LeakPipeBase {
   public:
    LeakPipeBase() : fd_(-1) {}
    ~LeakPipeBase() { Close(); }

    void SetFd(int fd) { fd_ = fd; }

    void Close() {
      close(fd_);
      fd_ = -1;
    }

   protected:
    int fd_;

   private:
    DISALLOW_COPY_AND_ASSIGN(LeakPipeBase);
  };

  // Write side of the pipe: sends raw-byte values and vectors of them.
  class LeakPipeSender : public LeakPipeBase {
   public:
    using LeakPipeBase::LeakPipeBase;

    // Sends one value as raw bytes; T must be trivially copyable.
    template <typename T>
    bool Send(const T& value) {
      ssize_t ret = TEMP_FAILURE_RETRY(write(fd_, &value, sizeof(T)));
      if (ret < 0) {
        MEM_ALOGE("failed to send value: %s", strerror(errno));
        return false;
      } else if (static_cast<size_t>(ret) != sizeof(T)) {
        MEM_ALOGE("eof while writing value");
        return false;
      }
      return true;
    }

    // Sends the byte size followed by the vector contents.
    // NOTE(review): a short write is treated as EOF rather than retried;
    // presumably vectors fit in the pipe buffer in practice — confirm.
    template <class T, class Alloc = std::allocator<T>>
    bool SendVector(const std::vector<T, Alloc>& vector) {
      size_t size = vector.size() * sizeof(T);
      if (!Send(size)) {
        return false;
      }
      ssize_t ret = TEMP_FAILURE_RETRY(write(fd_, vector.data(), size));
      if (ret < 0) {
        MEM_ALOGE("failed to send vector: %s", strerror(errno));
        return false;
      } else if (static_cast<size_t>(ret) != size) {
        MEM_ALOGE("eof while writing vector");
        return false;
      }
      return true;
    }
  };

  // Read side of the pipe: mirror of LeakPipeSender.
  class LeakPipeReceiver : public LeakPipeBase {
   public:
    using LeakPipeBase::LeakPipeBase;

    // Receives one value as raw bytes; T must be trivially copyable.
    template <typename T>
    bool Receive(T* value) {
      ssize_t ret = TEMP_FAILURE_RETRY(read(fd_, reinterpret_cast<void*>(value), sizeof(T)));
      if (ret < 0) {
        MEM_ALOGE("failed to receive value: %s", strerror(errno));
        return false;
      } else if (static_cast<size_t>(ret) != sizeof(T)) {
        MEM_ALOGE("eof while receiving value");
        return false;
      }
      return true;
    }

    // Receives a size header, resizes |vector|, then loops until every byte
    // has arrived (reads, unlike writes above, handle partial transfers).
    template <class T, class Alloc = std::allocator<T>>
    bool ReceiveVector(std::vector<T, Alloc>& vector) {
      size_t size = 0;
      if (!Receive(&size)) {
        return false;
      }
      vector.resize(size / sizeof(T));
      char* ptr = reinterpret_cast<char*>(vector.data());
      while (size > 0) {
        ssize_t ret = TEMP_FAILURE_RETRY(read(fd_, ptr, size));
        if (ret < 0) {
          // Fixed log message: this is the receive path, not the send path.
          MEM_ALOGE("failed to receive vector: %s", strerror(errno));
          return false;
        } else if (ret == 0) {
          MEM_ALOGE("eof while reading vector");
          return false;
        }
        size -= ret;
        ptr += ret;
      }
      return true;
    }
  };

  LeakPipeReceiver& Receiver() { return receiver_; }

  LeakPipeSender& Sender() { return sender_; }

 private:
  LeakPipeReceiver receiver_;
  LeakPipeSender sender_;
  bool SendFd(int sock, int fd);
  int ReceiveFd(int sock);
  DISALLOW_COPY_AND_ASSIGN(LeakPipe);
  int sv_[2];
};
} // namespace android
#endif // LIBMEMUNREACHABLE_LEAK_PIPE_H_

View File

@ -1,63 +0,0 @@
/*
* Copyright (C) 2016 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef LIBMEMUNREACHABLE_LINKED_LIST_H_
#define LIBMEMUNREACHABLE_LINKED_LIST_H_
namespace android {
// Intrusive circular doubly-linked list node.  A node whose links point at
// itself is "empty" (detached from any list).
template <class T>
class LinkedList {
 public:
  LinkedList() : next_(this), prev_(this), data_() {}
  explicit LinkedList(T data) : LinkedList() { data_ = data; }
  ~LinkedList() {}

  // Splices |node| in directly after this node.  |node| must be detached.
  void insert(LinkedList<T>& node) {
    assert(node.empty());
    node.prev_ = this;
    node.next_ = next_;
    next_->prev_ = &node;
    next_ = &node;
  }

  // Unlinks this node, reconnecting its neighbors and resetting the node to
  // the empty (self-linked) state.
  void remove() {
    prev_->next_ = next_;
    next_->prev_ = prev_;
    next_ = this;
    prev_ = this;
  }

  T data() { return data_; }
  bool empty() { return next_ == this && prev_ == this; }
  LinkedList<T>* next() { return next_; }

 private:
  LinkedList<T>* next_;
  LinkedList<T>* prev_;
  T data_;
};
// Owns the sentinel node of an intrusive LinkedList; the list's elements are
// reachable through the (private) sentinel.
template <class T>
class LinkedListHead {
 public:
  LinkedListHead() : node_() {}
  ~LinkedListHead() {}

 private:
  LinkedList<T> node_;  // self-linked sentinel; never carries data
};
} // namespace android
#endif

View File

@ -1,564 +0,0 @@
/*
* Copyright (C) 2016 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <inttypes.h>
#include <string.h>
#include <functional>
#include <iomanip>
#include <mutex>
#include <sstream>
#include <string>
#include <unordered_map>
#include <android-base/macros.h>
#include <android-base/strings.h>
#include <backtrace.h>
#include "Allocator.h"
#include "Binder.h"
#include "HeapWalker.h"
#include "Leak.h"
#include "LeakFolding.h"
#include "LeakPipe.h"
#include "ProcessMappings.h"
#include "PtracerThread.h"
#include "ScopedDisableMalloc.h"
#include "Semaphore.h"
#include "ThreadCapture.h"
#include "bionic.h"
#include "log.h"
#include "memunreachable/memunreachable.h"
using namespace std::chrono_literals;
namespace android {
const size_t Leak::contents_length;
// Drives leak detection for one process: registers allocations and GC roots
// with a HeapWalker, then sweeps, folds and summarizes the resulting leaks.
class MemUnreachable {
 public:
  // |pid| identifies the process under inspection (used for logging);
  // |allocator| backs all of the detector's internal containers.
  MemUnreachable(pid_t pid, Allocator<void> allocator)
      : pid_(pid), allocator_(allocator), heap_walker_(allocator_) {}
  // Feeds thread state, memory mappings and binder references into the heap
  // walker.  Returns false if the mappings cannot be classified.
  bool CollectAllocations(const allocator::vector<ThreadInfo>& threads,
                          const allocator::vector<Mapping>& mappings,
                          const allocator::vector<uintptr_t>& refs);
  // Runs the sweep; fills |leaks| with up to |limit| folded, deduplicated
  // leak records and reports totals through |num_leaks|/|leak_bytes|.
  bool GetUnreachableMemory(allocator::vector<Leak>& leaks, size_t limit, size_t* num_leaks,
                            size_t* leak_bytes);
  size_t Allocations() { return heap_walker_.Allocations(); }
  size_t AllocationBytes() { return heap_walker_.AllocationBytes(); }

 private:
  // Partitions |mappings| into the heap/anon/globals/stack categories the
  // collector treats differently.
  bool ClassifyMappings(const allocator::vector<Mapping>& mappings,
                        allocator::vector<Mapping>& heap_mappings,
                        allocator::vector<Mapping>& anon_mappings,
                        allocator::vector<Mapping>& globals_mappings,
                        allocator::vector<Mapping>& stack_mappings);
  DISALLOW_COPY_AND_ASSIGN(MemUnreachable);
  pid_t pid_;
  Allocator<void> allocator_;
  HeapWalker heap_walker_;
};
// Invokes |func| once per live allocation inside |heap_mapping| by walking
// the allocator's bookkeeping via malloc_iterate().  The stateless lambda
// adapts malloc_iterate's C-style callback back into the std::function.
static void HeapIterate(const Mapping& heap_mapping,
                        const std::function<void(uintptr_t, size_t)>& func) {
  malloc_iterate(heap_mapping.begin, heap_mapping.end - heap_mapping.begin,
                 [](uintptr_t base, size_t size, void* arg) {
                   auto f = reinterpret_cast<const std::function<void(uintptr_t, size_t)>*>(arg);
                   (*f)(base, size);
                 },
                 const_cast<void*>(reinterpret_cast<const void*>(&func)));
}
// Populates the heap walker: heap mappings are enumerated allocation-by-
// allocation, anonymous mappings become single allocations, and globals,
// thread stacks, saved registers and binder refs become GC roots.  Returns
// false only if mapping classification fails.
bool MemUnreachable::CollectAllocations(const allocator::vector<ThreadInfo>& threads,
                                        const allocator::vector<Mapping>& mappings,
                                        const allocator::vector<uintptr_t>& refs) {
  MEM_ALOGI("searching process %d for allocations", pid_);
  // Register every mapping so the walker can tell valid pointers apart.
  for (auto it = mappings.begin(); it != mappings.end(); it++) {
    heap_walker_.Mapping(it->begin, it->end);
  }
  allocator::vector<Mapping> heap_mappings{mappings};
  allocator::vector<Mapping> anon_mappings{mappings};
  allocator::vector<Mapping> globals_mappings{mappings};
  allocator::vector<Mapping> stack_mappings{mappings};
  if (!ClassifyMappings(mappings, heap_mappings, anon_mappings, globals_mappings, stack_mappings)) {
    return false;
  }
  for (auto it = heap_mappings.begin(); it != heap_mappings.end(); it++) {
    MEM_ALOGV("Heap mapping %" PRIxPTR "-%" PRIxPTR " %s", it->begin, it->end, it->name);
    HeapIterate(*it,
                [&](uintptr_t base, size_t size) { heap_walker_.Allocation(base, base + size); });
  }
  for (auto it = anon_mappings.begin(); it != anon_mappings.end(); it++) {
    MEM_ALOGV("Anon mapping %" PRIxPTR "-%" PRIxPTR " %s", it->begin, it->end, it->name);
    heap_walker_.Allocation(it->begin, it->end);
  }
  for (auto it = globals_mappings.begin(); it != globals_mappings.end(); it++) {
    MEM_ALOGV("Globals mapping %" PRIxPTR "-%" PRIxPTR " %s", it->begin, it->end, it->name);
    heap_walker_.Root(it->begin, it->end);
  }
  for (auto thread_it = threads.begin(); thread_it != threads.end(); thread_it++) {
    for (auto it = stack_mappings.begin(); it != stack_mappings.end(); it++) {
      if (thread_it->stack.first >= it->begin && thread_it->stack.first <= it->end) {
        MEM_ALOGV("Stack %" PRIxPTR "-%" PRIxPTR " %s", thread_it->stack.first, it->end, it->name);
        // Only [stack.first, mapping end) is rooted — presumably stack.first
        // is the thread's current stack pointer, so dead stack below it is
        // ignored.  NOTE(review): confirm against ThreadCapture.
        heap_walker_.Root(thread_it->stack.first, it->end);
      }
    }
    heap_walker_.Root(thread_it->regs);
  }
  heap_walker_.Root(refs);
  MEM_ALOGI("searching done");
  return true;
}
// Sweeps the heap for unreachable allocations, folds leaks that are only
// referenced by other leaks, deduplicates leaks sharing a malloc backtrace,
// then sorts by total size and truncates the output to |limit| entries.
bool MemUnreachable::GetUnreachableMemory(allocator::vector<Leak>& leaks, size_t limit,
                                          size_t* num_leaks, size_t* leak_bytes) {
  MEM_ALOGI("sweeping process %d for unreachable memory", pid_);
  leaks.clear();
  if (!heap_walker_.DetectLeaks()) {
    return false;
  }
  allocator::vector<Range> leaked1{allocator_};
  heap_walker_.Leaked(leaked1, 0, num_leaks, leak_bytes);
  MEM_ALOGI("sweeping done");
  MEM_ALOGI("folding related leaks");
  LeakFolding folding(allocator_, heap_walker_);
  if (!folding.FoldLeaks()) {
    return false;
  }
  allocator::vector<LeakFolding::Leak> leaked{allocator_};
  // Overwrites *num_leaks/*leak_bytes with the post-folding totals.
  if (!folding.Leaked(leaked, num_leaks, leak_bytes)) {
    return false;
  }
  allocator::unordered_map<Leak::Backtrace, Leak*> backtrace_map{allocator_};
  // Prevent reallocations of backing memory so we can store pointers into it
  // in backtrace_map.
  leaks.reserve(leaked.size());
  for (auto& it : leaked) {
    leaks.emplace_back();
    Leak* leak = &leaks.back();
    ssize_t num_backtrace_frames = malloc_backtrace(
        reinterpret_cast<void*>(it.range.begin), leak->backtrace.frames, leak->backtrace.max_frames);
    if (num_backtrace_frames > 0) {
      leak->backtrace.num_frames = num_backtrace_frames;
      auto inserted = backtrace_map.emplace(leak->backtrace, leak);
      if (!inserted.second) {
        // Leak with same backtrace already exists, drop this one and
        // increment similar counts on the existing one.
        leaks.pop_back();
        Leak* similar_leak = inserted.first->second;
        similar_leak->similar_count++;
        similar_leak->similar_size += it.range.size();
        similar_leak->similar_referenced_count += it.referenced_count;
        similar_leak->similar_referenced_size += it.referenced_size;
        similar_leak->total_size += it.range.size();
        similar_leak->total_size += it.referenced_size;
        continue;
      }
    }
    leak->begin = it.range.begin;
    leak->size = it.range.size();
    leak->referenced_count = it.referenced_count;
    leak->referenced_size = it.referenced_size;
    leak->total_size = leak->size + leak->referenced_size;
    // Capture a prefix of the leaked bytes for later display.
    memcpy(leak->contents, reinterpret_cast<void*>(it.range.begin),
           std::min(leak->size, Leak::contents_length));
  }
  MEM_ALOGI("folding done");
  // Biggest offenders first.
  std::sort(leaks.begin(), leaks.end(),
            [](const Leak& a, const Leak& b) { return a.total_size > b.total_size; });
  if (leaks.size() > limit) {
    leaks.resize(limit);
  }
  return true;
}
// True when |s| begins with the NUL-terminated string |prefix|.
static bool has_prefix(const allocator::string& s, const char* prefix) {
  return s.compare(0, strlen(prefix), prefix) == 0;
}
// True for mappings created by ASan/HWASan that must not be treated as
// potential leaks.
static bool is_sanitizer_mapping(const allocator::string& s) {
  if (has_prefix(s, "[anon:hwasan")) {
    return true;
  }
  return s == "[anon:low shadow]" || s == "[anon:high shadow]";
}
// Splits |mappings| into the four categories the collector handles
// differently: malloc heaps (walked allocation-by-allocation), anonymous
// mappings (single allocations), globals (GC roots) and thread stacks
// (partial GC roots).  Executable mappings update |current_lib| so that a
// following readable mapping with the same name is recognized as that
// library's .data/.rodata.  Order of the checks below is significant.
bool MemUnreachable::ClassifyMappings(const allocator::vector<Mapping>& mappings,
                                      allocator::vector<Mapping>& heap_mappings,
                                      allocator::vector<Mapping>& anon_mappings,
                                      allocator::vector<Mapping>& globals_mappings,
                                      allocator::vector<Mapping>& stack_mappings) {
  heap_mappings.clear();
  anon_mappings.clear();
  globals_mappings.clear();
  stack_mappings.clear();
  allocator::string current_lib{allocator_};
  for (auto it = mappings.begin(); it != mappings.end(); it++) {
    if (it->execute) {
      current_lib = it->name;
      continue;
    }
    if (!it->read) {
      continue;  // unreadable mappings cannot contain reachable pointers
    }
    const allocator::string mapping_name{it->name, allocator_};
    if (mapping_name == "[anon:.bss]") {
      // named .bss section
      globals_mappings.emplace_back(*it);
    } else if (mapping_name == current_lib) {
      // .rodata or .data section
      globals_mappings.emplace_back(*it);
    } else if (mapping_name == "[anon:libc_malloc]" ||
               android::base::StartsWith(mapping_name, "[anon:scudo:")) {
      // named malloc mapping
      heap_mappings.emplace_back(*it);
    } else if (has_prefix(mapping_name, "[anon:dalvik-")) {
      // named dalvik heap mapping
      globals_mappings.emplace_back(*it);
    } else if (has_prefix(mapping_name, "[stack")) {
      // named stack mapping
      stack_mappings.emplace_back(*it);
    } else if (mapping_name.size() == 0) {
      globals_mappings.emplace_back(*it);
    } else if (has_prefix(mapping_name, "[anon:") &&
               mapping_name != "[anon:leak_detector_malloc]" &&
               !is_sanitizer_mapping(mapping_name)) {
      // TODO(ccross): it would be nice to treat named anonymous mappings as
      // possible leaks, but naming something in a .bss or .data section makes
      // it impossible to distinguish them from mmaped and then named mappings.
      globals_mappings.emplace_back(*it);
    }
  }
  return true;
}
// Returns the plural suffix for |val|: "" for exactly one, "s" otherwise.
template <typename T>
static inline const char* plural(T val) {
  if (val == 1) {
    return "";
  }
  return "s";
}
// Entry point for in-process leak detection.  Three actors cooperate:
//  1. The calling ("original") thread disables malloc and starts a clone()d
//     collection thread (PtracerThread), then blocks reading results.
//  2. The collection thread ptraces and pauses every other thread, captures
//     registers/stacks, /proc/pid/maps and binder refs, then forks.
//  3. The forked heap-walker child inherits a copy-on-write snapshot of the
//     paused process and performs the mark-and-sweep, streaming results back
//     over a LeakPipe.
// Returns false on any failure; fills |info| with up to |limit| leaks.
bool GetUnreachableMemory(UnreachableMemoryInfo& info, size_t limit) {
  // Only version 0 of the result struct is understood.
  if (info.version > 0) {
    MEM_ALOGE("unsupported UnreachableMemoryInfo.version %zu in GetUnreachableMemory",
              info.version);
    return false;
  }
  int parent_pid = getpid();
  int parent_tid = gettid();
  Heap heap;
  Semaphore continue_parent_sem;
  LeakPipe pipe;
  PtracerThread thread{[&]() -> int {
    /////////////////////////////////////////////
    // Collection thread
    /////////////////////////////////////////////
    MEM_ALOGI("collecting thread info for process %d...", parent_pid);
    ThreadCapture thread_capture(parent_pid, heap);
    allocator::vector<ThreadInfo> thread_info(heap);
    allocator::vector<Mapping> mappings(heap);
    allocator::vector<uintptr_t> refs(heap);
    // ptrace all the threads
    if (!thread_capture.CaptureThreads()) {
      continue_parent_sem.Post();
      return 1;
    }
    // collect register contents and stacks
    if (!thread_capture.CapturedThreadInfo(thread_info)) {
      continue_parent_sem.Post();
      return 1;
    }
    // snapshot /proc/pid/maps
    if (!ProcessMappings(parent_pid, mappings)) {
      continue_parent_sem.Post();
      return 1;
    }
    if (!BinderReferences(refs)) {
      continue_parent_sem.Post();
      return 1;
    }
    // malloc must be enabled to call fork, at_fork handlers take the same
    // locks as ScopedDisableMalloc.  All threads are paused in ptrace, so
    // memory state is still consistent.  Unfreeze the original thread so it
    // can drop the malloc locks, it will block until the collection thread
    // exits.
    thread_capture.ReleaseThread(parent_tid);
    continue_parent_sem.Post();
    // fork a process to do the heap walking
    int ret = fork();
    if (ret < 0) {
      return 1;
    } else if (ret == 0) {
      /////////////////////////////////////////////
      // Heap walker process
      /////////////////////////////////////////////
      // Examine memory state in the child using the data collected above and
      // the CoW snapshot of the process memory contents.
      if (!pipe.OpenSender()) {
        _exit(1);
      }
      MemUnreachable unreachable{parent_pid, heap};
      if (!unreachable.CollectAllocations(thread_info, mappings, refs)) {
        _exit(2);
      }
      size_t num_allocations = unreachable.Allocations();
      size_t allocation_bytes = unreachable.AllocationBytes();
      allocator::vector<Leak> leaks{heap};
      size_t num_leaks = 0;
      size_t leak_bytes = 0;
      bool ok = unreachable.GetUnreachableMemory(leaks, limit, &num_leaks, &leak_bytes);
      // Stream the results back; any failure exits with a distinct code.
      ok = ok && pipe.Sender().Send(num_allocations);
      ok = ok && pipe.Sender().Send(allocation_bytes);
      ok = ok && pipe.Sender().Send(num_leaks);
      ok = ok && pipe.Sender().Send(leak_bytes);
      ok = ok && pipe.Sender().SendVector(leaks);
      if (!ok) {
        _exit(3);
      }
      _exit(0);
    } else {
      // Nothing left to do in the collection thread, return immediately,
      // releasing all the captured threads.
      MEM_ALOGI("collection thread done");
      return 0;
    }
  }};
  /////////////////////////////////////////////
  // Original thread
  /////////////////////////////////////////////
  {
    // Disable malloc to get a consistent view of memory
    ScopedDisableMalloc disable_malloc;
    // Start the collection thread
    thread.Start();
    // Wait for the collection thread to signal that it is ready to fork the
    // heap walker process.
    continue_parent_sem.Wait(30s);
    // Re-enable malloc so the collection thread can fork.
  }
  // Wait for the collection thread to exit
  int ret = thread.Join();
  if (ret != 0) {
    return false;
  }
  // Get a pipe from the heap walker process.  Transferring a new pipe fd
  // ensures no other forked processes can have it open, so when the heap
  // walker process dies the remote side of the pipe will close.
  if (!pipe.OpenReceiver()) {
    return false;
  }
  // Read back results in the same order the child sent them.
  bool ok = true;
  ok = ok && pipe.Receiver().Receive(&info.num_allocations);
  ok = ok && pipe.Receiver().Receive(&info.allocation_bytes);
  ok = ok && pipe.Receiver().Receive(&info.num_leaks);
  ok = ok && pipe.Receiver().Receive(&info.leak_bytes);
  ok = ok && pipe.Receiver().ReceiveVector(info.leaks);
  if (!ok) {
    return false;
  }
  MEM_ALOGI("unreachable memory detection done");
  MEM_ALOGE("%zu bytes in %zu allocation%s unreachable out of %zu bytes in %zu allocation%s",
            info.leak_bytes, info.num_leaks, plural(info.num_leaks), info.allocation_bytes,
            info.num_allocations, plural(info.num_allocations));
  return true;
}
// Formats one leak as human-readable text: size/address header, counts of
// referenced and similar leaks, an optional 16-bytes-per-line hex+ASCII dump
// of the captured contents, and the malloc backtrace when one was recorded.
std::string Leak::ToString(bool log_contents) const {
  std::ostringstream oss;
  oss << " " << std::dec << size;
  oss << " bytes unreachable at ";
  oss << std::hex << begin;
  oss << std::endl;
  if (referenced_count > 0) {
    oss << std::dec;
    oss << " referencing " << referenced_size << " unreachable bytes";
    oss << " in " << referenced_count << " allocation" << plural(referenced_count);
    oss << std::endl;
  }
  if (similar_count > 0) {
    oss << std::dec;
    oss << " and " << similar_size << " similar unreachable bytes";
    oss << " in " << similar_count << " allocation" << plural(similar_count);
    oss << std::endl;
    if (similar_referenced_count > 0) {
      oss << " referencing " << similar_referenced_size << " unreachable bytes";
      oss << " in " << similar_referenced_count << " allocation" << plural(similar_referenced_count);
      oss << std::endl;
    }
  }
  if (log_contents) {
    const int bytes_per_line = 16;
    // Only the prefix captured into |contents| is available for dumping.
    const size_t bytes = std::min(size, contents_length);
    if (bytes == size) {
      oss << " contents:" << std::endl;
    } else {
      oss << " first " << bytes << " bytes of contents:" << std::endl;
    }
    for (size_t i = 0; i < bytes; i += bytes_per_line) {
      oss << " " << std::hex << begin + i << ": ";
      size_t j;
      // Hex column.
      oss << std::setfill('0');
      for (j = i; j < bytes && j < i + bytes_per_line; j++) {
        oss << std::setw(2) << static_cast<int>(contents[j]) << " ";
      }
      // Pad a short final line so the ASCII column stays aligned.
      oss << std::setfill(' ');
      for (; j < i + bytes_per_line; j++) {
        oss << " ";
      }
      // ASCII column; non-printable bytes shown as '.'.
      for (j = i; j < bytes && j < i + bytes_per_line; j++) {
        char c = contents[j];
        if (c < ' ' || c >= 0x7f) {
          c = '.';
        }
        oss << c;
      }
      oss << std::endl;
    }
  }
  if (backtrace.num_frames > 0) {
    oss << backtrace_string(backtrace.frames, backtrace.num_frames);
  }
  return oss.str();
}
std::string UnreachableMemoryInfo::ToString(bool log_contents) const {
std::ostringstream oss;
oss << " " << leak_bytes << " bytes in ";
oss << num_leaks << " unreachable allocation" << plural(num_leaks);
oss << std::endl;
oss << " ABI: '" ABI_STRING "'" << std::endl;
oss << std::endl;
for (auto it = leaks.begin(); it != leaks.end(); it++) {
oss << it->ToString(log_contents);
oss << std::endl;
}
return oss.str();
}
UnreachableMemoryInfo::~UnreachableMemoryInfo() {
  // Clear the memory that holds the leaks, otherwise the next attempt to
  // detect leaks may find the old data (for example in the jemalloc tcache)
  // and consider all the leaks to be referenced.
  memset(leaks.data(), 0, leaks.capacity() * sizeof(Leak));
  // Swapping with an empty local vector forces the backing allocation to be
  // released now rather than merely cleared.
  std::vector<Leak> tmp;
  leaks.swap(tmp);
  // Disable and re-enable malloc to flush the jemalloc tcache to make sure
  // there are no copies of the leaked pointer addresses there.
  malloc_disable();
  malloc_enable();
}
// Convenience wrapper: runs detection and returns the formatted report, or a
// fixed diagnostic string when detection fails.
std::string GetUnreachableMemoryString(bool log_contents, size_t limit) {
  UnreachableMemoryInfo info;
  if (GetUnreachableMemory(info, limit)) {
    return info.ToString(log_contents);
  }
  return "Failed to get unreachable memory\n"
         "If you are trying to get unreachable memory from a system app\n"
         "(like com.android.systemui), disable selinux first using\n"
         "setenforce 0\n";
}
} // namespace android
// Runs leak detection and writes each leak's description to the log.
// Returns false if detection itself failed.
bool LogUnreachableMemory(bool log_contents, size_t limit) {
  android::UnreachableMemoryInfo info;
  if (!android::GetUnreachableMemory(info, limit)) {
    return false;
  }
  for (const auto& leak : info.leaks) {
    MEM_ALOGE("%s", leak.ToString(log_contents).c_str());
  }
  return true;
}
// Returns true iff leak detection succeeds and finds zero leaks.  Passing
// limit 0 skips collecting per-leak details; only the count is needed.
bool NoLeaks() {
  android::UnreachableMemoryInfo info;
  return android::GetUnreachableMemory(info, 0) && info.num_leaks == 0;
}

View File

@ -1,2 +0,0 @@
ccross@google.com
cferris@google.com

View File

@ -1,59 +0,0 @@
/*
* Copyright (C) 2016 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <errno.h>
#include <fcntl.h>
#include <inttypes.h>
#include <string.h>
#include <sys/types.h>
#include <unistd.h>
#include <android-base/unique_fd.h>
#include <procinfo/process_map.h>
#include "ProcessMappings.h"
namespace android {
// Adapter handed to procinfo::ReadMapFileContent: appends one Mapping per
// parsed /proc/<pid>/maps line to the caller-supplied vector.
struct ReadMapCallback {
  ReadMapCallback(allocator::vector<Mapping>& mappings) : mappings_(mappings) {}

  // Signature required by ReadMapFileContent; pgoff and inode are unused.
  void operator()(uint64_t start, uint64_t end, uint16_t flags, uint64_t, ino_t,
                  const char* name) const {
    mappings_.emplace_back(start, end, flags & PROT_READ, flags & PROT_WRITE, flags & PROT_EXEC,
                           name);
  }

  allocator::vector<Mapping>& mappings_;  // not owned
};
// Snapshots /proc/<pid>/maps into |mappings|.  The whole file is first read
// into a string backed by the same allocator as |mappings| — presumably to
// avoid touching the malloc heap being inspected (NOTE(review): confirm).
// Returns false only if the maps file cannot be opened.
bool ProcessMappings(pid_t pid, allocator::vector<Mapping>& mappings) {
  char map_buffer[1024];
  snprintf(map_buffer, sizeof(map_buffer), "/proc/%d/maps", pid);
  android::base::unique_fd fd(open(map_buffer, O_RDONLY));
  if (fd == -1) {
    return false;
  }
  allocator::string content(mappings.get_allocator());
  ssize_t n;
  // map_buffer is reused as the read buffer after building the path above.
  while ((n = TEMP_FAILURE_RETRY(read(fd, map_buffer, sizeof(map_buffer)))) > 0) {
    content.append(map_buffer, n);
  }
  ReadMapCallback callback(mappings);
  return android::procinfo::ReadMapFileContent(&content[0], callback);
}
} // namespace android

View File

@ -1,47 +0,0 @@
/*
* Copyright (C) 2016 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef LIBMEMUNREACHABLE_PROCESS_MAPPING_H_
#define LIBMEMUNREACHABLE_PROCESS_MAPPING_H_
#include <string.h>
#include "Allocator.h"
namespace android {
// One parsed /proc/<pid>/maps entry: address range, access bits, and the
// mapping name (truncated by strlcpy to fit the fixed 96-byte buffer).
struct Mapping {
  uintptr_t begin;
  uintptr_t end;
  bool read;
  bool write;
  bool execute;
  char name[96];

  Mapping() {}
  Mapping(uintptr_t begin, uintptr_t end, bool read, bool write, bool execute, const char* name)
      : begin(begin), end(end), read(read), write(write), execute(execute) {
    strlcpy(this->name, name, sizeof(this->name));
  }
};
// Parses /proc/<pid>/maps for the given process into |mappings|.
// NOTE(review): an older comment here claimed a static buffer is used; the
// implementation reads into local/allocator-backed buffers.
bool ProcessMappings(pid_t pid, allocator::vector<Mapping>& mappings);
} // namespace android
#endif // LIBMEMUNREACHABLE_PROCESS_MAPPING_H_

View File

@ -1,155 +0,0 @@
/*
* Copyright (C) 2016 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <errno.h>
#include <fcntl.h>
#include <inttypes.h>
#include <pthread.h>
#include <sched.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/prctl.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>
#include "android-base/macros.h"
#include "PtracerThread.h"
#include "log.h"
namespace android {
// mmap-backed stack for the clone()d ptracer thread, with a PROT_NONE guard
// page at each end to trap overflow and underflow.
class Stack {
 public:
  explicit Stack(size_t size) : size_(size) {
    int prot = PROT_READ | PROT_WRITE;
    int flags = MAP_PRIVATE | MAP_ANONYMOUS;
    page_size_ = sysconf(_SC_PAGE_SIZE);
    size_ += page_size_ * 2;  // guard pages
    base_ = mmap(NULL, size_, prot, flags, -1, 0);
    if (base_ == MAP_FAILED) {
      // Leave the object in a detectable failed state; callers check top().
      base_ = NULL;
      size_ = 0;
      return;
    }
    prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, base_, size_, "libmemunreachable stack");
    mprotect(base_, page_size_, PROT_NONE);  // lower guard page
    mprotect(top(), page_size_, PROT_NONE);  // upper guard page
  };
  ~Stack() { munmap(base_, size_); };

  // Returns the address just above the usable region (start of the upper
  // guard page); the stack grows down from here.  NULL if mmap failed.
  void* top() {
    return reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(base_) + size_ - page_size_);
  };

 private:
  DISALLOW_COPY_AND_ASSIGN(Stack);
  void* base_;
  size_t size_;
  size_t page_size_;
};
// Allocates the child stack and wraps |func| so the child blocks on m_
// (held by Start() during setup) before running; the child's process exits
// with func()'s return value.
PtracerThread::PtracerThread(const std::function<int()>& func) : child_pid_(0) {
  stack_ = std::make_unique<Stack>(PTHREAD_STACK_MIN);
  if (stack_->top() == nullptr) {
    MEM_LOG_ALWAYS_FATAL("failed to mmap child stack: %s", strerror(errno));
  }
  func_ = std::function<int()>{[&, func]() -> int {
    // In the child thread, lock and unlock the mutex to wait for the parent
    // to finish setting up for the child thread
    std::unique_lock<std::mutex> lk(m_);
    lk.unlock();
    _exit(func());
  }};
}
// Tears down in strict order: kill the child, reap it, revoke its ptrace
// permission, then release the stack it was running on.
PtracerThread::~PtracerThread() {
  Kill();
  Join();
  ClearTracer();
  stack_ = nullptr;
}
// Clones the ptracer child: it shares our address space, filesystem state
// and fds, but lives in its own thread group so it is allowed to ptrace our
// threads.  Returns false if clone() fails.
bool PtracerThread::Start() {
  // Holding m_ keeps the child parked at its entry point until we return.
  std::unique_lock<std::mutex> lk(m_);
  // Convert from void(*)(void*) to lambda with captures
  auto proxy = [](void* arg) -> int {
    prctl(PR_SET_NAME, "libmemunreachable ptrace thread");
    return (*reinterpret_cast<std::function<int()>*>(arg))();
  };
  // See README.md for why we create the child process this way
  child_pid_ = clone(proxy, stack_->top(), CLONE_VM | CLONE_FS | CLONE_FILES /*|CLONE_UNTRACED*/,
                     reinterpret_cast<void*>(&func_));
  if (child_pid_ < 0) {
    MEM_ALOGE("failed to clone child: %s", strerror(errno));
    return false;
  }
  // Grant the child permission to ptrace us (required under Yama).
  SetTracer(child_pid_);
  lk.unlock();
  return true;
}
// Reaps the child.  Returns its exit status, the negated signal number if it
// was killed by a signal, or -1 on error or when no child is running.
int PtracerThread::Join() {
  if (child_pid_ == -1) {
    return -1;
  }
  int status;
  // __WALL: also wait for clone children that are not in our thread group.
  int ret = TEMP_FAILURE_RETRY(waitpid(child_pid_, &status, __WALL));
  if (ret < 0) {
    MEM_ALOGE("waitpid %d failed: %s", child_pid_, strerror(errno));
    return -1;
  }
  child_pid_ = -1;
  if (WIFEXITED(status)) {
    return WEXITSTATUS(status);
  } else if (WIFSIGNALED(status)) {
    return -WTERMSIG(status);
  } else {
    MEM_ALOGE("unexpected status %x", status);
    return -1;
  }
}
// Delivers SIGKILL to the child thread (tkill targets the single thread id
// produced by clone); a no-op when no child is running.
void PtracerThread::Kill() {
  if (child_pid_ != -1) {
    syscall(SYS_tkill, child_pid_, SIGKILL);
  }
}
// Allows |tracer_pid| to ptrace this process (prctl PR_SET_PTRACER; needed
// when the Yama LSM restricts ptrace scope).
void PtracerThread::SetTracer(pid_t tracer_pid) {
  prctl(PR_SET_PTRACER, tracer_pid);
}
// Revokes the ptrace permission granted by SetTracer().
void PtracerThread::ClearTracer() {
  prctl(PR_SET_PTRACER, 0);
}
} // namespace android

View File

@ -1,55 +0,0 @@
/*
* Copyright (C) 2016 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef LIBMEMUNREACHABLE_PTRACER_THREAD_H_
#define LIBMEMUNREACHABLE_PTRACER_THREAD_H_
#include <functional>
#include <mutex>
#include "android-base/macros.h"
#include "Allocator.h"
namespace android {
class Stack;
// PtracerThread is similar to std::thread, except that it creates a "thread"
// that can ptrace the other threads. The thread is actually a separate
// process, with its own thread group, but shares address space and fds with
// the parent.
class PtracerThread {
 public:
  // |func| runs on the new "thread" (really a clone()d process sharing our
  // address space); its return value is reported by Join().
  explicit PtracerThread(const std::function<int()>& func);
  ~PtracerThread();
  // Starts the cloned thread; returns false if clone() fails.
  bool Start();
  // Waits for the thread to finish; returns its exit code, the negated
  // signal number if killed by a signal, or -1 on error.
  int Join();

 private:
  void SetTracer(pid_t);
  void ClearTracer();
  void Kill();
  DISALLOW_COPY_AND_ASSIGN(PtracerThread);
  std::unique_ptr<Stack> stack_;  // mmap'd stack for the cloned child
  std::function<int()> func_;     // entry point, kept alive for the child
  std::mutex m_;                  // parks the child until Start() completes
  pid_t child_pid_;
};
} // namespace android
#endif // LIBMEMUNREACHABLE_PTRACER_THREAD_H_

View File

@ -1,92 +0,0 @@
libmemunreachable
================
Introduction
--------------
libmemunreachable is a zero-overhead native memory leak detector. It uses an imprecise mark-and-sweep garbage collector pass over all native memory, reporting any unreachable blocks as leaks. It is similar to the [Heap Checker from tcmalloc](http://htmlpreview.github.io/?https://github.com/gperftools/gperftools/blob/master/doc/heap_checker.html), but with a few key differences to remove the overhead. Instead of instrumenting every call to malloc and free, it queries the allocator (jemalloc) for active allocations when leak detection is requested. In addition, it performs a very short stop-the-world data collection on the main process, and then forks a copy of the process to perform the mark-and-sweep, minimizing disruption to the original process.
In the default (zero-overhead) mode, the returned data on leaks is limited to the address, approximate (upper bound) size, and the first 32 bytes of the contents of the leaked allocation. If malloc_debug backtraces are enabled they will be included in the leak information, but backtracing allocations requires significant overhead.
----------
Usage
-------
### In Android apps ###
libmemunreachable is loaded by zygote and can be triggered with `dumpsys -t 600 meminfo --unreachable [process]`.
To enable malloc\_debug backtraces on allocations for a single app process on a userdebug device, use:
```
adb root
adb shell setprop libc.debug.malloc.program app_process
adb shell setprop wrap.[process] "\$\@"
adb shell setprop libc.debug.malloc.options backtrace=4
```
Kill and restart the app, trigger the leak, and then run `dumpsys -t 600 meminfo --unreachable [process]`.
To disable malloc\_debug:
```
adb shell setprop libc.debug.malloc.options "''"
adb shell setprop libc.debug.malloc.program "''"
adb shell setprop wrap.[process] "''"
```
### C interface ###
#### `bool LogUnreachableMemory(bool log_contents, size_t limit)` ####
Writes a description of leaked memory to the log. A summary is always written, followed by details of up to `limit` leaks. If `log_contents` is `true`, details include up to 32 bytes of the contents of each leaked allocation.
Returns true if leak detection succeeded.
#### `bool NoLeaks()` ####
Returns `true` if no unreachable memory was found.
### C++ interface ###
#### `bool GetUnreachableMemory(UnreachableMemoryInfo& info, size_t limit = 100)` ####
Updates an `UnreachableMemoryInfo` object with information on leaks, including details on up to `limit` leaks. Returns true if leak detection succeeded.
#### `std::string GetUnreachableMemoryString(bool log_contents = false, size_t limit = 100)` ####
Returns a description of leaked memory. A summary is always written, followed by details of up to `limit` leaks. If `log_contents` is `true`, details include up to 32 bytes of the contents of each leaked allocation.
Returns true if leak detection succeeded.
Implementation
-------------------
The sequence of steps required to perform a leak detection pass is divided into three processes - the original process, the collection process, and the sweeper process.
1. *Original process*: Leak detection is requested by calling `GetUnreachableMemory()`
2. Allocations are disabled using `malloc_disable()`
3. The collection process is spawned. The collection process, created using clone, is similar to a normal `fork()` child process, except that it shares the address space of the parent - any writes by the original process are visible to the collection process, and vice-versa. If we forked instead of using clone, the address space might get out of sync with observed post-ptrace thread state, since it takes some time to pause the parent.
4. *Collection process*: All threads in the original process are paused with `ptrace()`.
5. Registers contents, active stack areas, and memory mapping information are collected.
6. *Original process*: Allocations are re-enabled using `malloc_enable()`, but all threads are still paused with `ptrace()`.
7. *Collection process*: The sweeper process is spawned using a normal `fork()`. The sweeper process has a copy of all memory from the original process, including all the data collected by the collection process.
8. Collection process releases all threads from `ptrace` and exits
9. *Original process*: All threads continue, the thread that called `GetUnreachableMemory()` blocks waiting for leak data over a pipe.
10. *Sweeper process*: A list of all active allocations is produced by examining the memory mappings and calling `malloc_iterate()` on any heap mappings.
11. A list of all roots is produced from globals (.data and .bss sections of binaries), and registers and stacks from each thread.
12. The mark-and-sweep pass is performed starting from roots.
13. Unmarked allocations are sent over the pipe back to the original process.
----------
Components
---------------
- `MemUnreachable.cpp`: Entry points, implements the sequencing described above.
- `PtracerThread.cpp`: Used to clone the collection process with shared address space.
- `ThreadCapture.cpp`: Pauses threads in the main process and collects register contents.
- `ProcessMappings.cpp`: Collects snapshots of `/proc/pid/maps`.
- `HeapWalker.cpp`: Performs the mark-and-sweep pass over active allocations.
- `LeakPipe.cpp`: transfers data describing leaks from the sweeper process to the original process.
Heap allocator requirements
----------------------------------
libmemunreachable requires a small interface to the allocator in order to collect information about active allocations.
- `malloc_disable()`: prevent any thread from mutating internal allocator state.
- `malloc_enable()`: re-enable allocations in all threads.
- `malloc_iterate()`: call a callback on each active allocation in a given heap region.
- `malloc_backtrace()`: return the backtrace from when the allocation at the given address was allocated, if it was collected.

View File

@ -1,57 +0,0 @@
/*
* Copyright (C) 2016 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef LIBMEMUNREACHABLE_SCOPED_ALARM_H_
#define LIBMEMUNREACHABLE_SCOPED_ALARM_H_

#include <signal.h>
#include <sys/time.h>

#include <chrono>
#include <functional>

namespace android {

// RAII one-shot alarm: installs a SIGALRM handler that runs |func| after
// |us| has elapsed. The destructor disarms the timer and restores default
// SIGALRM handling. NOTE(review): func_ is static, so at most one
// ScopedAlarm can be meaningfully active in the process at a time.
class ScopedAlarm {
 public:
  ScopedAlarm(std::chrono::microseconds us, std::function<void()> func) {
    func_ = func;
    struct sigaction oldact {};
    struct sigaction act {};
    // Capture-less lambda decays to the plain function pointer sigaction
    // requires; it dispatches through the static func_.
    act.sa_handler = [](int) { ScopedAlarm::func_(); };
    sigaction(SIGALRM, &act, &oldact);
    // Split |us| into whole seconds plus leftover microseconds for itimerval.
    std::chrono::seconds s = std::chrono::duration_cast<std::chrono::seconds>(us);
    itimerval t = itimerval{};
    t.it_value.tv_sec = s.count();
    t.it_value.tv_usec = (us - s).count();
    setitimer(ITIMER_REAL, &t, NULL);
  }

  ~ScopedAlarm() {
    // Disarm the timer first, then restore the default disposition.
    itimerval t = itimerval{};
    setitimer(ITIMER_REAL, &t, NULL);
    struct sigaction act {};
    act.sa_handler = SIG_DFL;
    sigaction(SIGALRM, &act, NULL);
  }

 private:
  static std::function<void()> func_;
};

}  // namespace android

#endif

View File

@ -1,108 +0,0 @@
/*
* Copyright (C) 2016 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef LIBMEMUNREACHABLE_SCOPED_DISABLE_MALLOC_H_
#define LIBMEMUNREACHABLE_SCOPED_DISABLE_MALLOC_H_

#include <memory>

#include "android-base/macros.h"

#include "ScopedAlarm.h"
#include "bionic.h"
#include "log.h"

namespace android {

// Idempotent wrapper over bionic's malloc_disable()/malloc_enable():
// Disable()/Enable() may be called repeatedly, and the destructor guarantees
// malloc is re-enabled if it was disabled through this guard.
class DisableMallocGuard {
 public:
  DisableMallocGuard() : disabled_(false) {}
  ~DisableMallocGuard() { Enable(); }

  void Disable() {
    if (!disabled_) {
      disabled_ = true;
      malloc_disable();
    }
  }

  void Enable() {
    if (disabled_) {
      malloc_enable();
      disabled_ = false;
    }
  }

 private:
  DISALLOW_COPY_AND_ASSIGN(DisableMallocGuard);
  bool disabled_;
};

// Any calls to malloc or free from this thread will deadlock as long as this
// object is in scope. Calls to malloc from other threads may succeed (for
// example if the allocation is satisfied out of the thread's tcache), or may
// block until the object is destroyed.
//
// Don't call fork() while malloc is disabled, it needs the same locks held
// here.
class ScopedDisableMalloc {
 public:
  ScopedDisableMalloc() { disable_malloc_.Disable(); }
  ~ScopedDisableMalloc() { disable_malloc_.Enable(); }

 private:
  DISALLOW_COPY_AND_ASSIGN(ScopedDisableMalloc);
  DisableMallocGuard disable_malloc_;
};

// Like ScopedDisableMalloc, but automatically re-enables malloc after
// |timeout| via a SIGALRM-driven ScopedAlarm, so a wedged pass cannot keep
// the process's allocator disabled forever. timed_out() reports whether the
// alarm fired.
class ScopedDisableMallocTimeout {
 public:
  explicit ScopedDisableMallocTimeout(std::chrono::milliseconds timeout = std::chrono::seconds(10))
      : timeout_(timeout), timed_out_(false), disable_malloc_() {
    Disable();
  }

  ~ScopedDisableMallocTimeout() { Enable(); }

  bool timed_out() { return timed_out_; }

  void Enable() {
    disable_malloc_.Enable();
    alarm_ = nullptr;
  }

  void Disable() {
    // set up the alarm before disabling malloc so unique_ptr can be used
    alarm_ = std::make_unique<ScopedAlarm>(timeout_, [&]() {
      disable_malloc_.Enable();
      timed_out_ = true;
    });
    disable_malloc_.Disable();
  }

 private:
  DISALLOW_COPY_AND_ASSIGN(ScopedDisableMallocTimeout);
  std::chrono::milliseconds timeout_;
  bool timed_out_;
  std::unique_ptr<ScopedAlarm> alarm_;
  DisableMallocGuard disable_malloc_;
};

}  // namespace android

#endif  // LIBMEMUNREACHABLE_SCOPED_DISABLE_MALLOC_H_

View File

@ -1,80 +0,0 @@
/*
* Copyright (C) 2016 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef LIBMEMUNREACHABLE_SCOPED_PIPE_H_
#define LIBMEMUNREACHABLE_SCOPED_PIPE_H_

#include <fcntl.h>  // O_CLOEXEC (was previously relied on transitively)
#include <unistd.h>

#include "log.h"

namespace android {

// RAII wrapper around a pipe2() pipe. Owns both fds; closed fds and
// moved-from objects hold -1.
class ScopedPipe {
 public:
  ScopedPipe() : pipefd_{-1, -1} {
    int ret = pipe2(pipefd_, O_CLOEXEC);
    if (ret < 0) {
      MEM_LOG_ALWAYS_FATAL("failed to open pipe");
    }
  }
  ~ScopedPipe() { Close(); }

  // Move transfers ownership; |other| is left empty (-1, -1).
  ScopedPipe(ScopedPipe&& other) noexcept : pipefd_{-1, -1} {
    SetReceiver(other.ReleaseReceiver());
    SetSender(other.ReleaseSender());
  }

  ScopedPipe& operator=(ScopedPipe&& other) noexcept {
    if (this != &other) {
      // Close the fds we currently own before adopting the new ones;
      // previously they were overwritten without being closed, leaking them.
      Close();
      SetReceiver(other.ReleaseReceiver());
      SetSender(other.ReleaseSender());
    }
    return *this;
  }

  // Closing an end that is already closed (fd == -1) is a no-op; the old
  // code called close(-1), which fails with EBADF.
  void CloseReceiver() {
    int fd = ReleaseReceiver();
    if (fd != -1) {
      close(fd);
    }
  }

  void CloseSender() {
    int fd = ReleaseSender();
    if (fd != -1) {
      close(fd);
    }
  }

  void Close() {
    CloseReceiver();
    CloseSender();
  }

  int Receiver() { return pipefd_[0]; }
  int Sender() { return pipefd_[1]; }

  // Release*() hand ownership of the fd to the caller and forget it here.
  int ReleaseReceiver() {
    int ret = Receiver();
    SetReceiver(-1);
    return ret;
  }

  int ReleaseSender() {
    int ret = Sender();
    SetSender(-1);
    return ret;
  }

 private:
  void SetReceiver(int fd) { pipefd_[0] = fd; }
  void SetSender(int fd) { pipefd_[1] = fd; }

  int pipefd_[2];  // [0] = read end, [1] = write end; -1 when not owned
};

}  // namespace android

#endif

View File

@ -1,95 +0,0 @@
/*
* Copyright (C) 2016 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef LIBMEMUNREACHABLE_SCOPED_SIGNAL_HANDLER_H_
#define LIBMEMUNREACHABLE_SCOPED_SIGNAL_HANDLER_H_

#include <errno.h>
#include <signal.h>

#include <functional>

#include "android-base/macros.h"

#include "Allocator.h"
#include "log.h"

namespace android {

// RAII signal-handler registration. install() binds a callable to a signal;
// reset() (also run by the destructor) restores the previous disposition.
// Handlers live in a static signal->callable map so the raw sa_sigaction
// trampoline (a capture-less lambda) can find them.
class ScopedSignalHandler {
 public:
  using Fn = std::function<void(ScopedSignalHandler&, int, siginfo_t*, void*)>;

  explicit ScopedSignalHandler(Allocator<ScopedSignalHandler> allocator) : signal_(-1) {
    // Lazily create the shared handler map using the caller's allocator.
    if (handler_map_ == nullptr) {
      Allocator<SignalFnMap> map_allocator = allocator;
      handler_map_ = map_allocator.make_unique(allocator);
    }
  }
  ~ScopedSignalHandler() { reset(); }

  // Installs |f| for |signal|. Fatal if this object already holds a signal,
  // or if another ScopedSignalHandler already handles |signal|.
  template <class F>
  void install(int signal, F&& f) {
    if (signal_ != -1) MEM_LOG_ALWAYS_FATAL("ScopedSignalHandler already installed");
    if (handler_map_->find(signal) != handler_map_->end()) {
      MEM_LOG_ALWAYS_FATAL("ScopedSignalHandler already installed for %d", signal);
    }

    (*handler_map_)[signal] =
        SignalFn([=](int signal, siginfo_t* si, void* uctx) { f(*this, signal, si, uctx); });

    struct sigaction act {};
    // Capture-less lambda decays to the function pointer sigaction expects;
    // it dispatches through the static handler_map_.
    act.sa_sigaction = [](int signal, siginfo_t* si, void* uctx) {
      ((*handler_map_)[signal])(signal, si, uctx);
    };
    act.sa_flags = SA_SIGINFO;

    int ret = sigaction(signal, &act, &old_act_);
    if (ret < 0) {
      MEM_LOG_ALWAYS_FATAL("failed to install segfault handler: %s", strerror(errno));
    }

    signal_ = signal;
  }

  // Restores the saved disposition and releases the map entry; tears down
  // the map itself once the last handler is gone.
  void reset() {
    if (signal_ != -1) {
      int ret = sigaction(signal_, &old_act_, NULL);
      if (ret < 0) {
        MEM_ALOGE("failed to uninstall segfault handler");
      }
      handler_map_->erase(signal_);
      if (handler_map_->empty()) {
        handler_map_.reset();
      }
      signal_ = -1;
    }
  }

 private:
  using SignalFn = std::function<void(int, siginfo_t*, void*)>;
  using SignalFnMap = allocator::unordered_map<int, SignalFn>;
  DISALLOW_COPY_AND_ASSIGN(ScopedSignalHandler);

  int signal_;                 // signal this object currently handles, or -1
  struct sigaction old_act_;   // disposition to restore in reset()
  static Allocator<SignalFnMap>::unique_ptr handler_map_;
};

}  // namespace android

#endif  // LIBMEMUNREACHABLE_SCOPED_SIGNAL_HANDLER_H_

View File

@ -1,60 +0,0 @@
/*
* Copyright (C) 2016 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef LIBMEMUNREACHABLE_SEMAPHORE_H_
#define LIBMEMUNREACHABLE_SEMAPHORE_H_

#include <chrono>
#include <condition_variable>  // std::condition_variable was used without this include
#include <mutex>

#include "android-base/macros.h"

namespace android {

// Minimal counting semaphore built on std::mutex + std::condition_variable.
// Wait() is best-effort: after the timeout it returns without a permit, and
// the caller cannot distinguish timeout from success.
class Semaphore {
 public:
  explicit Semaphore(int count = 0) : count_(count) {}
  ~Semaphore() = default;

  // Blocks until a permit is available (decrementing the count) or |ms|
  // elapses, whichever comes first.
  void Wait(std::chrono::milliseconds ms) {
    std::unique_lock<std::mutex> lk(m_);
    cv_.wait_for(lk, ms, [&] {
      if (count_ > 0) {
        count_--;
        return true;
      }
      return false;
    });
  }

  // Releases one permit and wakes a single waiter. The notify happens
  // outside the lock to avoid waking a thread that immediately blocks.
  void Post() {
    {
      std::lock_guard<std::mutex> lk(m_);
      count_++;
    }
    cv_.notify_one();
  }

 private:
  DISALLOW_COPY_AND_ASSIGN(Semaphore);
  int count_;  // number of available permits
  std::mutex m_;
  std::condition_variable cv_;
};

}  // namespace android

#endif  // LIBMEMUNREACHABLE_SEMAPHORE_H_

View File

@ -1,138 +0,0 @@
/*
* Copyright (C) 2016 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// Based on system/update_engine/payload_generator/tarjan.cc

#ifndef LIBMEMUNREACHABLE_TARJAN_H_
#define LIBMEMUNREACHABLE_TARJAN_H_

#include <assert.h>
#include <algorithm>

#include "Allocator.h"

namespace android {

// Graph vertex wrapping a T*, with adjacency tracked in both directions and
// the index/lowlink bookkeeping used by Tarjan's SCC algorithm.
template <class T>
class Node {
 public:
  allocator::set<Node<T>*> references_in;
  allocator::set<Node<T>*> references_out;
  size_t index;    // DFS discovery order; UNDEFINED_INDEX until visited
  size_t lowlink;  // smallest discovery index reachable from this vertex
  T* ptr;

  Node(T* ptr, Allocator<Node> allocator)
      : references_in(allocator), references_out(allocator), ptr(ptr){};
  Node(Node&& rhs) noexcept = default;

  // Adds a directed edge this -> ref, maintaining both adjacency sets.
  void Edge(Node<T>* ref) {
    references_out.emplace(ref);
    ref->references_in.emplace(this);
  }

  // Applies f to the payload of every direct successor.
  template <class F>
  void Foreach(F&& f) {
    for (auto& node : references_out) {
      f(node->ptr);
    }
  }

 private:
  DISALLOW_COPY_AND_ASSIGN(Node<T>);
};

template <class T>
using Graph = allocator::vector<Node<T>*>;

template <class T>
using SCC = allocator::vector<Node<T>*>;

template <class T>
using SCCList = allocator::vector<SCC<T>>;

// Computes strongly-connected components using Tarjan's algorithm.
// NOTE(review): Tarjan() recurses once per unvisited vertex along a DFS
// path, so very deep graphs can consume significant stack.
template <class T>
class TarjanAlgorithm {
 public:
  explicit TarjanAlgorithm(Allocator<void> allocator)
      : index_(0), stack_(allocator), components_(allocator) {}

  void Execute(Graph<T>& graph, SCCList<T>& out);

 private:
  static constexpr size_t UNDEFINED_INDEX = static_cast<size_t>(-1);
  void Tarjan(Node<T>* vertex, Graph<T>& graph);

  size_t index_;                       // next discovery index to assign
  allocator::vector<Node<T>*> stack_;  // vertices of the SCC being built
  SCCList<T> components_;
};

// Resets per-run state on every vertex, runs the DFS from each unvisited
// vertex, and swaps the accumulated component list into |out|.
template <class T>
void TarjanAlgorithm<T>::Execute(Graph<T>& graph, SCCList<T>& out) {
  stack_.clear();
  components_.clear();
  index_ = 0;
  for (auto& it : graph) {
    it->index = UNDEFINED_INDEX;
    it->lowlink = UNDEFINED_INDEX;
  }

  for (auto& it : graph) {
    if (it->index == UNDEFINED_INDEX) {
      Tarjan(it, graph);
    }
  }

  out.swap(components_);
}

// Recursive DFS step: assigns discovery indices, propagates lowlinks through
// successors, and pops one complete component off the stack whenever a root
// (lowlink == index) is found.
template <class T>
void TarjanAlgorithm<T>::Tarjan(Node<T>* vertex, Graph<T>& graph) {
  assert(vertex->index == UNDEFINED_INDEX);
  vertex->index = index_;
  vertex->lowlink = index_;
  index_++;
  stack_.push_back(vertex);
  for (auto& it : vertex->references_out) {
    Node<T>* vertex_next = it;
    if (vertex_next->index == UNDEFINED_INDEX) {
      // Unvisited successor: recurse, then adopt its lowlink.
      Tarjan(vertex_next, graph);
      vertex->lowlink = std::min(vertex->lowlink, vertex_next->lowlink);
    } else if (std::find(stack_.begin(), stack_.end(), vertex_next) != stack_.end()) {
      // Successor is on the stack, i.e. in the current component.
      vertex->lowlink = std::min(vertex->lowlink, vertex_next->index);
    }
  }
  if (vertex->lowlink == vertex->index) {
    SCC<T> component{components_.get_allocator()};
    Node<T>* other_vertex;
    do {
      other_vertex = stack_.back();
      stack_.pop_back();
      component.push_back(other_vertex);
    } while (other_vertex != vertex && !stack_.empty());
    components_.emplace_back(component);
  }
}

// Convenience entry point: runs the algorithm with the graph's allocator.
template <class T>
void Tarjan(Graph<T>& graph, SCCList<T>& out) {
  TarjanAlgorithm<T> tarjan{graph.get_allocator()};
  tarjan.Execute(graph, out);
}

}  // namespace android

#endif  // LIBMEMUNREACHABLE_TARJAN_H_

View File

@ -1,367 +0,0 @@
/*
* Copyright (C) 2016 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "ThreadCapture.h"
#include <elf.h>
#include <errno.h>
#include <fcntl.h>
#include <limits.h>
#include <stdlib.h>
#include <sys/ptrace.h>
#include <sys/stat.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <sys/uio.h>
#include <sys/wait.h>
#include <unistd.h>
#include <map>
#include <memory>
#include <set>
#include <vector>
#include <android-base/unique_fd.h>
#include "Allocator.h"
#include "log.h"
namespace android {
// bionic interfaces used:
// atoi
// strlcat
// writev
// bionic interfaces reimplemented to avoid allocation:
// getdents64
// Formats pid > 0 as a decimal string without allocating (sprintf might
// allocate). Digits are written backwards from the end of |buf|. Returns a
// pointer into |buf| to the NUL-terminated string, or NULL if pid <= 0 or
// |buf| is too small to hold every digit plus the terminator.
static char* pid_to_str(char* buf, size_t len, pid_t pid) {
  if (pid <= 0) {
    return nullptr;
  }

  char* out = buf + len - 1;
  *out = '\0';
  for (pid_t rest = pid; rest > 0; rest /= 10) {
    if (--out < buf) {
      return nullptr;  // ran out of room before emitting all digits
    }
    *out = static_cast<char>('0' + rest % 10);
  }
  return out;
}
// Attaches to and pauses every thread of a target process with ptrace, and
// extracts each paused thread's register contents and stack pointer.
class ThreadCaptureImpl {
 public:
  ThreadCaptureImpl(pid_t pid, Allocator<ThreadCaptureImpl>& allocator);
  ~ThreadCaptureImpl() {}
  bool ListThreads(TidList& tids);
  bool CaptureThreads();
  bool ReleaseThreads();
  bool ReleaseThread(pid_t tid);
  bool CapturedThreadInfo(ThreadInfoList& threads);
  void InjectTestFunc(std::function<void(pid_t)>&& f) { inject_test_func_ = f; }

 private:
  int CaptureThread(pid_t tid);
  bool ReleaseThread(pid_t tid, unsigned int signal);
  int PtraceAttach(pid_t tid);
  void PtraceDetach(pid_t tid, unsigned int signal);
  bool PtraceThreadInfo(pid_t tid, ThreadInfo& thread_info);

  // tid -> signal to re-deliver when the thread is released.
  allocator::map<pid_t, unsigned int> captured_threads_;
  Allocator<ThreadCaptureImpl> allocator_;
  pid_t pid_;  // target process
  std::function<void(pid_t)> inject_test_func_;  // test-only hook
};
// Internal containers use the caller-provided Allocator rather than the
// default heap.
ThreadCaptureImpl::ThreadCaptureImpl(pid_t pid, Allocator<ThreadCaptureImpl>& allocator)
    : captured_threads_(allocator), allocator_(allocator), pid_(pid) {}
// Fills |tids| with the tids of every task of pid_ by reading
// /proc/<pid>/task. Path construction and directory iteration are done with
// pid_to_str and raw SYS_getdents64 to avoid allocating (see the
// "reimplemented to avoid allocation" note at the top of this file).
bool ThreadCaptureImpl::ListThreads(TidList& tids) {
  tids.clear();
  char pid_buf[11];
  char path[256] = "/proc/";
  char* pid_str = pid_to_str(pid_buf, sizeof(pid_buf), pid_);
  if (!pid_str) {
    return false;
  }
  strlcat(path, pid_str, sizeof(path));
  strlcat(path, "/task", sizeof(path));
  android::base::unique_fd fd(open(path, O_CLOEXEC | O_DIRECTORY | O_RDONLY));
  if (fd == -1) {
    MEM_ALOGE("failed to open %s: %s", path, strerror(errno));
    return false;
  }
  // Local mirror of the kernel's dirent64 layout for SYS_getdents64.
  struct linux_dirent64 {
    uint64_t d_ino;
    int64_t d_off;
    uint16_t d_reclen;
    char d_type;
    char d_name[];
  } __attribute((packed));
  char dirent_buf[4096];
  ssize_t nread;
  do {
    nread = syscall(SYS_getdents64, fd.get(), dirent_buf, sizeof(dirent_buf));
    if (nread < 0) {
      MEM_ALOGE("failed to get directory entries from %s: %s", path, strerror(errno));
      return false;
    } else if (nread > 0) {
      ssize_t off = 0;
      while (off < nread) {
        linux_dirent64* dirent = reinterpret_cast<linux_dirent64*>(dirent_buf + off);
        off += dirent->d_reclen;
        // Skips "." and ".." and any non-numeric name (atoi returns 0).
        pid_t tid = atoi(dirent->d_name);
        if (tid <= 0) {
          continue;
        }
        tids.push_back(tid);
      }
    }
  } while (nread != 0);
  return true;
}
bool ThreadCaptureImpl::CaptureThreads() {
TidList tids{allocator_};
bool found_new_thread;
do {
if (!ListThreads(tids)) {
ReleaseThreads();
return false;
}
found_new_thread = false;
for (auto it = tids.begin(); it != tids.end(); it++) {
auto captured = captured_threads_.find(*it);
if (captured == captured_threads_.end()) {
if (CaptureThread(*it) < 0) {
ReleaseThreads();
return false;
}
found_new_thread = true;
}
}
} while (found_new_thread);
return true;
}
// Detaches from |tid|, re-delivering |signal| if nonzero; logs on error.
// ESRCH (the thread already exited) is expected and silently ignored.
void ThreadCaptureImpl::PtraceDetach(pid_t tid, unsigned int signal) {
  uintptr_t sig_value = signal;
  long ret = ptrace(PTRACE_DETACH, tid, NULL, reinterpret_cast<void*>(sig_value));
  if (ret < 0 && errno != ESRCH) {
    MEM_ALOGE("failed to detach from thread %d of process %d: %s", tid, pid_, strerror(errno));
  }
}
// Attaches to and pauses thread.
// Returns 1 on attach, 0 on tid not found, -1 and logs on error
int ThreadCaptureImpl::PtraceAttach(pid_t tid) {
  // PTRACE_SEIZE attaches without stopping the thread; the stop is requested
  // separately below with PTRACE_INTERRUPT.
  int ret = ptrace(PTRACE_SEIZE, tid, NULL, NULL);
  if (ret < 0) {
    MEM_ALOGE("failed to attach to thread %d of process %d: %s", tid, pid_, strerror(errno));
    return -1;
  }
  // Test-only hook, lets tests perturb the thread between seize and
  // interrupt.
  if (inject_test_func_) {
    inject_test_func_(tid);
  }
  if (ptrace(PTRACE_INTERRUPT, tid, 0, 0) < 0) {
    if (errno == ESRCH) {
      // Thread exited between SEIZE and INTERRUPT.
      return 0;
    } else {
      MEM_ALOGE("failed to interrupt thread %d of process %d: %s", tid, pid_, strerror(errno));
      PtraceDetach(tid, 0);
      return -1;
    }
  }
  return 1;
}
// Reads |tid|'s general-purpose registers via PTRACE_GETREGSET and records
// the thread's stack pointer from the architecture-specific register slot.
// The second element of the stack pair is left 0 (see TODO below).
bool ThreadCaptureImpl::PtraceThreadInfo(pid_t tid, ThreadInfo& thread_info) {
  thread_info.tid = tid;
  const unsigned int max_num_regs = 128;  // larger than number of registers on any device
  uintptr_t regs[max_num_regs];
  struct iovec iovec;
  iovec.iov_base = &regs;
  iovec.iov_len = sizeof(regs);
  if (ptrace(PTRACE_GETREGSET, tid, reinterpret_cast<void*>(NT_PRSTATUS), &iovec)) {
    MEM_ALOGE("ptrace getregset for thread %d of process %d failed: %s", tid, pid_, strerror(errno));
    return false;
  }
  // The kernel shrinks iov_len to the actual register dump size.
  unsigned int num_regs = iovec.iov_len / sizeof(uintptr_t);
  thread_info.regs.assign(&regs[0], &regs[num_regs]);
  // Index of the stack pointer within the dump, per architecture.
  const int sp =
#if defined(__x86_64__)
      offsetof(struct pt_regs, rsp) / sizeof(uintptr_t)
#elif defined(__i386__)
      offsetof(struct pt_regs, esp) / sizeof(uintptr_t)
#elif defined(__arm__)
      offsetof(struct pt_regs, ARM_sp) / sizeof(uintptr_t)
#elif defined(__aarch64__)
      offsetof(struct user_pt_regs, sp) / sizeof(uintptr_t)
#elif defined(__mips__) || defined(__mips64__)
      offsetof(struct pt_regs, regs[29]) / sizeof(uintptr_t)
#else
#error Unrecognized architecture
#endif
      ;
  // TODO(ccross): use /proc/tid/status or /proc/pid/maps to get start_stack
  thread_info.stack = std::pair<uintptr_t, uintptr_t>(regs[sp], 0);
  return true;
}
// Seizes and stops |tid|, waits for the stop to take effect, and records
// which signal (if any) must be re-delivered when the thread is released.
// Returns 1 on success, 0 if the thread disappeared, -1 on error.
int ThreadCaptureImpl::CaptureThread(pid_t tid) {
  int ret = PtraceAttach(tid);
  if (ret <= 0) {
    return ret;
  }
  int status = 0;
  if (TEMP_FAILURE_RETRY(waitpid(tid, &status, __WALL)) < 0) {
    MEM_ALOGE("failed to wait for pause of thread %d of process %d: %s", tid, pid_, strerror(errno));
    PtraceDetach(tid, 0);
    return -1;
  }
  if (!WIFSTOPPED(status)) {
    MEM_ALOGE("thread %d of process %d was not paused after waitpid, killed?", tid, pid_);
    return 0;
  }
  unsigned int resume_signal = 0;
  unsigned int signal = WSTOPSIG(status);
  // Distinguish ptrace-event stops (don't re-deliver the signal) from
  // signal-delivery stops (re-deliver on detach so the signal isn't lost).
  if ((status >> 16) == PTRACE_EVENT_STOP) {
    switch (signal) {
      case SIGSTOP:
      case SIGTSTP:
      case SIGTTIN:
      case SIGTTOU:
        // group-stop signals
        break;
      case SIGTRAP:
        // normal ptrace interrupt stop
        break;
      default:
        MEM_ALOGE("unexpected signal %d with PTRACE_EVENT_STOP for thread %d of process %d", signal,
                  tid, pid_);
        return -1;
    }
  } else {
    // signal-delivery-stop
    resume_signal = signal;
  }
  captured_threads_[tid] = resume_signal;
  return 1;
}
// Releases a single captured thread, re-delivering its recorded pending
// signal. Returns false if |tid| was never captured.
bool ThreadCaptureImpl::ReleaseThread(pid_t tid) {
  auto found = captured_threads_.find(tid);
  return found != captured_threads_.end() && ReleaseThread(found->first, found->second);
}
// Detaches from |tid|, re-delivering |signal| (0 means none). Always reports
// success; detach errors are logged inside PtraceDetach.
bool ThreadCaptureImpl::ReleaseThread(pid_t tid, unsigned int signal) {
  PtraceDetach(tid, signal);
  return true;
}
// Releases every captured thread. Threads that release successfully are
// dropped from the map; any failure is remembered and reported at the end.
bool ThreadCaptureImpl::ReleaseThreads() {
  bool all_released = true;
  auto it = captured_threads_.begin();
  while (it != captured_threads_.end()) {
    if (ReleaseThread(it->first, it->second)) {
      it = captured_threads_.erase(it);
    } else {
      ++it;
      all_released = false;
    }
  }
  return all_released;
}
// Collects register/stack info for every captured thread into |threads|.
// Returns false (leaving |threads| partially filled) if any single thread's
// state cannot be read.
bool ThreadCaptureImpl::CapturedThreadInfo(ThreadInfoList& threads) {
  threads.clear();
  for (auto it = captured_threads_.begin(); it != captured_threads_.end(); it++) {
    ThreadInfo t{0, allocator::vector<uintptr_t>(allocator_), std::pair<uintptr_t, uintptr_t>(0, 0)};
    if (!PtraceThreadInfo(it->first, t)) {
      return false;
    }
    threads.push_back(t);
  }
  return true;
}
// ThreadCapture is a thin pimpl facade; all real work happens in
// ThreadCaptureImpl above. Each method simply forwards.
ThreadCapture::ThreadCapture(pid_t pid, Allocator<ThreadCapture> allocator) {
  Allocator<ThreadCaptureImpl> impl_allocator = allocator;
  impl_ = impl_allocator.make_unique(pid, impl_allocator);
}
ThreadCapture::~ThreadCapture() {}
bool ThreadCapture::ListThreads(TidList& tids) {
  return impl_->ListThreads(tids);
}
bool ThreadCapture::CaptureThreads() {
  return impl_->CaptureThreads();
}
bool ThreadCapture::ReleaseThreads() {
  return impl_->ReleaseThreads();
}
bool ThreadCapture::ReleaseThread(pid_t tid) {
  return impl_->ReleaseThread(tid);
}
bool ThreadCapture::CapturedThreadInfo(ThreadInfoList& threads) {
  return impl_->CapturedThreadInfo(threads);
}
void ThreadCapture::InjectTestFunc(std::function<void(pid_t)>&& f) {
  impl_->InjectTestFunc(std::forward<std::function<void(pid_t)>>(f));
}
} // namespace android

View File

@ -1,58 +0,0 @@
/*
* Copyright (C) 2016 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef LIBMEMUNREACHABLE_THREAD_CAPTURE_H_
#define LIBMEMUNREACHABLE_THREAD_CAPTURE_H_

#include <utility>

#include "Allocator.h"

namespace android {

// Register dump and stack location for one paused thread.
struct ThreadInfo {
  pid_t tid;
  allocator::vector<uintptr_t> regs;
  std::pair<uintptr_t, uintptr_t> stack;  // stack bounds; second element may be unset (0)
};

using TidList = allocator::vector<pid_t>;
using ThreadInfoList = allocator::vector<ThreadInfo>;

class ThreadCaptureImpl;

// Pauses all threads of a target process with ptrace and reports their
// register and stack state. Pimpl wrapper around ThreadCaptureImpl
// (ThreadCapture.cpp).
class ThreadCapture {
 public:
  ThreadCapture(pid_t pid, Allocator<ThreadCapture> allocator);
  ~ThreadCapture();

  bool ListThreads(TidList& tids);
  bool CaptureThreads();
  bool ReleaseThreads();
  bool ReleaseThread(pid_t tid);
  bool CapturedThreadInfo(ThreadInfoList& threads);
  void InjectTestFunc(std::function<void(pid_t)>&& f);  // test-only hook

 private:
  ThreadCapture(const ThreadCapture&) = delete;
  void operator=(const ThreadCapture&) = delete;

  Allocator<ThreadCaptureImpl>::unique_ptr impl_;
};

}  // namespace android

#endif

View File

@ -1,35 +0,0 @@
/*
* Copyright (C) 2016 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef LIBMEMUNREACHABLE_BIONIC_H_
#define LIBMEMUNREACHABLE_BIONIC_H_

#include <stdint.h>
#include <stdlib.h>
#include <sys/cdefs.h>

__BEGIN_DECLS

/* Exported from bionic */
/* Prevents any thread from mutating internal allocator state (see the README
 * section "Heap allocator requirements"). */
extern void malloc_disable();
/* Re-enables allocations in all threads. */
extern void malloc_enable();
/* Calls |callback| on each active allocation within [base, base+size). */
extern int malloc_iterate(uintptr_t base, size_t size,
                          void (*callback)(uintptr_t base, size_t size, void* arg), void* arg);
/* Returns the backtrace recorded when |pointer| was allocated, if one was
 * collected. */
extern ssize_t malloc_backtrace(void* pointer, uintptr_t* frames, size_t frame_count);

__END_DECLS

#endif  // LIBMEMUNREACHABLE_BIONIC_H_

View File

@ -1,93 +0,0 @@
/*
* Copyright (C) 2016 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef LIBMEMUNREACHABLE_MEMUNREACHABLE_H_
#define LIBMEMUNREACHABLE_MEMUNREACHABLE_H_

#include <string.h>
#include <sys/cdefs.h>

#ifdef __cplusplus

#include <string>
#include <vector>

namespace android {

// One unreachable allocation plus aggregate stats. The reserved fields
// suggest this layout is ABI-sensitive; do not reorder members.
// NOTE(review): exact semantics of the referenced_/similar_ counters are
// defined where leaks are aggregated (MemUnreachable.cpp) — confirm there.
struct Leak {
  uintptr_t begin = 0;  // address of the leaked allocation
  size_t size = 0;      // approximate (upper bound) size, per the README

  size_t referenced_count = 0;
  size_t referenced_size = 0;

  size_t similar_count = 0;
  size_t similar_size = 0;
  size_t similar_referenced_count = 0;
  size_t similar_referenced_size = 0;

  size_t total_size = 0;

  static const size_t contents_length = 32;
  // First bytes of the leaked allocation's contents (see README).
  char contents[contents_length] = {};

  struct Backtrace {
    size_t num_frames = 0;

    static const size_t max_frames = 16;
    uintptr_t frames[max_frames] = {};

    size_t reserved[8] = {};
  } backtrace;

  size_t reserved[8] = {};

  std::string ToString(bool log_contents) const;
};

// Summary of one leak-detection pass.
struct UnreachableMemoryInfo {
  std::vector<Leak> leaks;
  size_t num_leaks = 0;
  size_t leak_bytes = 0;
  size_t num_allocations = 0;
  size_t allocation_bytes = 0;

  size_t version = 0;  // Must be 0
  size_t reserved[8] = {};

  UnreachableMemoryInfo() {}
  ~UnreachableMemoryInfo();

  std::string ToString(bool log_contents) const;
};

// Runs leak detection, filling |info| with details on up to |limit| leaks.
// Returns true if the pass succeeded.
bool GetUnreachableMemory(UnreachableMemoryInfo& info, size_t limit = 100);

// As above, but returns a human-readable report (summary plus details on up
// to |limit| leaks, optionally with allocation contents).
std::string GetUnreachableMemoryString(bool log_contents = false, size_t limit = 100);

}  // namespace android

#endif

__BEGIN_DECLS

/* C API: writes a leak report to the log; details for up to |limit| leaks. */
bool LogUnreachableMemory(bool log_contents, size_t limit);

/* C API: returns true if no unreachable memory was found. */
bool NoLeaks();

__END_DECLS

#endif  // LIBMEMUNREACHABLE_MEMUNREACHABLE_H_

View File

@ -1,13 +0,0 @@
/* Linker version script for libmemunreachable: only the symbols listed under
 * "global" are exported from the shared library; everything else is hidden
 * by the "local: *" catch-all. */
LIBMEMUNREACHABLE {
global:
/* C entry points. */
LogUnreachableMemory;
NoLeaks;
/* C++ public API, matched by demangled-name pattern (functions plus all
 * members of the two public structs). */
extern "C++" {
android::GetUnreachableMemory*;
android::GetUnreachableMemoryString*;
android::Leak::*;
android::UnreachableMemoryInfo::*;
};
/* Every other symbol stays private to the library. */
local:
*;
};

View File

@ -1,57 +0,0 @@
/*
* Copyright (C) 2016 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef LIBMEMUNREACHABLE_LOG_H_
#define LIBMEMUNREACHABLE_LOG_H_
/* Logging macros for libmemunreachable.  On Android they route through
 * async_safe rather than the regular liblog macros — presumably because the
 * library runs while malloc is disabled and must avoid paths that might
 * allocate.  NOTE(review): confirm that rationale against the library docs. */
#define LOG_TAG "libmemunreachable"
#if defined(__ANDROID__)
#include <async_safe/log.h>
#define MEM_ALOGE(...) async_safe_format_log(ANDROID_LOG_ERROR, LOG_TAG, ##__VA_ARGS__)
#define MEM_ALOGW(...) async_safe_format_log(ANDROID_LOG_WARN, LOG_TAG, ##__VA_ARGS__)
#define MEM_ALOGI(...) async_safe_format_log(ANDROID_LOG_INFO, LOG_TAG, ##__VA_ARGS__)
#define MEM_ALOGV_IMPL(...) async_safe_format_log(ANDROID_LOG_VERBOSE, LOG_TAG, ##__VA_ARGS__)
#ifdef NDEBUG
/* Release builds: verbose logging compiles away, but the dead `if (0)`
 * branch keeps the arguments type-checked. */
#define MEM_ALOGV(...) \
do { \
if (0) { \
MEM_ALOGV_IMPL(__VA_ARGS__); \
} \
} while (0)
#else
#define MEM_ALOGV(...) MEM_ALOGV_IMPL(__VA_ARGS__)
#endif
#define MEM_LOG_ALWAYS_FATAL(...) async_safe_fatal(__VA_ARGS__)
#else
/* Non-Android (host) builds fall back to the standard liblog macros. */
#include <log/log.h>
#define MEM_ALOGW ALOGW
#define MEM_ALOGE ALOGE
#define MEM_ALOGV ALOGV
#define MEM_ALOGI ALOGI
#define MEM_LOG_ALWAYS_FATAL LOG_ALWAYS_FATAL
#endif
#endif // LIBMEMUNREACHABLE_LOG_H_

View File

@ -1,176 +0,0 @@
/*
* Copyright (C) 2016 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <Allocator.h>
#include <ScopedDisableMalloc.h>
#include <gtest/gtest.h>
namespace android {
// Definition for ScopedAlarm's static callback slot (declared in
// ScopedDisableMalloc.h).
std::function<void()> ScopedAlarm::func_;
// Fixture for Allocator/Heap tests.  Each test runs under
// ScopedDisableMallocTimeout, and TearDown verifies that every page was
// returned to the Heap and that the malloc-disable guard never timed out.
// NOTE(review): heap_count appears to be a global counter defined alongside
// Allocator — confirm in Allocator.h/.cpp.
class AllocatorTest : public testing::Test {
protected:
AllocatorTest() : heap(), disable_malloc_() {}
virtual void SetUp() { heap_count = 0; }
virtual void TearDown() {
// Everything allocated during the test must have been deallocated.
ASSERT_EQ(heap_count, 0);
ASSERT_TRUE(heap.empty());
// The test must have finished before the disable-malloc timeout fired.
ASSERT_FALSE(disable_malloc_.timed_out());
}
Heap heap;
private:
ScopedDisableMallocTimeout disable_malloc_;
};
// Single allocate/deallocate round trip.
TEST_F(AllocatorTest, simple) {
Allocator<char[100]> allocator(heap);
void* ptr = allocator.allocate();
ASSERT_TRUE(ptr != NULL);
allocator.deallocate(ptr);
}
// Distinct live allocations get distinct addresses, and a freed slot is
// reused by the next allocation.
TEST_F(AllocatorTest, multiple) {
Allocator<char[100]> allocator(heap);
void* ptr1 = allocator.allocate();
ASSERT_TRUE(ptr1 != NULL);
void* ptr2 = allocator.allocate();
ASSERT_TRUE(ptr2 != NULL);
ASSERT_NE(ptr1, ptr2);
allocator.deallocate(ptr1);
void* ptr3 = allocator.allocate();
ASSERT_EQ(ptr1, ptr3);
allocator.deallocate(ptr3);
allocator.deallocate(ptr2);
}
// Many small allocations: all pairwise distinct, contents preserved.
TEST_F(AllocatorTest, many) {
const int num = 4096;
const int size = 128;
Allocator<char[size]> allocator(heap);
void* ptr[num];
for (int i = 0; i < num; i++) {
ptr[i] = allocator.allocate();
memset(ptr[i], 0xaa, size);
*(reinterpret_cast<unsigned char*>(ptr[i])) = i;
}
// O(num^2) distinctness check — acceptable for a test.
for (int i = 0; i < num; i++) {
for (int j = 0; j < num; j++) {
if (i != j) {
ASSERT_NE(ptr[i], ptr[j]);
}
}
}
for (int i = 0; i < num; i++) {
ASSERT_EQ(*(reinterpret_cast<unsigned char*>(ptr[i])), i & 0xFF);
allocator.deallocate(ptr[i]);
}
}
// A single 1 MiB allocation is fully writable.
TEST_F(AllocatorTest, large) {
const size_t size = 1024 * 1024;
Allocator<char[size]> allocator(heap);
void* ptr = allocator.allocate();
memset(ptr, 0xaa, size);
allocator.deallocate(ptr);
}
// 128 concurrent 1 MiB allocations keep their contents.
TEST_F(AllocatorTest, many_large) {
const int num = 128;
const int size = 1024 * 1024;
Allocator<char[size]> allocator(heap);
void* ptr[num];
for (int i = 0; i < num; i++) {
ptr[i] = allocator.allocate();
memset(ptr[i], 0xaa, size);
*(reinterpret_cast<unsigned char*>(ptr[i])) = i;
}
for (int i = 0; i < num; i++) {
ASSERT_EQ(*(reinterpret_cast<unsigned char*>(ptr[i])), i & 0xFF);
allocator.deallocate(ptr[i]);
}
}
// Copied/rebound allocators share the same heap: they compare equal and an
// allocation from one can be deallocated through another.
TEST_F(AllocatorTest, copy) {
Allocator<char[100]> a(heap);
Allocator<char[200]> b = a;
Allocator<char[300]> c(b);
Allocator<char[100]> d(a);
Allocator<char[100]> e(heap);
ASSERT_EQ(a, b);
ASSERT_EQ(a, c);
ASSERT_EQ(a, d);
ASSERT_EQ(a, e);
void* ptr1 = a.allocate();
void* ptr2 = b.allocate();
void* ptr3 = c.allocate();
void* ptr4 = d.allocate();
// Cross-deallocation between equal allocators must be legal.
b.deallocate(ptr1);
d.deallocate(ptr2);
a.deallocate(ptr3);
c.deallocate(ptr4);
}
// Allocator works as the allocator of a std::vector-style container.
TEST_F(AllocatorTest, stl_vector) {
auto v = allocator::vector<int>(Allocator<int>(heap));
for (int i = 0; i < 1024; i++) {
v.push_back(i);
}
for (int i = 0; i < 1024; i++) {
ASSERT_EQ(v[i], i);
}
v.clear();
}
// Allocator works as the allocator of a std::list-style container.
TEST_F(AllocatorTest, stl_list) {
auto v = allocator::list<int>(Allocator<int>(heap));
for (int i = 0; i < 1024; i++) {
v.push_back(i);
}
int i = 0;
for (auto iter = v.begin(); iter != v.end(); iter++, i++) {
ASSERT_EQ(*iter, i);
}
v.clear();
}
// make_shared: copying and dropping a shared_ptr leaves the original valid.
TEST_F(AllocatorTest, shared) {
Allocator<int> allocator(heap);
Allocator<int>::shared_ptr ptr = allocator.make_shared(0);
{
auto ptr2 = ptr;  // NOLINT, test copy of ptr
}
ASSERT_NE(ptr, nullptr);
}
// make_unique returns a non-null owning pointer.
TEST_F(AllocatorTest, unique) {
Allocator<int> allocator(heap);
Allocator<int>::unique_ptr ptr = allocator.make_unique(0);
ASSERT_NE(ptr, nullptr);
}
}  // namespace android

View File

@ -1,26 +0,0 @@
<?xml version="1.0" encoding="utf-8"?>
<!-- Copyright (C) 2017 The Android Open Source Project
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<configuration description="Config for memunreachable_test">
<target_preparer class="com.android.tradefed.targetprep.PushFilePreparer">
<option name="cleanup" value="true" />
<option name="push" value="memunreachable_test->/data/local/tmp/memunreachable_test" />
</target_preparer>
<option name="test-suite-tag" value="apct" />
<test class="com.android.tradefed.testtype.GTest" >
<option name="native-test-device-path" value="/data/local/tmp" />
<option name="module-name" value="memunreachable_test" />
</test>
</configuration>

View File

@ -1,168 +0,0 @@
/*
* Copyright (C) 2016 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <signal.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>
#include <binder/Binder.h>
#include <binder/IBinder.h>
#include <binder/IServiceManager.h>
#include <binder/Parcel.h>
#include <binder/ProcessState.h>
#include <gtest/gtest.h>
#include "Allocator.h"
#include "Binder.h"
namespace android {
// Name under which the helper service registers with servicemanager.
static const String16 service_name("test.libmemunreachable_binder");
// Provides a service that will hold a strong reference to any remote binder
// object, so that the test can verify that a remote strong reference is
// visible to libmemunreachable.
class BinderService : public BBinder {
public:
BinderService() = default;
virtual ~BinderService() = default;
// Any transaction swaps the held reference: the previously held binder is
// written to |reply|, and the binder read from |data| is retained.
virtual status_t onTransact(uint32_t /*code*/, const Parcel& data, Parcel* reply,
uint32_t /*flags*/ = 0) {
reply->writeStrongBinder(ref);
ref = data.readStrongBinder();
return 0;
}
private:
sp<IBinder> ref;  // strong reference kept alive on behalf of the client
};
// Trivial local binder object whose address the test looks for in the
// kernel-reference list.
class BinderObject : public BBinder {
public:
BinderObject() = default;
~BinderObject() = default;
};
// Forks a subprocess that registers a BinderService with the global binder
// servicemanager. Requires root permissions.
class ServiceProcess {
public:
ServiceProcess() : child_(0) {}
~ServiceProcess() { Stop(); }
// Forks: the child runs Service() and never returns; the parent records
// the child pid. Returns false only if fork() itself fails.
bool Run() {
pid_t ret = fork();
if (ret < 0) {
return false;
} else if (ret == 0) {
// child
_exit(Service());
} else {
// parent
child_ = ret;
return true;
}
}
// Terminates the child with SIGTERM and reaps it; returns true if there
// was no child or it exited cleanly with status 0.
bool Stop() {
if (child_ > 0) {
if (kill(child_, SIGTERM)) {
return false;
}
int status = 0;
if (TEMP_FAILURE_RETRY(waitpid(child_, &status, 0)) != child_) {
return false;
}
child_ = 0;
return WIFEXITED(status) && WEXITSTATUS(status) == 0;
}
return true;
}
// Child-side body: registers BinderService with servicemanager (root
// required), starts the binder thread pool, then blocks forever.
int Service() {
sp<ProcessState> proc{ProcessState::self()};
sp<IServiceManager> sm = defaultServiceManager();
if (sm == nullptr) {
fprintf(stderr, "Failed to get service manager\n");
return 1;
}
// This step requires root permissions
if (sm->addService(service_name, new BinderService()) != OK) {
fprintf(stderr, "Failed to add test service\n");
return 1;
}
proc->startThreadPool();
pause();
return 0;
}
private:
pid_t child_;  // pid of the forked service process, 0 when not running
};
class MemunreachableBinderTest : public ::testing::Test {
protected:
ServiceProcess service_process_;
};
// Tests that a local binder object with a remote strong reference is visible
// through the libmemunreachable BinderReferences interface, which uses the
// getBinderKernelReferences method in libbinder. Starts a BinderService
// through ServiceProcess as a remote service to hold the strong reference.
TEST_F(MemunreachableBinderTest, binder) {
ASSERT_EQ(static_cast<uid_t>(0), getuid()) << "This test must be run as root.";
ServiceProcess service_process;
ASSERT_TRUE(service_process.Run());
sp<IServiceManager> sm = defaultServiceManager();
ASSERT_TRUE(sm != nullptr);
// A small sleep allows the service to start, which
// prevents a longer sleep in getService.
usleep(100000);
sp<IBinder> service = sm->getService(service_name);
ASSERT_TRUE(service != nullptr);
// Hand the remote service a strong reference to our local binder object.
sp<IBinder> binder{new BinderObject()};
Parcel send;
Parcel reply;
send.writeStrongBinder(binder);
status_t rv = service->transact(0, send, &reply);
ASSERT_EQ(static_cast<status_t>(OK), rv);
// The locally allocated object must now appear among the kernel-held
// binder references reported to libmemunreachable.
Heap heap;
allocator::vector<uintptr_t> refs{heap};
ASSERT_TRUE(BinderReferences(refs));
bool found_ref = false;
for (auto ref : refs) {
if (ref == reinterpret_cast<uintptr_t>(binder.get())) {
found_ref = true;
}
}
ASSERT_TRUE(found_ref);
}
}  // namespace android

Some files were not shown because too many files have changed in this diff Show More