liblog: is loggable is flaky

- Deal with __android_log_is_loggable being called inside a signal
  handler by treating the system call associated with lock contention
  as more costly than reading the associated property directly, rather
  than waiting around in a contended state for the cache to be hit
  (a simplified sketch of this pattern follows this list).
- Check both the individual known __system_property_serial() values and
  the global __system_property_area_serial() to detect updates or
  additions to the properties, so that we respond in the most aggressive
  and timely manner. __android_log_is_loggable() returns in 61ns at max
  CPU clock rate on an N9.
- Craft a common do_cache2 inline that adds the above to the other
  functions that use cache handling, and preserves (within 3ns) the
  performance of android_log_clockid() and __android_log_security().
  These functions return in 23ns at max CPU clock rate on an N9.
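
To make the first two bullets concrete, below is a minimal standalone
sketch of the pattern, assuming the NDK property API plus bionic's
private __system_property_serial()/__system_property_area_serial();
the function name cached_property_get() and the single-property scope
are illustrative only and not part of this change.

/*
 * Sketch: never block on the mutex, since the caller may be a signal
 * handler interrupting the thread that already holds it, and refresh the
 * cached value whenever either the per-property serial or the global
 * property-area serial changes.
 */
#include <pthread.h>
#include <stdint.h>
#include <string.h>

#include <sys/system_properties.h>   /* __system_property_get/find */
#include <sys/_system_properties.h>  /* __system_property_serial (bionic private) */

static pthread_mutex_t cache_lock = PTHREAD_MUTEX_INITIALIZER;
static const prop_info *cached_pinfo;
static uint32_t cached_prop_serial = (uint32_t)-1;
static uint32_t cached_area_serial = (uint32_t)-1;
static char cached_value[PROP_VALUE_MAX];

/* value must point at a buffer of at least PROP_VALUE_MAX bytes */
int cached_property_get(const char *key, char *value)
{
    if (pthread_mutex_trylock(&cache_lock)) {
        /*
         * Contention (possibly a signal handler interrupting the lock
         * holder): reading the property directly costs less than the
         * system call needed to wait for the mutex.
         */
        return __system_property_get(key, value);
    }

    if (!cached_pinfo) {
        cached_pinfo = __system_property_find(key);
    }

    uint32_t area_serial = __system_property_area_serial();
    uint32_t prop_serial =
        cached_pinfo ? __system_property_serial(cached_pinfo) : (uint32_t)-1;

    if ((area_serial != cached_area_serial)
            || (prop_serial != cached_prop_serial)) {
        /* an update or addition was detected, refresh the cached copy */
        __system_property_get(key, cached_value);
        cached_area_serial = area_serial;
        cached_prop_serial = prop_serial;
    }

    strcpy(value, cached_value);
    pthread_mutex_unlock(&cache_lock);
    return (int)strlen(value);
}

The trylock matters because the uncached __system_property_get() path
completes in less time than the futex wait a contended
pthread_mutex_lock() would incur, so a signal handler can never
deadlock against the lock holder it interrupted.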

Bug: 19544788
Bug: 25693940
Bug: 25792367
Bug: 26178938
Change-Id: I9cd94598f5c558e946b93977ad3714a4b03d0422
Mark Salyzyn 2015-12-21 12:32:48 -08:00
parent 8a1b764db9
commit a67d8a53c7
2 changed files with 203 additions and 91 deletions


@@ -25,13 +25,18 @@
static pthread_mutex_t lock_loggable = PTHREAD_MUTEX_INITIALIZER;
static void lock()
static int lock()
{
/*
* If we trigger a signal handler in the middle of locked activity and the
* signal handler logs a message, we could get into a deadlock state.
*/
pthread_mutex_lock(&lock_loggable);
/*
* Any contention, and we can turn around and use the non-cached method
* in less time than the system call associated with a mutex to deal with
* the contention.
*/
return pthread_mutex_trylock(&lock_loggable);
}
static void unlock()
@@ -45,6 +50,12 @@ struct cache {
unsigned char c;
};
static int check_cache(struct cache *cache)
{
return cache->pinfo
&& __system_property_serial(cache->pinfo) != cache->serial;
}
#define BOOLEAN_TRUE 0xFF
#define BOOLEAN_FALSE 0xFE
@@ -58,6 +69,7 @@ static void refresh_cache(struct cache *cache, const char *key)
if (!cache->pinfo) {
return;
}
cache->serial = -1;
}
serial = __system_property_serial(cache->pinfo);
if (serial == cache->serial) {
@@ -85,7 +97,7 @@ static int __android_log_level(const char *tag, int default_prio)
/* calculate the size of our key temporary buffer */
const size_t taglen = (tag && *tag) ? strlen(tag) : 0;
/* sizeof(log_namespace) = strlen(log_namespace) + 1 */
char key[sizeof(log_namespace) + taglen];
char key[sizeof(log_namespace) + taglen]; /* may be > PROPERTY_KEY_MAX */
char *kp;
size_t i;
char c = 0;
@@ -101,48 +113,75 @@ static int __android_log_level(const char *tag, int default_prio)
static char *last_tag;
static uint32_t global_serial;
uint32_t current_global_serial;
static struct cache tag_cache[2] = {
{ NULL, -1, 0 },
{ NULL, -1, 0 }
};
static struct cache global_cache[2] = {
{ NULL, -1, 0 },
{ NULL, -1, 0 }
};
static struct cache tag_cache[2];
static struct cache global_cache[2];
int change_detected;
int global_change_detected;
int not_locked;
strcpy(key, log_namespace);
lock();
global_change_detected = change_detected = not_locked = lock();
current_global_serial = __system_property_area_serial();
if (!not_locked) {
/*
* check all known serial numbers to changes.
*/
for (i = 0; i < (sizeof(tag_cache) / sizeof(tag_cache[0])); ++i) {
if (check_cache(&tag_cache[i])) {
change_detected = 1;
}
}
for (i = 0; i < (sizeof(global_cache) / sizeof(global_cache[0])); ++i) {
if (check_cache(&global_cache[i])) {
global_change_detected = 1;
}
}
current_global_serial = __system_property_area_serial();
if (current_global_serial != global_serial) {
change_detected = 1;
global_change_detected = 1;
}
}
if (taglen) {
uint32_t current_local_serial = current_global_serial;
if (!last_tag || (last_tag[0] != tag[0]) || strcmp(last_tag + 1, tag + 1)) {
/* invalidate log.tag.<tag> cache */
for(i = 0; i < (sizeof(tag_cache) / sizeof(tag_cache[0])); ++i) {
tag_cache[i].pinfo = NULL;
tag_cache[i].serial = -1;
tag_cache[i].c = '\0';
int local_change_detected = change_detected;
if (!not_locked) {
if (!last_tag
|| (last_tag[0] != tag[0])
|| strcmp(last_tag + 1, tag + 1)) {
/* invalidate log.tag.<tag> cache */
for (i = 0; i < (sizeof(tag_cache) / sizeof(tag_cache[0])); ++i) {
tag_cache[i].pinfo = NULL;
tag_cache[i].c = '\0';
}
free(last_tag);
last_tag = NULL;
local_change_detected = 1;
}
if (!last_tag) {
last_tag = strdup(tag);
}
free(last_tag);
last_tag = NULL;
current_global_serial = -1;
}
if (!last_tag) {
last_tag = strdup(tag);
}
strcpy(key + sizeof(log_namespace) - 1, tag);
kp = key;
for(i = 0; i < (sizeof(tag_cache) / sizeof(tag_cache[0])); ++i) {
if (current_local_serial != global_serial) {
refresh_cache(&tag_cache[i], kp);
for (i = 0; i < (sizeof(tag_cache) / sizeof(tag_cache[0])); ++i) {
struct cache *cache = &tag_cache[i];
struct cache temp_cache;
if (not_locked) {
temp_cache.pinfo = NULL;
temp_cache.c = '\0';
cache = &temp_cache;
}
if (local_change_detected) {
refresh_cache(cache, kp);
}
if (tag_cache[i].c) {
c = tag_cache[i].c;
if (cache->c) {
c = cache->c;
break;
}
@@ -166,13 +205,24 @@ static int __android_log_level(const char *tag, int default_prio)
key[sizeof(log_namespace) - 2] = '\0';
kp = key;
for(i = 0; i < (sizeof(global_cache) / sizeof(global_cache[0])); ++i) {
if (current_global_serial != global_serial) {
refresh_cache(&global_cache[i], kp);
for (i = 0; i < (sizeof(global_cache) / sizeof(global_cache[0])); ++i) {
struct cache *cache = &global_cache[i];
struct cache temp_cache;
if (not_locked) {
temp_cache = *cache;
if (temp_cache.pinfo != cache->pinfo) { /* check atomic */
temp_cache.pinfo = NULL;
temp_cache.c = '\0';
}
cache = &temp_cache;
}
if (global_change_detected) {
refresh_cache(cache, kp);
}
if (global_cache[i].c) {
c = global_cache[i].c;
if (cache->c) {
c = cache->c;
break;
}
@@ -181,9 +231,10 @@ static int __android_log_level(const char *tag, int default_prio)
break;
}
global_serial = current_global_serial;
unlock();
if (!not_locked) {
global_serial = current_global_serial;
unlock();
}
switch (toupper(c)) {
case 'V': return ANDROID_LOG_VERBOSE;
@@ -206,70 +257,103 @@ int __android_log_is_loggable(int prio, const char *tag, int default_prio)
}
/*
* Timestamp state generally remains constant, since a change is
* rare, we can accept a trylock failure gracefully. Use a separate
* lock from is_loggable to keep contention down b/25563384.
* For properties that are read often, but generally remain constant.
* Since a change is rare, we will accept a trylock failure gracefully.
* Use a separate lock from is_loggable to keep contention down b/25563384.
*/
static pthread_mutex_t lock_clockid = PTHREAD_MUTEX_INITIALIZER;
struct cache2 {
pthread_mutex_t lock;
uint32_t serial;
const char *key_persist;
struct cache cache_persist;
const char *key_ro;
struct cache cache_ro;
unsigned char (*const evaluate)(const struct cache2 *self);
};
clockid_t android_log_clockid()
static inline unsigned char do_cache2(struct cache2 *self)
{
static struct cache r_time_cache = { NULL, -1, 0 };
static struct cache p_time_cache = { NULL, -1, 0 };
char c;
uint32_t current_serial;
int change_detected;
unsigned char c;
if (pthread_mutex_trylock(&lock_clockid)) {
if (pthread_mutex_trylock(&self->lock)) {
/* We are willing to accept some race in this context */
if (!(c = p_time_cache.c)) {
c = r_time_cache.c;
}
} else {
static uint32_t serial;
uint32_t current_serial = __system_property_area_serial();
if (current_serial != serial) {
refresh_cache(&r_time_cache, "ro.logd.timestamp");
refresh_cache(&p_time_cache, "persist.logd.timestamp");
serial = current_serial;
}
if (!(c = p_time_cache.c)) {
c = r_time_cache.c;
}
pthread_mutex_unlock(&lock_clockid);
return self->evaluate(self);
}
return (tolower(c) == 'm') ? CLOCK_MONOTONIC : CLOCK_REALTIME;
change_detected = check_cache(&self->cache_persist)
|| check_cache(&self->cache_ro);
current_serial = __system_property_area_serial();
if (current_serial != self->serial) {
change_detected = 1;
}
if (change_detected) {
refresh_cache(&self->cache_persist, self->key_persist);
refresh_cache(&self->cache_ro, self->key_ro);
self->serial = current_serial;
}
c = self->evaluate(self);
pthread_mutex_unlock(&self->lock);
return c;
}
static unsigned char evaluate_persist_ro(const struct cache2 *self)
{
unsigned char c = self->cache_persist.c;
if (c) {
return c;
}
return self->cache_ro.c;
}
/*
* security state generally remains constant, since a change is
* rare, we can accept a trylock failure gracefully.
* Timestamp state generally remains constant, but can change at any time
* to handle developer requirements.
*/
static pthread_mutex_t lock_security = PTHREAD_MUTEX_INITIALIZER;
clockid_t android_log_clockid()
{
static struct cache2 clockid = {
PTHREAD_MUTEX_INITIALIZER,
0,
"persist.logd.timestamp",
{ NULL, -1, '\0' },
"ro.logd.timestamp",
{ NULL, -1, '\0' },
evaluate_persist_ro
};
return (tolower(do_cache2(&clockid)) == 'm')
? CLOCK_MONOTONIC
: CLOCK_REALTIME;
}
/*
* Security state generally remains constant, but the DO must be able
* to turn off logging should it become spammy after an attack is detected.
*/
static unsigned char evaluate_security(const struct cache2 *self)
{
unsigned char c = self->cache_ro.c;
return (c != BOOLEAN_FALSE) && c && (self->cache_persist.c == BOOLEAN_TRUE);
}
int __android_log_security()
{
static struct cache r_do_cache = { NULL, -1, BOOLEAN_FALSE };
static struct cache p_security_cache = { NULL, -1, BOOLEAN_FALSE };
int retval;
static struct cache2 security = {
PTHREAD_MUTEX_INITIALIZER,
0,
"persist.logd.security",
{ NULL, -1, BOOLEAN_FALSE },
"ro.device_owner",
{ NULL, -1, BOOLEAN_FALSE },
evaluate_security
};
if (pthread_mutex_trylock(&lock_security)) {
/* We are willing to accept some race in this context */
retval = (r_do_cache.c != BOOLEAN_FALSE) && r_do_cache.c &&
(p_security_cache.c == BOOLEAN_TRUE);
} else {
static uint32_t serial;
uint32_t current_serial = __system_property_area_serial();
if (current_serial != serial) {
refresh_cache(&r_do_cache, "ro.device_owner");
refresh_cache(&p_security_cache, "persist.logd.security");
serial = current_serial;
}
retval = (r_do_cache.c != BOOLEAN_FALSE) && r_do_cache.c &&
(p_security_cache.c == BOOLEAN_TRUE);
pthread_mutex_unlock(&lock_security);
}
return retval;
return do_cache2(&security);
}
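
As a usage note, a hypothetical additional consumer of the new do_cache2
helper would follow the same shape as android_log_clockid() and
__android_log_security() above; the function and property names in this
sketch are invented for illustration and are not part of this change.

int __android_log_example_enabled()
{
    static struct cache2 example = {
        PTHREAD_MUTEX_INITIALIZER,
        0,
        "persist.logd.example",   /* persist value wins when set */
        { NULL, -1, '\0' },
        "ro.logd.example",        /* read-only build-time default */
        { NULL, -1, '\0' },
        evaluate_persist_ro       /* same evaluator as android_log_clockid */
    };

    return tolower(do_cache2(&example)) == 't';
}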


@@ -660,3 +660,31 @@ static void BM_is_loggable(int iters) {
StopBenchmarkTiming();
}
BENCHMARK(BM_is_loggable);
/*
* Measure the time it takes for android_log_clockid.
*/
static void BM_clockid(int iters) {
StartBenchmarkTiming();
for (int i = 0; i < iters; ++i) {
android_log_clockid();
}
StopBenchmarkTiming();
}
BENCHMARK(BM_clockid);
/*
* Measure the time it takes for __android_log_security.
*/
static void BM_security(int iters) {
StartBenchmarkTiming();
for (int i = 0; i < iters; ++i) {
__android_log_security();
}
StopBenchmarkTiming();
}
BENCHMARK(BM_security);