dm bufio: remove old-style buffer cleanup

Remove code that cleans up buffers if the cache size grows over the limit.

The next commit will introduce a new global cleanup.

Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
commit b132ff3332 (parent af53badc0c)
Author:    Mikulas Patocka <mpatocka@redhat.com>
Date:      2019-09-12 10:44:47 +02:00
Committer: Mike Snitzer <snitzer@redhat.com>

1 file changed, 3 insertions(+), 58 deletions(-)

--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -33,7 +33,7 @@
 
 #define DM_BUFIO_MEMORY_PERCENT 2
 #define DM_BUFIO_VMALLOC_PERCENT 25
-#define DM_BUFIO_WRITEBACK_PERCENT 75
+#define DM_BUFIO_WRITEBACK_RATIO 3
 
 /*
  * Check buffer ages in this interval (seconds)
@@ -211,11 +211,6 @@ static unsigned long dm_bufio_current_allocated;
 
 /*----------------------------------------------------------------*/
 
-/*
- * Per-client cache: dm_bufio_cache_size / dm_bufio_client_count
- */
-static unsigned long dm_bufio_cache_size_per_client;
-
 /*
  * The current number of clients.
  */
@@ -227,8 +222,7 @@ static int dm_bufio_client_count;
 static LIST_HEAD(dm_bufio_all_clients);
 
 /*
- * This mutex protects dm_bufio_cache_size_latch,
- * dm_bufio_cache_size_per_client and dm_bufio_client_count
+ * This mutex protects dm_bufio_cache_size_latch and dm_bufio_client_count
  */
 static DEFINE_MUTEX(dm_bufio_clients_lock);
 
@@ -340,9 +334,6 @@ static void __cache_size_refresh(void)
                               dm_bufio_default_cache_size);
                dm_bufio_cache_size_latch = dm_bufio_default_cache_size;
        }
-
-       dm_bufio_cache_size_per_client = dm_bufio_cache_size_latch /
-                                        (dm_bufio_client_count ? : 1);
 }
 
 /*
@@ -923,36 +914,6 @@ static void __write_dirty_buffers_async(struct dm_bufio_client *c, int no_wait,
        }
 }
 
-/*
- * Get writeback threshold and buffer limit for a given client.
- */
-static void __get_memory_limit(struct dm_bufio_client *c,
-                              unsigned long *threshold_buffers,
-                              unsigned long *limit_buffers)
-{
-       unsigned long buffers;
-
-       if (unlikely(READ_ONCE(dm_bufio_cache_size) != dm_bufio_cache_size_latch)) {
-               if (mutex_trylock(&dm_bufio_clients_lock)) {
-                       __cache_size_refresh();
-                       mutex_unlock(&dm_bufio_clients_lock);
-               }
-       }
-
-       buffers = dm_bufio_cache_size_per_client;
-       if (likely(c->sectors_per_block_bits >= 0))
-               buffers >>= c->sectors_per_block_bits + SECTOR_SHIFT;
-       else
-               buffers /= c->block_size;
-
-       if (buffers < c->minimum_buffers)
-               buffers = c->minimum_buffers;
-
-       *limit_buffers = buffers;
-       *threshold_buffers = mult_frac(buffers,
-                                      DM_BUFIO_WRITEBACK_PERCENT, 100);
-}
-
 /*
  * Check if we're over watermark.
  * If we are over threshold_buffers, start freeing buffers.
@@ -961,23 +922,7 @@ static void __get_memory_limit(struct dm_bufio_client *c,
 static void __check_watermark(struct dm_bufio_client *c,
                              struct list_head *write_list)
 {
-       unsigned long threshold_buffers, limit_buffers;
-
-       __get_memory_limit(c, &threshold_buffers, &limit_buffers);
-
-       while (c->n_buffers[LIST_CLEAN] + c->n_buffers[LIST_DIRTY] >
-              limit_buffers) {
-
-               struct dm_buffer *b = __get_unclaimed_buffer(c);
-
-               if (!b)
-                       return;
-
-               __free_buffer_wake(b);
-               cond_resched();
-       }
-
-       if (c->n_buffers[LIST_DIRTY] > threshold_buffers)
+       if (c->n_buffers[LIST_DIRTY] > c->n_buffers[LIST_CLEAN] * DM_BUFIO_WRITEBACK_RATIO)
                __write_dirty_buffers_async(c, 1, write_list);
 }
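
For readers skimming the diff, here is a minimal, self-contained sketch of the new watermark rule that replaces the removed per-client limit. The struct client and over_writeback_watermark() names are simplified stand-ins invented for illustration; they are not the kernel's struct dm_bufio_client or __check_watermark():

#include <stdbool.h>
#include <stdio.h>

/* New tunable from this commit: start writeback once dirty buffers
 * outnumber clean buffers by this ratio. */
#define DM_BUFIO_WRITEBACK_RATIO 3

/* Simplified stand-in for the per-list buffer counters kept in
 * struct dm_bufio_client (c->n_buffers[LIST_CLEAN/LIST_DIRTY]). */
struct client {
        unsigned long n_clean;
        unsigned long n_dirty;
};

/* Mirrors the simplified check: no per-client buffer limit any more,
 * only the dirty-to-clean ratio decides whether writeback starts. */
static bool over_writeback_watermark(const struct client *c)
{
        return c->n_dirty > c->n_clean * DM_BUFIO_WRITEBACK_RATIO;
}

int main(void)
{
        struct client c = { .n_clean = 10, .n_dirty = 25 };

        /* 25 dirty vs. 10 clean: 25 <= 30, so no writeback yet. */
        printf("%d\n", over_writeback_watermark(&c));

        c.n_dirty = 31;
        /* 31 dirty vs. 10 clean: 31 > 30, writeback would be started. */
        printf("%d\n", over_writeback_watermark(&c));
        return 0;
}

With DM_BUFIO_WRITEBACK_RATIO set to 3, asynchronous writeback only kicks in once dirty buffers outnumber clean buffers three to one; enforcing the overall cache size is left to the global cleanup that, per the commit message, the next commit introduces.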