ALSA: Remove memory reservation code from memalloc helper
Nowadays we have CMA for obtaining contiguous memory pages efficiently. Let's kill the old kludge that reserved memory pages for large buffers. It was rarely useful (only for preserving pages across module reloads, or as a small aid to early boot scripting), it was used by only a couple of drivers, and it brought far more ugliness than benefit.

Signed-off-by: Takashi Iwai <tiwai@suse.de>
parent d7b135410e
commit 47d98c026e
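For orientation, the pattern being deleted looked roughly like the sketch below: on teardown a driver could park its DMA buffer on a global reserved list under an ID, and on the next load look it up again instead of reallocating. This is a minimal illustration assembled from the hunks that follow; the removed helpers snd_dma_get_reserved_buf(), snd_dma_reserve_buf() and snd_dma_pci_buf_id() are real, while the two example_* functions are hypothetical.

/* Illustrative only: the reserve/lookup dance this commit removes.
 * After the removal, drivers simply call snd_dma_alloc_pages() and
 * snd_dma_free_pages() and rely on CMA for large contiguous buffers.
 */
static int example_get_buffer(struct pci_dev *pci, size_t size,
                              struct snd_dma_buffer *dmab)
{
        dmab->dev.type = SNDRV_DMA_TYPE_DEV;
        dmab->dev.dev = snd_dma_pci_data(pci);
        /* old path: re-use a buffer preserved across a module reload */
        if (snd_dma_get_reserved_buf(dmab, snd_dma_pci_buf_id(pci))) {
                if (dmab->bytes >= size)
                        return 0;
                snd_dma_free_pages(dmab);  /* too small: drop and reallocate */
        }
        return snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(pci),
                                   size, dmab);
}

static void example_put_buffer(struct pci_dev *pci, struct snd_dma_buffer *dmab)
{
        /* old path: park the buffer on the reserved list instead of freeing */
        if (snd_dma_reserve_buf(dmab, snd_dma_pci_buf_id(pci)) < 0)
                snd_dma_free_pages(dmab);
}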
@@ -149,13 +149,6 @@ int snd_dma_alloc_pages_fallback(int type, struct device *dev, size_t size,
 			    struct snd_dma_buffer *dmab);
 void snd_dma_free_pages(struct snd_dma_buffer *dmab);
 
-/* buffer-preservation managements */
-
-#define snd_dma_pci_buf_id(pci)	(((unsigned int)(pci)->vendor << 16) | (pci)->device)
-
-size_t snd_dma_get_reserved_buf(struct snd_dma_buffer *dmab, unsigned int id);
-int snd_dma_reserve_buf(struct snd_dma_buffer *dmab, unsigned int id);
-
 /* basic memory allocation functions */
 void *snd_malloc_pages(size_t size, gfp_t gfp_flags);
 void snd_free_pages(void *ptr, size_t size);

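The snd_dma_pci_buf_id() macro removed above packed a PCI device's vendor and device IDs into one 32-bit key for the reserved-buffer list. As a worked example (illustrative IDs, not tied to any particular driver), vendor 0x125d and device 0x1968 give (0x125d << 16) | 0x1968 = 0x125d1968.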
@@ -381,7 +381,6 @@ struct snd_pcm_substream {
 	struct pm_qos_request latency_pm_qos_req; /* pm_qos request */
 	size_t buffer_bytes_max;	/* limit ring buffer size */
 	struct snd_dma_buffer dma_buffer;
-	unsigned int dma_buf_id;
 	size_t dma_max;
 	/* -- hardware operations -- */
 	const struct snd_pcm_ops *ops;

@@ -41,22 +41,6 @@ MODULE_DESCRIPTION("Memory allocator for ALSA system.");
 MODULE_LICENSE("GPL");
 
 
-/*
- */
-
-static DEFINE_MUTEX(list_mutex);
-static LIST_HEAD(mem_list_head);
-
-/* buffer preservation list */
-struct snd_mem_list {
-	struct snd_dma_buffer buffer;
-	unsigned int id;
-	struct list_head list;
-};
-
-/* id for pre-allocated buffers */
-#define SNDRV_DMA_DEVICE_UNUSED	(unsigned int)-1
-
 /*
  *
  *  Generic memory allocators

@@ -318,251 +302,6 @@ void snd_dma_free_pages(struct snd_dma_buffer *dmab)
 	}
 }
 
-
-/**
- * snd_dma_get_reserved_buf - get the reserved buffer for the given device
- * @dmab: the buffer allocation record to store
- * @id: the buffer id
- *
- * Looks for the reserved-buffer list and re-uses if the same buffer
- * is found in the list.  When the buffer is found, it's removed from the free list.
- *
- * Return: The size of buffer if the buffer is found, or zero if not found.
- */
-size_t snd_dma_get_reserved_buf(struct snd_dma_buffer *dmab, unsigned int id)
-{
-	struct snd_mem_list *mem;
-
-	if (WARN_ON(!dmab))
-		return 0;
-
-	mutex_lock(&list_mutex);
-	list_for_each_entry(mem, &mem_list_head, list) {
-		if (mem->id == id &&
-		    (mem->buffer.dev.dev == NULL || dmab->dev.dev == NULL ||
-		     ! memcmp(&mem->buffer.dev, &dmab->dev, sizeof(dmab->dev)))) {
-			struct device *dev = dmab->dev.dev;
-			list_del(&mem->list);
-			*dmab = mem->buffer;
-			if (dmab->dev.dev == NULL)
-				dmab->dev.dev = dev;
-			kfree(mem);
-			mutex_unlock(&list_mutex);
-			return dmab->bytes;
-		}
-	}
-	mutex_unlock(&list_mutex);
-	return 0;
-}
-
-/**
- * snd_dma_reserve_buf - reserve the buffer
- * @dmab: the buffer to reserve
- * @id: the buffer id
- *
- * Reserves the given buffer as a reserved buffer.
- *
- * Return: Zero if successful, or a negative code on error.
- */
-int snd_dma_reserve_buf(struct snd_dma_buffer *dmab, unsigned int id)
-{
-	struct snd_mem_list *mem;
-
-	if (WARN_ON(!dmab))
-		return -EINVAL;
-	mem = kmalloc(sizeof(*mem), GFP_KERNEL);
-	if (! mem)
-		return -ENOMEM;
-	mutex_lock(&list_mutex);
-	mem->buffer = *dmab;
-	mem->id = id;
-	list_add_tail(&mem->list, &mem_list_head);
-	mutex_unlock(&list_mutex);
-	return 0;
-}
-
-/*
- * purge all reserved buffers
- */
-static void free_all_reserved_pages(void)
-{
-	struct list_head *p;
-	struct snd_mem_list *mem;
-
-	mutex_lock(&list_mutex);
-	while (! list_empty(&mem_list_head)) {
-		p = mem_list_head.next;
-		mem = list_entry(p, struct snd_mem_list, list);
-		list_del(p);
-		snd_dma_free_pages(&mem->buffer);
-		kfree(mem);
-	}
-	mutex_unlock(&list_mutex);
-}
-
-
-#ifdef CONFIG_PROC_FS
-/*
- * proc file interface
- */
-#define SND_MEM_PROC_FILE	"driver/snd-page-alloc"
-static struct proc_dir_entry *snd_mem_proc;
-
-static int snd_mem_proc_read(struct seq_file *seq, void *offset)
-{
-	struct snd_mem_list *mem;
-	int devno;
-	static char *types[] = { "UNKNOWN", "CONT", "DEV", "DEV-SG" };
-
-	mutex_lock(&list_mutex);
-	devno = 0;
-	list_for_each_entry(mem, &mem_list_head, list) {
-		devno++;
-		seq_printf(seq, "buffer %d : ID %08x : type %s\n",
-			   devno, mem->id, types[mem->buffer.dev.type]);
-		seq_printf(seq, "  addr = 0x%lx, size = %d bytes\n",
-			   (unsigned long)mem->buffer.addr,
-			   (int)mem->buffer.bytes);
-	}
-	mutex_unlock(&list_mutex);
-	return 0;
-}
-
-static int snd_mem_proc_open(struct inode *inode, struct file *file)
-{
-	return single_open(file, snd_mem_proc_read, NULL);
-}
-
-/* FIXME: for pci only - other bus? */
-#ifdef CONFIG_PCI
-#define gettoken(bufp) strsep(bufp, " \t\n")
-
-static ssize_t snd_mem_proc_write(struct file *file, const char __user * buffer,
-				  size_t count, loff_t * ppos)
-{
-	char buf[128];
-	char *token, *p;
-
-	if (count > sizeof(buf) - 1)
-		return -EINVAL;
-	if (copy_from_user(buf, buffer, count))
-		return -EFAULT;
-	buf[count] = '\0';
-
-	p = buf;
-	token = gettoken(&p);
-	if (! token || *token == '#')
-		return count;
-	if (strcmp(token, "add") == 0) {
-		char *endp;
-		int vendor, device, size, buffers;
-		long mask;
-		int i, alloced;
-		struct pci_dev *pci;
-
-		if ((token = gettoken(&p)) == NULL ||
-		    (vendor = simple_strtol(token, NULL, 0)) <= 0 ||
-		    (token = gettoken(&p)) == NULL ||
-		    (device = simple_strtol(token, NULL, 0)) <= 0 ||
-		    (token = gettoken(&p)) == NULL ||
-		    (mask = simple_strtol(token, NULL, 0)) < 0 ||
-		    (token = gettoken(&p)) == NULL ||
-		    (size = memparse(token, &endp)) < 64*1024 ||
-		    size > 16*1024*1024 /* too big */ ||
-		    (token = gettoken(&p)) == NULL ||
-		    (buffers = simple_strtol(token, NULL, 0)) <= 0 ||
-		    buffers > 4) {
-			printk(KERN_ERR "snd-page-alloc: invalid proc write format\n");
-			return count;
-		}
-		vendor &= 0xffff;
-		device &= 0xffff;
-
-		alloced = 0;
-		pci = NULL;
-		while ((pci = pci_get_device(vendor, device, pci)) != NULL) {
-			if (mask > 0 && mask < 0xffffffff) {
-				if (pci_set_dma_mask(pci, mask) < 0 ||
-				    pci_set_consistent_dma_mask(pci, mask) < 0) {
-					printk(KERN_ERR "snd-page-alloc: cannot set DMA mask %lx for pci %04x:%04x\n", mask, vendor, device);
-					pci_dev_put(pci);
-					return count;
-				}
-			}
-			for (i = 0; i < buffers; i++) {
-				struct snd_dma_buffer dmab;
-				memset(&dmab, 0, sizeof(dmab));
-				if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(pci),
-							size, &dmab) < 0) {
-					printk(KERN_ERR "snd-page-alloc: cannot allocate buffer pages (size = %d)\n", size);
-					pci_dev_put(pci);
-					return count;
-				}
-				snd_dma_reserve_buf(&dmab, snd_dma_pci_buf_id(pci));
-			}
-			alloced++;
-		}
-		if (! alloced) {
-			for (i = 0; i < buffers; i++) {
-				struct snd_dma_buffer dmab;
-				memset(&dmab, 0, sizeof(dmab));
-				/* FIXME: We can allocate only in ZONE_DMA
-				 * without a device pointer!
-				 */
-				if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, NULL,
-							size, &dmab) < 0) {
-					printk(KERN_ERR "snd-page-alloc: cannot allocate buffer pages (size = %d)\n", size);
-					break;
-				}
-				snd_dma_reserve_buf(&dmab, (unsigned int)((vendor << 16) | device));
-			}
-		}
-	} else if (strcmp(token, "erase") == 0)
-		/* FIXME: need for releasing each buffer chunk? */
-		free_all_reserved_pages();
-	else
-		printk(KERN_ERR "snd-page-alloc: invalid proc cmd\n");
-	return count;
-}
-#endif /* CONFIG_PCI */
-
-static const struct file_operations snd_mem_proc_fops = {
-	.owner = THIS_MODULE,
-	.open = snd_mem_proc_open,
-	.read = seq_read,
-#ifdef CONFIG_PCI
-	.write = snd_mem_proc_write,
-#endif
-	.llseek = seq_lseek,
-	.release = single_release,
-};
-
-#endif /* CONFIG_PROC_FS */
-
-/*
- * module entry
- */
-
-static int __init snd_mem_init(void)
-{
-#ifdef CONFIG_PROC_FS
-	snd_mem_proc = proc_create(SND_MEM_PROC_FILE, 0644, NULL,
-				   &snd_mem_proc_fops);
-#endif
-	return 0;
-}
-
-static void __exit snd_mem_exit(void)
-{
-	remove_proc_entry(SND_MEM_PROC_FILE, NULL);
-	free_all_reserved_pages();
-}
-
-
-module_init(snd_mem_init)
-module_exit(snd_mem_exit)
-
-
 /*
  * exports
  */

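As the removed snd_mem_proc_write() parser above shows, the old /proc/driver/snd-page-alloc file accepted a tiny command language: "add <vendor> <device> <dma-mask> <size> <buffers>" pre-allocated and reserved up to four buffers of between 64 KiB and 16 MiB for a given PCI ID, and "erase" dropped every reserved buffer. An illustrative early-boot invocation (the values here are examples only, not a recommendation) would have been:

        echo "add 0x125d 0x1968 0xffffffff 512k 2" > /proc/driver/snd-page-alloc
        echo erase > /proc/driver/snd-page-alloc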
@@ -570,8 +309,5 @@ EXPORT_SYMBOL(snd_dma_alloc_pages);
 EXPORT_SYMBOL(snd_dma_alloc_pages_fallback);
 EXPORT_SYMBOL(snd_dma_free_pages);
 
-EXPORT_SYMBOL(snd_dma_get_reserved_buf);
-EXPORT_SYMBOL(snd_dma_reserve_buf);
-
 EXPORT_SYMBOL(snd_malloc_pages);
 EXPORT_SYMBOL(snd_free_pages);

@@ -53,15 +53,6 @@ static int preallocate_pcm_pages(struct snd_pcm_substream *substream, size_t siz
 	struct snd_dma_buffer *dmab = &substream->dma_buffer;
 	int err;
 
-	/* already reserved? */
-	if (snd_dma_get_reserved_buf(dmab, substream->dma_buf_id) > 0) {
-		if (dmab->bytes >= size)
-			return 0;	/* yes */
-		/* no, free the reserved block */
-		snd_dma_free_pages(dmab);
-		dmab->bytes = 0;
-	}
-
 	do {
 		if ((err = snd_dma_alloc_pages(dmab->dev.type, dmab->dev.dev,
 					       size, dmab)) < 0) {

@@ -82,10 +73,7 @@ static void snd_pcm_lib_preallocate_dma_free(struct snd_pcm_substream *substream
 {
 	if (substream->dma_buffer.area == NULL)
 		return;
-	if (substream->dma_buf_id)
-		snd_dma_reserve_buf(&substream->dma_buffer, substream->dma_buf_id);
-	else
-		snd_dma_free_pages(&substream->dma_buffer);
+	snd_dma_free_pages(&substream->dma_buffer);
 	substream->dma_buffer.area = NULL;
 }
 

@@ -260,11 +248,6 @@ static int snd_pcm_lib_preallocate_pages1(struct snd_pcm_substream *substream,
  *
  * Do pre-allocation for the given DMA buffer type.
  *
- * When substream->dma_buf_id is set, the function tries to look for
- * the reserved buffer, and the buffer is not freed but reserved at
- * destruction time.  The dma_buf_id must be unique for all systems
- * (in the same DMA buffer type) e.g. using snd_dma_pci_buf_id().
- *
  * Return: Zero if successful, or a negative error code on failure.
  */
 int snd_pcm_lib_preallocate_pages(struct snd_pcm_substream *substream,

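Before this change, a driver that wanted its preallocated buffer preserved would set substream->dma_buf_id before calling the helper, along these lines (a hypothetical fragment for illustration, not taken from any in-tree driver; "chip" stands in for the driver's private data):

        /* hypothetical: opt a substream into buffer preservation */
        substream->dma_buf_id = snd_dma_pci_buf_id(chip->pci);
        snd_pcm_lib_preallocate_pages(substream, SNDRV_DMA_TYPE_DEV,
                                      snd_dma_pci_data(chip->pci),
                                      64 * 1024, 128 * 1024);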
@@ -1422,7 +1422,7 @@ static void snd_es1968_free_dmabuf(struct es1968 *chip)
 
 	if (! chip->dma.area)
 		return;
-	snd_dma_reserve_buf(&chip->dma, snd_dma_pci_buf_id(chip->pci));
+	snd_dma_free_pages(&chip->dma);
 	while ((p = chip->buf_list.next) != &chip->buf_list) {
 		struct esm_memory *chunk = list_entry(p, struct esm_memory, list);
 		list_del(p);

@@ -1438,20 +1438,18 @@ snd_es1968_init_dmabuf(chip)
 
 	chip->dma.dev.type = SNDRV_DMA_TYPE_DEV;
 	chip->dma.dev.dev = snd_dma_pci_data(chip->pci);
-	if (! snd_dma_get_reserved_buf(&chip->dma, snd_dma_pci_buf_id(chip->pci))) {
-		err = snd_dma_alloc_pages_fallback(SNDRV_DMA_TYPE_DEV,
-						   snd_dma_pci_data(chip->pci),
-						   chip->total_bufsize, &chip->dma);
-		if (err < 0 || ! chip->dma.area) {
-			snd_printk(KERN_ERR "es1968: can't allocate dma pages for size %d\n",
-				   chip->total_bufsize);
-			return -ENOMEM;
-		}
-		if ((chip->dma.addr + chip->dma.bytes - 1) & ~((1 << 28) - 1)) {
-			snd_dma_free_pages(&chip->dma);
-			snd_printk(KERN_ERR "es1968: DMA buffer beyond 256MB.\n");
-			return -ENOMEM;
-		}
-	}
+	err = snd_dma_alloc_pages_fallback(SNDRV_DMA_TYPE_DEV,
+					   snd_dma_pci_data(chip->pci),
+					   chip->total_bufsize, &chip->dma);
+	if (err < 0 || ! chip->dma.area) {
+		snd_printk(KERN_ERR "es1968: can't allocate dma pages for size %d\n",
+			   chip->total_bufsize);
+		return -ENOMEM;
+	}
+	if ((chip->dma.addr + chip->dma.bytes - 1) & ~((1 << 28) - 1)) {
+		snd_dma_free_pages(&chip->dma);
+		snd_printk(KERN_ERR "es1968: DMA buffer beyond 256MB.\n");
+		return -ENOMEM;
+	}
 
 	INIT_LIST_HEAD(&chip->buf_list);

@@ -584,10 +584,6 @@ static int snd_hammerfall_get_buffer(struct pci_dev *pci, struct snd_dma_buffer
 {
 	dmab->dev.type = SNDRV_DMA_TYPE_DEV;
 	dmab->dev.dev = snd_dma_pci_data(pci);
-	if (snd_dma_get_reserved_buf(dmab, snd_dma_pci_buf_id(pci))) {
-		if (dmab->bytes >= size)
-			return 0;
-	}
 	if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(pci),
 				size, dmab) < 0)
 		return -ENOMEM;

@@ -596,10 +592,8 @@ static int snd_hammerfall_get_buffer(struct pci_dev *pci, struct snd_dma_buffer
 
 static void snd_hammerfall_free_buffer(struct snd_dma_buffer *dmab, struct pci_dev *pci)
 {
-	if (dmab->area) {
-		dmab->dev.dev = NULL; /* make it anonymous */
-		snd_dma_reserve_buf(dmab, snd_dma_pci_buf_id(pci));
-	}
+	if (dmab->area)
+		snd_dma_free_pages(dmab);
 }
 

@@ -294,10 +294,6 @@ static int snd_hammerfall_get_buffer(struct pci_dev *pci, struct snd_dma_buffer
 {
 	dmab->dev.type = SNDRV_DMA_TYPE_DEV;
 	dmab->dev.dev = snd_dma_pci_data(pci);
-	if (snd_dma_get_reserved_buf(dmab, snd_dma_pci_buf_id(pci))) {
-		if (dmab->bytes >= size)
-			return 0;
-	}
 	if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(pci),
 				size, dmab) < 0)
 		return -ENOMEM;

@@ -306,10 +302,8 @@ static int snd_hammerfall_get_buffer(struct pci_dev *pci, struct snd_dma_buffer
 
 static void snd_hammerfall_free_buffer(struct snd_dma_buffer *dmab, struct pci_dev *pci)
 {
-	if (dmab->area) {
-		dmab->dev.dev = NULL; /* make it anonymous */
-		snd_dma_reserve_buf(dmab, snd_dma_pci_buf_id(pci));
-	}
+	if (dmab->area)
+		snd_dma_free_pages(dmab);
 }