commit bf02f5d2c0

    Merge commit '2c563880ea' into work.xattr

    pick xattr_handler conversion from lustre tree
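
For context: the "xattr_handler conversion" named in the subject moves lustre's
llite layer off per-inode setxattr/getxattr/removexattr methods and onto the
VFS's table-driven interface, where the filesystem publishes a NULL-terminated
handler array through sb->s_xattr and generic_setxattr()/generic_getxattr()/
generic_removexattr() dispatch to the handler whose prefix matches the
attribute name. Roughly, the interface as it looked in the v4.7-era VFS (a
sketch from memory, not taken from this diff):

struct xattr_handler {
	const char *name;	/* exact-name match, or NULL */
	const char *prefix;	/* name-prefix match, e.g. "user." */
	int flags;		/* fs-private; lustre stores XATTR_*_T here */
	bool (*list)(struct dentry *dentry);
	int (*get)(const struct xattr_handler *handler, struct dentry *dentry,
		   struct inode *inode, const char *name, void *buffer,
		   size_t size);
	int (*set)(const struct xattr_handler *handler, struct dentry *dentry,
		   struct inode *inode, const char *name, const void *buffer,
		   size_t size, int flags);
};

The handlers receive the attribute name with its prefix already stripped,
which is why the lustre hunks below compare against "lov" and "capability"
rather than "trusted.lov" and "security.capability", and rebuild the full name
with handler->prefix before talking to the MDS.
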
@@ -59,7 +59,6 @@ static struct dentry *binder_debugfs_dir_entry_proc;
 static struct binder_node *binder_context_mgr_node;
 static kuid_t binder_context_mgr_uid = INVALID_UID;
 static int binder_last_id;
-static struct workqueue_struct *binder_deferred_workqueue;
 
 #define BINDER_DEBUG_ENTRY(name) \
 static int binder_##name##_open(struct inode *inode, struct file *file) \
@@ -3227,7 +3226,7 @@ binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer)
 	if (hlist_unhashed(&proc->deferred_work_node)) {
 		hlist_add_head(&proc->deferred_work_node,
 			       &binder_deferred_list);
-		queue_work(binder_deferred_workqueue, &binder_deferred_work);
+		schedule_work(&binder_deferred_work);
 	}
 	mutex_unlock(&binder_deferred_lock);
 }
@@ -3679,10 +3678,6 @@ static int __init binder_init(void)
 {
 	int ret;
 
-	binder_deferred_workqueue = create_singlethread_workqueue("binder");
-	if (!binder_deferred_workqueue)
-		return -ENOMEM;
-
 	binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL);
 	if (binder_debugfs_dir_entry_root)
 		binder_debugfs_dir_entry_proc = debugfs_create_dir("proc",
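
On the binder hunks: with the dedicated single-threaded workqueue gone, the
statically declared work item is queued on the shared system workqueue, so
there is nothing to allocate in binder_init() and no -ENOMEM path. A minimal
sketch of the pattern (hypothetical names):

#include <linux/workqueue.h>

static void my_deferred_fn(struct work_struct *work)
{
	/* runs later in process context, on the shared system workqueue */
}
static DECLARE_WORK(my_deferred_work, my_deferred_fn);

	/* at the call site; schedule_work(w) is queue_work(system_wq, w) */
	schedule_work(&my_deferred_work);
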
@@ -17,4 +17,17 @@ config SYNC_FILE
 	  Files fds, to the DRM driver for example. More details at
 	  Documentation/sync_file.txt.
 
+config SW_SYNC
+	bool "Sync File Validation Framework"
+	default n
+	depends on SYNC_FILE
+	depends on DEBUG_FS
+	---help---
+	  A sync object driver that uses a 32bit counter to coordinate
+	  synchronization. Useful when there is no hardware primitive backing
+	  the synchronization.
+
+	  WARNING: improper use of this can result in deadlocking kernel
+	  drivers from userspace. Intended for test and debug only.
+
 endmenu
@@ -1,2 +1,3 @@
 obj-y := dma-buf.o fence.o reservation.o seqno-fence.o fence-array.o
 obj-$(CONFIG_SYNC_FILE) += sync_file.o
+obj-$(CONFIG_SW_SYNC) += sw_sync.o sync_debug.o
@@ -1,5 +1,5 @@
 /*
- * drivers/dma-buf/sw_sync.c
+ * Sync File validation framework
  *
  * Copyright (C) 2012 Google, Inc.
  *
@@ -23,8 +23,38 @@
 #include "sync_debug.h"
 
 #define CREATE_TRACE_POINTS
-#include "trace/sync.h"
+#include "sync_trace.h"
 
+/*
+ * SW SYNC validation framework
+ *
+ * A sync object driver that uses a 32bit counter to coordinate
+ * synchronization. Useful when there is no hardware primitive backing
+ * the synchronization.
+ *
+ * To start the framework just open:
+ *
+ * <debugfs>/sync/sw_sync
+ *
+ * That will create a sync timeline, all fences created under this timeline
+ * file descriptor will belong to the this timeline.
+ *
+ * The 'sw_sync' file can be opened many times as to create different
+ * timelines.
+ *
+ * Fences can be created with SW_SYNC_IOC_CREATE_FENCE ioctl with struct
+ * sw_sync_ioctl_create_fence as parameter.
+ *
+ * To increment the timeline counter, SW_SYNC_IOC_INC ioctl should be used
+ * with the increment as u32. This will update the last signaled value
+ * from the timeline and signal any fence that has a seqno smaller or equal
+ * to it.
+ *
+ * struct sw_sync_ioctl_create_fence
+ * @value:	the seqno to initialise the fence with
+ * @name:	the name of the new sync point
+ * @fence:	return the fd of the new sync_file with the created fence
+ */
 struct sw_sync_create_fence_data {
 	__u32	value;
 	char	name[32];
@@ -35,6 +65,7 @@ struct sw_sync_create_fence_data {
 
 #define SW_SYNC_IOC_CREATE_FENCE	_IOWR(SW_SYNC_IOC_MAGIC, 0,\
 		struct sw_sync_create_fence_data)
+
 #define SW_SYNC_IOC_INC			_IOW(SW_SYNC_IOC_MAGIC, 1, __u32)
 
 static const struct fence_ops timeline_fence_ops;
@@ -176,7 +207,7 @@ static void timeline_fence_release(struct fence *fence)
 
 	spin_lock_irqsave(fence->lock, flags);
 	list_del(&pt->child_list);
-	if (WARN_ON_ONCE(!list_empty(&pt->active_list)))
+	if (!list_empty(&pt->active_list))
 		list_del(&pt->active_list);
 	spin_unlock_irqrestore(fence->lock, flags);
 
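
The comment block added above documents sw_sync's debugfs interface. A rough
userspace sketch of that flow; the debugfs mount point, the __s32 fence member
and the lack of error handling are assumptions, not shown in this hunk:

#include <fcntl.h>
#include <sys/ioctl.h>
#include <linux/types.h>

struct sw_sync_create_fence_data {
	__u32 value;
	char name[32];
	__s32 fence;		/* out: fd of the new sync_file (assumed) */
};

#define SW_SYNC_IOC_MAGIC	'W'
#define SW_SYNC_IOC_CREATE_FENCE _IOWR(SW_SYNC_IOC_MAGIC, 0,\
		struct sw_sync_create_fence_data)
#define SW_SYNC_IOC_INC		_IOW(SW_SYNC_IOC_MAGIC, 1, __u32)

int example(void)
{
	/* each open() creates one independent timeline */
	int tl = open("/sys/kernel/debug/sync/sw_sync", O_RDWR);
	struct sw_sync_create_fence_data data = { .value = 1, .name = "pt1" };
	__u32 inc = 1;

	ioctl(tl, SW_SYNC_IOC_CREATE_FENCE, &data);	/* data.fence: fence fd */
	ioctl(tl, SW_SYNC_IOC_INC, &inc);   /* signals fences with seqno <= 1 */
	return data.fence;
}
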
@@ -1,5 +1,5 @@
 /*
- * drivers/base/sync.c
+ * Sync File validation framework and debug information
  *
  * Copyright (C) 2012 Google, Inc.
  *
@@ -1,5 +1,5 @@
 /*
- * include/linux/sync.h
+ * Sync File validation framework and debug infomation
  *
  * Copyright (C) 2012 Google, Inc.
  *
@@ -1,11 +1,11 @@
 #undef TRACE_SYSTEM
-#define TRACE_INCLUDE_PATH ../../drivers/staging/android/trace
-#define TRACE_SYSTEM sync
+#define TRACE_INCLUDE_PATH ../../drivers/dma-buf
+#define TRACE_SYSTEM sync_trace
 
 #if !defined(_TRACE_SYNC_H) || defined(TRACE_HEADER_MULTI_READ)
 #define _TRACE_SYNC_H
 
-#include "../sync_debug.h"
+#include "sync_debug.h"
 #include <linux/tracepoint.h>
 
 TRACE_EVENT(sync_timeline,
@@ -24,19 +24,6 @@ config ANDROID_LOW_MEMORY_KILLER
 	  scripts (/init.rc), and it defines priority values with minimum free memory size
 	  for each priority.
 
-config SW_SYNC
-	bool "Software synchronization framework"
-	default n
-	depends on SYNC_FILE
-	depends on DEBUG_FS
-	---help---
-	  A sync object driver that uses a 32bit counter to coordinate
-	  synchronization. Useful when there is no hardware primitive backing
-	  the synchronization.
-
-	  WARNING: improper use of this can result in deadlocking kernel
-	  drivers from userspace. Intended for test and debug only.
-
 source "drivers/staging/android/ion/Kconfig"
 
 endif # if ANDROID
@@ -4,4 +4,3 @@ obj-y += ion/
 
 obj-$(CONFIG_ASHMEM) += ashmem.o
 obj-$(CONFIG_ANDROID_LOW_MEMORY_KILLER) += lowmemorykiller.o
-obj-$(CONFIG_SW_SYNC) += sw_sync.o sync_debug.o
@@ -205,19 +205,16 @@ static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
 		goto err2;
 	}
 
-	buffer->dev = dev;
-	buffer->size = len;
-
-	table = heap->ops->map_dma(heap, buffer);
-	if (WARN_ONCE(table == NULL,
-		      "heap->ops->map_dma should return ERR_PTR on error"))
-		table = ERR_PTR(-EINVAL);
-	if (IS_ERR(table)) {
+	if (buffer->sg_table == NULL) {
+		WARN_ONCE(1, "This heap needs to set the sgtable");
 		ret = -EINVAL;
 		goto err1;
 	}
 
-	buffer->sg_table = table;
+	table = buffer->sg_table;
+	buffer->dev = dev;
+	buffer->size = len;
 
 	if (ion_buffer_fault_user_mappings(buffer)) {
 		int num_pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
 		struct scatterlist *sg;
@@ -226,7 +223,7 @@ static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
 		buffer->pages = vmalloc(sizeof(struct page *) * num_pages);
 		if (!buffer->pages) {
 			ret = -ENOMEM;
-			goto err;
+			goto err1;
 		}
 
 		for_each_sg(table->sgl, sg, table->nents, i) {
@@ -260,8 +257,6 @@ static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
 	mutex_unlock(&dev->buffer_lock);
 	return buffer;
 
-err:
-	heap->ops->unmap_dma(heap, buffer);
 err1:
 	heap->ops->free(buffer);
 err2:
@@ -273,7 +268,6 @@ void ion_buffer_destroy(struct ion_buffer *buffer)
 {
 	if (WARN_ON(buffer->kmap_cnt > 0))
 		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
-	buffer->heap->ops->unmap_dma(buffer->heap, buffer);
 	buffer->heap->ops->free(buffer);
 	vfree(buffer->pages);
 	kfree(buffer);
@@ -551,7 +545,8 @@ struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
 }
 EXPORT_SYMBOL(ion_alloc);
 
-static void ion_free_nolock(struct ion_client *client, struct ion_handle *handle)
+static void ion_free_nolock(struct ion_client *client,
+			    struct ion_handle *handle)
 {
 	bool valid_handle;
 
@@ -576,32 +571,6 @@ void ion_free(struct ion_client *client, struct ion_handle *handle)
 }
 EXPORT_SYMBOL(ion_free);
 
-int ion_phys(struct ion_client *client, struct ion_handle *handle,
-	     ion_phys_addr_t *addr, size_t *len)
-{
-	struct ion_buffer *buffer;
-	int ret;
-
-	mutex_lock(&client->lock);
-	if (!ion_handle_validate(client, handle)) {
-		mutex_unlock(&client->lock);
-		return -EINVAL;
-	}
-
-	buffer = handle->buffer;
-
-	if (!buffer->heap->ops->phys) {
-		pr_err("%s: ion_phys is not implemented by this heap (name=%s, type=%d).\n",
-		       __func__, buffer->heap->name, buffer->heap->type);
-		mutex_unlock(&client->lock);
-		return -ENODEV;
-	}
-	mutex_unlock(&client->lock);
-	ret = buffer->heap->ops->phys(buffer->heap, buffer, addr, len);
-	return ret;
-}
-EXPORT_SYMBOL(ion_phys);
-
 static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
 {
 	void *vaddr;
@@ -917,26 +886,6 @@ void ion_client_destroy(struct ion_client *client)
 }
 EXPORT_SYMBOL(ion_client_destroy);
 
-struct sg_table *ion_sg_table(struct ion_client *client,
-			      struct ion_handle *handle)
-{
-	struct ion_buffer *buffer;
-	struct sg_table *table;
-
-	mutex_lock(&client->lock);
-	if (!ion_handle_validate(client, handle)) {
-		pr_err("%s: invalid handle passed to map_dma.\n",
-		       __func__);
-		mutex_unlock(&client->lock);
-		return ERR_PTR(-EINVAL);
-	}
-	buffer = handle->buffer;
-	table = buffer->sg_table;
-	mutex_unlock(&client->lock);
-	return table;
-}
-EXPORT_SYMBOL(ion_sg_table);
-
 static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
 				       struct device *dev,
 				       enum dma_data_direction direction);
@@ -1358,7 +1307,8 @@ static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 		struct ion_handle *handle;
 
 		mutex_lock(&client->lock);
-		handle = ion_handle_get_by_id_nolock(client, data.handle.handle);
+		handle = ion_handle_get_by_id_nolock(client,
+						     data.handle.handle);
 		if (IS_ERR(handle)) {
 			mutex_unlock(&client->lock);
 			return PTR_ERR(handle);
@@ -1588,8 +1538,7 @@ void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
 {
 	struct dentry *debug_file;
 
-	if (!heap->ops->allocate || !heap->ops->free || !heap->ops->map_dma ||
-	    !heap->ops->unmap_dma)
+	if (!heap->ops->allocate || !heap->ops->free)
 		pr_err("%s: can not add heap with invalid ops struct.\n",
 		       __func__);
 
@@ -1703,37 +1652,3 @@ void ion_device_destroy(struct ion_device *dev)
 }
 EXPORT_SYMBOL(ion_device_destroy);
 
-void __init ion_reserve(struct ion_platform_data *data)
-{
-	int i;
-
-	for (i = 0; i < data->nr; i++) {
-		if (data->heaps[i].size == 0)
-			continue;
-
-		if (data->heaps[i].base == 0) {
-			phys_addr_t paddr;
-
-			paddr = memblock_alloc_base(data->heaps[i].size,
-						    data->heaps[i].align,
-						    MEMBLOCK_ALLOC_ANYWHERE);
-			if (!paddr) {
-				pr_err("%s: error allocating memblock for heap %d\n",
-				       __func__, i);
-				continue;
-			}
-			data->heaps[i].base = paddr;
-		} else {
-			int ret = memblock_reserve(data->heaps[i].base,
-						   data->heaps[i].size);
-			if (ret)
-				pr_err("memblock reserve of %zx@%lx failed\n",
-				       data->heaps[i].size,
-				       data->heaps[i].base);
-		}
-		pr_info("%s: %s reserved base %lx size %zu\n", __func__,
-			data->heaps[i].name,
-			data->heaps[i].base,
-			data->heaps[i].size);
-	}
-}
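
The ion_buffer_create() hunk above inverts the dma-mapping contract: the core
no longer calls heap->ops->map_dma() after allocation; each heap must instead
leave a valid table in buffer->sg_table before returning from ->allocate(),
and the core only validates that it is set. The per-heap hunks that follow
apply the same mechanical change everywhere. Schematically (not a real heap,
just the shape every ->allocate() now takes):

static int my_heap_allocate(struct ion_heap *heap, struct ion_buffer *buffer,
			    unsigned long len, unsigned long align,
			    unsigned long flags)
{
	struct sg_table *table = kzalloc(sizeof(*table), GFP_KERNEL);

	if (!table)
		return -ENOMEM;
	if (sg_alloc_table(table, 1, GFP_KERNEL)) {
		kfree(table);
		return -ENOMEM;
	}
	/* ... fill the scatterlist with this heap's backing pages ... */

	buffer->sg_table = table;	/* was: buffer->priv_virt = table */
	return 0;
}
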
@@ -72,17 +72,6 @@ struct ion_platform_data {
 	struct ion_platform_heap *heaps;
 };
 
-/**
- * ion_reserve() - reserve memory for ion heaps if applicable
- * @data:	platform data specifying starting physical address and
- *		size
- *
- * Calls memblock reserve to set aside memory for heaps that are
- * located at specific memory addresses or of specific sizes not
- * managed by the kernel
- */
-void ion_reserve(struct ion_platform_data *data);
-
 /**
  * ion_client_create() - allocate a client and returns it
  * @dev:	the global ion device
@@ -129,36 +118,6 @@ struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
  */
 void ion_free(struct ion_client *client, struct ion_handle *handle);
 
-/**
- * ion_phys - returns the physical address and len of a handle
- * @client:	the client
- * @handle:	the handle
- * @addr:	a pointer to put the address in
- * @len:	a pointer to put the length in
- *
- * This function queries the heap for a particular handle to get the
- * handle's physical address. It't output is only correct if
- * a heap returns physically contiguous memory -- in other cases
- * this api should not be implemented -- ion_sg_table should be used
- * instead. Returns -EINVAL if the handle is invalid. This has
- * no implications on the reference counting of the handle --
- * the returned value may not be valid if the caller is not
- * holding a reference.
- */
-int ion_phys(struct ion_client *client, struct ion_handle *handle,
-	     ion_phys_addr_t *addr, size_t *len);
-
-/**
- * ion_map_dma - return an sg_table describing a handle
- * @client:	the client
- * @handle:	the handle
- *
- * This function returns the sg_table describing
- * a particular ion handle.
- */
-struct sg_table *ion_sg_table(struct ion_client *client,
-			      struct ion_handle *handle);
-
 /**
  * ion_map_kernel - create mapping for the given handle
  * @client:	the client
@@ -25,6 +25,8 @@
 #include "ion.h"
 #include "ion_priv.h"
 
+#define ION_CARVEOUT_ALLOCATE_FAIL	-1
+
 struct ion_carveout_heap {
 	struct ion_heap heap;
 	struct gen_pool *pool;
@@ -56,19 +58,6 @@ void ion_carveout_free(struct ion_heap *heap, ion_phys_addr_t addr,
 	gen_pool_free(carveout_heap->pool, addr, size);
 }
 
-static int ion_carveout_heap_phys(struct ion_heap *heap,
-				  struct ion_buffer *buffer,
-				  ion_phys_addr_t *addr, size_t *len)
-{
-	struct sg_table *table = buffer->priv_virt;
-	struct page *page = sg_page(table->sgl);
-	ion_phys_addr_t paddr = PFN_PHYS(page_to_pfn(page));
-
-	*addr = paddr;
-	*len = buffer->size;
-	return 0;
-}
-
 static int ion_carveout_heap_allocate(struct ion_heap *heap,
 				      struct ion_buffer *buffer,
 				      unsigned long size, unsigned long align,
@@ -95,7 +84,7 @@ static int ion_carveout_heap_allocate(struct ion_heap *heap,
 	}
 
 	sg_set_page(table->sgl, pfn_to_page(PFN_DOWN(paddr)), size, 0);
-	buffer->priv_virt = table;
+	buffer->sg_table = table;
 
 	return 0;
 
@@ -109,7 +98,7 @@ static int ion_carveout_heap_allocate(struct ion_heap *heap,
 static void ion_carveout_heap_free(struct ion_buffer *buffer)
 {
 	struct ion_heap *heap = buffer->heap;
-	struct sg_table *table = buffer->priv_virt;
+	struct sg_table *table = buffer->sg_table;
 	struct page *page = sg_page(table->sgl);
 	ion_phys_addr_t paddr = PFN_PHYS(page_to_pfn(page));
 
@@ -124,23 +113,9 @@ static void ion_carveout_heap_free(struct ion_buffer *buffer)
 	kfree(table);
 }
 
-static struct sg_table *ion_carveout_heap_map_dma(struct ion_heap *heap,
-						  struct ion_buffer *buffer)
-{
-	return buffer->priv_virt;
-}
-
-static void ion_carveout_heap_unmap_dma(struct ion_heap *heap,
-					struct ion_buffer *buffer)
-{
-}
-
 static struct ion_heap_ops carveout_heap_ops = {
 	.allocate = ion_carveout_heap_allocate,
 	.free = ion_carveout_heap_free,
-	.phys = ion_carveout_heap_phys,
-	.map_dma = ion_carveout_heap_map_dma,
-	.unmap_dma = ion_carveout_heap_unmap_dma,
 	.map_user = ion_heap_map_user,
 	.map_kernel = ion_heap_map_kernel,
 	.unmap_kernel = ion_heap_unmap_kernel,
@@ -75,7 +75,7 @@ static int ion_chunk_heap_allocate(struct ion_heap *heap,
 		sg = sg_next(sg);
 	}
 
-	buffer->priv_virt = table;
+	buffer->sg_table = table;
 	chunk_heap->allocated += allocated_size;
 	return 0;
 err:
@@ -95,7 +95,7 @@ static void ion_chunk_heap_free(struct ion_buffer *buffer)
 	struct ion_heap *heap = buffer->heap;
 	struct ion_chunk_heap *chunk_heap =
 		container_of(heap, struct ion_chunk_heap, heap);
-	struct sg_table *table = buffer->priv_virt;
+	struct sg_table *table = buffer->sg_table;
 	struct scatterlist *sg;
 	int i;
 	unsigned long allocated_size;
@@ -117,22 +117,9 @@ static void ion_chunk_heap_free(struct ion_buffer *buffer)
 	kfree(table);
 }
 
-static struct sg_table *ion_chunk_heap_map_dma(struct ion_heap *heap,
-					       struct ion_buffer *buffer)
-{
-	return buffer->priv_virt;
-}
-
-static void ion_chunk_heap_unmap_dma(struct ion_heap *heap,
-				     struct ion_buffer *buffer)
-{
-}
-
 static struct ion_heap_ops chunk_heap_ops = {
 	.allocate = ion_chunk_heap_allocate,
 	.free = ion_chunk_heap_free,
-	.map_dma = ion_chunk_heap_map_dma,
-	.unmap_dma = ion_chunk_heap_unmap_dma,
 	.map_user = ion_heap_map_user,
 	.map_kernel = ion_heap_map_kernel,
 	.unmap_kernel = ion_heap_unmap_kernel,
@@ -78,6 +78,7 @@ static int ion_cma_allocate(struct ion_heap *heap, struct ion_buffer *buffer,
 		goto free_table;
 	/* keep this for memory release */
 	buffer->priv_virt = info;
+	buffer->sg_table = info->table;
 	dev_dbg(dev, "Allocate buffer %p\n", buffer);
 	return 0;
 
@@ -105,36 +106,6 @@ static void ion_cma_free(struct ion_buffer *buffer)
 	kfree(info);
 }
 
-/* return physical address in addr */
-static int ion_cma_phys(struct ion_heap *heap, struct ion_buffer *buffer,
-			ion_phys_addr_t *addr, size_t *len)
-{
-	struct ion_cma_heap *cma_heap = to_cma_heap(buffer->heap);
-	struct device *dev = cma_heap->dev;
-	struct ion_cma_buffer_info *info = buffer->priv_virt;
-
-	dev_dbg(dev, "Return buffer %p physical address %pa\n", buffer,
-		&info->handle);
-
-	*addr = info->handle;
-	*len = buffer->size;
-
-	return 0;
-}
-
-static struct sg_table *ion_cma_heap_map_dma(struct ion_heap *heap,
-					     struct ion_buffer *buffer)
-{
-	struct ion_cma_buffer_info *info = buffer->priv_virt;
-
-	return info->table;
-}
-
-static void ion_cma_heap_unmap_dma(struct ion_heap *heap,
-				   struct ion_buffer *buffer)
-{
-}
-
 static int ion_cma_mmap(struct ion_heap *mapper, struct ion_buffer *buffer,
 			struct vm_area_struct *vma)
 {
@@ -162,9 +133,6 @@ static void ion_cma_unmap_kernel(struct ion_heap *heap,
 static struct ion_heap_ops ion_cma_ops = {
 	.allocate = ion_cma_allocate,
 	.free = ion_cma_free,
-	.map_dma = ion_cma_heap_map_dma,
-	.unmap_dma = ion_cma_heap_unmap_dma,
-	.phys = ion_cma_phys,
 	.map_user = ion_cma_mmap,
 	.map_kernel = ion_cma_map_kernel,
 	.unmap_kernel = ion_cma_unmap_kernel,
@@ -42,8 +42,6 @@ struct ion_buffer *ion_handle_buffer(struct ion_handle *handle);
  * @size:		size of the buffer
  * @priv_virt:		private data to the buffer representable as
  *			a void *
- * @priv_phys:		private data to the buffer representable as
- *			an ion_phys_addr_t (and someday a phys_addr_t)
  * @lock:		protects the buffers cnt fields
  * @kmap_cnt:		number of times the buffer is mapped to the kernel
  * @vaddr:		the kernel mapping if kmap_cnt is not zero
@@ -69,10 +67,7 @@ struct ion_buffer {
 	unsigned long flags;
 	unsigned long private_flags;
 	size_t size;
-	union {
-		void *priv_virt;
-		ion_phys_addr_t priv_phys;
-	};
+	void *priv_virt;
 	struct mutex lock;
 	int kmap_cnt;
 	void *vaddr;
@@ -91,10 +86,6 @@ void ion_buffer_destroy(struct ion_buffer *buffer);
  * struct ion_heap_ops - ops to operate on a given heap
  * @allocate:		allocate memory
  * @free:		free memory
- * @phys		get physical address of a buffer (only define on
- *			physically contiguous heaps)
- * @map_dma		map the memory for dma to a scatterlist
- * @unmap_dma		unmap the memory for dma
  * @map_kernel		map memory to the kernel
  * @unmap_kernel	unmap memory to the kernel
  * @map_user		map memory to userspace
@@ -111,11 +102,6 @@ struct ion_heap_ops {
 		    struct ion_buffer *buffer, unsigned long len,
 		    unsigned long align, unsigned long flags);
 	void (*free)(struct ion_buffer *buffer);
-	int (*phys)(struct ion_heap *heap, struct ion_buffer *buffer,
-		    ion_phys_addr_t *addr, size_t *len);
-	struct sg_table * (*map_dma)(struct ion_heap *heap,
-				     struct ion_buffer *buffer);
-	void (*unmap_dma)(struct ion_heap *heap, struct ion_buffer *buffer);
 	void * (*map_kernel)(struct ion_heap *heap, struct ion_buffer *buffer);
 	void (*unmap_kernel)(struct ion_heap *heap, struct ion_buffer *buffer);
 	int (*map_user)(struct ion_heap *mapper, struct ion_buffer *buffer,
@@ -327,20 +313,6 @@ void ion_chunk_heap_destroy(struct ion_heap *);
 struct ion_heap *ion_cma_heap_create(struct ion_platform_heap *);
 void ion_cma_heap_destroy(struct ion_heap *);
 
-/**
- * kernel api to allocate/free from carveout -- used when carveout is
- * used to back an architecture specific custom heap
- */
-ion_phys_addr_t ion_carveout_allocate(struct ion_heap *heap, unsigned long size,
-				      unsigned long align);
-void ion_carveout_free(struct ion_heap *heap, ion_phys_addr_t addr,
-		       unsigned long size);
-/**
- * The carveout heap returns physical addresses, since 0 may be a valid
- * physical address, this is used to indicate allocation failed
- */
-#define ION_CARVEOUT_ALLOCATE_FAIL -1
-
 /**
  * functions for creating and destroying a heap pool -- allows you
  * to keep a pool of pre allocated memory to use from your heap. Keeping
@@ -164,7 +164,7 @@ static int ion_system_heap_allocate(struct ion_heap *heap,
 		list_del(&page->lru);
 	}
 
-	buffer->priv_virt = table;
+	buffer->sg_table = table;
 	return 0;
 
 free_table:
@@ -199,17 +199,6 @@ static void ion_system_heap_free(struct ion_buffer *buffer)
 	kfree(table);
 }
 
-static struct sg_table *ion_system_heap_map_dma(struct ion_heap *heap,
-						struct ion_buffer *buffer)
-{
-	return buffer->priv_virt;
-}
-
-static void ion_system_heap_unmap_dma(struct ion_heap *heap,
-				      struct ion_buffer *buffer)
-{
-}
-
 static int ion_system_heap_shrink(struct ion_heap *heap, gfp_t gfp_mask,
 				  int nr_to_scan)
 {
@@ -243,8 +232,6 @@ static int ion_system_heap_shrink(struct ion_heap *heap, gfp_t gfp_mask,
 static struct ion_heap_ops system_heap_ops = {
 	.allocate = ion_system_heap_allocate,
 	.free = ion_system_heap_free,
-	.map_dma = ion_system_heap_map_dma,
-	.unmap_dma = ion_system_heap_unmap_dma,
 	.map_kernel = ion_heap_map_kernel,
 	.unmap_kernel = ion_heap_unmap_kernel,
 	.map_user = ion_heap_map_user,
@@ -358,7 +345,7 @@ static int ion_system_contig_heap_allocate(struct ion_heap *heap,
 
 	sg_set_page(table->sgl, page, len, 0);
 
-	buffer->priv_virt = table;
+	buffer->sg_table = table;
 
 	ion_pages_sync_for_device(NULL, page, len, DMA_BIDIRECTIONAL);
 
@@ -375,7 +362,7 @@ static int ion_system_contig_heap_allocate(struct ion_heap *heap,
 
 static void ion_system_contig_heap_free(struct ion_buffer *buffer)
 {
-	struct sg_table *table = buffer->priv_virt;
+	struct sg_table *table = buffer->sg_table;
 	struct page *page = sg_page(table->sgl);
 	unsigned long pages = PAGE_ALIGN(buffer->size) >> PAGE_SHIFT;
 	unsigned long i;
@@ -386,34 +373,9 @@ static void ion_system_contig_heap_free(struct ion_buffer *buffer)
 	kfree(table);
 }
 
-static int ion_system_contig_heap_phys(struct ion_heap *heap,
-				       struct ion_buffer *buffer,
-				       ion_phys_addr_t *addr, size_t *len)
-{
-	struct sg_table *table = buffer->priv_virt;
-	struct page *page = sg_page(table->sgl);
-	*addr = page_to_phys(page);
-	*len = buffer->size;
-	return 0;
-}
-
-static struct sg_table *ion_system_contig_heap_map_dma(struct ion_heap *heap,
-						struct ion_buffer *buffer)
-{
-	return buffer->priv_virt;
-}
-
-static void ion_system_contig_heap_unmap_dma(struct ion_heap *heap,
-					     struct ion_buffer *buffer)
-{
-}
-
 static struct ion_heap_ops kmalloc_ops = {
 	.allocate = ion_system_contig_heap_allocate,
 	.free = ion_system_contig_heap_free,
-	.phys = ion_system_contig_heap_phys,
-	.map_dma = ion_system_contig_heap_map_dma,
-	.unmap_dma = ion_system_contig_heap_unmap_dma,
 	.map_kernel = ion_heap_map_kernel,
 	.unmap_kernel = ion_heap_unmap_kernel,
 	.map_user = ion_heap_map_user,
@@ -946,10 +946,8 @@ static int usbduxfast_auto_attach(struct comedi_device *dev,
 	}
 
 	devpriv->urb = usb_alloc_urb(0, GFP_KERNEL);
-	if (!devpriv->urb) {
-		dev_err(dev->class_dev, "Could not alloc. urb\n");
+	if (!devpriv->urb)
 		return -ENOMEM;
-	}
 
 	devpriv->inbuf = kmalloc(SIZEINBUF, GFP_KERNEL);
 	if (!devpriv->inbuf)
@@ -297,11 +297,10 @@ static int enqueue_txdev(struct ks_wlan_private *priv, unsigned char *p,
 static int write_to_device(struct ks_wlan_private *priv, unsigned char *buffer,
 			   unsigned long size)
 {
-	int rc, retval;
+	int retval;
 	unsigned char rw_data;
 	struct hostif_hdr *hdr;
 	hdr = (struct hostif_hdr *)buffer;
-	rc = 0;
 
 	DPRINTK(4, "size=%d\n", hdr->size);
 	if (hdr->event < HIF_DATA_REQ || HIF_REQ_MAX < hdr->event) {
@@ -711,7 +710,6 @@ static int ks7010_sdio_update_index(struct ks_wlan_private *priv, u32 index)
 	int rc = 0;
 	int retval;
 	unsigned char *data_buf;
-	data_buf = NULL;
 
 	data_buf = kmalloc(sizeof(u32), GFP_KERNEL);
 	if (!data_buf) {
@@ -732,8 +730,7 @@ static int ks7010_sdio_update_index(struct ks_wlan_private *priv, u32 index)
 		goto error_out;
 	}
 error_out:
-	if (data_buf)
-		kfree(data_buf);
+	kfree(data_buf);
 	return rc;
 }
 
@@ -744,7 +741,7 @@ static int ks7010_sdio_data_compare(struct ks_wlan_private *priv, u32 address,
 	int rc = 0;
 	int retval;
 	unsigned char *read_buf;
-	read_buf = NULL;
+
 	read_buf = kmalloc(ROM_BUFF_SIZE, GFP_KERNEL);
 	if (!read_buf) {
 		rc = 1;
@@ -763,8 +760,7 @@ static int ks7010_sdio_data_compare(struct ks_wlan_private *priv, u32 address,
 		goto error_out;
 	}
 error_out:
-	if (read_buf)
-		kfree(read_buf);
+	kfree(read_buf);
 	return rc;
 }
 
@@ -778,8 +774,6 @@ static int ks7010_upload_firmware(struct ks_wlan_private *priv,
 	int length;
 	const struct firmware *fw_entry = NULL;
 
-	rom_buf = NULL;
-
 	/* buffer allocate */
 	rom_buf = kmalloc(ROM_BUFF_SIZE, GFP_KERNEL);
 	if (!rom_buf) {
@@ -879,8 +873,7 @@ static int ks7010_upload_firmware(struct ks_wlan_private *priv,
 	release_firmware(fw_entry);
 error_out0:
 	sdio_release_host(card->func);
-	if (rom_buf)
-		kfree(rom_buf);
+	kfree(rom_buf);
 	return rc;
 }
 
@@ -1141,7 +1134,6 @@ static void ks7010_sdio_remove(struct sdio_func *func)
 	int ret;
 	struct ks_sdio_card *card;
 	struct ks_wlan_private *priv;
-	struct net_device *netdev;
 	DPRINTK(1, "ks7010_sdio_remove()\n");
 
 	card = sdio_get_drvdata(func);
@@ -1151,8 +1143,9 @@ static void ks7010_sdio_remove(struct sdio_func *func)
 
 	DPRINTK(1, "priv = card->priv\n");
 	priv = card->priv;
-	netdev = priv->net_dev;
 	if (priv) {
+		struct net_device *netdev = priv->net_dev;
+
 		ks_wlan_net_stop(netdev);
 		DPRINTK(1, "ks_wlan_net_stop\n");
 
@@ -1199,9 +1192,7 @@ static void ks7010_sdio_remove(struct sdio_func *func)
 		unregister_netdev(netdev);
 
 		trx_device_exit(priv);
-		if (priv->ks_wlan_hw.read_buf) {
-			kfree(priv->ks_wlan_hw.read_buf);
-		}
+		kfree(priv->ks_wlan_hw.read_buf);
 		free_netdev(priv->net_dev);
 		card->priv = NULL;
 	}
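
The recurring ks7010 cleanup above leans on the fact that kfree(NULL) is a
documented no-op, so the NULL guard adds nothing:

	/* before */
	if (read_buf)
		kfree(read_buf);
	/* after -- equivalent, one branch shorter */
	kfree(read_buf);
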
@@ -20,15 +20,21 @@
 #define getUInt32( A, B ) (uint32_t)(A[B+0] << 0) + (A[B+1] << 8) + (A[B+2] << 16) + (A[B+3] << 24)
 
 // Convert from UInt32 to Byte[] in a portable way
-#define putUInt32( A, B, C ) A[B+0] = (uint8_t) (C & 0xff); \
-	A[B+1] = (uint8_t) ((C>>8) & 0xff); \
-	A[B+2] = (uint8_t) ((C>>16) & 0xff); \
-	A[B+3] = (uint8_t) ((C>>24) & 0xff)
+#define putUInt32(A, B, C) \
+do { \
+	A[B + 0] = (uint8_t)(C & 0xff); \
+	A[B + 1] = (uint8_t)((C >> 8) & 0xff); \
+	A[B + 2] = (uint8_t)((C >> 16) & 0xff); \
+	A[B + 3] = (uint8_t)((C >> 24) & 0xff); \
+} while (0)
 
 // Reset the state to the empty message.
-#define MichaelClear( A ) A->L = A->K0; \
-	A->R = A->K1; \
-	A->nBytesInM = 0;
+#define MichaelClear(A) \
+do { \
+	A->L = A->K0; \
+	A->R = A->K1; \
+	A->nBytesInM = 0; \
+} while (0)
 
 static
 void MichaelInitializeFunction(struct michel_mic_t *Mic, uint8_t * key)
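
The putUInt32()/MichaelClear() rewrite above is the standard do { } while (0)
idiom: it makes a multi-statement macro expand to a single statement, so it
stays correct under an unbraced if/else. A minimal illustration (hypothetical
macro and fields):

#define RESET_BAD(a)	(a)->x = 0; \
			(a)->y = 0

#define RESET_GOOD(a)	do { (a)->x = 0; (a)->y = 0; } while (0)

	if (cond)
		RESET_BAD(s);	/* only (s)->x = 0 is conditional! */

	if (cond)
		RESET_GOOD(s);	/* expands to one statement, as intended */
	else
		other();	/* and an else still parses correctly */
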
@@ -1468,11 +1468,6 @@ ksocknal_close_conn_locked(struct ksock_conn *conn, int error)
 
 		conn->ksnc_route = NULL;
 
-#if 0 /* irrelevant with only eager routes */
-		/* make route least favourite */
-		list_del(&route->ksnr_list);
-		list_add_tail(&route->ksnr_list, &peer->ksnp_routes);
-#endif
 		ksocknal_route_decref(route);	/* drop conn's ref on route */
 	}
 
@@ -2008,13 +2008,6 @@ ksocknal_connect(struct ksock_route *route)
 			list_splice_init(&peer->ksnp_tx_queue, &zombies);
 		}
 
-#if 0 /* irrelevant with only eager routes */
-		if (!route->ksnr_deleted) {
-			/* make this route least-favourite for re-selection */
-			list_del(&route->ksnr_list);
-			list_add_tail(&route->ksnr_list, &peer->ksnp_routes);
-		}
-#endif
 		write_unlock_bh(&ksocknal_data.ksnd_global_lock);
 
 		ksocknal_peer_failed(peer);
@@ -449,23 +449,7 @@ lnet_finalize(lnet_ni_t *ni, lnet_msg_t *msg, int status)
 
 	if (!msg)
 		return;
-#if 0
-	CDEBUG(D_WARNING, "%s msg->%s Flags:%s%s%s%s%s%s%s%s%s%s%s txp %s rxp %s\n",
-	       lnet_msgtyp2str(msg->msg_type), libcfs_id2str(msg->msg_target),
-	       msg->msg_target_is_router ? "t" : "",
-	       msg->msg_routing ? "X" : "",
-	       msg->msg_ack ? "A" : "",
-	       msg->msg_sending ? "S" : "",
-	       msg->msg_receiving ? "R" : "",
-	       msg->msg_delayed ? "d" : "",
-	       msg->msg_txcredit ? "C" : "",
-	       msg->msg_peertxcredit ? "c" : "",
-	       msg->msg_rtrcredit ? "F" : "",
-	       msg->msg_peerrtrcredit ? "f" : "",
-	       msg->msg_onactivelist ? "!" : "",
-	       !msg->msg_txpeer ? "<none>" : libcfs_nid2str(msg->msg_txpeer->lp_nid),
-	       !msg->msg_rxpeer ? "<none>" : libcfs_nid2str(msg->msg_rxpeer->lp_nid));
-#endif
 	msg->msg_ev.status = status;
 
 	if (msg->msg_md) {
@@ -3213,10 +3213,10 @@ const struct inode_operations ll_file_inode_operations = {
 	.setattr = ll_setattr,
 	.getattr = ll_getattr,
 	.permission = ll_inode_permission,
-	.setxattr = ll_setxattr,
-	.getxattr = ll_getxattr,
+	.setxattr = generic_setxattr,
+	.getxattr = generic_getxattr,
 	.listxattr = ll_listxattr,
-	.removexattr = ll_removexattr,
+	.removexattr = generic_removexattr,
 	.fiemap = ll_fiemap,
 	.get_acl = ll_get_acl,
 };
@@ -42,6 +42,7 @@
 #include "../include/lustre_mdc.h"
 #include "../include/lustre_intent.h"
 #include <linux/compat.h>
+#include <linux/xattr.h>
 #include <linux/posix_acl_xattr.h>
 #include "vvp_internal.h"
 
@@ -933,12 +934,9 @@ static inline __u64 ll_file_maxbytes(struct inode *inode)
 }
 
 /* llite/xattr.c */
-int ll_setxattr(struct dentry *dentry, struct inode *inode,
-		const char *name, const void *value, size_t size, int flags);
-ssize_t ll_getxattr(struct dentry *dentry, struct inode *inode,
-		    const char *name, void *buffer, size_t size);
+extern const struct xattr_handler *ll_xattr_handlers[];
+
 ssize_t ll_listxattr(struct dentry *dentry, char *buffer, size_t size);
-int ll_removexattr(struct dentry *dentry, const char *name);
 
 /**
  * Common IO arguments for various VFS I/O interfaces.
@@ -418,6 +418,7 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
 	CDEBUG(D_SUPER, "rootfid "DFID"\n", PFID(&sbi->ll_root_fid));
 
 	sb->s_op = &lustre_super_operations;
+	sb->s_xattr = ll_xattr_handlers;
 #if THREAD_SIZE >= 8192 /*b=17630*/
 	sb->s_export_op = &lustre_export_operations;
 #endif
@@ -1106,10 +1106,10 @@ const struct inode_operations ll_dir_inode_operations = {
 	.setattr = ll_setattr,
 	.getattr = ll_getattr,
 	.permission = ll_inode_permission,
-	.setxattr = ll_setxattr,
-	.getxattr = ll_getxattr,
+	.setxattr = generic_setxattr,
+	.getxattr = generic_getxattr,
 	.listxattr = ll_listxattr,
-	.removexattr = ll_removexattr,
+	.removexattr = generic_removexattr,
 	.get_acl = ll_get_acl,
 };
 
@@ -1117,9 +1117,9 @@ const struct inode_operations ll_special_inode_operations = {
 	.setattr = ll_setattr,
 	.getattr = ll_getattr,
 	.permission = ll_inode_permission,
-	.setxattr = ll_setxattr,
-	.getxattr = ll_getxattr,
+	.setxattr = generic_setxattr,
+	.getxattr = generic_getxattr,
 	.listxattr = ll_listxattr,
-	.removexattr = ll_removexattr,
+	.removexattr = generic_removexattr,
 	.get_acl = ll_get_acl,
 };
@@ -155,8 +155,8 @@ const struct inode_operations ll_fast_symlink_inode_operations = {
 	.get_link = ll_get_link,
 	.getattr = ll_getattr,
 	.permission = ll_inode_permission,
-	.setxattr = ll_setxattr,
-	.getxattr = ll_getxattr,
+	.setxattr = generic_setxattr,
+	.getxattr = generic_getxattr,
 	.listxattr = ll_listxattr,
-	.removexattr = ll_removexattr,
+	.removexattr = generic_removexattr,
 };
@ -99,46 +99,57 @@ int xattr_type_filter(struct ll_sb_info *sbi, int xattr_type)
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
static
|
static int
|
||||||
int ll_setxattr_common(struct inode *inode, const char *name,
|
ll_xattr_set_common(const struct xattr_handler *handler,
|
||||||
const void *value, size_t size,
|
struct dentry *dentry, struct inode *inode,
|
||||||
int flags, __u64 valid)
|
const char *name, const void *value, size_t size,
|
||||||
|
int flags)
|
||||||
{
|
{
|
||||||
|
char fullname[strlen(handler->prefix) + strlen(name) + 1];
|
||||||
struct ll_sb_info *sbi = ll_i2sbi(inode);
|
struct ll_sb_info *sbi = ll_i2sbi(inode);
|
||||||
struct ptlrpc_request *req = NULL;
|
struct ptlrpc_request *req = NULL;
|
||||||
int xattr_type, rc;
|
|
||||||
const char *pv = value;
|
const char *pv = value;
|
||||||
|
__u64 valid;
|
||||||
|
int rc;
|
||||||
|
|
||||||
xattr_type = get_xattr_type(name);
|
if (flags == XATTR_REPLACE) {
|
||||||
rc = xattr_type_filter(sbi, xattr_type);
|
ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_REMOVEXATTR, 1);
|
||||||
|
valid = OBD_MD_FLXATTRRM;
|
||||||
|
} else {
|
||||||
|
ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_SETXATTR, 1);
|
||||||
|
valid = OBD_MD_FLXATTR;
|
||||||
|
}
|
||||||
|
|
||||||
|
rc = xattr_type_filter(sbi, handler->flags);
|
||||||
if (rc)
|
if (rc)
|
||||||
return rc;
|
return rc;
|
||||||
|
|
||||||
if ((xattr_type == XATTR_ACL_ACCESS_T ||
|
if ((handler->flags == XATTR_ACL_ACCESS_T ||
|
||||||
xattr_type == XATTR_ACL_DEFAULT_T) &&
|
handler->flags == XATTR_ACL_DEFAULT_T) &&
|
||||||
!inode_owner_or_capable(inode))
|
!inode_owner_or_capable(inode))
|
||||||
return -EPERM;
|
return -EPERM;
|
||||||
|
|
||||||
/* b10667: ignore lustre special xattr for now */
|
/* b10667: ignore lustre special xattr for now */
|
||||||
if ((xattr_type == XATTR_TRUSTED_T && strcmp(name, "trusted.lov") == 0) ||
|
if ((handler->flags == XATTR_TRUSTED_T && !strcmp(name, "lov")) ||
|
||||||
(xattr_type == XATTR_LUSTRE_T && strcmp(name, "lustre.lov") == 0))
|
(handler->flags == XATTR_LUSTRE_T && !strcmp(name, "lov")))
|
||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
/* b15587: ignore security.capability xattr for now */
|
/* b15587: ignore security.capability xattr for now */
|
||||||
if ((xattr_type == XATTR_SECURITY_T &&
|
if ((handler->flags == XATTR_SECURITY_T &&
|
||||||
strcmp(name, "security.capability") == 0))
|
!strcmp(name, "capability")))
|
||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
/* LU-549: Disable security.selinux when selinux is disabled */
|
/* LU-549: Disable security.selinux when selinux is disabled */
|
||||||
if (xattr_type == XATTR_SECURITY_T && !selinux_is_enabled() &&
|
if (handler->flags == XATTR_SECURITY_T && !selinux_is_enabled() &&
|
||||||
strcmp(name, "security.selinux") == 0)
|
strcmp(name, "selinux") == 0)
|
||||||
return -EOPNOTSUPP;
|
return -EOPNOTSUPP;
|
||||||
|
|
||||||
|
sprintf(fullname, "%s%s\n", handler->prefix, name);
|
||||||
rc = md_setxattr(sbi->ll_md_exp, ll_inode2fid(inode),
|
rc = md_setxattr(sbi->ll_md_exp, ll_inode2fid(inode),
|
||||||
valid, name, pv, size, 0, flags,
|
valid, fullname, pv, size, 0, flags,
|
||||||
ll_i2suppgid(inode), &req);
|
ll_i2suppgid(inode), &req);
|
||||||
if (rc) {
|
if (rc) {
|
||||||
if (rc == -EOPNOTSUPP && xattr_type == XATTR_USER_T) {
|
if (rc == -EOPNOTSUPP && handler->flags == XATTR_USER_T) {
|
||||||
LCONSOLE_INFO("Disabling user_xattr feature because it is not supported on the server\n");
|
LCONSOLE_INFO("Disabling user_xattr feature because it is not supported on the server\n");
|
||||||
sbi->ll_flags &= ~LL_SBI_USER_XATTR;
|
sbi->ll_flags &= ~LL_SBI_USER_XATTR;
|
||||||
}
|
}
|
||||||
|
@ -149,8 +160,10 @@ int ll_setxattr_common(struct inode *inode, const char *name,
|
||||||
return 0;
|
return 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
int ll_setxattr(struct dentry *dentry, struct inode *inode,
|
static int ll_xattr_set(const struct xattr_handler *handler,
|
||||||
const char *name, const void *value, size_t size, int flags)
|
struct dentry *dentry, struct inode *inode,
|
||||||
|
const char *name, const void *value, size_t size,
|
||||||
|
int flags)
|
||||||
{
|
{
|
||||||
LASSERT(inode);
|
LASSERT(inode);
|
||||||
LASSERT(name);
|
LASSERT(name);
|
||||||
|
@ -158,20 +171,24 @@ int ll_setxattr(struct dentry *dentry, struct inode *inode,
|
||||||
CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p), xattr %s\n",
|
CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p), xattr %s\n",
|
||||||
PFID(ll_inode2fid(inode)), inode, name);
|
PFID(ll_inode2fid(inode)), inode, name);
|
||||||
|
|
||||||
ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_SETXATTR, 1);
|
if (!strcmp(name, "lov")) {
|
||||||
|
|
||||||
if ((strncmp(name, XATTR_TRUSTED_PREFIX,
|
|
||||||
sizeof(XATTR_TRUSTED_PREFIX) - 1) == 0 &&
|
|
||||||
strcmp(name + sizeof(XATTR_TRUSTED_PREFIX) - 1, "lov") == 0) ||
|
|
||||||
(strncmp(name, XATTR_LUSTRE_PREFIX,
|
|
||||||
sizeof(XATTR_LUSTRE_PREFIX) - 1) == 0 &&
|
|
||||||
strcmp(name + sizeof(XATTR_LUSTRE_PREFIX) - 1, "lov") == 0)) {
|
|
||||||
struct lov_user_md *lump = (struct lov_user_md *)value;
|
struct lov_user_md *lump = (struct lov_user_md *)value;
|
||||||
|
int op_type = flags == XATTR_REPLACE ? LPROC_LL_REMOVEXATTR :
|
||||||
|
LPROC_LL_SETXATTR;
|
||||||
int rc = 0;
|
int rc = 0;
|
||||||
|
|
||||||
|
ll_stats_ops_tally(ll_i2sbi(inode), op_type, 1);
|
||||||
|
|
||||||
if (size != 0 && size < sizeof(struct lov_user_md))
|
if (size != 0 && size < sizeof(struct lov_user_md))
|
||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
|
|
||||||
|
/*
|
||||||
|
* It is possible to set an xattr to a "" value of zero size.
|
||||||
|
* For this case we are going to treat it as a removal.
|
||||||
|
*/
|
||||||
|
if (!size && lump)
|
||||||
|
lump = NULL;
|
||||||
|
|
||||||
/* Attributes that are saved via getxattr will always have
|
/* Attributes that are saved via getxattr will always have
|
||||||
* the stripe_offset as 0. Instead, the MDS should be
|
* the stripe_offset as 0. Instead, the MDS should be
|
||||||
* allowed to pick the starting OST index. b=17846
|
* allowed to pick the starting OST index. b=17846
|
||||||
|
@@ -194,92 +211,27 @@ int ll_setxattr(struct dentry *dentry, struct inode *inode,
 
 		return rc;
 
-	} else if (strcmp(name, XATTR_NAME_LMA) == 0 ||
-		   strcmp(name, XATTR_NAME_LINK) == 0)
+	} else if (!strcmp(name, "lma") || !strcmp(name, "link")) {
+		ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_SETXATTR, 1);
 		return 0;
+	}
 
-	return ll_setxattr_common(inode, name, value, size, flags,
-				  OBD_MD_FLXATTR);
+	return ll_xattr_set_common(handler, dentry, inode, name, value, size,
+				   flags);
 }
 
-int ll_removexattr(struct dentry *dentry, const char *name)
-{
-	struct inode *inode = d_inode(dentry);
-
-	LASSERT(inode);
-	LASSERT(name);
-
-	CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p), xattr %s\n",
-	       PFID(ll_inode2fid(inode)), inode, name);
-
-	ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_REMOVEXATTR, 1);
-	return ll_setxattr_common(inode, name, NULL, 0, 0,
-				  OBD_MD_FLXATTRRM);
-}
-
-static
-int ll_getxattr_common(struct inode *inode, const char *name,
-		       void *buffer, size_t size, __u64 valid)
+static int
+ll_xattr_list(struct inode *inode, const char *name, int type, void *buffer,
+	      size_t size, __u64 valid)
 {
+	struct ll_inode_info *lli = ll_i2info(inode);
 	struct ll_sb_info *sbi = ll_i2sbi(inode);
 	struct ptlrpc_request *req = NULL;
 	struct mdt_body *body;
-	int xattr_type, rc;
 	void *xdata;
-	struct ll_inode_info *lli = ll_i2info(inode);
+	int rc;
 
-	CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p)\n",
-	       PFID(ll_inode2fid(inode)), inode);
-
-	/* listxattr have slightly different behavior from of ext3:
-	 * without 'user_xattr' ext3 will list all xattr names but
-	 * filtered out "^user..*"; we list them all for simplicity.
-	 */
-	if (!name) {
-		xattr_type = XATTR_OTHER_T;
-		goto do_getxattr;
-	}
-
-	xattr_type = get_xattr_type(name);
-	rc = xattr_type_filter(sbi, xattr_type);
-	if (rc)
-		return rc;
-
-	/* b15587: ignore security.capability xattr for now */
-	if ((xattr_type == XATTR_SECURITY_T &&
-	     strcmp(name, "security.capability") == 0))
-		return -ENODATA;
-
-	/* LU-549: Disable security.selinux when selinux is disabled */
-	if (xattr_type == XATTR_SECURITY_T && !selinux_is_enabled() &&
-	    strcmp(name, "security.selinux") == 0)
-		return -EOPNOTSUPP;
-
-#ifdef CONFIG_FS_POSIX_ACL
-	/* posix acl is under protection of LOOKUP lock. when calling to this,
-	 * we just have path resolution to the target inode, so we have great
-	 * chance that cached ACL is uptodate.
-	 */
-	if (xattr_type == XATTR_ACL_ACCESS_T) {
-		struct posix_acl *acl;
-
-		spin_lock(&lli->lli_lock);
-		acl = posix_acl_dup(lli->lli_posix_acl);
-		spin_unlock(&lli->lli_lock);
-
-		if (!acl)
-			return -ENODATA;
-
-		rc = posix_acl_to_xattr(&init_user_ns, acl, buffer, size);
-		posix_acl_release(acl);
-		return rc;
-	}
-	if (xattr_type == XATTR_ACL_DEFAULT_T && !S_ISDIR(inode->i_mode))
-		return -ENODATA;
-#endif
-
-do_getxattr:
-	if (sbi->ll_xattr_cache_enabled && xattr_type != XATTR_ACL_ACCESS_T) {
+	if (sbi->ll_xattr_cache_enabled && type != XATTR_ACL_ACCESS_T) {
 		rc = ll_xattr_cache_get(inode, name, buffer, size, valid);
 		if (rc == -EAGAIN)
 			goto getxattr_nocache;
@@ -340,7 +292,7 @@ int ll_getxattr_common(struct inode *inode, const char *name,
 	}
 
 out_xattr:
-	if (rc == -EOPNOTSUPP && xattr_type == XATTR_USER_T) {
+	if (rc == -EOPNOTSUPP && type == XATTR_USER_T) {
 		LCONSOLE_INFO(
 			"%s: disabling user_xattr feature because it is not supported on the server: rc = %d\n",
 			ll_get_fsname(inode->i_sb, NULL, 0), rc);
@@ -351,8 +303,63 @@ int ll_getxattr_common(struct inode *inode, const char *name,
 	return rc;
 }
 
-ssize_t ll_getxattr(struct dentry *dentry, struct inode *inode,
-		    const char *name, void *buffer, size_t size)
+static int ll_xattr_get_common(const struct xattr_handler *handler,
+			       struct dentry *dentry, struct inode *inode,
+			       const char *name, void *buffer, size_t size)
+{
+	char fullname[strlen(handler->prefix) + strlen(name) + 1];
+	struct ll_sb_info *sbi = ll_i2sbi(inode);
+	struct ll_inode_info *lli = ll_i2info(inode);
+	int rc;
+
+	CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p)\n",
+	       PFID(ll_inode2fid(inode)), inode);
+
+	ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_GETXATTR, 1);
+
+	rc = xattr_type_filter(sbi, handler->flags);
+	if (rc)
+		return rc;
+
+	/* b15587: ignore security.capability xattr for now */
+	if ((handler->flags == XATTR_SECURITY_T && !strcmp(name, "capability")))
+		return -ENODATA;
+
+	/* LU-549: Disable security.selinux when selinux is disabled */
+	if (handler->flags == XATTR_SECURITY_T && !selinux_is_enabled() &&
+	    !strcmp(name, "selinux"))
+		return -EOPNOTSUPP;
+
+#ifdef CONFIG_FS_POSIX_ACL
+	/* posix acl is under protection of LOOKUP lock. when calling to this,
+	 * we just have path resolution to the target inode, so we have great
+	 * chance that cached ACL is uptodate.
+	 */
+	if (handler->flags == XATTR_ACL_ACCESS_T) {
+		struct posix_acl *acl;
+
+		spin_lock(&lli->lli_lock);
+		acl = posix_acl_dup(lli->lli_posix_acl);
+		spin_unlock(&lli->lli_lock);
+
+		if (!acl)
+			return -ENODATA;
+
+		rc = posix_acl_to_xattr(&init_user_ns, acl, buffer, size);
+		posix_acl_release(acl);
+		return rc;
+	}
+	if (handler->flags == XATTR_ACL_DEFAULT_T && !S_ISDIR(inode->i_mode))
+		return -ENODATA;
+#endif
+	sprintf(fullname, "%s%s\n", handler->prefix, name);
+	return ll_xattr_list(inode, fullname, handler->flags, buffer, size,
+			     OBD_MD_FLXATTR);
+}
+
+static int ll_xattr_get(const struct xattr_handler *handler,
+			struct dentry *dentry, struct inode *inode,
+			const char *name, void *buffer, size_t size)
 {
 	LASSERT(inode);
 	LASSERT(name);
@@ -360,20 +367,15 @@ ssize_t ll_getxattr(struct dentry *dentry, struct inode *inode,
 	CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p), xattr %s\n",
 	       PFID(ll_inode2fid(inode)), inode, name);
 
-	ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_GETXATTR, 1);
-
-	if ((strncmp(name, XATTR_TRUSTED_PREFIX,
-		     sizeof(XATTR_TRUSTED_PREFIX) - 1) == 0 &&
-	     strcmp(name + sizeof(XATTR_TRUSTED_PREFIX) - 1, "lov") == 0) ||
-	    (strncmp(name, XATTR_LUSTRE_PREFIX,
-		     sizeof(XATTR_LUSTRE_PREFIX) - 1) == 0 &&
-	     strcmp(name + sizeof(XATTR_LUSTRE_PREFIX) - 1, "lov") == 0)) {
+	if (!strcmp(name, "lov")) {
 		struct lov_stripe_md *lsm;
 		struct lov_user_md *lump;
 		struct lov_mds_md *lmm = NULL;
 		struct ptlrpc_request *request = NULL;
 		int rc = 0, lmmsize = 0;
 
+		ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_GETXATTR, 1);
+
 		if (!S_ISREG(inode->i_mode) && !S_ISDIR(inode->i_mode))
 			return -ENODATA;
 
@@ -439,7 +441,7 @@ ssize_t ll_getxattr(struct dentry *dentry, struct inode *inode,
 		return rc;
 	}
 
-	return ll_getxattr_common(inode, name, buffer, size, OBD_MD_FLXATTR);
+	return ll_xattr_get_common(handler, dentry, inode, name, buffer, size);
 }
 
 ssize_t ll_listxattr(struct dentry *dentry, char *buffer, size_t size)
@@ -457,7 +459,8 @@ ssize_t ll_listxattr(struct dentry *dentry, char *buffer, size_t size)
 
 	ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_LISTXATTR, 1);
 
-	rc = ll_getxattr_common(inode, NULL, buffer, size, OBD_MD_FLXATTRLS);
+	rc = ll_xattr_list(inode, NULL, XATTR_OTHER_T, buffer, size,
+			   OBD_MD_FLXATTRLS);
 	if (rc < 0)
 		goto out;
 
@@ -518,3 +521,57 @@ ssize_t ll_listxattr(struct dentry *dentry, char *buffer, size_t size)
 
 	return rc;
 }
+
+static const struct xattr_handler ll_user_xattr_handler = {
+	.prefix = XATTR_USER_PREFIX,
+	.flags = XATTR_USER_T,
+	.get = ll_xattr_get_common,
+	.set = ll_xattr_set_common,
+};
+
+static const struct xattr_handler ll_trusted_xattr_handler = {
+	.prefix = XATTR_TRUSTED_PREFIX,
+	.flags = XATTR_TRUSTED_T,
+	.get = ll_xattr_get,
+	.set = ll_xattr_set,
+};
+
+static const struct xattr_handler ll_security_xattr_handler = {
+	.prefix = XATTR_SECURITY_PREFIX,
+	.flags = XATTR_SECURITY_T,
+	.get = ll_xattr_get_common,
+	.set = ll_xattr_set_common,
+};
+
+static const struct xattr_handler ll_acl_access_xattr_handler = {
+	.prefix = XATTR_NAME_POSIX_ACL_ACCESS,
+	.flags = XATTR_ACL_ACCESS_T,
+	.get = ll_xattr_get_common,
+	.set = ll_xattr_set_common,
+};
+
+static const struct xattr_handler ll_acl_default_xattr_handler = {
+	.prefix = XATTR_NAME_POSIX_ACL_DEFAULT,
+	.flags = XATTR_ACL_DEFAULT_T,
+	.get = ll_xattr_get_common,
+	.set = ll_xattr_set_common,
+};
+
+static const struct xattr_handler ll_lustre_xattr_handler = {
+	.prefix = XATTR_LUSTRE_PREFIX,
+	.flags = XATTR_LUSTRE_T,
+	.get = ll_xattr_get,
+	.set = ll_xattr_set,
+};
+
+const struct xattr_handler *ll_xattr_handlers[] = {
+	&ll_user_xattr_handler,
+	&ll_trusted_xattr_handler,
+	&ll_security_xattr_handler,
+#ifdef CONFIG_FS_POSIX_ACL
+	&ll_acl_access_xattr_handler,
+	&ll_acl_default_xattr_handler,
+#endif
+	&ll_lustre_xattr_handler,
+	NULL,
+};
@@ -797,16 +797,11 @@ static int imon_probe(struct usb_interface *interface,
 		goto free_rbuf;
 	}
 	rx_urb = usb_alloc_urb(0, GFP_KERNEL);
-	if (!rx_urb) {
-		dev_err(dev, "%s: usb_alloc_urb failed for IR urb\n", __func__);
+	if (!rx_urb)
 		goto free_lirc_buf;
-	}
 	tx_urb = usb_alloc_urb(0, GFP_KERNEL);
-	if (!tx_urb) {
-		dev_err(dev, "%s: usb_alloc_urb failed for display urb\n",
-			__func__);
+	if (!tx_urb)
 		goto free_rx_urb;
-	}
 
 	mutex_init(&context->ctx_lock);
 	context->vfd_proto_6p = vfd_proto_6p;
@@ -758,17 +758,12 @@ static int sasem_probe(struct usb_interface *interface,
 	}
 	rx_urb = usb_alloc_urb(0, GFP_KERNEL);
 	if (!rx_urb) {
-		dev_err(&interface->dev,
-			"%s: usb_alloc_urb failed for IR urb\n", __func__);
 		alloc_status = 5;
 		goto alloc_status_switch;
 	}
 	if (vfd_ep_found) {
 		tx_urb = usb_alloc_urb(0, GFP_KERNEL);
 		if (!tx_urb) {
-			dev_err(&interface->dev,
-				"%s: usb_alloc_urb failed for VFD urb",
-				__func__);
 			alloc_status = 6;
 			goto alloc_status_switch;
 		}
@@ -650,10 +650,8 @@ static int hdm_enqueue(struct most_interface *iface, int channel,
 		return -ENODEV;
 
 	urb = usb_alloc_urb(NO_ISOCHRONOUS_URB, GFP_ATOMIC);
-	if (!urb) {
-		dev_err(dev, "Failed to allocate URB\n");
+	if (!urb)
 		return -ENOMEM;
-	}
 
 	anchor = kzalloc(sizeof(*anchor), GFP_ATOMIC);
 	if (!anchor) {
@@ -1702,11 +1702,8 @@ short rtl8192_tx(struct net_device *dev, struct sk_buff *skb)
 	}
 	if (bSend0Byte) {
 		tx_urb_zero = usb_alloc_urb(0, GFP_ATOMIC);
-		if (!tx_urb_zero) {
-			RT_TRACE(COMP_ERR,
-				 "can't alloc urb for zero byte\n");
+		if (!tx_urb_zero)
 			return -ENOMEM;
-		}
 		usb_fill_bulk_urb(tx_urb_zero, udev,
 				  usb_sndbulkpipe(udev, idx_pipe),
 				  &zero, 0, tx_zero_isr, dev);
@@ -440,10 +440,8 @@ static bool vnt_alloc_bufs(struct vnt_private *priv)
 
 		/* allocate URBs */
 		tx_context->urb = usb_alloc_urb(0, GFP_KERNEL);
-		if (!tx_context->urb) {
-			dev_err(&priv->usb->dev, "alloc tx urb failed\n");
+		if (!tx_context->urb)
 			goto free_tx;
-		}
 
 		tx_context->in_use = false;
 	}
@@ -462,10 +460,8 @@ static bool vnt_alloc_bufs(struct vnt_private *priv)
 
 		/* allocate URBs */
 		rcb->urb = usb_alloc_urb(0, GFP_KERNEL);
-		if (!rcb->urb) {
-			dev_err(&priv->usb->dev, "Failed to alloc rx urb\n");
+		if (!rcb->urb)
 			goto free_rx_tx;
-		}
 
 		rcb->skb = dev_alloc_skb(priv->rx_buf_sz);
 		if (!rcb->skb)
@@ -479,10 +475,8 @@ static bool vnt_alloc_bufs(struct vnt_private *priv)
 	}
 
 	priv->interrupt_urb = usb_alloc_urb(0, GFP_KERNEL);
-	if (!priv->interrupt_urb) {
-		dev_err(&priv->usb->dev, "Failed to alloc int urb\n");
+	if (!priv->interrupt_urb)
 		goto free_rx_tx;
-	}
 
 	priv->int_buf.data_buf = kmalloc(MAX_INTERRUPT_SIZE, GFP_KERNEL);
 	if (!priv->int_buf.data_buf) {