Merge branch 'work.poll2' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs
Pull more poll annotation updates from Al Viro:
 "This is preparation to solving the problems you've mentioned in the
  original poll series. After this series, the kernel is ready for running

      for V in IN OUT PRI ERR RDNORM RDBAND WRNORM WRBAND HUP RDHUP NVAL MSG; do
          L=`git grep -l -w POLL$V | grep -v '^t' | grep -v /um/ | grep -v '^sa' | grep -v '/poll.h$' | grep -v '^D'`
          for f in $L; do
              sed -i "-es/^\([^\"]*\)\(\<POLL$V\>\)/\\1E\\2/" $f
          done
      done

  as a bulk search-and-replace. After that, the kernel is ready to apply
  the patch to unify {de,}mangle_poll(), and then get rid of kernel-side
  POLL... uses entirely, and we should be all done with that stuff.

  Basically, that's what you suggested wrt KPOLL..., except that we can
  use EPOLL... instead - they already are arch-independent (and equal to
  what is currently kernel-side POLL...).

  After the preparations (in this series) the switch to returning
  EPOLL... from ->poll() instances is completely mechanical and
  kernel-side POLL... can go away. The last step (killing kernel-side
  POLL... and unifying {de,}mangle_poll()) has to be done after the
  search-and-replace job, since we need userland-side POLL... for the
  unified {de,}mangle_poll(), thus the cherry-pick at the last step.

  After that we will have:

   - POLL{IN,OUT,...} *not* in __poll_t, so any stray instances of
     ->poll() still using those will be caught by sparse.

   - eventpoll.c and select.c warning-free wrt __poll_t.

   - no more kernel-side definitions of POLL... - userland ones are
     visible through the entire kernel (and used pretty much only for
     mangle/demangle).

   - same behavior as after the first series (i.e. sparc et al. epoll(2)
     working correctly)."

* 'work.poll2' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs:
  annotate ep_scan_ready_list()
  ep_send_events_proc(): return result via esed->res
  preparation to switching ->poll() to returning EPOLL...
  add EPOLLNVAL, annotate EPOLL... and event_poll->event
  use linux/poll.h instead of asm/poll.h
  xen: fix poll misannotation
  smc: missing poll annotations
commit ee5daa1361
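
Note: the sed loop in the message renames kernel-side POLL... users to the
EPOLL... constants, which this series turns into values of the sparse
"bitwise" type __poll_t. A minimal, self-contained sketch of why that catches
stragglers follows; the typedef and the two constants merely mirror the shape
of the kernel definitions so the snippet compiles on its own, and both poll
functions are invented for the example, not real kernel code:

    /* Illustration only.  Build normally with gcc, or run sparse on the file
     * (the in-tree equivalent is "make C=1") to see the type warning.
     */
    #ifdef __CHECKER__
    #define __bitwise __attribute__((bitwise))
    #define __force   __attribute__((force))
    #else
    #define __bitwise
    #define __force
    #endif

    typedef unsigned __bitwise __poll_t;

    #define EPOLLIN     ((__force __poll_t)0x00000001)
    #define EPOLLRDNORM ((__force __poll_t)0x00000040)

    /* A ->poll() instance after the rename: typed constants, no warning. */
    __poll_t example_poll_converted(void)
    {
            return EPOLLIN | EPOLLRDNORM;
    }

    /* One the script missed: still compiles, but sparse flags the implicit
     * conversion of a plain integer to the restricted type __poll_t.
     */
    __poll_t example_poll_stray(void)
    {
            return 0x0041;
    }
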
@@ -20,7 +20,7 @@ int pvcalls_front_recvmsg(struct socket *sock,
 			  struct msghdr *msg,
 			  size_t len,
 			  int flags);
-unsigned int pvcalls_front_poll(struct file *file,
+__poll_t pvcalls_front_poll(struct file *file,
 				struct socket *sock,
 				poll_table *wait);
 int pvcalls_front_release(struct socket *sock);
@@ -39,7 +39,7 @@
 #include <linux/device.h>
 #include <linux/pid_namespace.h>
 #include <asm/io.h>
-#include <asm/poll.h>
+#include <linux/poll.h>
 #include <linux/uaccess.h>
 
 #include <linux/coda.h>
@@ -18,7 +18,7 @@
 #include <linux/slab.h>
 #include <linux/atomic.h>
 #include <linux/device.h>
-#include <asm/poll.h>
+#include <linux/poll.h>
 
 #include "internal.h"
 
@@ -260,6 +260,7 @@ struct ep_pqueue {
 struct ep_send_events_data {
 	int maxevents;
 	struct epoll_event __user *events;
+	int res;
 };
 
 /*
@@ -660,12 +661,13 @@ static inline void ep_pm_stay_awake_rcu(struct epitem *epi)
  *
  * Returns: The same integer error code returned by the @sproc callback.
  */
-static int ep_scan_ready_list(struct eventpoll *ep,
-			      int (*sproc)(struct eventpoll *,
+static __poll_t ep_scan_ready_list(struct eventpoll *ep,
+			      __poll_t (*sproc)(struct eventpoll *,
 					   struct list_head *, void *),
 			      void *priv, int depth, bool ep_locked)
 {
-	int error, pwake = 0;
+	__poll_t res;
+	int pwake = 0;
 	unsigned long flags;
 	struct epitem *epi, *nepi;
 	LIST_HEAD(txlist);
@@ -694,7 +696,7 @@ static int ep_scan_ready_list(struct eventpoll *ep,
 	/*
 	 * Now call the callback function.
 	 */
-	error = (*sproc)(ep, &txlist, priv);
+	res = (*sproc)(ep, &txlist, priv);
 
 	spin_lock_irqsave(&ep->lock, flags);
 	/*
@@ -747,7 +749,7 @@ static int ep_scan_ready_list(struct eventpoll *ep,
 	if (pwake)
 		ep_poll_safewake(&ep->poll_wait);
 
-	return error;
+	return res;
 }
 
 static void epi_rcu_free(struct rcu_head *head)
@@ -864,7 +866,7 @@ static int ep_eventpoll_release(struct inode *inode, struct file *file)
 	return 0;
 }
 
-static int ep_read_events_proc(struct eventpoll *ep, struct list_head *head,
+static __poll_t ep_read_events_proc(struct eventpoll *ep, struct list_head *head,
 			       void *priv);
 static void ep_ptable_queue_proc(struct file *file, wait_queue_head_t *whead,
 				 poll_table *pt);
@@ -874,7 +876,7 @@ static void ep_ptable_queue_proc(struct file *file, wait_queue_head_t *whead,
  * the ep->mtx so we need to start from depth=1, such that mutex_lock_nested()
  * is correctly annotated.
  */
-static unsigned int ep_item_poll(const struct epitem *epi, poll_table *pt,
+static __poll_t ep_item_poll(const struct epitem *epi, poll_table *pt,
 				 int depth)
 {
 	struct eventpoll *ep;
@@ -894,7 +896,7 @@ static unsigned int ep_item_poll(const struct epitem *epi, poll_table *pt,
 				  locked) & epi->event.events;
 }
 
-static int ep_read_events_proc(struct eventpoll *ep, struct list_head *head,
+static __poll_t ep_read_events_proc(struct eventpoll *ep, struct list_head *head,
 			       void *priv)
 {
 	struct epitem *epi, *tmp;
@@ -1414,7 +1416,8 @@ static noinline void ep_destroy_wakeup_source(struct epitem *epi)
 static int ep_insert(struct eventpoll *ep, const struct epoll_event *event,
 		     struct file *tfile, int fd, int full_check)
 {
-	int error, revents, pwake = 0;
+	int error, pwake = 0;
+	__poll_t revents;
 	unsigned long flags;
 	long user_watches;
 	struct epitem *epi;
@@ -1612,12 +1615,11 @@ static int ep_modify(struct eventpoll *ep, struct epitem *epi,
 	return 0;
 }
 
-static int ep_send_events_proc(struct eventpoll *ep, struct list_head *head,
+static __poll_t ep_send_events_proc(struct eventpoll *ep, struct list_head *head,
 			       void *priv)
 {
 	struct ep_send_events_data *esed = priv;
-	int eventcnt;
-	unsigned int revents;
+	__poll_t revents;
 	struct epitem *epi;
 	struct epoll_event __user *uevent;
 	struct wakeup_source *ws;
@@ -1630,8 +1632,8 @@ static int ep_send_events_proc(struct eventpoll *ep, struct list_head *head,
 	 * Items cannot vanish during the loop because ep_scan_ready_list() is
 	 * holding "mtx" during this call.
 	 */
-	for (eventcnt = 0, uevent = esed->events;
-	     !list_empty(head) && eventcnt < esed->maxevents;) {
+	for (esed->res = 0, uevent = esed->events;
+	     !list_empty(head) && esed->res < esed->maxevents;) {
 		epi = list_first_entry(head, struct epitem, rdllink);
 
 		/*
@@ -1665,9 +1667,11 @@ static int ep_send_events_proc(struct eventpoll *ep, struct list_head *head,
 		    __put_user(epi->event.data, &uevent->data)) {
 			list_add(&epi->rdllink, head);
 			ep_pm_stay_awake(epi);
-			return eventcnt ? eventcnt : -EFAULT;
+			if (!esed->res)
+				esed->res = -EFAULT;
+			return 0;
 		}
-		eventcnt++;
+		esed->res++;
 		uevent++;
 		if (epi->event.events & EPOLLONESHOT)
 			epi->event.events &= EP_PRIVATE_BITS;
@@ -1689,7 +1693,7 @@ static int ep_send_events_proc(struct eventpoll *ep, struct list_head *head,
 		}
 	}
 
-	return eventcnt;
+	return 0;
 }
 
 static int ep_send_events(struct eventpoll *ep,
@@ -1700,7 +1704,8 @@ static int ep_send_events(struct eventpoll *ep,
 	esed.maxevents = maxevents;
 	esed.events = events;
 
-	return ep_scan_ready_list(ep, ep_send_events_proc, &esed, 0, false);
+	ep_scan_ready_list(ep, ep_send_events_proc, &esed, 0, false);
+	return esed.res;
 }
 
 static inline struct timespec64 ep_set_mstimeout(long ms)
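
Note: the eventpoll.c hunks above are the mechanical part of the esed->res
change. Once ep_send_events_proc()'s return type is __poll_t it can only
carry an event mask, so the number of delivered events (or -EFAULT) moves
into struct ep_send_events_data and ep_send_events() reads it back after the
scan. Reduced to its essentials the pattern looks like the sketch below; all
names here are invented stand-ins, not the kernel functions:

    /* Sketch of "return the real result through the context struct". */
    struct scan_ctx {
            int max;        /* in:  capacity of the caller's buffer     */
            int res;        /* out: items delivered, or a negative errno */
    };

    typedef unsigned int mask_t;    /* stand-in for __poll_t */

    static mask_t scan_proc(struct scan_ctx *ctx)
    {
            for (ctx->res = 0; ctx->res < ctx->max; ctx->res++) {
                    /* copy one event to the caller here; on failure, store a
                     * negative errno in ctx->res and return 0 early.
                     */
            }
            return 0;       /* the mask-typed return no longer carries the count */
    }

    int send_events(int maxevents)
    {
            struct scan_ctx ctx = { .max = maxevents };

            scan_proc(&ctx);        /* mask return value deliberately unused */
            return ctx.res;         /* count (or error) comes from the struct */
    }
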
@@ -26,7 +26,7 @@
 #include <linux/shmem_fs.h>
 #include <linux/compat.h>
 
-#include <asm/poll.h>
+#include <linux/poll.h>
 #include <asm/siginfo.h>
 #include <linux/uaccess.h>
 
@@ -11,6 +11,7 @@
 #include <linux/sysctl.h>
 #include <linux/uaccess.h>
 #include <uapi/linux/poll.h>
+#include <uapi/linux/eventpoll.h>
 
 extern struct ctl_table epoll_table[]; /* for sysctl */
 /* ~832 bytes of stack space used max in sys_select/sys_poll before allocating
@@ -22,7 +23,7 @@ extern struct ctl_table epoll_table[]; /* for sysctl */
 #define WQUEUES_STACK_ALLOC	(MAX_STACK_ALLOC - FRONTEND_STACK_ALLOC)
 #define N_INLINE_POLL_ENTRIES	(WQUEUES_STACK_ALLOC / sizeof(struct poll_table_entry))
 
-#define DEFAULT_POLLMASK (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM)
+#define DEFAULT_POLLMASK (EPOLLIN | EPOLLOUT | EPOLLRDNORM | EPOLLWRNORM)
 
 struct poll_table_struct;
 
@@ -28,20 +28,21 @@
 #define EPOLL_CTL_MOD 3
 
 /* Epoll event masks */
-#define EPOLLIN		0x00000001
-#define EPOLLPRI	0x00000002
-#define EPOLLOUT	0x00000004
-#define EPOLLERR	0x00000008
-#define EPOLLHUP	0x00000010
-#define EPOLLRDNORM	0x00000040
-#define EPOLLRDBAND	0x00000080
-#define EPOLLWRNORM	0x00000100
-#define EPOLLWRBAND	0x00000200
-#define EPOLLMSG	0x00000400
-#define EPOLLRDHUP	0x00002000
+#define EPOLLIN		(__force __poll_t)0x00000001
+#define EPOLLPRI	(__force __poll_t)0x00000002
+#define EPOLLOUT	(__force __poll_t)0x00000004
+#define EPOLLERR	(__force __poll_t)0x00000008
+#define EPOLLHUP	(__force __poll_t)0x00000010
+#define EPOLLNVAL	(__force __poll_t)0x00000020
+#define EPOLLRDNORM	(__force __poll_t)0x00000040
+#define EPOLLRDBAND	(__force __poll_t)0x00000080
+#define EPOLLWRNORM	(__force __poll_t)0x00000100
+#define EPOLLWRBAND	(__force __poll_t)0x00000200
+#define EPOLLMSG	(__force __poll_t)0x00000400
+#define EPOLLRDHUP	(__force __poll_t)0x00002000
 
 /* Set exclusive wakeup mode for the target file descriptor */
-#define EPOLLEXCLUSIVE (1U << 28)
+#define EPOLLEXCLUSIVE (__force __poll_t)(1U << 28)
 
 /*
  * Request the handling of system wakeup events so as to prevent system suspends
@@ -53,13 +54,13 @@
  *
  * Requires CAP_BLOCK_SUSPEND
  */
-#define EPOLLWAKEUP (1U << 29)
+#define EPOLLWAKEUP (__force __poll_t)(1U << 29)
 
 /* Set the One Shot behaviour for the target file descriptor */
-#define EPOLLONESHOT (1U << 30)
+#define EPOLLONESHOT (__force __poll_t)(1U << 30)
 
 /* Set the Edge Triggered behaviour for the target file descriptor */
-#define EPOLLET (1U << 31)
+#define EPOLLET (__force __poll_t)(1U << 31)
 
 /*
  * On x86-64 make the 64bit structure have the same alignment as the
@@ -74,7 +75,7 @@
 #endif
 
 struct epoll_event {
-	__u32 events;
+	__poll_t events;
 	__u64 data;
 } EPOLL_PACKED;
 
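
Note: the include/uapi/linux/eventpoll.h hunks make both the EPOLL... masks
and epoll_event.events carry __poll_t (and add EPOLLNVAL so kernel-side
POLLNVAL users have a typed equivalent), which is also why DEFAULT_POLLMASK
above switches to the EPOLL... spellings. The userland-side POLL...
definitions stay relevant, per the merge message, because on some
architectures (sparc among them) a few POLL... bits have numeric values that
differ from the arch-independent EPOLL... encoding, so poll(2)/select(2) must
translate at the user/kernel boundary; the kernel's real helpers for that are
the {de,}mangle_poll() pair named in the message. Purely as an illustration
of such a translation, with the helper name and the single bit value below
invented for the sketch:

    /* Illustration only: map one arch-independent bit to a hypothetical
     * arch-specific userspace value before copying the mask out.
     */
    #define EX_EPOLLWRNORM  0x00000100u     /* arch-independent encoding    */
    #define EX_POLLWRNORM   0x00000004u     /* hypothetical per-arch value  */

    unsigned int ex_mangle_poll(unsigned int kernel_mask)
    {
            unsigned int user_mask = kernel_mask & ~EX_EPOLLWRNORM;

            if (kernel_mask & EX_EPOLLWRNORM)
                    user_mask |= EX_POLLWRNORM;
            return user_mask;
    }
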
@@ -1141,7 +1141,7 @@ static int smc_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
 static __poll_t smc_accept_poll(struct sock *parent)
 {
 	struct smc_sock *isk = smc_sk(parent);
-	int mask = 0;
+	__poll_t mask = 0;
 
 	spin_lock(&isk->accept_q_lock);
 	if (!list_empty(&isk->accept_q))