Diffstat (limited to 'fs/eventpoll.c')
-rw-r--r--	fs/eventpoll.c	36
1 file changed, 25 insertions, 11 deletions
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index 8cf07242067..267d0ada454 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -217,7 +217,7 @@ struct ep_send_events_data {
  * Configuration options available inside /proc/sys/fs/epoll/
  */
 /* Maximum number of epoll watched descriptors, per user */
-static int max_user_watches __read_mostly;
+static long max_user_watches __read_mostly;
 
 /*
  * This mutex is used to serialize ep_free() and eventpoll_release_file().
@@ -240,16 +240,18 @@ static struct kmem_cache *pwq_cache __read_mostly;
 
 #include <linux/sysctl.h>
 
-static int zero;
+static long zero;
+static long long_max = LONG_MAX;
 
 ctl_table epoll_table[] = {
 	{
 		.procname	= "max_user_watches",
 		.data		= &max_user_watches,
-		.maxlen		= sizeof(int),
+		.maxlen		= sizeof(max_user_watches),
 		.mode		= 0644,
-		.proc_handler	= proc_dointvec_minmax,
+		.proc_handler	= proc_doulongvec_minmax,
 		.extra1		= &zero,
+		.extra2		= &long_max,
 	},
 	{ }
 };
@@ -561,7 +563,7 @@ static int ep_remove(struct eventpoll *ep, struct epitem *epi)
 	/* At this point it is safe to free the eventpoll item */
 	kmem_cache_free(epi_cache, epi);
 
-	atomic_dec(&ep->user->epoll_watches);
+	atomic_long_dec(&ep->user->epoll_watches);
 
 	return 0;
 }
@@ -898,11 +900,12 @@ static int ep_insert(struct eventpoll *ep, struct epoll_event *event,
 {
 	int error, revents, pwake = 0;
 	unsigned long flags;
+	long user_watches;
 	struct epitem *epi;
 	struct ep_pqueue epq;
 
-	if (unlikely(atomic_read(&ep->user->epoll_watches) >=
-		     max_user_watches))
+	user_watches = atomic_long_read(&ep->user->epoll_watches);
+	if (unlikely(user_watches >= max_user_watches))
 		return -ENOSPC;
 	if (!(epi = kmem_cache_alloc(epi_cache, GFP_KERNEL)))
 		return -ENOMEM;
@@ -966,7 +969,7 @@ static int ep_insert(struct eventpoll *ep, struct epoll_event *event,
 
 	spin_unlock_irqrestore(&ep->lock, flags);
 
-	atomic_inc(&ep->user->epoll_watches);
+	atomic_long_inc(&ep->user->epoll_watches);
 
 	/* We have to call this outside the lock */
 	if (pwake)
@@ -1111,6 +1114,17 @@ static int ep_send_events(struct eventpoll *ep,
 	return ep_scan_ready_list(ep, ep_send_events_proc, &esed);
 }
 
+static inline struct timespec ep_set_mstimeout(long ms)
+{
+	struct timespec now, ts = {
+		.tv_sec = ms / MSEC_PER_SEC,
+		.tv_nsec = NSEC_PER_MSEC * (ms % MSEC_PER_SEC),
+	};
+
+	ktime_get_ts(&now);
+	return timespec_add_safe(now, ts);
+}
+
 static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
 		   int maxevents, long timeout)
 {
@@ -1118,12 +1132,11 @@ static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
 	unsigned long flags;
 	long slack;
 	wait_queue_t wait;
-	struct timespec end_time;
 	ktime_t expires, *to = NULL;
 
 	if (timeout > 0) {
-		ktime_get_ts(&end_time);
-		timespec_add_ns(&end_time, (u64)timeout * NSEC_PER_MSEC);
+		struct timespec end_time = ep_set_mstimeout(timeout);
+
 		slack = select_estimate_accuracy(&end_time);
 		to = &expires;
 		*to = timespec_to_ktime(end_time);
@@ -1426,6 +1439,7 @@ static int __init eventpoll_init(void)
 	 */
 	max_user_watches = (((si.totalram - si.totalhigh) / 25) << PAGE_SHIFT) /
 		EP_ITEM_COST;
+	BUG_ON(max_user_watches < 0);
 
 	/* Initialize the structure used to perform safe poll wait head wake ups */
 	ep_nested_calls_init(&poll_safewake_ncalls);
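Two changes are folded into this diff. The first widens max_user_watches from int to long: the boot-time default is computed from available low memory (about 4% of it, per the /25 in eventpoll_init()), so on large-memory 64-bit machines the result can overflow an int, which is also what the new BUG_ON guards against. The sysctl handler becomes proc_doulongvec_minmax(), with extra1/extra2 bounding writes to [0, LONG_MAX]. As a hedged illustration of how the knob surfaces to userspace, here is a small reader for the procfs file named in the patched source's comment; the program is illustrative and not part of the kernel change.

/*
 * Minimal reader for the limit this patch widens to a long. The file
 * path comes from the comment in the patched source; everything else
 * here is ordinary userspace C, not kernel code.
 */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/fs/epoll/max_user_watches", "r");
	long limit;

	if (!f) {
		perror("fopen");
		return 1;
	}
	if (fscanf(f, "%ld", &limit) != 1) {	/* the value is now a long, hence %ld */
		fprintf(stderr, "unexpected format\n");
		fclose(f);
		return 1;
	}
	fclose(f);
	printf("per-user epoll watch limit: %ld\n", limit);
	return 0;
}

When a user's watch count reaches this limit, the ep_insert() path above rejects the EPOLL_CTL_ADD with -ENOSPC.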
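The second change replaces the open-coded deadline math in ep_poll(). The removed code computed (u64)timeout * NSEC_PER_MSEC, which wraps for very large millisecond timeouts before timespec_add_ns() ever sees it; ep_set_mstimeout() divides into whole seconds first, so only the sub-second remainder is multiplied, and timespec_add_safe() saturates the final add instead of wrapping. Below is a minimal userspace sketch of the same idea, with sat_add_timespec() as my stand-in for the kernel's timespec_add_safe() and the time constants defined locally.

/*
 * Userspace sketch contrasting the old and new deadline math.
 * sat_add_timespec() is a stand-in for the kernel's timespec_add_safe();
 * MSEC_PER_SEC/NSEC_PER_MSEC/NSEC_PER_SEC are defined locally because
 * this is ordinary userspace C, not kernel code.
 */
#include <stdio.h>
#include <stdint.h>
#include <time.h>
#include <limits.h>

#define MSEC_PER_SEC  1000L
#define NSEC_PER_MSEC 1000000L
#define NSEC_PER_SEC  1000000000L

/* Saturating add: clamp instead of wrapping when tv_sec would overflow */
static struct timespec sat_add_timespec(struct timespec a, struct timespec b)
{
	struct timespec res;

	if (b.tv_sec > (time_t)LONG_MAX - a.tv_sec) {	/* LONG_MAX as a TIME_T_MAX stand-in */
		res.tv_sec = (time_t)LONG_MAX;
		res.tv_nsec = NSEC_PER_SEC - 1;
		return res;
	}
	res.tv_sec = a.tv_sec + b.tv_sec;
	res.tv_nsec = a.tv_nsec + b.tv_nsec;
	if (res.tv_nsec >= NSEC_PER_SEC) {
		res.tv_sec++;
		res.tv_nsec -= NSEC_PER_SEC;
	}
	return res;
}

/* Same shape as ep_set_mstimeout(): divide into seconds before multiplying */
static struct timespec set_mstimeout(long ms)
{
	struct timespec now, ts = {
		.tv_sec = ms / MSEC_PER_SEC,
		.tv_nsec = NSEC_PER_MSEC * (ms % MSEC_PER_SEC),
	};

	clock_gettime(CLOCK_MONOTONIC, &now);	/* userspace analog of ktime_get_ts() */
	return sat_add_timespec(now, ts);
}

int main(void)
{
	long ms = LONG_MAX;	/* an effectively-infinite timeout */
	/* old path: on 64-bit long this product wraps modulo 2^64 */
	uint64_t old_ns = (uint64_t)ms * NSEC_PER_MSEC;
	/* new path: divide first, then do a saturating add */
	struct timespec t = set_mstimeout(ms);

	printf("old ns product (wrapped): %llu\n", (unsigned long long)old_ns);
	printf("new deadline: %lld.%09ld\n", (long long)t.tv_sec, t.tv_nsec);
	return 0;
}

The saturating clamp mostly matters where time_t is 32 bits; on 64-bit the win is avoiding the nanosecond multiplication overflow, which the old-path printout makes visible.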