Diffstat (limited to 'drivers/char')
-rw-r--r--  drivers/char/mem.c | 60
1 file changed, 19 insertions(+), 41 deletions(-)
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index f0a90590cb1..aaa9c24d4c1 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -34,6 +34,19 @@
# include <linux/efi.h>
#endif
+static inline unsigned long size_inside_page(unsigned long start,
+ unsigned long size)
+{
+ unsigned long sz;
+
+ if (-start & (PAGE_SIZE - 1))
+ sz = -start & (PAGE_SIZE - 1);
+ else
+ sz = PAGE_SIZE;
+
+ return min_t(unsigned long, sz, size);
+}
+
/*
* Architectures vary in how they handle caching for addresses
* outside of main memory.
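For reference, size_inside_page() returns the number of bytes from start up to the end of the page containing it, capped at size; (-start & (PAGE_SIZE - 1)) is the distance to the next page boundary and is zero when start is already aligned. A minimal user-space sketch of the same computation, assuming a 4096-byte page (illustration only, not kernel code):

#include <stdio.h>

#define PAGE_SIZE 4096UL

/* Bytes left in the page containing 'start', capped at 'size'. */
static unsigned long size_inside_page(unsigned long start, unsigned long size)
{
	unsigned long sz = -start & (PAGE_SIZE - 1);	/* distance to next page boundary */

	if (sz == 0)					/* 'start' is page aligned */
		sz = PAGE_SIZE;
	return sz < size ? sz : size;
}

int main(void)
{
	printf("%lu\n", size_inside_page(0x1ff0, 100));	/* 16: only 16 bytes left in this page */
	printf("%lu\n", size_inside_page(0x2000, 100));	/* 100: aligned start, capped by size */
	return 0;
}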
@@ -141,15 +154,7 @@ static ssize_t read_mem(struct file * file, char __user * buf,
#endif
while (count > 0) {
- /*
- * Handle first page in case it's not aligned
- */
- if (-p & (PAGE_SIZE - 1))
- sz = -p & (PAGE_SIZE - 1);
- else
- sz = PAGE_SIZE;
-
- sz = min_t(unsigned long, sz, count);
+ sz = size_inside_page(p, count);
if (!range_is_allowed(p >> PAGE_SHIFT, count))
return -EPERM;
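With the helper in place, the read_mem()/write_mem() loops simply walk the requested range one in-page span at a time. A compilable sketch of that loop shape, reusing the size_inside_page() shown above; walk_in_page_chunks() and process_chunk() are hypothetical names used only for this illustration:

/* Illustrative only: visit [p, p + count) one in-page chunk at a time.
 * process_chunk() stands in for the per-chunk copy done by mem.c. */
static long walk_in_page_chunks(unsigned long p, unsigned long count,
				int (*process_chunk)(unsigned long p, unsigned long sz))
{
	long done = 0;

	while (count > 0) {
		unsigned long sz = size_inside_page(p, count);

		if (process_chunk(p, sz))
			return done ? done : -1;	/* stop at the first failing chunk */
		p += sz;
		count -= sz;
		done += sz;
	}
	return done;
}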
@@ -208,15 +213,7 @@ static ssize_t write_mem(struct file * file, const char __user * buf,
#endif
while (count > 0) {
- /*
- * Handle first page in case it's not aligned
- */
- if (-p & (PAGE_SIZE - 1))
- sz = -p & (PAGE_SIZE - 1);
- else
- sz = PAGE_SIZE;
-
- sz = min_t(unsigned long, sz, count);
+ sz = size_inside_page(p, count);
if (!range_is_allowed(p >> PAGE_SHIFT, sz))
return -EPERM;
@@ -429,15 +426,7 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
}
#endif
while (low_count > 0) {
- /*
- * Handle first page in case it's not aligned
- */
- if (-p & (PAGE_SIZE - 1))
- sz = -p & (PAGE_SIZE - 1);
- else
- sz = PAGE_SIZE;
-
- sz = min_t(unsigned long, sz, low_count);
+ sz = size_inside_page(p, low_count);
/*
* On ia64 if a page has been mapped somewhere as
@@ -461,10 +450,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
if (!kbuf)
return -ENOMEM;
while (count > 0) {
- int len = count;
+ int len = size_inside_page(p, count);
- if (len > PAGE_SIZE)
- len = PAGE_SIZE;
len = vread(kbuf, (char *)p, len);
if (!len)
break;
@@ -509,15 +496,8 @@ do_write_kmem(void *p, unsigned long realp, const char __user * buf,
while (count > 0) {
char *ptr;
- /*
- * Handle first page in case it's not aligned
- */
- if (-realp & (PAGE_SIZE - 1))
- sz = -realp & (PAGE_SIZE - 1);
- else
- sz = PAGE_SIZE;
- sz = min_t(unsigned long, sz, count);
+ sz = size_inside_page(realp, count);
/*
* On ia64 if a page has been mapped somewhere as
@@ -577,10 +557,8 @@ static ssize_t write_kmem(struct file * file, const char __user * buf,
if (!kbuf)
return wrote ? wrote : -ENOMEM;
while (count > 0) {
- int len = count;
+ int len = size_inside_page(p, count);
- if (len > PAGE_SIZE)
- len = PAGE_SIZE;
written = copy_from_user(kbuf, buf, len);
if (written) {
if (wrote + virtr)
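For context, the write_kmem() vmalloc path stages each in-page chunk of user data in the one-page bounce buffer kbuf and pushes it into vmalloc space with vwrite(). A condensed, illustrative sketch of that loop shape under the 2.6-era copy_from_user()/vwrite() interfaces; write_vmalloc_range() is a hypothetical helper name and the error handling is simplified compared with the real function:

/* Hypothetical helper, not part of mem.c: condensed shape of the
 * write_kmem() vmalloc loop.  'kbuf' is a page-sized bounce buffer
 * allocated by the caller. */
static ssize_t write_vmalloc_range(unsigned long p, const char __user *buf,
				   size_t count, char *kbuf)
{
	ssize_t virtr = 0;

	while (count > 0) {
		int len = size_inside_page(p, count);

		if (copy_from_user(kbuf, buf, len))
			return virtr ? virtr : -EFAULT;
		len = vwrite(kbuf, (char *)p, len);	/* returns bytes actually written */
		buf += len;
		p += len;
		count -= len;
		virtr += len;
	}
	return virtr;
}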