# HG changeset patch
# User kaf24@xxxxxxxxxxxxxxxxxxxx
# Node ID cfbf7332d616da0b0118e4e92f3feb73ada82919
# Parent 4dd325c1d87d50cfa27f5f43b4c93706db98adac
While native code range-checks the requested address, possibly
truncates the range, and then reads/writes page-wise until possibly
encountering a -EFAULT issue, Xen code accessed the whole range in a
single step, thus not allowing partially succeeding accesses.
From: Jan Beulich
Signed-off-by: Keir Fraser <keir@xxxxxxxxxxxxx>
diff -r 4dd325c1d87d -r cfbf7332d616 linux-2.6-xen-sparse/drivers/xen/char/mem.c
--- a/linux-2.6-xen-sparse/drivers/xen/char/mem.c Thu Mar 2 13:43:24 2006
+++ b/linux-2.6-xen-sparse/drivers/xen/char/mem.c Thu Mar 2 15:06:51 2006
@@ -43,49 +43,85 @@
static ssize_t read_mem(struct file * file, char __user * buf,
size_t count, loff_t *ppos)
{
- unsigned long i, p = *ppos;
- ssize_t read = -EFAULT;
+ unsigned long p = *ppos, ignored;
+ ssize_t read = 0, sz;
void __iomem *v;
- if ((v = ioremap(p, count)) == NULL) {
+ while (count > 0) {
/*
- * Some programs (e.g., dmidecode) groove off into weird RAM
- * areas where no tables can possibly exist (because Xen will
- * have stomped on them!). These programs get rather upset if
- * we let them know that Xen failed their access, so we fake
- * out a read of all zeroes. :-)
+ * Handle first page in case it's not aligned
*/
- for (i = 0; i < count; i++)
- if (put_user(0, buf+i))
+ if (-p & (PAGE_SIZE - 1))
+ sz = -p & (PAGE_SIZE - 1);
+ else
+ sz = PAGE_SIZE;
+
+ sz = min_t(unsigned long, sz, count);
+
+ if ((v = ioremap(p, sz)) == NULL) {
+ /*
+ * Some programs (e.g., dmidecode) groove off into weird RAM
+ * areas where no tables can possibly exist (because Xen will
+ * have stomped on them!). These programs get rather upset if
+ * we let them know that Xen failed their access, so we fake
+ * out a read of all zeroes. :-)
+ */
+ if (clear_user(buf, count))
return -EFAULT;
- return count;
+ read += count;
+ break;
+ }
+
+ ignored = copy_to_user(buf, v, sz);
+ iounmap(v);
+ if (ignored)
+ return -EFAULT;
+ buf += sz;
+ p += sz;
+ count -= sz;
+ read += sz;
}
- if (copy_to_user(buf, v, count))
- goto out;
- read = count;
*ppos += read;
-out:
- iounmap(v);
return read;
}
static ssize_t write_mem(struct file * file, const char __user * buf,
size_t count, loff_t *ppos)
{
- unsigned long p = *ppos;
- ssize_t written = -EFAULT;
+ unsigned long p = *ppos, ignored;
+ ssize_t written = 0, sz;
void __iomem *v;
- if ((v = ioremap(p, count)) == NULL)
- return -EFAULT;
- if (copy_from_user(v, buf, count))
- goto out;
+ while (count > 0) {
+ /*
+ * Handle first page in case it's not aligned
+ */
+ if (-p & (PAGE_SIZE - 1))
+ sz = -p & (PAGE_SIZE - 1);
+ else
+ sz = PAGE_SIZE;
- written = count;
+ sz = min_t(unsigned long, sz, count);
+
+ if ((v = ioremap(p, sz)) == NULL)
+ break;
+
+ ignored = copy_from_user(v, buf, sz);
+ iounmap(v);
+ if (ignored) {
+ written += sz - ignored;
+ if (written)
+ break;
+ return -EFAULT;
+ }
+ buf += sz;
+ p += sz;
+ count -= sz;
+ written += sz;
+ }
+
*ppos += written;
-out:
- iounmap(v);
return written;
}
_______________________________________________
Xen-changelog mailing list
Xen-changelog@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-changelog