xen-ia64-devel

[Xen-ia64-devel] [patch 14/16] Kdump: Add /proc/iomem_machine

To: xen-ia64-devel@xxxxxxxxxxxxxxxxxxx
Subject: [Xen-ia64-devel] [patch 14/16] Kdump: Add /proc/iomem_machine
From: Simon Horman <horms@xxxxxxxxxxxx>
Date: Thu, 27 Sep 2007 16:31:15 +0900
Cc: Alex Williamson <alex.williamson@xxxxxx>
Delivery-date: Thu, 27 Sep 2007 01:33:13 -0700
Envelope-to: www-data@xxxxxxxxxxxxxxxxxx
List-help: <mailto:xen-ia64-devel-request@lists.xensource.com?subject=help>
List-id: Discussion of the ia64 port of Xen <xen-ia64-devel.lists.xensource.com>
List-post: <mailto:xen-ia64-devel@lists.xensource.com>
List-subscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-ia64-devel>, <mailto:xen-ia64-devel-request@lists.xensource.com?subject=subscribe>
List-unsubscribe: <http://lists.xensource.com/cgi-bin/mailman/listinfo/xen-ia64-devel>, <mailto:xen-ia64-devel-request@lists.xensource.com?subject=unsubscribe>
References: <20070927073101.163912627@xxxxxxxxxxxx>
Sender: xen-ia64-devel-bounces@xxxxxxxxxxxxxxxxxxx
User-agent: quilt/0.46-1
Add /proc/iomem_machine. This is basically the iomem regions as the
hypervisor sees them, as opposed to Linux's /proc/iomem, which provides
a somewhat limited and distorted view of the world. In other words,
/proc/iomem is for pseudo-physical memory and /proc/iomem_machine is
for machine memory.
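
Since iomem_machine_open() in the kernel/resource.c hunk below reuses
the same resource_op seq_file operations as /proc/iomem, the new file
shares the familiar "start-end : name" layout. Purely as an
illustration, and not part of this patch, a userspace reader might look
something like this (the path and buffer sizes are assumptions):

#include <stdio.h>

int main(void)
{
	/* Works the same for /proc/iomem; only the path differs. */
	FILE *fp = fopen("/proc/iomem_machine", "r");
	char line[256], name[128];
	unsigned long long start, end;

	if (!fp) {
		perror("fopen");
		return 1;
	}
	while (fgets(line, sizeof(line), fp)) {
		/* Lines look like "  00100000-07ffffff : System RAM" */
		if (sscanf(line, " %llx-%llx : %127[^\n]",
			   &start, &end, name) == 3)
			printf("%#llx-%#llx %s\n", start, end, name);
	}
	fclose(fp);
	return 0;
}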

This is needed for kdump to work on ia64, as otherwise it can't place
the crashkernel region correctly, nor can it map out all physical
memory to be included in the vmcore file in the second kernel.

There is an accompanying patch to kexec-tools that allows it to use
/proc/iomem_machine instead of /proc/iomem as appropriate.
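
The kexec-tools side is not reproduced here. Just as a rough sketch of
the idea, and not the actual kexec-tools code (the helper name and the
access() probe are illustrative assumptions), the selection could be as
simple as falling back to /proc/iomem whenever /proc/iomem_machine has
not been created, i.e. when not running in the initial xen domain:

#include <stdio.h>
#include <unistd.h>

/* Prefer the machine view when this patch has created it. */
static const char *choose_iomem_file(void)
{
	if (access("/proc/iomem_machine", R_OK) == 0)
		return "/proc/iomem_machine";
	return "/proc/iomem";
}

int main(void)
{
	printf("using %s\n", choose_iomem_file());
	return 0;
}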

Signed-off-by: Simon Horman <horms@xxxxxxxxxxxx>

---
Date: Tue, 25 Sep 2007 18:07:30 +0900
From: Horms <horms@xxxxxxxxxxxx>

Fixes as suggested by Alex Williamson:
- Removed unrelated whitespace-fix hunk
- Make efi_initialize_iomem_machine_resources() transparent-virtualisation
  aware by using is_initial_xendomain()

Index: linux-2.6.18-xen.hg/fs/Kconfig
===================================================================
--- linux-2.6.18-xen.hg.orig/fs/Kconfig 2007-07-09 11:53:16.000000000 +0900
+++ linux-2.6.18-xen.hg/fs/Kconfig      2007-07-09 15:35:20.000000000 +0900
@@ -826,6 +826,11 @@ config PROC_VMCORE
         help
         Exports the dump image of crashed kernel in ELF format.
 
+config PROC_IOMEM_MACHINE
+       bool
+       depends on PROC_FS && EXPERIMENTAL && KEXEC && XEN && IA64
+       default y
+
 config SYSFS
        bool "sysfs file system support" if EMBEDDED
        default y
Index: linux-2.6.18-xen.hg/arch/ia64/kernel/efi.c
===================================================================
--- linux-2.6.18-xen.hg.orig/arch/ia64/kernel/efi.c     2007-07-09 15:37:34.000000000 +0900
+++ linux-2.6.18-xen.hg/arch/ia64/kernel/efi.c  2007-07-09 15:37:41.000000000 +0900
@@ -35,6 +35,11 @@
 #include <asm/processor.h>
 #include <asm/mca.h>
 
+#ifdef CONFIG_PROC_IOMEM_MACHINE
+#include <xen/interface/memory.h>
+#include <asm/hypercall.h>
+#endif
+
 #define EFI_DEBUG      0
 
 extern efi_status_t efi_call_phys (void *, ...);
@@ -1040,21 +1045,22 @@ efi_memmap_init(unsigned long *s, unsign
        *e = (u64)++k;
 }
 
-void
-efi_initialize_iomem_resources(struct resource *code_resource,
-                              struct resource *data_resource)
+#define EFI_INITIALISE_PHYS 0x1
+#define EFI_INITIALISE_MACH 0x2
+#define EFI_INITIALISE_ALL  (EFI_INITIALISE_PHYS|EFI_INITIALISE_MACH)
+
+static void
+efi_initialize_resources(void *efi_map_start, void *efi_map_end,
+                        u64 efi_desc_size, struct resource *root_resource,
+                        struct resource *code_resource,
+                        struct resource *data_resource, unsigned flag)
 {
        struct resource *res;
-       void *efi_map_start, *efi_map_end, *p;
+       void *p;
        efi_memory_desc_t *md;
-       u64 efi_desc_size;
        char *name;
        unsigned long flags;
 
-       efi_map_start = __va(ia64_boot_param->efi_memmap);
-       efi_map_end   = efi_map_start + ia64_boot_param->efi_memmap_size;
-       efi_desc_size = ia64_boot_param->efi_memdesc_size;
-
        res = NULL;
 
        for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
@@ -1113,7 +1119,7 @@ efi_initialize_iomem_resources(struct re
                res->end = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT) - 1;
                res->flags = flags;
 
-               if (insert_resource(&iomem_resource, res) < 0)
+               if (insert_resource(root_resource, res) < 0)
                        kfree(res);
                else {
                        /*
@@ -1121,22 +1127,94 @@ efi_initialize_iomem_resources(struct re
                         * kernel data so we try it repeatedly and
                         * let the resource manager test it.
                         */
-                       insert_resource(res, code_resource);
-                       insert_resource(res, data_resource);
+                       if (flag & EFI_INITIALISE_PHYS) {
+                               insert_resource(res, code_resource);
+                               insert_resource(res, data_resource);
+                       }
 #ifdef CONFIG_KEXEC
-                        insert_resource(res, &efi_memmap_res);
-                        insert_resource(res, &boot_param_res);
-                       if (crashk_res.end > crashk_res.start)
-                               insert_resource(res, &crashk_res);
+                       if (flag & EFI_INITIALISE_MACH) {
+                               insert_resource(res, &efi_memmap_res);
+                               insert_resource(res, &boot_param_res);
+                               if (crashk_res.end > crashk_res.start)
+                                       insert_resource(res, &crashk_res);
 #ifdef CONFIG_XEN
-                       if (is_initial_xendomain())
-                               xen_machine_kexec_register_resources(res);
+                               if (is_initial_xendomain())
+                                       xen_machine_kexec_register_resources(
+                                                               res);
 #endif
+                       }
 #endif
                }
        }
 }
 
+#ifdef CONFIG_PROC_IOMEM_MACHINE
+static int
+efi_initialize_iomem_machine_resources(void)
+{
+       unsigned long size;
+       xen_memory_map_t memmap;
+       xen_ia64_memmap_info_t *memmap_info = NULL;
+       void *efi_map_start, *efi_map_end;
+       u64 efi_desc_size;
+       int ret;
+
+       /* It would be nice if it weren't necessary to loop like this */
+       for (size = 1024; 1; size += 1024) {
+               memmap_info = kmalloc(size, GFP_KERNEL);
+               if (memmap_info == NULL)
+                       return -ENOMEM;
+
+               memmap.nr_entries = size;
+               set_xen_guest_handle(memmap.buffer, memmap_info);
+               ret = HYPERVISOR_memory_op(XENMEM_machine_memory_map, &memmap);
+               if (!ret)
+                       break;
+
+               kfree(memmap_info);
+       }
+
+       efi_map_start = &memmap_info->memdesc;
+       efi_map_end = efi_map_start + memmap_info->efi_memmap_size;
+       efi_desc_size = memmap_info->efi_memdesc_size;
+       efi_initialize_resources(efi_map_start, efi_map_end, efi_desc_size,
+                                &iomem_machine_resource, NULL, NULL,
+                                EFI_INITIALISE_MACH);
+
+       kfree(memmap_info);
+       return 0;
+}
+#endif
+
+void
+efi_initialize_iomem_resources(struct resource *code_resource,
+                              struct resource *data_resource)
+{
+       void *efi_map_start, *efi_map_end;
+       u64 efi_desc_size;
+
+       efi_map_start = __va(ia64_boot_param->efi_memmap);
+       efi_map_end   = efi_map_start + ia64_boot_param->efi_memmap_size;
+       efi_desc_size = ia64_boot_param->efi_memdesc_size;
+
+#ifdef CONFIG_PROC_IOMEM_MACHINE
+       if (is_initial_xendomain()) {
+               efi_initialize_resources(efi_map_start, efi_map_end,
+                                        efi_desc_size, &iomem_resource,
+                                        code_resource, data_resource,
+                                        EFI_INITIALISE_PHYS);
+               efi_initialize_iomem_machine_resources();
+       }
+       else
+#endif
+               efi_initialize_resources(efi_map_start, efi_map_end,
+                                        efi_desc_size, &iomem_resource,
+                                        code_resource, data_resource,
+                                        EFI_INITIALISE_ALL);
+}
+
+
+
 #ifdef CONFIG_KEXEC
 /* find a block of memory aligned to 64M exclude reserved regions
    rsvd_regions are sorted
Index: linux-2.6.18-xen.hg/include/linux/ioport.h
===================================================================
--- linux-2.6.18-xen.hg.orig/include/linux/ioport.h     2007-07-09 15:37:34.000000000 +0900
+++ linux-2.6.18-xen.hg/include/linux/ioport.h  2007-07-09 15:37:41.000000000 +0900
@@ -93,6 +93,9 @@ struct resource_list {
 /* PC/ISA/whatever - the normal PC address spaces: IO and memory */
 extern struct resource ioport_resource;
 extern struct resource iomem_resource;
+#ifdef CONFIG_XEN
+extern struct resource iomem_machine_resource;
+#endif
 
 extern int request_resource(struct resource *root, struct resource *new);
 extern struct resource * ____request_resource(struct resource *root, struct resource *new);
Index: linux-2.6.18-xen.hg/kernel/resource.c
===================================================================
--- linux-2.6.18-xen.hg.orig/kernel/resource.c  2007-07-09 15:37:34.000000000 +0900
+++ linux-2.6.18-xen.hg/kernel/resource.c       2007-07-09 15:37:41.000000000 +0900
@@ -36,6 +36,16 @@ struct resource iomem_resource = {
 };
 EXPORT_SYMBOL(iomem_resource);
 
+#ifdef CONFIG_PROC_IOMEM_MACHINE
+struct resource iomem_machine_resource = {
+       .name   = "Machine PCI mem",
+       .start  = 0,
+       .end    = -1,
+       .flags  = IORESOURCE_MEM,
+};
+EXPORT_SYMBOL(iomem_machine_resource);
+#endif
+
 static DEFINE_RWLOCK(resource_lock);
 
 #ifdef CONFIG_PROC_FS
@@ -115,6 +125,18 @@ static int iomem_open(struct inode *inod
        return res;
 }
 
+#ifdef CONFIG_PROC_IOMEM_MACHINE
+static int iomem_machine_open(struct inode *inode, struct file *file)
+{
+       int res = seq_open(file, &resource_op);
+       if (!res) {
+               struct seq_file *m = file->private_data;
+               m->private = &iomem_machine_resource;
+       }
+       return res;
+}
+#endif
+
 static struct file_operations proc_ioports_operations = {
        .open           = ioports_open,
        .read           = seq_read,
@@ -129,6 +151,15 @@ static struct file_operations proc_iomem
        .release        = seq_release,
 };
 
+#ifdef CONFIG_PROC_IOMEM_MACHINE
+static struct file_operations proc_iomem_machine_operations = {
+       .open           = iomem_machine_open,
+       .read           = seq_read,
+       .llseek         = seq_lseek,
+       .release        = seq_release,
+};
+#endif
+
 static int __init ioresources_init(void)
 {
        struct proc_dir_entry *entry;
@@ -139,6 +170,13 @@ static int __init ioresources_init(void)
        entry = create_proc_entry("iomem", 0, NULL);
        if (entry)
                entry->proc_fops = &proc_iomem_operations;
+#ifdef CONFIG_PROC_IOMEM_MACHINE
+       if (is_initial_xendomain()) {
+               entry = create_proc_entry("iomem_machine", 0, NULL);
+               if (entry)
+                       entry->proc_fops = &proc_iomem_machine_operations;
+       }
+#endif
        return 0;
 }
 __initcall(ioresources_init);

-- 

-- 
Horms
  H: http://www.vergenet.net/~horms/
  W: http://www.valinux.co.jp/en/


_______________________________________________
Xen-ia64-devel mailing list
Xen-ia64-devel@xxxxxxxxxxxxxxxxxxx
http://lists.xensource.com/xen-ia64-devel
