
[XEN RFC PATCH 22/40] xen/arm: introduce a helper to parse device tree processor node


  • To: <wei.chen@xxxxxxx>, <xen-devel@xxxxxxxxxxxxxxxxxxxx>, <sstabellini@xxxxxxxxxx>, <julien@xxxxxxx>, <jbeulich@xxxxxxxx>
  • From: Wei Chen <wei.chen@xxxxxxx>
  • Date: Wed, 11 Aug 2021 18:24:05 +0800
  • Cc: <Bertrand.Marquis@xxxxxxx>
  • Delivery-date: Wed, 11 Aug 2021 10:31:45 +0000
  • List-id: Xen developer discussion <xen-devel.lists.xenproject.org>

Processor NUMA ID information is stored in the device tree's processor
nodes as "numa-node-id". We need a new helper to parse this ID from a
processor node. Even when an ID is successfully read from a processor
node, its validity still needs to be checked. Once an invalid NUMA ID
is found in any processor node, the device tree is marked as carrying
invalid NUMA information.
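
For reference, the binding this helper consumes follows the standard
devicetree NUMA binding: each cpu node carries a "numa-node-id"
property. A minimal illustrative fragment is sketched below (the reg
values and compatible strings are made up, not taken from any real
board):

    cpus {
        #address-cells = <2>;
        #size-cells = <0>;

        cpu@0 {
            device_type = "cpu";
            compatible = "arm,armv8";
            reg = <0x0 0x0>;
            numa-node-id = <0>;
        };

        cpu@100 {
            device_type = "cpu";
            compatible = "arm,armv8";
            reg = <0x0 0x100>;
            numa-node-id = <1>;
        };
    };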

Signed-off-by: Wei Chen <wei.chen@xxxxxxx>
---
 xen/arch/arm/numa_device_tree.c | 48 ++++++++++++++++++++++++++++++++++++++++++++++--
 1 file changed, 46 insertions(+), 2 deletions(-)

diff --git a/xen/arch/arm/numa_device_tree.c b/xen/arch/arm/numa_device_tree.c
index 1c74ad135d..37cc56acf3 100644
--- a/xen/arch/arm/numa_device_tree.c
+++ b/xen/arch/arm/numa_device_tree.c
@@ -20,16 +20,60 @@
 #include <xen/init.h>
 #include <xen/nodemask.h>
 #include <xen/numa.h>
+#include <xen/device_tree.h>
+#include <asm/setup.h>
 
 s8 device_tree_numa = 0;
+static nodemask_t processor_nodes_parsed __initdata;
 
-int srat_disabled(void)
+static int srat_disabled(void)
 {
     return numa_off || device_tree_numa < 0;
 }
 
-void __init bad_srat(void)
+static void __init bad_srat(void)
 {
     printk(KERN_ERR "DT: NUMA information is not used.\n");
     device_tree_numa = -1;
 }
+
+/* Callback for device tree processor affinity */
+static int __init dtb_numa_processor_affinity_init(nodeid_t node)
+{
+    if ( srat_disabled() )
+        return -EINVAL;
+    else if ( node == NUMA_NO_NODE || node >= MAX_NUMNODES )
+    {
+        bad_srat();
+        return -EINVAL;
+    }
+
+    node_set(node, processor_nodes_parsed);
+
+    device_tree_numa = 1;
+    printk(KERN_INFO "DT: NUMA node %u processor parsed\n", node);
+
+    return 0;
+}
+
+/* Parse CPU NUMA node info */
+int __init device_tree_parse_numa_cpu_node(const void *fdt, int node)
+{
+    uint32_t nid;
+
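+    /*
+     * device_tree_get_u32() returns the passed-in default (MAX_NUMNODES)
+     * when the "numa-node-id" property is absent, so a missing ID also
+     * fails the range check below.
+     */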
+    nid = device_tree_get_u32(fdt, node, "numa-node-id", MAX_NUMNODES);
+    printk(XENLOG_INFO "CPU on NUMA node: %u\n", nid);
+    if ( nid >= MAX_NUMNODES )
+    {
+        printk(XENLOG_WARNING "Node id %u exceeds maximum value\n", nid);
+        bad_srat();
+        return -EINVAL;
+    }
+
+    return dtb_numa_processor_affinity_init(nid);
+}
-- 
2.25.1
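
For context, a hypothetical caller (not part of this patch) could walk
the flattened device tree and run the new helper on every node whose
device_type is "cpu". The sketch below uses plain libfdt calls
(fdt_next_node()/fdt_getprop()); the function name
parse_numa_cpu_nodes() is made up for illustration:

    /*
     * Illustrative only: iterate over every node in the flattened
     * device tree and parse the NUMA ID of each processor node.
     */
    static int __init parse_numa_cpu_nodes(const void *fdt)
    {
        int node, depth = 0;

        for ( node = fdt_next_node(fdt, -1, &depth);
              node >= 0;
              node = fdt_next_node(fdt, node, &depth) )
        {
            /* Processor nodes are identified by device_type = "cpu". */
            const char *type = fdt_getprop(fdt, node, "device_type", NULL);

            if ( type && !strcmp(type, "cpu") )
            {
                int ret = device_tree_parse_numa_cpu_node(fdt, node);

                if ( ret )
                    return ret;
            }
        }

        return 0;
    }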
