From 849b384f92bcadf2bd967f81ceeff815c9cd6af9 Mon Sep 17 00:00:00 2001 From: Sudeep Holla Date: Thu, 27 Jun 2019 12:52:56 -0700 Subject: Documentation: DT: arm: add support for sockets defining package boundaries The current ARM DT topology description provides the operating system with a topological view of the system that is based on leaf nodes representing either cores or threads (in an SMT system) and a hierarchical set of cluster nodes that creates a hierarchical topology view of how those cores and threads are grouped. However this hierarchical representation of clusters does not allow to describe what topology level actually represents the physical package or the socket boundary, which is a key piece of information to be used by an operating system to optimize resource allocation and scheduling. Lets add a new "socket" node type in the cpu-map node to describe the same. Signed-off-by: Sudeep Holla Reviewed-by: Rob Herring Signed-off-by: Paul Walmsley --- Documentation/devicetree/bindings/arm/topology.txt | 172 ++++++++++++--------- 1 file changed, 99 insertions(+), 73 deletions(-) diff --git a/Documentation/devicetree/bindings/arm/topology.txt b/Documentation/devicetree/bindings/arm/topology.txt index b0d80c0fb265..3b8febb46dad 100644 --- a/Documentation/devicetree/bindings/arm/topology.txt +++ b/Documentation/devicetree/bindings/arm/topology.txt @@ -9,6 +9,7 @@ ARM topology binding description In an ARM system, the hierarchy of CPUs is defined through three entities that are used to describe the layout of physical CPUs in the system: +- socket - cluster - core - thread @@ -63,21 +64,23 @@ nodes are listed. The cpu-map node's child nodes can be: - - one or more cluster nodes + - one or more cluster nodes or + - one or more socket nodes in a multi-socket system Any other configuration is considered invalid. -The cpu-map node can only contain three types of child nodes: +The cpu-map node can only contain 4 types of child nodes: +- socket node - cluster node - core node - thread node whose bindings are described in paragraph 3. -The nodes describing the CPU topology (cluster/core/thread) can only -be defined within the cpu-map node and every core/thread in the system -must be defined within the topology. Any other configuration is +The nodes describing the CPU topology (socket/cluster/core/thread) can +only be defined within the cpu-map node and every core/thread in the +system must be defined within the topology. Any other configuration is invalid and therefore must be ignored. =========================================== @@ -85,26 +88,44 @@ invalid and therefore must be ignored. =========================================== cpu-map child nodes must follow a naming convention where the node name -must be "clusterN", "coreN", "threadN" depending on the node type (ie -cluster/core/thread) (where N = {0, 1, ...} is the node number; nodes which -are siblings within a single common parent node must be given a unique and +must be "socketN", "clusterN", "coreN", "threadN" depending on the node type +(ie socket/cluster/core/thread) (where N = {0, 1, ...} is the node number; nodes +which are siblings within a single common parent node must be given a unique and sequential N value, starting from 0). cpu-map child nodes which do not share a common parent node can have the same name (ie same number N as other cpu-map child nodes at different device tree levels) since name uniqueness will be guaranteed by the device tree hierarchy. 
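As an informal illustration of the naming convention above (this sketch is not part of the patch itself; &CPU0 and &CPU1 are assumed to be labels on cpu nodes defined under the cpus node, as in the examples of section 4), a single-socket, non-SMT system with two cores could be described as:

	cpu-map {
		socket0 {
			cluster0 {
				core0 {
					cpu = <&CPU0>;
				};
				core1 {
					cpu = <&CPU1>;
				};
			};
		};
	};

The two core nodes share a common parent and therefore carry unique, sequential numbers (core0, core1), while another node named cluster0 could legally appear under a different socket, since the two would not share a parent.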
=========================================== -3 - cluster/core/thread node bindings +3 - socket/cluster/core/thread node bindings =========================================== -Bindings for cluster/cpu/thread nodes are defined as follows: +Bindings for socket/cluster/cpu/thread nodes are defined as follows: + +- socket node + + Description: must be declared within a cpu-map node, one node + per physical socket in the system. A system can + contain single or multiple physical socket. + The association of sockets and NUMA nodes is beyond + the scope of this bindings, please refer [2] for + NUMA bindings. + + This node is optional for a single socket system. + + The socket node name must be "socketN" as described in 2.1 above. + A socket node can not be a leaf node. + + A socket node's child nodes must be one or more cluster nodes. + + Any other configuration is considered invalid. - cluster node Description: must be declared within a cpu-map node, one node per cluster. A system can contain several layers of - clustering and cluster nodes can be contained in parent - cluster nodes. + clustering within a single physical socket and cluster + nodes can be contained in parent cluster nodes. The cluster node name must be "clusterN" as described in 2.1 above. A cluster node can not be a leaf node. @@ -164,90 +185,93 @@ Bindings for cluster/cpu/thread nodes are defined as follows: 4 - Example dts =========================================== -Example 1 (ARM 64-bit, 16-cpu system, two clusters of clusters): +Example 1 (ARM 64-bit, 16-cpu system, two clusters of clusters in a single +physical socket): cpus { #size-cells = <0>; #address-cells = <2>; cpu-map { - cluster0 { + socket0 { cluster0 { - core0 { - thread0 { - cpu = <&CPU0>; - }; - thread1 { - cpu = <&CPU1>; + cluster0 { + core0 { + thread0 { + cpu = <&CPU0>; + }; + thread1 { + cpu = <&CPU1>; + }; }; - }; - core1 { - thread0 { - cpu = <&CPU2>; - }; - thread1 { - cpu = <&CPU3>; + core1 { + thread0 { + cpu = <&CPU2>; + }; + thread1 { + cpu = <&CPU3>; + }; }; }; - }; - cluster1 { - core0 { - thread0 { - cpu = <&CPU4>; - }; - thread1 { - cpu = <&CPU5>; + cluster1 { + core0 { + thread0 { + cpu = <&CPU4>; + }; + thread1 { + cpu = <&CPU5>; + }; }; - }; - - core1 { - thread0 { - cpu = <&CPU6>; - }; - thread1 { - cpu = <&CPU7>; - }; - }; - }; - }; - cluster1 { - cluster0 { - core0 { - thread0 { - cpu = <&CPU8>; - }; - thread1 { - cpu = <&CPU9>; - }; - }; - core1 { - thread0 { - cpu = <&CPU10>; - }; - thread1 { - cpu = <&CPU11>; + core1 { + thread0 { + cpu = <&CPU6>; + }; + thread1 { + cpu = <&CPU7>; + }; }; }; }; cluster1 { - core0 { - thread0 { - cpu = <&CPU12>; + cluster0 { + core0 { + thread0 { + cpu = <&CPU8>; + }; + thread1 { + cpu = <&CPU9>; + }; }; - thread1 { - cpu = <&CPU13>; + core1 { + thread0 { + cpu = <&CPU10>; + }; + thread1 { + cpu = <&CPU11>; + }; }; }; - core1 { - thread0 { - cpu = <&CPU14>; + + cluster1 { + core0 { + thread0 { + cpu = <&CPU12>; + }; + thread1 { + cpu = <&CPU13>; + }; }; - thread1 { - cpu = <&CPU15>; + core1 { + thread0 { + cpu = <&CPU14>; + }; + thread1 { + cpu = <&CPU15>; + }; }; }; }; @@ -473,3 +497,5 @@ cpus { =============================================================================== [1] ARM Linux kernel documentation Documentation/devicetree/bindings/arm/cpus.yaml +[2] Devicetree NUMA binding description + Documentation/devicetree/bindings/numa.txt -- cgit v1.2.3-58-ga151 From 124e46a86580c71e0eee8459c5da7649318118db Mon Sep 17 00:00:00 2001 From: Atish Patra Date: Thu, 27 Jun 2019 12:52:57 -0700 Subject: 
dt-binding: cpu-topology: Move cpu-map to a common binding. cpu-map binding can be used to described cpu topology for both RISC-V & ARM. It makes more sense to move the binding to document to a common place. The relevant discussion can be found here. https://lkml.org/lkml/2018/11/6/19 Signed-off-by: Atish Patra Reviewed-by: Sudeep Holla Reviewed-by: Rob Herring Signed-off-by: Paul Walmsley --- Documentation/devicetree/bindings/arm/topology.txt | 501 ------------------- .../devicetree/bindings/cpu/cpu-topology.txt | 553 +++++++++++++++++++++ 2 files changed, 553 insertions(+), 501 deletions(-) delete mode 100644 Documentation/devicetree/bindings/arm/topology.txt create mode 100644 Documentation/devicetree/bindings/cpu/cpu-topology.txt diff --git a/Documentation/devicetree/bindings/arm/topology.txt b/Documentation/devicetree/bindings/arm/topology.txt deleted file mode 100644 index 3b8febb46dad..000000000000 --- a/Documentation/devicetree/bindings/arm/topology.txt +++ /dev/null @@ -1,501 +0,0 @@ -=========================================== -ARM topology binding description -=========================================== - -=========================================== -1 - Introduction -=========================================== - -In an ARM system, the hierarchy of CPUs is defined through three entities that -are used to describe the layout of physical CPUs in the system: - -- socket -- cluster -- core -- thread - -The cpu nodes (bindings defined in [1]) represent the devices that -correspond to physical CPUs and are to be mapped to the hierarchy levels. - -The bottom hierarchy level sits at core or thread level depending on whether -symmetric multi-threading (SMT) is supported or not. - -For instance in a system where CPUs support SMT, "cpu" nodes represent all -threads existing in the system and map to the hierarchy level "thread" above. -In systems where SMT is not supported "cpu" nodes represent all cores present -in the system and map to the hierarchy level "core" above. - -ARM topology bindings allow one to associate cpu nodes with hierarchical groups -corresponding to the system hierarchy; syntactically they are defined as device -tree nodes. - -The remainder of this document provides the topology bindings for ARM, based -on the Devicetree Specification, available from: - -https://www.devicetree.org/specifications/ - -If not stated otherwise, whenever a reference to a cpu node phandle is made its -value must point to a cpu node compliant with the cpu node bindings as -documented in [1]. -A topology description containing phandles to cpu nodes that are not compliant -with bindings standardized in [1] is therefore considered invalid. - -=========================================== -2 - cpu-map node -=========================================== - -The ARM CPU topology is defined within the cpu-map node, which is a direct -child of the cpus node and provides a container where the actual topology -nodes are listed. - -- cpu-map node - - Usage: Optional - On ARM SMP systems provide CPUs topology to the OS. - ARM uniprocessor systems do not require a topology - description and therefore should not define a - cpu-map node. - - Description: The cpu-map node is just a container node where its - subnodes describe the CPU topology. - - Node name must be "cpu-map". - - The cpu-map node's parent node must be the cpus node. - - The cpu-map node's child nodes can be: - - - one or more cluster nodes or - - one or more socket nodes in a multi-socket system - - Any other configuration is considered invalid. 
- -The cpu-map node can only contain 4 types of child nodes: - -- socket node -- cluster node -- core node -- thread node - -whose bindings are described in paragraph 3. - -The nodes describing the CPU topology (socket/cluster/core/thread) can -only be defined within the cpu-map node and every core/thread in the -system must be defined within the topology. Any other configuration is -invalid and therefore must be ignored. - -=========================================== -2.1 - cpu-map child nodes naming convention -=========================================== - -cpu-map child nodes must follow a naming convention where the node name -must be "socketN", "clusterN", "coreN", "threadN" depending on the node type -(ie socket/cluster/core/thread) (where N = {0, 1, ...} is the node number; nodes -which are siblings within a single common parent node must be given a unique and -sequential N value, starting from 0). -cpu-map child nodes which do not share a common parent node can have the same -name (ie same number N as other cpu-map child nodes at different device tree -levels) since name uniqueness will be guaranteed by the device tree hierarchy. - -=========================================== -3 - socket/cluster/core/thread node bindings -=========================================== - -Bindings for socket/cluster/cpu/thread nodes are defined as follows: - -- socket node - - Description: must be declared within a cpu-map node, one node - per physical socket in the system. A system can - contain single or multiple physical socket. - The association of sockets and NUMA nodes is beyond - the scope of this bindings, please refer [2] for - NUMA bindings. - - This node is optional for a single socket system. - - The socket node name must be "socketN" as described in 2.1 above. - A socket node can not be a leaf node. - - A socket node's child nodes must be one or more cluster nodes. - - Any other configuration is considered invalid. - -- cluster node - - Description: must be declared within a cpu-map node, one node - per cluster. A system can contain several layers of - clustering within a single physical socket and cluster - nodes can be contained in parent cluster nodes. - - The cluster node name must be "clusterN" as described in 2.1 above. - A cluster node can not be a leaf node. - - A cluster node's child nodes must be: - - - one or more cluster nodes; or - - one or more core nodes - - Any other configuration is considered invalid. - -- core node - - Description: must be declared in a cluster node, one node per core in - the cluster. If the system does not support SMT, core - nodes are leaf nodes, otherwise they become containers of - thread nodes. - - The core node name must be "coreN" as described in 2.1 above. - - A core node must be a leaf node if SMT is not supported. - - Properties for core nodes that are leaf nodes: - - - cpu - Usage: required - Value type: - Definition: a phandle to the cpu node that corresponds to the - core node. - - If a core node is not a leaf node (CPUs supporting SMT) a core node's - child nodes can be: - - - one or more thread nodes - - Any other configuration is considered invalid. - -- thread node - - Description: must be declared in a core node, one node per thread - in the core if the system supports SMT. Thread nodes are - always leaf nodes in the device tree. - - The thread node name must be "threadN" as described in 2.1 above. - - A thread node must be a leaf node. 
- - A thread node must contain the following property: - - - cpu - Usage: required - Value type: - Definition: a phandle to the cpu node that corresponds to - the thread node. - -=========================================== -4 - Example dts -=========================================== - -Example 1 (ARM 64-bit, 16-cpu system, two clusters of clusters in a single -physical socket): - -cpus { - #size-cells = <0>; - #address-cells = <2>; - - cpu-map { - socket0 { - cluster0 { - cluster0 { - core0 { - thread0 { - cpu = <&CPU0>; - }; - thread1 { - cpu = <&CPU1>; - }; - }; - - core1 { - thread0 { - cpu = <&CPU2>; - }; - thread1 { - cpu = <&CPU3>; - }; - }; - }; - - cluster1 { - core0 { - thread0 { - cpu = <&CPU4>; - }; - thread1 { - cpu = <&CPU5>; - }; - }; - - core1 { - thread0 { - cpu = <&CPU6>; - }; - thread1 { - cpu = <&CPU7>; - }; - }; - }; - }; - - cluster1 { - cluster0 { - core0 { - thread0 { - cpu = <&CPU8>; - }; - thread1 { - cpu = <&CPU9>; - }; - }; - core1 { - thread0 { - cpu = <&CPU10>; - }; - thread1 { - cpu = <&CPU11>; - }; - }; - }; - - cluster1 { - core0 { - thread0 { - cpu = <&CPU12>; - }; - thread1 { - cpu = <&CPU13>; - }; - }; - core1 { - thread0 { - cpu = <&CPU14>; - }; - thread1 { - cpu = <&CPU15>; - }; - }; - }; - }; - }; - }; - - CPU0: cpu@0 { - device_type = "cpu"; - compatible = "arm,cortex-a57"; - reg = <0x0 0x0>; - enable-method = "spin-table"; - cpu-release-addr = <0 0x20000000>; - }; - - CPU1: cpu@1 { - device_type = "cpu"; - compatible = "arm,cortex-a57"; - reg = <0x0 0x1>; - enable-method = "spin-table"; - cpu-release-addr = <0 0x20000000>; - }; - - CPU2: cpu@100 { - device_type = "cpu"; - compatible = "arm,cortex-a57"; - reg = <0x0 0x100>; - enable-method = "spin-table"; - cpu-release-addr = <0 0x20000000>; - }; - - CPU3: cpu@101 { - device_type = "cpu"; - compatible = "arm,cortex-a57"; - reg = <0x0 0x101>; - enable-method = "spin-table"; - cpu-release-addr = <0 0x20000000>; - }; - - CPU4: cpu@10000 { - device_type = "cpu"; - compatible = "arm,cortex-a57"; - reg = <0x0 0x10000>; - enable-method = "spin-table"; - cpu-release-addr = <0 0x20000000>; - }; - - CPU5: cpu@10001 { - device_type = "cpu"; - compatible = "arm,cortex-a57"; - reg = <0x0 0x10001>; - enable-method = "spin-table"; - cpu-release-addr = <0 0x20000000>; - }; - - CPU6: cpu@10100 { - device_type = "cpu"; - compatible = "arm,cortex-a57"; - reg = <0x0 0x10100>; - enable-method = "spin-table"; - cpu-release-addr = <0 0x20000000>; - }; - - CPU7: cpu@10101 { - device_type = "cpu"; - compatible = "arm,cortex-a57"; - reg = <0x0 0x10101>; - enable-method = "spin-table"; - cpu-release-addr = <0 0x20000000>; - }; - - CPU8: cpu@100000000 { - device_type = "cpu"; - compatible = "arm,cortex-a57"; - reg = <0x1 0x0>; - enable-method = "spin-table"; - cpu-release-addr = <0 0x20000000>; - }; - - CPU9: cpu@100000001 { - device_type = "cpu"; - compatible = "arm,cortex-a57"; - reg = <0x1 0x1>; - enable-method = "spin-table"; - cpu-release-addr = <0 0x20000000>; - }; - - CPU10: cpu@100000100 { - device_type = "cpu"; - compatible = "arm,cortex-a57"; - reg = <0x1 0x100>; - enable-method = "spin-table"; - cpu-release-addr = <0 0x20000000>; - }; - - CPU11: cpu@100000101 { - device_type = "cpu"; - compatible = "arm,cortex-a57"; - reg = <0x1 0x101>; - enable-method = "spin-table"; - cpu-release-addr = <0 0x20000000>; - }; - - CPU12: cpu@100010000 { - device_type = "cpu"; - compatible = "arm,cortex-a57"; - reg = <0x1 0x10000>; - enable-method = "spin-table"; - cpu-release-addr = <0 0x20000000>; - }; - - CPU13: cpu@100010001 { - 
device_type = "cpu"; - compatible = "arm,cortex-a57"; - reg = <0x1 0x10001>; - enable-method = "spin-table"; - cpu-release-addr = <0 0x20000000>; - }; - - CPU14: cpu@100010100 { - device_type = "cpu"; - compatible = "arm,cortex-a57"; - reg = <0x1 0x10100>; - enable-method = "spin-table"; - cpu-release-addr = <0 0x20000000>; - }; - - CPU15: cpu@100010101 { - device_type = "cpu"; - compatible = "arm,cortex-a57"; - reg = <0x1 0x10101>; - enable-method = "spin-table"; - cpu-release-addr = <0 0x20000000>; - }; -}; - -Example 2 (ARM 32-bit, dual-cluster, 8-cpu system, no SMT): - -cpus { - #size-cells = <0>; - #address-cells = <1>; - - cpu-map { - cluster0 { - core0 { - cpu = <&CPU0>; - }; - core1 { - cpu = <&CPU1>; - }; - core2 { - cpu = <&CPU2>; - }; - core3 { - cpu = <&CPU3>; - }; - }; - - cluster1 { - core0 { - cpu = <&CPU4>; - }; - core1 { - cpu = <&CPU5>; - }; - core2 { - cpu = <&CPU6>; - }; - core3 { - cpu = <&CPU7>; - }; - }; - }; - - CPU0: cpu@0 { - device_type = "cpu"; - compatible = "arm,cortex-a15"; - reg = <0x0>; - }; - - CPU1: cpu@1 { - device_type = "cpu"; - compatible = "arm,cortex-a15"; - reg = <0x1>; - }; - - CPU2: cpu@2 { - device_type = "cpu"; - compatible = "arm,cortex-a15"; - reg = <0x2>; - }; - - CPU3: cpu@3 { - device_type = "cpu"; - compatible = "arm,cortex-a15"; - reg = <0x3>; - }; - - CPU4: cpu@100 { - device_type = "cpu"; - compatible = "arm,cortex-a7"; - reg = <0x100>; - }; - - CPU5: cpu@101 { - device_type = "cpu"; - compatible = "arm,cortex-a7"; - reg = <0x101>; - }; - - CPU6: cpu@102 { - device_type = "cpu"; - compatible = "arm,cortex-a7"; - reg = <0x102>; - }; - - CPU7: cpu@103 { - device_type = "cpu"; - compatible = "arm,cortex-a7"; - reg = <0x103>; - }; -}; - -=============================================================================== -[1] ARM Linux kernel documentation - Documentation/devicetree/bindings/arm/cpus.yaml -[2] Devicetree NUMA binding description - Documentation/devicetree/bindings/numa.txt diff --git a/Documentation/devicetree/bindings/cpu/cpu-topology.txt b/Documentation/devicetree/bindings/cpu/cpu-topology.txt new file mode 100644 index 000000000000..99918189403c --- /dev/null +++ b/Documentation/devicetree/bindings/cpu/cpu-topology.txt @@ -0,0 +1,553 @@ +=========================================== +CPU topology binding description +=========================================== + +=========================================== +1 - Introduction +=========================================== + +In a SMP system, the hierarchy of CPUs is defined through three entities that +are used to describe the layout of physical CPUs in the system: + +- socket +- cluster +- core +- thread + +The bottom hierarchy level sits at core or thread level depending on whether +symmetric multi-threading (SMT) is supported or not. + +For instance in a system where CPUs support SMT, "cpu" nodes represent all +threads existing in the system and map to the hierarchy level "thread" above. +In systems where SMT is not supported "cpu" nodes represent all cores present +in the system and map to the hierarchy level "core" above. + +CPU topology bindings allow one to associate cpu nodes with hierarchical groups +corresponding to the system hierarchy; syntactically they are defined as device +tree nodes. + +Currently, only ARM/RISC-V intend to use this cpu topology binding but it may be +used for any other architecture as well. + +The cpu nodes, as per bindings defined in [4], represent the devices that +correspond to physical CPUs and are to be mapped to the hierarchy levels. 
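As an informal sketch of this mapping (not part of the binding text; CPU0 and CPU1 are assumed to be labels on cpu nodes compliant with [4]), a core node is the leaf and carries the phandle when SMT is not supported:

	core0 {
		cpu = <&CPU0>;
	};

whereas on an SMT system the thread nodes become the leaves and carry the phandles instead:

	core0 {
		thread0 {
			cpu = <&CPU0>;
		};
		thread1 {
			cpu = <&CPU1>;
		};
	};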
+ +A topology description containing phandles to cpu nodes that are not compliant +with bindings standardized in [4] is therefore considered invalid. + +=========================================== +2 - cpu-map node +=========================================== + +The ARM/RISC-V CPU topology is defined within the cpu-map node, which is a direct +child of the cpus node and provides a container where the actual topology +nodes are listed. + +- cpu-map node + + Usage: Optional - On SMP systems provide CPUs topology to the OS. + Uniprocessor systems do not require a topology + description and therefore should not define a + cpu-map node. + + Description: The cpu-map node is just a container node where its + subnodes describe the CPU topology. + + Node name must be "cpu-map". + + The cpu-map node's parent node must be the cpus node. + + The cpu-map node's child nodes can be: + + - one or more cluster nodes or + - one or more socket nodes in a multi-socket system + + Any other configuration is considered invalid. + +The cpu-map node can only contain 4 types of child nodes: + +- socket node +- cluster node +- core node +- thread node + +whose bindings are described in paragraph 3. + +The nodes describing the CPU topology (socket/cluster/core/thread) can +only be defined within the cpu-map node and every core/thread in the +system must be defined within the topology. Any other configuration is +invalid and therefore must be ignored. + +=========================================== +2.1 - cpu-map child nodes naming convention +=========================================== + +cpu-map child nodes must follow a naming convention where the node name +must be "socketN", "clusterN", "coreN", "threadN" depending on the node type +(ie socket/cluster/core/thread) (where N = {0, 1, ...} is the node number; nodes +which are siblings within a single common parent node must be given a unique and +sequential N value, starting from 0). +cpu-map child nodes which do not share a common parent node can have the same +name (ie same number N as other cpu-map child nodes at different device tree +levels) since name uniqueness will be guaranteed by the device tree hierarchy. + +=========================================== +3 - socket/cluster/core/thread node bindings +=========================================== + +Bindings for socket/cluster/cpu/thread nodes are defined as follows: + +- socket node + + Description: must be declared within a cpu-map node, one node + per physical socket in the system. A system can + contain single or multiple physical socket. + The association of sockets and NUMA nodes is beyond + the scope of this bindings, please refer [2] for + NUMA bindings. + + This node is optional for a single socket system. + + The socket node name must be "socketN" as described in 2.1 above. + A socket node can not be a leaf node. + + A socket node's child nodes must be one or more cluster nodes. + + Any other configuration is considered invalid. + +- cluster node + + Description: must be declared within a cpu-map node, one node + per cluster. A system can contain several layers of + clustering within a single physical socket and cluster + nodes can be contained in parent cluster nodes. + + The cluster node name must be "clusterN" as described in 2.1 above. + A cluster node can not be a leaf node. + + A cluster node's child nodes must be: + + - one or more cluster nodes; or + - one or more core nodes + + Any other configuration is considered invalid. 
+ +- core node + + Description: must be declared in a cluster node, one node per core in + the cluster. If the system does not support SMT, core + nodes are leaf nodes, otherwise they become containers of + thread nodes. + + The core node name must be "coreN" as described in 2.1 above. + + A core node must be a leaf node if SMT is not supported. + + Properties for core nodes that are leaf nodes: + + - cpu + Usage: required + Value type: + Definition: a phandle to the cpu node that corresponds to the + core node. + + If a core node is not a leaf node (CPUs supporting SMT) a core node's + child nodes can be: + + - one or more thread nodes + + Any other configuration is considered invalid. + +- thread node + + Description: must be declared in a core node, one node per thread + in the core if the system supports SMT. Thread nodes are + always leaf nodes in the device tree. + + The thread node name must be "threadN" as described in 2.1 above. + + A thread node must be a leaf node. + + A thread node must contain the following property: + + - cpu + Usage: required + Value type: + Definition: a phandle to the cpu node that corresponds to + the thread node. + +=========================================== +4 - Example dts +=========================================== + +Example 1 (ARM 64-bit, 16-cpu system, two clusters of clusters in a single +physical socket): + +cpus { + #size-cells = <0>; + #address-cells = <2>; + + cpu-map { + socket0 { + cluster0 { + cluster0 { + core0 { + thread0 { + cpu = <&CPU0>; + }; + thread1 { + cpu = <&CPU1>; + }; + }; + + core1 { + thread0 { + cpu = <&CPU2>; + }; + thread1 { + cpu = <&CPU3>; + }; + }; + }; + + cluster1 { + core0 { + thread0 { + cpu = <&CPU4>; + }; + thread1 { + cpu = <&CPU5>; + }; + }; + + core1 { + thread0 { + cpu = <&CPU6>; + }; + thread1 { + cpu = <&CPU7>; + }; + }; + }; + }; + + cluster1 { + cluster0 { + core0 { + thread0 { + cpu = <&CPU8>; + }; + thread1 { + cpu = <&CPU9>; + }; + }; + core1 { + thread0 { + cpu = <&CPU10>; + }; + thread1 { + cpu = <&CPU11>; + }; + }; + }; + + cluster1 { + core0 { + thread0 { + cpu = <&CPU12>; + }; + thread1 { + cpu = <&CPU13>; + }; + }; + core1 { + thread0 { + cpu = <&CPU14>; + }; + thread1 { + cpu = <&CPU15>; + }; + }; + }; + }; + }; + }; + + CPU0: cpu@0 { + device_type = "cpu"; + compatible = "arm,cortex-a57"; + reg = <0x0 0x0>; + enable-method = "spin-table"; + cpu-release-addr = <0 0x20000000>; + }; + + CPU1: cpu@1 { + device_type = "cpu"; + compatible = "arm,cortex-a57"; + reg = <0x0 0x1>; + enable-method = "spin-table"; + cpu-release-addr = <0 0x20000000>; + }; + + CPU2: cpu@100 { + device_type = "cpu"; + compatible = "arm,cortex-a57"; + reg = <0x0 0x100>; + enable-method = "spin-table"; + cpu-release-addr = <0 0x20000000>; + }; + + CPU3: cpu@101 { + device_type = "cpu"; + compatible = "arm,cortex-a57"; + reg = <0x0 0x101>; + enable-method = "spin-table"; + cpu-release-addr = <0 0x20000000>; + }; + + CPU4: cpu@10000 { + device_type = "cpu"; + compatible = "arm,cortex-a57"; + reg = <0x0 0x10000>; + enable-method = "spin-table"; + cpu-release-addr = <0 0x20000000>; + }; + + CPU5: cpu@10001 { + device_type = "cpu"; + compatible = "arm,cortex-a57"; + reg = <0x0 0x10001>; + enable-method = "spin-table"; + cpu-release-addr = <0 0x20000000>; + }; + + CPU6: cpu@10100 { + device_type = "cpu"; + compatible = "arm,cortex-a57"; + reg = <0x0 0x10100>; + enable-method = "spin-table"; + cpu-release-addr = <0 0x20000000>; + }; + + CPU7: cpu@10101 { + device_type = "cpu"; + compatible = "arm,cortex-a57"; + reg = <0x0 0x10101>; 
+ enable-method = "spin-table"; + cpu-release-addr = <0 0x20000000>; + }; + + CPU8: cpu@100000000 { + device_type = "cpu"; + compatible = "arm,cortex-a57"; + reg = <0x1 0x0>; + enable-method = "spin-table"; + cpu-release-addr = <0 0x20000000>; + }; + + CPU9: cpu@100000001 { + device_type = "cpu"; + compatible = "arm,cortex-a57"; + reg = <0x1 0x1>; + enable-method = "spin-table"; + cpu-release-addr = <0 0x20000000>; + }; + + CPU10: cpu@100000100 { + device_type = "cpu"; + compatible = "arm,cortex-a57"; + reg = <0x1 0x100>; + enable-method = "spin-table"; + cpu-release-addr = <0 0x20000000>; + }; + + CPU11: cpu@100000101 { + device_type = "cpu"; + compatible = "arm,cortex-a57"; + reg = <0x1 0x101>; + enable-method = "spin-table"; + cpu-release-addr = <0 0x20000000>; + }; + + CPU12: cpu@100010000 { + device_type = "cpu"; + compatible = "arm,cortex-a57"; + reg = <0x1 0x10000>; + enable-method = "spin-table"; + cpu-release-addr = <0 0x20000000>; + }; + + CPU13: cpu@100010001 { + device_type = "cpu"; + compatible = "arm,cortex-a57"; + reg = <0x1 0x10001>; + enable-method = "spin-table"; + cpu-release-addr = <0 0x20000000>; + }; + + CPU14: cpu@100010100 { + device_type = "cpu"; + compatible = "arm,cortex-a57"; + reg = <0x1 0x10100>; + enable-method = "spin-table"; + cpu-release-addr = <0 0x20000000>; + }; + + CPU15: cpu@100010101 { + device_type = "cpu"; + compatible = "arm,cortex-a57"; + reg = <0x1 0x10101>; + enable-method = "spin-table"; + cpu-release-addr = <0 0x20000000>; + }; +}; + +Example 2 (ARM 32-bit, dual-cluster, 8-cpu system, no SMT): + +cpus { + #size-cells = <0>; + #address-cells = <1>; + + cpu-map { + cluster0 { + core0 { + cpu = <&CPU0>; + }; + core1 { + cpu = <&CPU1>; + }; + core2 { + cpu = <&CPU2>; + }; + core3 { + cpu = <&CPU3>; + }; + }; + + cluster1 { + core0 { + cpu = <&CPU4>; + }; + core1 { + cpu = <&CPU5>; + }; + core2 { + cpu = <&CPU6>; + }; + core3 { + cpu = <&CPU7>; + }; + }; + }; + + CPU0: cpu@0 { + device_type = "cpu"; + compatible = "arm,cortex-a15"; + reg = <0x0>; + }; + + CPU1: cpu@1 { + device_type = "cpu"; + compatible = "arm,cortex-a15"; + reg = <0x1>; + }; + + CPU2: cpu@2 { + device_type = "cpu"; + compatible = "arm,cortex-a15"; + reg = <0x2>; + }; + + CPU3: cpu@3 { + device_type = "cpu"; + compatible = "arm,cortex-a15"; + reg = <0x3>; + }; + + CPU4: cpu@100 { + device_type = "cpu"; + compatible = "arm,cortex-a7"; + reg = <0x100>; + }; + + CPU5: cpu@101 { + device_type = "cpu"; + compatible = "arm,cortex-a7"; + reg = <0x101>; + }; + + CPU6: cpu@102 { + device_type = "cpu"; + compatible = "arm,cortex-a7"; + reg = <0x102>; + }; + + CPU7: cpu@103 { + device_type = "cpu"; + compatible = "arm,cortex-a7"; + reg = <0x103>; + }; +}; + +Example 3: HiFive Unleashed (RISC-V 64 bit, 4 core system) + +{ + #address-cells = <2>; + #size-cells = <2>; + compatible = "sifive,fu540g", "sifive,fu500"; + model = "sifive,hifive-unleashed-a00"; + + ... 
+ cpus { + #address-cells = <1>; + #size-cells = <0>; + cpu-map { + socket0 { + cluster0 { + core0 { + cpu = <&CPU1>; + }; + core1 { + cpu = <&CPU2>; + }; + core2 { + cpu0 = <&CPU2>; + }; + core3 { + cpu0 = <&CPU3>; + }; + }; + }; + }; + + CPU1: cpu@1 { + device_type = "cpu"; + compatible = "sifive,rocket0", "riscv"; + reg = <0x1>; + } + + CPU2: cpu@2 { + device_type = "cpu"; + compatible = "sifive,rocket0", "riscv"; + reg = <0x2>; + } + CPU3: cpu@3 { + device_type = "cpu"; + compatible = "sifive,rocket0", "riscv"; + reg = <0x3>; + } + CPU4: cpu@4 { + device_type = "cpu"; + compatible = "sifive,rocket0", "riscv"; + reg = <0x4>; + } + } +}; +=============================================================================== +[1] ARM Linux kernel documentation + Documentation/devicetree/bindings/arm/cpus.yaml +[2] Devicetree NUMA binding description + Documentation/devicetree/bindings/numa.txt +[3] RISC-V Linux kernel documentation + Documentation/devicetree/bindings/riscv/cpus.txt +[4] https://www.devicetree.org/specifications/ -- cgit v1.2.3-58-ga151 From 60c1b220d8bc6baeaf837cd60f94a331b25c26bc Mon Sep 17 00:00:00 2001 From: Atish Patra Date: Thu, 27 Jun 2019 12:52:58 -0700 Subject: cpu-topology: Move cpu topology code to common code. Both RISC-V & ARM64 are using cpu-map device tree to describe their cpu topology. It's better to move the relevant code to a common place instead of duplicate code. To: Will Deacon To: Catalin Marinas Signed-off-by: Atish Patra [Tested on QDF2400] Tested-by: Jeffrey Hugo [Tested on Juno and other embedded platforms.] Tested-by: Sudeep Holla Reviewed-by: Sudeep Holla Acked-by: Will Deacon Acked-by: Greg Kroah-Hartman Signed-off-by: Paul Walmsley --- arch/arm64/include/asm/topology.h | 23 --- arch/arm64/kernel/topology.c | 303 +------------------------------------- drivers/base/arch_topology.c | 296 +++++++++++++++++++++++++++++++++++++ include/linux/arch_topology.h | 28 ++++ include/linux/topology.h | 1 + 5 files changed, 329 insertions(+), 322 deletions(-) diff --git a/arch/arm64/include/asm/topology.h b/arch/arm64/include/asm/topology.h index 0524f2438649..a4d945db95a2 100644 --- a/arch/arm64/include/asm/topology.h +++ b/arch/arm64/include/asm/topology.h @@ -4,29 +4,6 @@ #include -struct cpu_topology { - int thread_id; - int core_id; - int package_id; - int llc_id; - cpumask_t thread_sibling; - cpumask_t core_sibling; - cpumask_t llc_sibling; -}; - -extern struct cpu_topology cpu_topology[NR_CPUS]; - -#define topology_physical_package_id(cpu) (cpu_topology[cpu].package_id) -#define topology_core_id(cpu) (cpu_topology[cpu].core_id) -#define topology_core_cpumask(cpu) (&cpu_topology[cpu].core_sibling) -#define topology_sibling_cpumask(cpu) (&cpu_topology[cpu].thread_sibling) -#define topology_llc_cpumask(cpu) (&cpu_topology[cpu].llc_sibling) - -void init_cpu_topology(void); -void store_cpu_topology(unsigned int cpuid); -void remove_cpu_topology(unsigned int cpuid); -const struct cpumask *cpu_coregroup_mask(int cpu); - #ifdef CONFIG_NUMA struct pci_bus; diff --git a/arch/arm64/kernel/topology.c b/arch/arm64/kernel/topology.c index 0825c4a856e3..6b95c91e7d67 100644 --- a/arch/arm64/kernel/topology.c +++ b/arch/arm64/kernel/topology.c @@ -14,250 +14,13 @@ #include #include #include -#include -#include #include #include -#include -#include -#include -#include -#include -#include -#include -#include #include #include #include -static int __init get_cpu_for_node(struct device_node *node) -{ - struct device_node *cpu_node; - int cpu; - - cpu_node = 
of_parse_phandle(node, "cpu", 0); - if (!cpu_node) - return -1; - - cpu = of_cpu_node_to_id(cpu_node); - if (cpu >= 0) - topology_parse_cpu_capacity(cpu_node, cpu); - else - pr_crit("Unable to find CPU node for %pOF\n", cpu_node); - - of_node_put(cpu_node); - return cpu; -} - -static int __init parse_core(struct device_node *core, int package_id, - int core_id) -{ - char name[10]; - bool leaf = true; - int i = 0; - int cpu; - struct device_node *t; - - do { - snprintf(name, sizeof(name), "thread%d", i); - t = of_get_child_by_name(core, name); - if (t) { - leaf = false; - cpu = get_cpu_for_node(t); - if (cpu >= 0) { - cpu_topology[cpu].package_id = package_id; - cpu_topology[cpu].core_id = core_id; - cpu_topology[cpu].thread_id = i; - } else { - pr_err("%pOF: Can't get CPU for thread\n", - t); - of_node_put(t); - return -EINVAL; - } - of_node_put(t); - } - i++; - } while (t); - - cpu = get_cpu_for_node(core); - if (cpu >= 0) { - if (!leaf) { - pr_err("%pOF: Core has both threads and CPU\n", - core); - return -EINVAL; - } - - cpu_topology[cpu].package_id = package_id; - cpu_topology[cpu].core_id = core_id; - } else if (leaf) { - pr_err("%pOF: Can't get CPU for leaf core\n", core); - return -EINVAL; - } - - return 0; -} - -static int __init parse_cluster(struct device_node *cluster, int depth) -{ - char name[10]; - bool leaf = true; - bool has_cores = false; - struct device_node *c; - static int package_id __initdata; - int core_id = 0; - int i, ret; - - /* - * First check for child clusters; we currently ignore any - * information about the nesting of clusters and present the - * scheduler with a flat list of them. - */ - i = 0; - do { - snprintf(name, sizeof(name), "cluster%d", i); - c = of_get_child_by_name(cluster, name); - if (c) { - leaf = false; - ret = parse_cluster(c, depth + 1); - of_node_put(c); - if (ret != 0) - return ret; - } - i++; - } while (c); - - /* Now check for cores */ - i = 0; - do { - snprintf(name, sizeof(name), "core%d", i); - c = of_get_child_by_name(cluster, name); - if (c) { - has_cores = true; - - if (depth == 0) { - pr_err("%pOF: cpu-map children should be clusters\n", - c); - of_node_put(c); - return -EINVAL; - } - - if (leaf) { - ret = parse_core(c, package_id, core_id++); - } else { - pr_err("%pOF: Non-leaf cluster with core %s\n", - cluster, name); - ret = -EINVAL; - } - - of_node_put(c); - if (ret != 0) - return ret; - } - i++; - } while (c); - - if (leaf && !has_cores) - pr_warn("%pOF: empty cluster\n", cluster); - - if (leaf) - package_id++; - - return 0; -} - -static int __init parse_dt_topology(void) -{ - struct device_node *cn, *map; - int ret = 0; - int cpu; - - cn = of_find_node_by_path("/cpus"); - if (!cn) { - pr_err("No CPU information found in DT\n"); - return 0; - } - - /* - * When topology is provided cpu-map is essentially a root - * cluster with restricted subnodes. - */ - map = of_get_child_by_name(cn, "cpu-map"); - if (!map) - goto out; - - ret = parse_cluster(map, 0); - if (ret != 0) - goto out_map; - - topology_normalize_cpu_scale(); - - /* - * Check that all cores are in the topology; the SMP code will - * only mark cores described in the DT as possible. 
- */ - for_each_possible_cpu(cpu) - if (cpu_topology[cpu].package_id == -1) - ret = -EINVAL; - -out_map: - of_node_put(map); -out: - of_node_put(cn); - return ret; -} - -/* - * cpu topology table - */ -struct cpu_topology cpu_topology[NR_CPUS]; -EXPORT_SYMBOL_GPL(cpu_topology); - -const struct cpumask *cpu_coregroup_mask(int cpu) -{ - const cpumask_t *core_mask = cpumask_of_node(cpu_to_node(cpu)); - - /* Find the smaller of NUMA, core or LLC siblings */ - if (cpumask_subset(&cpu_topology[cpu].core_sibling, core_mask)) { - /* not numa in package, lets use the package siblings */ - core_mask = &cpu_topology[cpu].core_sibling; - } - if (cpu_topology[cpu].llc_id != -1) { - if (cpumask_subset(&cpu_topology[cpu].llc_sibling, core_mask)) - core_mask = &cpu_topology[cpu].llc_sibling; - } - - return core_mask; -} - -static void update_siblings_masks(unsigned int cpuid) -{ - struct cpu_topology *cpu_topo, *cpuid_topo = &cpu_topology[cpuid]; - int cpu; - - /* update core and thread sibling masks */ - for_each_online_cpu(cpu) { - cpu_topo = &cpu_topology[cpu]; - - if (cpuid_topo->llc_id == cpu_topo->llc_id) { - cpumask_set_cpu(cpu, &cpuid_topo->llc_sibling); - cpumask_set_cpu(cpuid, &cpu_topo->llc_sibling); - } - - if (cpuid_topo->package_id != cpu_topo->package_id) - continue; - - cpumask_set_cpu(cpuid, &cpu_topo->core_sibling); - cpumask_set_cpu(cpu, &cpuid_topo->core_sibling); - - if (cpuid_topo->core_id != cpu_topo->core_id) - continue; - - cpumask_set_cpu(cpuid, &cpu_topo->thread_sibling); - cpumask_set_cpu(cpu, &cpuid_topo->thread_sibling); - } -} - void store_cpu_topology(unsigned int cpuid) { struct cpu_topology *cpuid_topo = &cpu_topology[cpuid]; @@ -296,59 +59,19 @@ topology_populated: update_siblings_masks(cpuid); } -static void clear_cpu_topology(int cpu) -{ - struct cpu_topology *cpu_topo = &cpu_topology[cpu]; - - cpumask_clear(&cpu_topo->llc_sibling); - cpumask_set_cpu(cpu, &cpu_topo->llc_sibling); - - cpumask_clear(&cpu_topo->core_sibling); - cpumask_set_cpu(cpu, &cpu_topo->core_sibling); - cpumask_clear(&cpu_topo->thread_sibling); - cpumask_set_cpu(cpu, &cpu_topo->thread_sibling); -} - -static void __init reset_cpu_topology(void) -{ - unsigned int cpu; - - for_each_possible_cpu(cpu) { - struct cpu_topology *cpu_topo = &cpu_topology[cpu]; - - cpu_topo->thread_id = -1; - cpu_topo->core_id = 0; - cpu_topo->package_id = -1; - cpu_topo->llc_id = -1; - - clear_cpu_topology(cpu); - } -} - -void remove_cpu_topology(unsigned int cpu) -{ - int sibling; - - for_each_cpu(sibling, topology_core_cpumask(cpu)) - cpumask_clear_cpu(cpu, topology_core_cpumask(sibling)); - for_each_cpu(sibling, topology_sibling_cpumask(cpu)) - cpumask_clear_cpu(cpu, topology_sibling_cpumask(sibling)); - for_each_cpu(sibling, topology_llc_cpumask(cpu)) - cpumask_clear_cpu(cpu, topology_llc_cpumask(sibling)); - - clear_cpu_topology(cpu); -} - #ifdef CONFIG_ACPI /* * Propagate the topology information of the processor_topology_node tree to the * cpu_topology array. 
*/ -static int __init parse_acpi_topology(void) +int __init parse_acpi_topology(void) { bool is_threaded; int cpu, topology_id; + if (acpi_disabled) + return 0; + is_threaded = read_cpuid_mpidr() & MPIDR_MT_BITMASK; for_each_possible_cpu(cpu) { @@ -384,24 +107,6 @@ static int __init parse_acpi_topology(void) return 0; } - -#else -static inline int __init parse_acpi_topology(void) -{ - return -EINVAL; -} #endif -void __init init_cpu_topology(void) -{ - reset_cpu_topology(); - /* - * Discard anything that was parsed if we hit an error so we - * don't use partial information. - */ - if (!acpi_disabled && parse_acpi_topology()) - reset_cpu_topology(); - else if (of_have_populated_dt() && parse_dt_topology()) - reset_cpu_topology(); -} diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c index 63c1e76739f1..5dc0e1ddd080 100644 --- a/drivers/base/arch_topology.c +++ b/drivers/base/arch_topology.c @@ -15,6 +15,11 @@ #include #include #include +#include +#include +#include +#include +#include DEFINE_PER_CPU(unsigned long, freq_scale) = SCHED_CAPACITY_SCALE; @@ -241,3 +246,294 @@ static void parsing_done_workfn(struct work_struct *work) #else core_initcall(free_raw_capacity); #endif + +#if defined(CONFIG_ARM64) || defined(CONFIG_RISCV) +static int __init get_cpu_for_node(struct device_node *node) +{ + struct device_node *cpu_node; + int cpu; + + cpu_node = of_parse_phandle(node, "cpu", 0); + if (!cpu_node) + return -1; + + cpu = of_cpu_node_to_id(cpu_node); + if (cpu >= 0) + topology_parse_cpu_capacity(cpu_node, cpu); + else + pr_crit("Unable to find CPU node for %pOF\n", cpu_node); + + of_node_put(cpu_node); + return cpu; +} + +static int __init parse_core(struct device_node *core, int package_id, + int core_id) +{ + char name[10]; + bool leaf = true; + int i = 0; + int cpu; + struct device_node *t; + + do { + snprintf(name, sizeof(name), "thread%d", i); + t = of_get_child_by_name(core, name); + if (t) { + leaf = false; + cpu = get_cpu_for_node(t); + if (cpu >= 0) { + cpu_topology[cpu].package_id = package_id; + cpu_topology[cpu].core_id = core_id; + cpu_topology[cpu].thread_id = i; + } else { + pr_err("%pOF: Can't get CPU for thread\n", + t); + of_node_put(t); + return -EINVAL; + } + of_node_put(t); + } + i++; + } while (t); + + cpu = get_cpu_for_node(core); + if (cpu >= 0) { + if (!leaf) { + pr_err("%pOF: Core has both threads and CPU\n", + core); + return -EINVAL; + } + + cpu_topology[cpu].package_id = package_id; + cpu_topology[cpu].core_id = core_id; + } else if (leaf) { + pr_err("%pOF: Can't get CPU for leaf core\n", core); + return -EINVAL; + } + + return 0; +} + +static int __init parse_cluster(struct device_node *cluster, int depth) +{ + char name[10]; + bool leaf = true; + bool has_cores = false; + struct device_node *c; + static int package_id __initdata; + int core_id = 0; + int i, ret; + + /* + * First check for child clusters; we currently ignore any + * information about the nesting of clusters and present the + * scheduler with a flat list of them. 
+ */ + i = 0; + do { + snprintf(name, sizeof(name), "cluster%d", i); + c = of_get_child_by_name(cluster, name); + if (c) { + leaf = false; + ret = parse_cluster(c, depth + 1); + of_node_put(c); + if (ret != 0) + return ret; + } + i++; + } while (c); + + /* Now check for cores */ + i = 0; + do { + snprintf(name, sizeof(name), "core%d", i); + c = of_get_child_by_name(cluster, name); + if (c) { + has_cores = true; + + if (depth == 0) { + pr_err("%pOF: cpu-map children should be clusters\n", + c); + of_node_put(c); + return -EINVAL; + } + + if (leaf) { + ret = parse_core(c, package_id, core_id++); + } else { + pr_err("%pOF: Non-leaf cluster with core %s\n", + cluster, name); + ret = -EINVAL; + } + + of_node_put(c); + if (ret != 0) + return ret; + } + i++; + } while (c); + + if (leaf && !has_cores) + pr_warn("%pOF: empty cluster\n", cluster); + + if (leaf) + package_id++; + + return 0; +} + +static int __init parse_dt_topology(void) +{ + struct device_node *cn, *map; + int ret = 0; + int cpu; + + cn = of_find_node_by_path("/cpus"); + if (!cn) { + pr_err("No CPU information found in DT\n"); + return 0; + } + + /* + * When topology is provided cpu-map is essentially a root + * cluster with restricted subnodes. + */ + map = of_get_child_by_name(cn, "cpu-map"); + if (!map) + goto out; + + ret = parse_cluster(map, 0); + if (ret != 0) + goto out_map; + + topology_normalize_cpu_scale(); + + /* + * Check that all cores are in the topology; the SMP code will + * only mark cores described in the DT as possible. + */ + for_each_possible_cpu(cpu) + if (cpu_topology[cpu].package_id == -1) + ret = -EINVAL; + +out_map: + of_node_put(map); +out: + of_node_put(cn); + return ret; +} + +/* + * cpu topology table + */ +struct cpu_topology cpu_topology[NR_CPUS]; +EXPORT_SYMBOL_GPL(cpu_topology); + +const struct cpumask *cpu_coregroup_mask(int cpu) +{ + const cpumask_t *core_mask = cpumask_of_node(cpu_to_node(cpu)); + + /* Find the smaller of NUMA, core or LLC siblings */ + if (cpumask_subset(&cpu_topology[cpu].core_sibling, core_mask)) { + /* not numa in package, lets use the package siblings */ + core_mask = &cpu_topology[cpu].core_sibling; + } + if (cpu_topology[cpu].llc_id != -1) { + if (cpumask_subset(&cpu_topology[cpu].llc_sibling, core_mask)) + core_mask = &cpu_topology[cpu].llc_sibling; + } + + return core_mask; +} + +void update_siblings_masks(unsigned int cpuid) +{ + struct cpu_topology *cpu_topo, *cpuid_topo = &cpu_topology[cpuid]; + int cpu; + + /* update core and thread sibling masks */ + for_each_online_cpu(cpu) { + cpu_topo = &cpu_topology[cpu]; + + if (cpuid_topo->llc_id == cpu_topo->llc_id) { + cpumask_set_cpu(cpu, &cpuid_topo->llc_sibling); + cpumask_set_cpu(cpuid, &cpu_topo->llc_sibling); + } + + if (cpuid_topo->package_id != cpu_topo->package_id) + continue; + + cpumask_set_cpu(cpuid, &cpu_topo->core_sibling); + cpumask_set_cpu(cpu, &cpuid_topo->core_sibling); + + if (cpuid_topo->core_id != cpu_topo->core_id) + continue; + + cpumask_set_cpu(cpuid, &cpu_topo->thread_sibling); + cpumask_set_cpu(cpu, &cpuid_topo->thread_sibling); + } +} + +static void clear_cpu_topology(int cpu) +{ + struct cpu_topology *cpu_topo = &cpu_topology[cpu]; + + cpumask_clear(&cpu_topo->llc_sibling); + cpumask_set_cpu(cpu, &cpu_topo->llc_sibling); + + cpumask_clear(&cpu_topo->core_sibling); + cpumask_set_cpu(cpu, &cpu_topo->core_sibling); + cpumask_clear(&cpu_topo->thread_sibling); + cpumask_set_cpu(cpu, &cpu_topo->thread_sibling); +} + +static void __init reset_cpu_topology(void) +{ + unsigned int cpu; + + 
for_each_possible_cpu(cpu) { + struct cpu_topology *cpu_topo = &cpu_topology[cpu]; + + cpu_topo->thread_id = -1; + cpu_topo->core_id = -1; + cpu_topo->package_id = -1; + cpu_topo->llc_id = -1; + + clear_cpu_topology(cpu); + } +} + +void remove_cpu_topology(unsigned int cpu) +{ + int sibling; + + for_each_cpu(sibling, topology_core_cpumask(cpu)) + cpumask_clear_cpu(cpu, topology_core_cpumask(sibling)); + for_each_cpu(sibling, topology_sibling_cpumask(cpu)) + cpumask_clear_cpu(cpu, topology_sibling_cpumask(sibling)); + for_each_cpu(sibling, topology_llc_cpumask(cpu)) + cpumask_clear_cpu(cpu, topology_llc_cpumask(sibling)); + + clear_cpu_topology(cpu); +} + +__weak int __init parse_acpi_topology(void) +{ + return 0; +} + +void __init init_cpu_topology(void) +{ + reset_cpu_topology(); + + /* + * Discard anything that was parsed if we hit an error so we + * don't use partial information. + */ + if (parse_acpi_topology()) + reset_cpu_topology(); + else if (of_have_populated_dt() && parse_dt_topology()) + reset_cpu_topology(); +} +#endif diff --git a/include/linux/arch_topology.h b/include/linux/arch_topology.h index 1cfe05ea1d89..ede0ce4623b4 100644 --- a/include/linux/arch_topology.h +++ b/include/linux/arch_topology.h @@ -33,4 +33,32 @@ unsigned long topology_get_freq_scale(int cpu) return per_cpu(freq_scale, cpu); } +struct cpu_topology { + int thread_id; + int core_id; + int package_id; + int llc_id; + cpumask_t thread_sibling; + cpumask_t core_sibling; + cpumask_t llc_sibling; +}; + +#ifdef CONFIG_GENERIC_ARCH_TOPOLOGY +extern struct cpu_topology cpu_topology[NR_CPUS]; + +#define topology_physical_package_id(cpu) (cpu_topology[cpu].package_id) +#define topology_core_id(cpu) (cpu_topology[cpu].core_id) +#define topology_core_cpumask(cpu) (&cpu_topology[cpu].core_sibling) +#define topology_sibling_cpumask(cpu) (&cpu_topology[cpu].thread_sibling) +#define topology_llc_cpumask(cpu) (&cpu_topology[cpu].llc_sibling) +void init_cpu_topology(void); +void store_cpu_topology(unsigned int cpuid); +const struct cpumask *cpu_coregroup_mask(int cpu); +#endif + +#if defined(CONFIG_ARM64) || defined(CONFIG_RISCV) +void update_siblings_masks(unsigned int cpu); +#endif +void remove_cpu_topology(unsigned int cpuid); + #endif /* _LINUX_ARCH_TOPOLOGY_H_ */ diff --git a/include/linux/topology.h b/include/linux/topology.h index 47a3e3c08036..2a19d196af28 100644 --- a/include/linux/topology.h +++ b/include/linux/topology.h @@ -27,6 +27,7 @@ #ifndef _LINUX_TOPOLOGY_H #define _LINUX_TOPOLOGY_H +#include #include #include #include -- cgit v1.2.3-58-ga151 From ca74b316df96d7c40ee3e8301065607c11c60c27 Mon Sep 17 00:00:00 2001 From: Atish Patra Date: Thu, 27 Jun 2019 12:52:59 -0700 Subject: arm: Use common cpu_topology structure and functions. Currently, ARM32 and ARM64 uses different data structures to represent their cpu topologies. Since, we are moving the ARM64 topology to common code to be used by other architectures, we can reuse that for ARM32 as well. Take this opprtunity to remove the redundant functions from ARM32 and reuse the common code instead. 
To: Russell King Signed-off-by: Atish Patra Tested-by: Sudeep Holla (on TC2) Reviewed-by: Sudeep Holla Signed-off-by: Paul Walmsley --- arch/arm/include/asm/topology.h | 20 -------------- arch/arm/kernel/topology.c | 60 +++++------------------------------------ drivers/base/arch_topology.c | 4 ++- include/linux/arch_topology.h | 6 ++--- 4 files changed, 11 insertions(+), 79 deletions(-) diff --git a/arch/arm/include/asm/topology.h b/arch/arm/include/asm/topology.h index 2a786f54d8b8..8a0fae94d45e 100644 --- a/arch/arm/include/asm/topology.h +++ b/arch/arm/include/asm/topology.h @@ -5,26 +5,6 @@ #ifdef CONFIG_ARM_CPU_TOPOLOGY #include - -struct cputopo_arm { - int thread_id; - int core_id; - int socket_id; - cpumask_t thread_sibling; - cpumask_t core_sibling; -}; - -extern struct cputopo_arm cpu_topology[NR_CPUS]; - -#define topology_physical_package_id(cpu) (cpu_topology[cpu].socket_id) -#define topology_core_id(cpu) (cpu_topology[cpu].core_id) -#define topology_core_cpumask(cpu) (&cpu_topology[cpu].core_sibling) -#define topology_sibling_cpumask(cpu) (&cpu_topology[cpu].thread_sibling) - -void init_cpu_topology(void); -void store_cpu_topology(unsigned int cpuid); -const struct cpumask *cpu_coregroup_mask(int cpu); - #include /* Replace task scheduler's default frequency-invariant accounting */ diff --git a/arch/arm/kernel/topology.c b/arch/arm/kernel/topology.c index d17cb1e6d679..5b9faba03afb 100644 --- a/arch/arm/kernel/topology.c +++ b/arch/arm/kernel/topology.c @@ -177,17 +177,6 @@ static inline void parse_dt_topology(void) {} static inline void update_cpu_capacity(unsigned int cpuid) {} #endif - /* - * cpu topology table - */ -struct cputopo_arm cpu_topology[NR_CPUS]; -EXPORT_SYMBOL_GPL(cpu_topology); - -const struct cpumask *cpu_coregroup_mask(int cpu) -{ - return &cpu_topology[cpu].core_sibling; -} - /* * The current assumption is that we can power gate each core independently. * This will be superseded by DT binding once available. 
@@ -197,32 +186,6 @@ const struct cpumask *cpu_corepower_mask(int cpu) return &cpu_topology[cpu].thread_sibling; } -static void update_siblings_masks(unsigned int cpuid) -{ - struct cputopo_arm *cpu_topo, *cpuid_topo = &cpu_topology[cpuid]; - int cpu; - - /* update core and thread sibling masks */ - for_each_possible_cpu(cpu) { - cpu_topo = &cpu_topology[cpu]; - - if (cpuid_topo->socket_id != cpu_topo->socket_id) - continue; - - cpumask_set_cpu(cpuid, &cpu_topo->core_sibling); - if (cpu != cpuid) - cpumask_set_cpu(cpu, &cpuid_topo->core_sibling); - - if (cpuid_topo->core_id != cpu_topo->core_id) - continue; - - cpumask_set_cpu(cpuid, &cpu_topo->thread_sibling); - if (cpu != cpuid) - cpumask_set_cpu(cpu, &cpuid_topo->thread_sibling); - } - smp_wmb(); -} - /* * store_cpu_topology is called at boot when only one cpu is running * and with the mutex cpu_hotplug.lock locked, when several cpus have booted, @@ -230,7 +193,7 @@ static void update_siblings_masks(unsigned int cpuid) */ void store_cpu_topology(unsigned int cpuid) { - struct cputopo_arm *cpuid_topo = &cpu_topology[cpuid]; + struct cpu_topology *cpuid_topo = &cpu_topology[cpuid]; unsigned int mpidr; /* If the cpu topology has been already set, just return */ @@ -250,12 +213,12 @@ void store_cpu_topology(unsigned int cpuid) /* core performance interdependency */ cpuid_topo->thread_id = MPIDR_AFFINITY_LEVEL(mpidr, 0); cpuid_topo->core_id = MPIDR_AFFINITY_LEVEL(mpidr, 1); - cpuid_topo->socket_id = MPIDR_AFFINITY_LEVEL(mpidr, 2); + cpuid_topo->package_id = MPIDR_AFFINITY_LEVEL(mpidr, 2); } else { /* largely independent cores */ cpuid_topo->thread_id = -1; cpuid_topo->core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0); - cpuid_topo->socket_id = MPIDR_AFFINITY_LEVEL(mpidr, 1); + cpuid_topo->package_id = MPIDR_AFFINITY_LEVEL(mpidr, 1); } } else { /* @@ -265,7 +228,7 @@ void store_cpu_topology(unsigned int cpuid) */ cpuid_topo->thread_id = -1; cpuid_topo->core_id = 0; - cpuid_topo->socket_id = -1; + cpuid_topo->package_id = -1; } update_siblings_masks(cpuid); @@ -275,7 +238,7 @@ void store_cpu_topology(unsigned int cpuid) pr_info("CPU%u: thread %d, cpu %d, socket %d, mpidr %x\n", cpuid, cpu_topology[cpuid].thread_id, cpu_topology[cpuid].core_id, - cpu_topology[cpuid].socket_id, mpidr); + cpu_topology[cpuid].package_id, mpidr); } static inline int cpu_corepower_flags(void) @@ -298,18 +261,7 @@ static struct sched_domain_topology_level arm_topology[] = { */ void __init init_cpu_topology(void) { - unsigned int cpu; - - /* init core mask and capacity */ - for_each_possible_cpu(cpu) { - struct cputopo_arm *cpu_topo = &(cpu_topology[cpu]); - - cpu_topo->thread_id = -1; - cpu_topo->core_id = -1; - cpu_topo->socket_id = -1; - cpumask_clear(&cpu_topo->core_sibling); - cpumask_clear(&cpu_topo->thread_sibling); - } + reset_cpu_topology(); smp_wmb(); parse_dt_topology(); diff --git a/drivers/base/arch_topology.c b/drivers/base/arch_topology.c index 5dc0e1ddd080..b54d241a2ff5 100644 --- a/drivers/base/arch_topology.c +++ b/drivers/base/arch_topology.c @@ -423,6 +423,7 @@ out: of_node_put(cn); return ret; } +#endif /* * cpu topology table @@ -488,7 +489,7 @@ static void clear_cpu_topology(int cpu) cpumask_set_cpu(cpu, &cpu_topo->thread_sibling); } -static void __init reset_cpu_topology(void) +void __init reset_cpu_topology(void) { unsigned int cpu; @@ -523,6 +524,7 @@ __weak int __init parse_acpi_topology(void) return 0; } +#if defined(CONFIG_ARM64) || defined(CONFIG_RISCV) void __init init_cpu_topology(void) { reset_cpu_topology(); diff --git 
a/include/linux/arch_topology.h b/include/linux/arch_topology.h index ede0ce4623b4..42f2b5126094 100644 --- a/include/linux/arch_topology.h +++ b/include/linux/arch_topology.h @@ -54,11 +54,9 @@ extern struct cpu_topology cpu_topology[NR_CPUS]; void init_cpu_topology(void); void store_cpu_topology(unsigned int cpuid); const struct cpumask *cpu_coregroup_mask(int cpu); -#endif - -#if defined(CONFIG_ARM64) || defined(CONFIG_RISCV) void update_siblings_masks(unsigned int cpu); -#endif void remove_cpu_topology(unsigned int cpuid); +void reset_cpu_topology(void); +#endif #endif /* _LINUX_ARCH_TOPOLOGY_H_ */ -- cgit v1.2.3-58-ga151 From 03f11f03dbfe37c0bff2768b8f2e277a29ac62b9 Mon Sep 17 00:00:00 2001 From: Atish Patra Date: Thu, 27 Jun 2019 12:53:00 -0700 Subject: RISC-V: Parse cpu topology during boot. Currently, there are no topology defined for RISC-V. Parse the cpu-map node from device tree and setup the cpu topology. CPU topology after applying the patch. $cat /sys/devices/system/cpu/cpu2/topology/core_siblings_list 0-3 $cat /sys/devices/system/cpu/cpu3/topology/core_siblings_list 0-3 $cat /sys/devices/system/cpu/cpu3/topology/physical_package_id 0 $cat /sys/devices/system/cpu/cpu3/topology/core_id 3 Signed-off-by: Atish Patra Acked-by: Sudeep Holla Signed-off-by: Paul Walmsley --- arch/riscv/Kconfig | 1 + arch/riscv/kernel/smpboot.c | 3 +++ 2 files changed, 4 insertions(+) diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig index 59a4727ecd6c..86ee362a1375 100644 --- a/arch/riscv/Kconfig +++ b/arch/riscv/Kconfig @@ -48,6 +48,7 @@ config RISCV select PCI_MSI if PCI select RISCV_TIMER select GENERIC_IRQ_MULTI_HANDLER + select GENERIC_ARCH_TOPOLOGY if SMP select ARCH_HAS_PTE_SPECIAL select ARCH_HAS_MMIOWB select HAVE_EBPF_JIT if 64BIT diff --git a/arch/riscv/kernel/smpboot.c b/arch/riscv/kernel/smpboot.c index 7462a44304fe..18ae6da5115e 100644 --- a/arch/riscv/kernel/smpboot.c +++ b/arch/riscv/kernel/smpboot.c @@ -8,6 +8,7 @@ * Copyright (C) 2017 SiFive */ +#include #include #include #include @@ -35,6 +36,7 @@ static DECLARE_COMPLETION(cpu_running); void __init smp_prepare_boot_cpu(void) { + init_cpu_topology(); } void __init smp_prepare_cpus(unsigned int max_cpus) @@ -138,6 +140,7 @@ asmlinkage void __init smp_callin(void) trap_init(); notify_cpu_starting(smp_processor_id()); + update_siblings_masks(smp_processor_id()); set_cpu_online(smp_processor_id(), 1); /* * Remote TLB flushes are ignored while the CPU is offline, so emit -- cgit v1.2.3-58-ga151 From c181831f161c8da460b1ffa26632448323c3bfcc Mon Sep 17 00:00:00 2001 From: Sudeep Holla Date: Thu, 27 Jun 2019 12:53:01 -0700 Subject: base: arch_topology: update Kconfig help description Commit 5d777b185f6d ("arch_topology: Make cpu_capacity sysfs node as read-only") made cpu_capacity sysfs node read-only. Update the GENERIC_ARCH_TOPOLOGY Kconfig help section to reflect the same. Cc: Greg Kroah-Hartman Signed-off-by: Sudeep Holla Signed-off-by: Paul Walmsley --- drivers/base/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig index dc404492381d..28b92e3cc570 100644 --- a/drivers/base/Kconfig +++ b/drivers/base/Kconfig @@ -202,7 +202,7 @@ config GENERIC_ARCH_TOPOLOGY help Enable support for architectures common topology code: e.g., parsing CPU capacity information from DT, usage of such information for - appropriate scaling, sysfs interface for changing capacity values at + appropriate scaling, sysfs interface for reading capacity values at runtime. 
endmenu -- cgit v1.2.3-58-ga151 From f51edcec5288556069d27ab7040a4ce8ca55b74e Mon Sep 17 00:00:00 2001 From: Sudeep Holla Date: Thu, 27 Jun 2019 12:53:02 -0700 Subject: MAINTAINERS: Add an entry for generic architecture topology arm and arm64 shared lot of CPU topology related code. This was consolidated under driver/base/arch_topology.c by Juri. Now RISC-V is also started sharing the same code pulling more code from arm64 into arch_topology.c Since I was involved in the review from the beginning, I would like to assume maintenance for the same. Cc: Will Deacon Cc: Greg Kroah-Hartman Acked-by: Juri Lelli Signed-off-by: Sudeep Holla Acked-by: Greg Kroah-Hartman Signed-off-by: Paul Walmsley --- MAINTAINERS | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/MAINTAINERS b/MAINTAINERS index 783569e3c4b4..1ed8f5c93bc9 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -6719,6 +6719,13 @@ W: https://linuxtv.org S: Maintained F: drivers/media/radio/radio-gemtek* +GENERIC ARCHITECTURE TOPOLOGY +M: Sudeep Holla +L: linux-kernel@vger.kernel.org +S: Maintained +F: drivers/base/arch_topology.c +F: include/linux/arch_topology.h + GENERIC GPIO I2C DRIVER M: Wolfram Sang S: Supported -- cgit v1.2.3-58-ga151 From ca786b8db751c0dd980fccf2d65acb77a296f629 Mon Sep 17 00:00:00 2001 From: Shaokun Zhang Date: Tue, 2 Jul 2019 15:35:53 +0800 Subject: arm64: perf: Remove unused macro ARMV8_EVENT_ATTR_RESOLVE became unused after commit <4b1a9e6934ec> ("arm64/perf: Filter common events based on PMCEIDn_EL0"). Remove it. Cc: Will Deacon Cc: Mark Rutland Signed-off-by: Shaokun Zhang Signed-off-by: Will Deacon --- arch/arm64/kernel/perf_event.c | 1 - 1 file changed, 1 deletion(-) diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c index 96e90e270042..2d3bdebdf6df 100644 --- a/arch/arm64/kernel/perf_event.c +++ b/arch/arm64/kernel/perf_event.c @@ -157,7 +157,6 @@ armv8pmu_events_sysfs_show(struct device *dev, return sprintf(page, "event=0x%03llx\n", pmu_attr->id); } -#define ARMV8_EVENT_ATTR_RESOLVE(m) #m #define ARMV8_EVENT_ATTR(name, config) \ PMU_EVENT_ATTR(name, armv8_event_attr_##name, \ config, armv8pmu_events_sysfs_show) -- cgit v1.2.3-58-ga151 From 4b9ace9c25dc5d672a548da34cebf3a39710017f Mon Sep 17 00:00:00 2001 From: Leonard Crestez Date: Thu, 4 Jul 2019 11:53:20 +0300 Subject: perf/imx_ddr: Add MODULE_DEVICE_TABLE This is required for automatic probing when driver is built as a module. Fixes: 9a66d36cc7ac ("drivers/perf: imx_ddr: Add DDR performance counter support to perf") Acked-by: Frank Li Signed-off-by: Leonard Crestez Signed-off-by: Will Deacon --- drivers/perf/fsl_imx8_ddr_perf.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/perf/fsl_imx8_ddr_perf.c b/drivers/perf/fsl_imx8_ddr_perf.c index 63fe21600072..0e3310dbb145 100644 --- a/drivers/perf/fsl_imx8_ddr_perf.c +++ b/drivers/perf/fsl_imx8_ddr_perf.c @@ -47,6 +47,7 @@ static const struct of_device_id imx_ddr_pmu_dt_ids[] = { { .compatible = "fsl,imx8m-ddr-pmu",}, { /* sentinel */ } }; +MODULE_DEVICE_TABLE(of, imx_ddr_pmu_dt_ids); struct ddr_pmu { struct pmu pmu; -- cgit v1.2.3-58-ga151 From 228f855fb57ae25599eee64a3f3db4f7f405b34f Mon Sep 17 00:00:00 2001 From: Stephen Boyd Date: Tue, 30 Jul 2019 11:15:32 -0700 Subject: perf: Remove dev_err() usage after platform_get_irq() We don't need dev_err() messages when platform_get_irq() fails now that platform_get_irq() prints an error message itself when something goes wrong. Let's remove these prints with a simple semantic patch. 
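To make the resulting pattern concrete, here is a hedged sketch of the probe-path code this conversion leaves behind (my_pmu_isr and my_pmu are illustrative names, not taken from the drivers touched below); since platform_get_irq() now logs its own error, callers simply propagate the code:

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;	/* the core has already printed the failure */

	ret = devm_request_irq(&pdev->dev, irq, my_pmu_isr,
			       IRQF_NOBALANCING | IRQF_NO_THREAD,
			       dev_name(&pdev->dev), my_pmu);
	if (ret)
		return ret;

The Coccinelle script used for the conversion is reproduced next.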
// @@ expression ret; struct platform_device *E; @@ ret = ( platform_get_irq(E, ...) | platform_get_irq_byname(E, ...) ); if ( \( ret < 0 \| ret <= 0 \) ) { ( -if (ret != -EPROBE_DEFER) -{ ... -dev_err(...); -... } | ... -dev_err(...); ) ... } // While we're here, remove braces on if statements that only have one statement (manually). Cc: Will Deacon Cc: Mark Rutland Cc: linux-arm-kernel@lists.infradead.org Cc: Greg Kroah-Hartman Signed-off-by: Stephen Boyd Signed-off-by: Will Deacon --- drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c | 4 +--- drivers/perf/hisilicon/hisi_uncore_hha_pmu.c | 4 +--- drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c | 4 +--- drivers/perf/qcom_l2_pmu.c | 6 +----- drivers/perf/xgene_pmu.c | 4 +--- 5 files changed, 5 insertions(+), 17 deletions(-) diff --git a/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c b/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c index 6ad0823bcf23..e42d4464c2cf 100644 --- a/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c +++ b/drivers/perf/hisilicon/hisi_uncore_ddrc_pmu.c @@ -217,10 +217,8 @@ static int hisi_ddrc_pmu_init_irq(struct hisi_pmu *ddrc_pmu, /* Read and init IRQ */ irq = platform_get_irq(pdev, 0); - if (irq < 0) { - dev_err(&pdev->dev, "DDRC PMU get irq fail; irq:%d\n", irq); + if (irq < 0) return irq; - } ret = devm_request_irq(&pdev->dev, irq, hisi_ddrc_pmu_isr, IRQF_NOBALANCING | IRQF_NO_THREAD, diff --git a/drivers/perf/hisilicon/hisi_uncore_hha_pmu.c b/drivers/perf/hisilicon/hisi_uncore_hha_pmu.c index 4f2917f3e25e..f28063873e11 100644 --- a/drivers/perf/hisilicon/hisi_uncore_hha_pmu.c +++ b/drivers/perf/hisilicon/hisi_uncore_hha_pmu.c @@ -207,10 +207,8 @@ static int hisi_hha_pmu_init_irq(struct hisi_pmu *hha_pmu, /* Read and init IRQ */ irq = platform_get_irq(pdev, 0); - if (irq < 0) { - dev_err(&pdev->dev, "HHA PMU get irq fail; irq:%d\n", irq); + if (irq < 0) return irq; - } ret = devm_request_irq(&pdev->dev, irq, hisi_hha_pmu_isr, IRQF_NOBALANCING | IRQF_NO_THREAD, diff --git a/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c b/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c index 9153e093f9df..078b8dc57250 100644 --- a/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c +++ b/drivers/perf/hisilicon/hisi_uncore_l3c_pmu.c @@ -206,10 +206,8 @@ static int hisi_l3c_pmu_init_irq(struct hisi_pmu *l3c_pmu, /* Read and init IRQ */ irq = platform_get_irq(pdev, 0); - if (irq < 0) { - dev_err(&pdev->dev, "L3C PMU get irq fail; irq:%d\n", irq); + if (irq < 0) return irq; - } ret = devm_request_irq(&pdev->dev, irq, hisi_l3c_pmu_isr, IRQF_NOBALANCING | IRQF_NO_THREAD, diff --git a/drivers/perf/qcom_l2_pmu.c b/drivers/perf/qcom_l2_pmu.c index d06182fe14b8..21d6991dbe0b 100644 --- a/drivers/perf/qcom_l2_pmu.c +++ b/drivers/perf/qcom_l2_pmu.c @@ -909,12 +909,8 @@ static int l2_cache_pmu_probe_cluster(struct device *dev, void *data) cluster->cluster_id = fw_cluster_id; irq = platform_get_irq(sdev, 0); - if (irq < 0) { - dev_err(&pdev->dev, - "Failed to get valid irq for cluster %ld\n", - fw_cluster_id); + if (irq < 0) return irq; - } irq_set_status_flags(irq, IRQ_NOAUTOEN); cluster->irq = irq; diff --git a/drivers/perf/xgene_pmu.c b/drivers/perf/xgene_pmu.c index 3259e2ebeb39..7e328d6385c3 100644 --- a/drivers/perf/xgene_pmu.c +++ b/drivers/perf/xgene_pmu.c @@ -1901,10 +1901,8 @@ static int xgene_pmu_probe(struct platform_device *pdev) } irq = platform_get_irq(pdev, 0); - if (irq < 0) { - dev_err(&pdev->dev, "No IRQ resource\n"); + if (irq < 0) return -EINVAL; - } rc = devm_request_irq(&pdev->dev, irq, xgene_pmu_isr, IRQF_NOBALANCING | IRQF_NO_THREAD, -- cgit 
v1.2.3-58-ga151 From c87857945b0e61c46222798b56fb1b0f4868088b Mon Sep 17 00:00:00 2001 From: Julien Thierry Date: Thu, 4 Jul 2019 14:44:04 +0100 Subject: arm64: Remove unused assembly macro As of commit 4141c857fd09dbed480f021b3eece4f46c653161 ("arm64: convert raw syscall invocation to C"), moving syscall handling from assembly to C, the macro mask_nospec64 is no longer referenced. Acked-by: Mark Rutland Signed-off-by: Julien Thierry Signed-off-by: Will Deacon --- arch/arm64/include/asm/assembler.h | 11 ----------- 1 file changed, 11 deletions(-) diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h index e3a15c751b13..9f7395a1177f 100644 --- a/arch/arm64/include/asm/assembler.h +++ b/arch/arm64/include/asm/assembler.h @@ -123,17 +123,6 @@ alternative_else alternative_endif .endm -/* - * Sanitise a 64-bit bounded index wrt speculation, returning zero if out - * of bounds. - */ - .macro mask_nospec64, idx, limit, tmp - sub \tmp, \idx, \limit - bic \tmp, \tmp, \idx - and \idx, \idx, \tmp, asr #63 - csdb - .endm - /* * NOP sequence */ -- cgit v1.2.3-58-ga151 From b907b80d7ae7b2b65ef9f534f3e9a32ce6a4b539 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Mon, 8 Jul 2019 17:36:40 +0100 Subject: arm64: remove pointless __KERNEL__ guards For a number of years, UAPI headers have been split from kernel-internal headers. The latter are never exposed to userspace, and always built with __KERNEL__ defined. Most headers under arch/arm64 don't have __KERNEL__ guards, but there are a few stragglers lying around. To make things more consistent, and to set a good example going forward, let's remove these redundant __KERNEL__ guards. In a couple of cases, a trailing #endif lacked a comment describing its corresponding #if or #ifdef, so these are fixes up at the same time. Guards in auto-generated crypto code are left as-is, as these guards are generated by scripting imported from the upstream openssl project scripts. Guards in UAPI headers are left as-is, as these can be included by userspace or the kernel. There should be no functional change as a result of this patch. 
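To illustrate the distinction this cleanup relies on, consider the schematic pair of headers below (foo_ioctl_arg, foo_validate_arg and foo_init are made-up names, not headers touched by this patch): a UAPI header can be included from userspace, so a __KERNEL__ section there still means something, whereas a kernel-internal header is only ever compiled with __KERNEL__ defined and gains nothing from the guard:

	/* include/uapi/...: visible to userspace, guard is meaningful */
	struct foo_ioctl_arg {
		__u32 flags;
	};
	#ifdef __KERNEL__
	int foo_validate_arg(struct foo_ioctl_arg *arg);	/* kernel-only */
	#endif

	/* arch/arm64/include/asm/...: never exported, guard is redundant */
	#ifndef __ASM_FOO_H
	#define __ASM_FOO_H
	int foo_init(void);
	#endif /* __ASM_FOO_H */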
Signed-off-by: Mark Rutland Cc: Ard Biesheuvel Cc: Catalin Marinas Cc: Will Deacon Signed-off-by: Will Deacon --- arch/arm64/include/asm/atomic.h | 5 +---- arch/arm64/include/asm/compat.h | 2 -- arch/arm64/include/asm/debug-monitors.h | 3 --- arch/arm64/include/asm/dma-mapping.h | 3 --- arch/arm64/include/asm/fpsimd.h | 2 +- arch/arm64/include/asm/futex.h | 3 --- arch/arm64/include/asm/hw_breakpoint.h | 3 --- arch/arm64/include/asm/io.h | 3 --- arch/arm64/include/asm/irqflags.h | 5 +---- arch/arm64/include/asm/pci.h | 2 -- arch/arm64/include/asm/proc-fns.h | 2 -- arch/arm64/include/asm/processor.h | 3 --- arch/arm64/include/asm/signal32.h | 2 -- arch/arm64/include/asm/thread_info.h | 3 --- arch/arm64/include/asm/vdso.h | 4 ---- arch/arm64/include/asm/vdso_datapage.h | 4 ---- 16 files changed, 3 insertions(+), 46 deletions(-) diff --git a/arch/arm64/include/asm/atomic.h b/arch/arm64/include/asm/atomic.h index 657b0457d83c..a5ca23950cfd 100644 --- a/arch/arm64/include/asm/atomic.h +++ b/arch/arm64/include/asm/atomic.h @@ -15,8 +15,6 @@ #include #include -#ifdef __KERNEL__ - #define __ARM64_IN_ATOMIC_IMPL #if defined(CONFIG_ARM64_LSE_ATOMICS) && defined(CONFIG_AS_LSE) @@ -157,5 +155,4 @@ #include -#endif -#endif +#endif /* __ASM_ATOMIC_H */ diff --git a/arch/arm64/include/asm/compat.h b/arch/arm64/include/asm/compat.h index fb8ad4616b3b..b0d53a265f1d 100644 --- a/arch/arm64/include/asm/compat.h +++ b/arch/arm64/include/asm/compat.h @@ -4,7 +4,6 @@ */ #ifndef __ASM_COMPAT_H #define __ASM_COMPAT_H -#ifdef __KERNEL__ #ifdef CONFIG_COMPAT /* @@ -215,5 +214,4 @@ static inline int is_compat_thread(struct thread_info *thread) } #endif /* CONFIG_COMPAT */ -#endif /* __KERNEL__ */ #endif /* __ASM_COMPAT_H */ diff --git a/arch/arm64/include/asm/debug-monitors.h b/arch/arm64/include/asm/debug-monitors.h index d8ec5bb881c2..7619f473155f 100644 --- a/arch/arm64/include/asm/debug-monitors.h +++ b/arch/arm64/include/asm/debug-monitors.h @@ -5,8 +5,6 @@ #ifndef __ASM_DEBUG_MONITORS_H #define __ASM_DEBUG_MONITORS_H -#ifdef __KERNEL__ - #include #include #include @@ -128,5 +126,4 @@ static inline int reinstall_suspended_bps(struct pt_regs *regs) int aarch32_break_handler(struct pt_regs *regs); #endif /* __ASSEMBLY */ -#endif /* __KERNEL__ */ #endif /* __ASM_DEBUG_MONITORS_H */ diff --git a/arch/arm64/include/asm/dma-mapping.h b/arch/arm64/include/asm/dma-mapping.h index bdcb0922a40c..fb3e5044f473 100644 --- a/arch/arm64/include/asm/dma-mapping.h +++ b/arch/arm64/include/asm/dma-mapping.h @@ -5,8 +5,6 @@ #ifndef __ASM_DMA_MAPPING_H #define __ASM_DMA_MAPPING_H -#ifdef __KERNEL__ - #include #include @@ -27,5 +25,4 @@ static inline bool is_device_dma_coherent(struct device *dev) return dev->dma_coherent; } -#endif /* __KERNEL__ */ #endif /* __ASM_DMA_MAPPING_H */ diff --git a/arch/arm64/include/asm/fpsimd.h b/arch/arm64/include/asm/fpsimd.h index b6a2c352f4c3..59f10dd13f12 100644 --- a/arch/arm64/include/asm/fpsimd.h +++ b/arch/arm64/include/asm/fpsimd.h @@ -21,7 +21,7 @@ #include #include -#if defined(__KERNEL__) && defined(CONFIG_COMPAT) +#ifdef CONFIG_COMPAT /* Masks for extracting the FPSR and FPCR from the FPSCR */ #define VFP_FPSCR_STAT_MASK 0xf800009f #define VFP_FPSCR_CTRL_MASK 0x07f79f00 diff --git a/arch/arm64/include/asm/futex.h b/arch/arm64/include/asm/futex.h index 6211e3105491..6cc26a127819 100644 --- a/arch/arm64/include/asm/futex.h +++ b/arch/arm64/include/asm/futex.h @@ -5,8 +5,6 @@ #ifndef __ASM_FUTEX_H #define __ASM_FUTEX_H -#ifdef __KERNEL__ - #include #include @@ -129,5 +127,4 @@ 
futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *_uaddr, return ret; } -#endif /* __KERNEL__ */ #endif /* __ASM_FUTEX_H */ diff --git a/arch/arm64/include/asm/hw_breakpoint.h b/arch/arm64/include/asm/hw_breakpoint.h index db9ab760e6fd..bc7aaed4b34e 100644 --- a/arch/arm64/include/asm/hw_breakpoint.h +++ b/arch/arm64/include/asm/hw_breakpoint.h @@ -10,8 +10,6 @@ #include #include -#ifdef __KERNEL__ - struct arch_hw_breakpoint_ctrl { u32 __reserved : 19, len : 8, @@ -156,5 +154,4 @@ static inline int get_num_wrps(void) ID_AA64DFR0_WRPS_SHIFT); } -#endif /* __KERNEL__ */ #endif /* __ASM_BREAKPOINT_H */ diff --git a/arch/arm64/include/asm/io.h b/arch/arm64/include/asm/io.h index 7ed92626949d..179a4c65c0d4 100644 --- a/arch/arm64/include/asm/io.h +++ b/arch/arm64/include/asm/io.h @@ -8,8 +8,6 @@ #ifndef __ASM_IO_H #define __ASM_IO_H -#ifdef __KERNEL__ - #include #include @@ -207,5 +205,4 @@ extern int valid_mmap_phys_addr_range(unsigned long pfn, size_t size); extern int devmem_is_allowed(unsigned long pfn); -#endif /* __KERNEL__ */ #endif /* __ASM_IO_H */ diff --git a/arch/arm64/include/asm/irqflags.h b/arch/arm64/include/asm/irqflags.h index 7872f260c9ee..1a59f0ed1ae3 100644 --- a/arch/arm64/include/asm/irqflags.h +++ b/arch/arm64/include/asm/irqflags.h @@ -5,8 +5,6 @@ #ifndef __ASM_IRQFLAGS_H #define __ASM_IRQFLAGS_H -#ifdef __KERNEL__ - #include #include #include @@ -128,5 +126,4 @@ static inline void arch_local_irq_restore(unsigned long flags) : "memory"); } -#endif -#endif +#endif /* __ASM_IRQFLAGS_H */ diff --git a/arch/arm64/include/asm/pci.h b/arch/arm64/include/asm/pci.h index 9e690686e8aa..70b323cf8300 100644 --- a/arch/arm64/include/asm/pci.h +++ b/arch/arm64/include/asm/pci.h @@ -1,7 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 */ #ifndef __ASM_PCI_H #define __ASM_PCI_H -#ifdef __KERNEL__ #include #include @@ -35,5 +34,4 @@ static inline int pci_proc_domain(struct pci_bus *bus) } #endif /* CONFIG_PCI */ -#endif /* __KERNEL__ */ #endif /* __ASM_PCI_H */ diff --git a/arch/arm64/include/asm/proc-fns.h b/arch/arm64/include/asm/proc-fns.h index 368d90a9d0e5..a2ce65a0c1fa 100644 --- a/arch/arm64/include/asm/proc-fns.h +++ b/arch/arm64/include/asm/proc-fns.h @@ -9,7 +9,6 @@ #ifndef __ASM_PROCFNS_H #define __ASM_PROCFNS_H -#ifdef __KERNEL__ #ifndef __ASSEMBLY__ #include @@ -25,5 +24,4 @@ extern u64 cpu_do_resume(phys_addr_t ptr, u64 idmap_ttbr); #include #endif /* __ASSEMBLY__ */ -#endif /* __KERNEL__ */ #endif /* __ASM_PROCFNS_H */ diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h index 844e2964b0f5..fa7a6f63308f 100644 --- a/arch/arm64/include/asm/processor.h +++ b/arch/arm64/include/asm/processor.h @@ -20,7 +20,6 @@ #define NET_IP_ALIGN 0 #ifndef __ASSEMBLY__ -#ifdef __KERNEL__ #include #include @@ -283,8 +282,6 @@ static inline void spin_lock_prefetch(const void *ptr) #define HAVE_ARCH_PICK_MMAP_LAYOUT -#endif - extern unsigned long __ro_after_init signal_minsigstksz; /* sigframe size */ extern void __init minsigstksz_setup(void); diff --git a/arch/arm64/include/asm/signal32.h b/arch/arm64/include/asm/signal32.h index bd43d1cf724b..7e9f163d02ec 100644 --- a/arch/arm64/include/asm/signal32.h +++ b/arch/arm64/include/asm/signal32.h @@ -5,7 +5,6 @@ #ifndef __ASM_SIGNAL32_H #define __ASM_SIGNAL32_H -#ifdef __KERNEL__ #ifdef CONFIG_COMPAT #include @@ -79,5 +78,4 @@ static inline void compat_setup_restart_syscall(struct pt_regs *regs) { } #endif /* CONFIG_COMPAT */ -#endif /* __KERNEL__ */ #endif /* __ASM_SIGNAL32_H */ diff --git 
a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h index 180b34ec5965..e35cd84b102c 100644 --- a/arch/arm64/include/asm/thread_info.h +++ b/arch/arm64/include/asm/thread_info.h @@ -8,8 +8,6 @@ #ifndef __ASM_THREAD_INFO_H #define __ASM_THREAD_INFO_H -#ifdef __KERNEL__ - #include #ifndef __ASSEMBLY__ @@ -121,5 +119,4 @@ void arch_release_task_struct(struct task_struct *tsk); .addr_limit = KERNEL_DS, \ } -#endif /* __KERNEL__ */ #endif /* __ASM_THREAD_INFO_H */ diff --git a/arch/arm64/include/asm/vdso.h b/arch/arm64/include/asm/vdso.h index 9c15e0a06301..07468428fd29 100644 --- a/arch/arm64/include/asm/vdso.h +++ b/arch/arm64/include/asm/vdso.h @@ -5,8 +5,6 @@ #ifndef __ASM_VDSO_H #define __ASM_VDSO_H -#ifdef __KERNEL__ - /* * Default link address for the vDSO. * Since we randomise the VDSO mapping, there's little point in trying @@ -28,6 +26,4 @@ #endif /* !__ASSEMBLY__ */ -#endif /* __KERNEL__ */ - #endif /* __ASM_VDSO_H */ diff --git a/arch/arm64/include/asm/vdso_datapage.h b/arch/arm64/include/asm/vdso_datapage.h index ba6dbc3de864..1f38bf330a6e 100644 --- a/arch/arm64/include/asm/vdso_datapage.h +++ b/arch/arm64/include/asm/vdso_datapage.h @@ -5,8 +5,6 @@ #ifndef __ASM_VDSO_DATAPAGE_H #define __ASM_VDSO_DATAPAGE_H -#ifdef __KERNEL__ - #ifndef __ASSEMBLY__ struct vdso_data { @@ -32,6 +30,4 @@ struct vdso_data { #endif /* !__ASSEMBLY__ */ -#endif /* __KERNEL__ */ - #endif /* __ASM_VDSO_DATAPAGE_H */ -- cgit v1.2.3-58-ga151 From c19d050f80881296aab3ba90fe5b2c107a238dcb Mon Sep 17 00:00:00 2001 From: Bhupesh Sharma Date: Thu, 11 Jul 2019 17:27:32 +0530 Subject: arm64/kexec: Use consistent convention of initializing 'kexec_buf.mem' with KEXEC_BUF_MEM_UNKNOWN With commit b6664ba42f14 ("s390, kexec_file: drop arch_kexec_mem_walk()"), we introduced the KEXEC_BUF_MEM_UNKNOWN macro. If kexec_buf.mem is set to this value, kexec_locate_mem_hole() will try to allocate free memory. While other architectures like s390 and x86_64 already use this macro to initialize kexec_buf.mem, arm64 uses an equivalent value of 0. Replace it with KEXEC_BUF_MEM_UNKNOWN, to keep the convention of initializing 'kexec_buf.mem' consistent across architectures.
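As a rough usage sketch of the convention being adopted (modelled on load_other_segments() but simplified, and not code from this patch; initrd, initrd_len and initrd_load_addr are placeholders here):

	struct kexec_buf kbuf = { .image = image };
	int ret;

	kbuf.buffer    = initrd;
	kbuf.bufsz     = initrd_len;
	kbuf.memsz     = initrd_len;
	kbuf.buf_align = PAGE_SIZE;
	kbuf.mem       = KEXEC_BUF_MEM_UNKNOWN;	/* no destination chosen yet */

	ret = kexec_add_buffer(&kbuf);	/* locates a hole and fills in kbuf.mem */
	if (ret)
		return ret;
	initrd_load_addr = kbuf.mem;

The named constant documents the intent (no destination selected yet) instead of relying on the literal 0.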
Cc: takahiro.akashi@linaro.org Cc: james.morse@arm.com Reviewed-by: Matthias Brugger Signed-off-by: Bhupesh Sharma Signed-off-by: Will Deacon --- arch/arm64/kernel/kexec_image.c | 2 +- arch/arm64/kernel/machine_kexec_file.c | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/arch/arm64/kernel/kexec_image.c b/arch/arm64/kernel/kexec_image.c index 2514fd6f12cb..29a9428486a5 100644 --- a/arch/arm64/kernel/kexec_image.c +++ b/arch/arm64/kernel/kexec_image.c @@ -84,7 +84,7 @@ static void *image_load(struct kimage *image, kbuf.buffer = kernel; kbuf.bufsz = kernel_len; - kbuf.mem = 0; + kbuf.mem = KEXEC_BUF_MEM_UNKNOWN; kbuf.memsz = le64_to_cpu(h->image_size); text_offset = le64_to_cpu(h->text_offset); kbuf.buf_align = MIN_KIMG_ALIGN; diff --git a/arch/arm64/kernel/machine_kexec_file.c b/arch/arm64/kernel/machine_kexec_file.c index 58871333737a..ba78ee7ca990 100644 --- a/arch/arm64/kernel/machine_kexec_file.c +++ b/arch/arm64/kernel/machine_kexec_file.c @@ -177,7 +177,7 @@ int load_other_segments(struct kimage *image, if (initrd) { kbuf.buffer = initrd; kbuf.bufsz = initrd_len; - kbuf.mem = 0; + kbuf.mem = KEXEC_BUF_MEM_UNKNOWN; kbuf.memsz = initrd_len; kbuf.buf_align = 0; /* within 1GB-aligned window of up to 32GB in size */ @@ -204,7 +204,7 @@ int load_other_segments(struct kimage *image, dtb_len = fdt_totalsize(dtb); kbuf.buffer = dtb; kbuf.bufsz = dtb_len; - kbuf.mem = 0; + kbuf.mem = KEXEC_BUF_MEM_UNKNOWN; kbuf.memsz = dtb_len; /* not across 2MB boundary */ kbuf.buf_align = SZ_2M; -- cgit v1.2.3-58-ga151 From b717480f5415201286cc9b5e3244513f54a327c4 Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Tue, 23 Jul 2019 20:29:22 +0900 Subject: arm64: remove unneeded uapi/asm/stat.h stat.h is listed in include/uapi/asm-generic/Kbuild, so Kbuild will automatically generate it. Signed-off-by: Masahiro Yamada Signed-off-by: Will Deacon --- arch/arm64/include/uapi/asm/stat.h | 17 ----------------- 1 file changed, 17 deletions(-) delete mode 100644 arch/arm64/include/uapi/asm/stat.h diff --git a/arch/arm64/include/uapi/asm/stat.h b/arch/arm64/include/uapi/asm/stat.h deleted file mode 100644 index 313325fa22fa..000000000000 --- a/arch/arm64/include/uapi/asm/stat.h +++ /dev/null @@ -1,17 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ -/* - * Copyright (C) 2012 ARM Ltd. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program. If not, see . - */ -#include -- cgit v1.2.3-58-ga151 From 3e77eeb7a27fc3dcf6b65e7ee01ac00bf5d2b4fb Mon Sep 17 00:00:00 2001 From: Lorenzo Pieralisi Date: Mon, 22 Jul 2019 17:14:33 +0100 Subject: ACPI/IORT: Rename arm_smmu_v3_set_proximity() 'node' local variable Commit 36a2ba07757d ("ACPI/IORT: Reject platform device creation on NUMA node mapping failure") introduced a local variable 'node' in arm_smmu_v3_set_proximity() that shadows the struct acpi_iort_node pointer function parameter. Execution was unaffected but it is prone to errors and can lead to subtle bugs. Rename the local variable to prevent any issue. 
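The underlying hazard is ordinary C shadowing; a minimal, self-contained illustration (the set_node() example below is generic and unrelated to the IORT code itself):

	#include <stdio.h>

	static void set_node(int node)
	{
		if (node > 0) {
			int node = 42;	/* shadows the parameter */
			printf("inner: %d\n", node);
		}
		/* from here on, "node" refers to the parameter again */
		printf("outer: %d\n", node);
	}

	int main(void)
	{
		set_node(7);	/* prints 42, then 7 */
		return 0;
	}

Compilers can flag this with -Wshadow, which the kernel does not enable by default, hence the manual rename.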
Reviewed-by: Hanjun Guo Reported-by: Will Deacon Signed-off-by: Lorenzo Pieralisi Cc: Will Deacon Cc: Hanjun Guo Cc: Sudeep Holla Cc: Catalin Marinas Cc: Robin Murphy Cc: Kefeng Wang Signed-off-by: Will Deacon --- drivers/acpi/arm64/iort.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c index 8569b79e8b58..5a7551d060f2 100644 --- a/drivers/acpi/arm64/iort.c +++ b/drivers/acpi/arm64/iort.c @@ -1256,12 +1256,12 @@ static int __init arm_smmu_v3_set_proximity(struct device *dev, smmu = (struct acpi_iort_smmu_v3 *)node->node_data; if (smmu->flags & ACPI_IORT_SMMU_V3_PXM_VALID) { - int node = acpi_map_pxm_to_node(smmu->pxm); + int dev_node = acpi_map_pxm_to_node(smmu->pxm); - if (node != NUMA_NO_NODE && !node_online(node)) + if (dev_node != NUMA_NO_NODE && !node_online(dev_node)) return -EINVAL; - set_dev_node(dev, node); + set_dev_node(dev, dev_node); pr_info("SMMU-v3[%llx] Mapped to Proximity domain %d\n", smmu->base_address, smmu->pxm); -- cgit v1.2.3-58-ga151 From b3e089cd446b26bb1e12860e1afb9da314453fd6 Mon Sep 17 00:00:00 2001 From: Chuhong Yuan Date: Tue, 30 Jul 2019 10:44:15 +0800 Subject: arm64: Replace strncmp with str_has_prefix In commit b6b2735514bc ("tracing: Use str_has_prefix() instead of using fixed sizes") the newly introduced str_has_prefix() was used to replace error-prone strncmp(str, const, len). Here fix codes with the same pattern. Signed-off-by: Chuhong Yuan Signed-off-by: Will Deacon --- arch/arm64/kernel/module-plts.c | 2 +- arch/arm64/mm/numa.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/arm64/kernel/module-plts.c b/arch/arm64/kernel/module-plts.c index 044c0ae4d6c8..b182442b87a3 100644 --- a/arch/arm64/kernel/module-plts.c +++ b/arch/arm64/kernel/module-plts.c @@ -302,7 +302,7 @@ int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, /* sort by type, symbol index and addend */ sort(rels, numrels, sizeof(Elf64_Rela), cmp_rela, NULL); - if (strncmp(secstrings + dstsec->sh_name, ".init", 5) != 0) + if (!str_has_prefix(secstrings + dstsec->sh_name, ".init")) core_plts += count_plts(syms, rels, numrels, sechdrs[i].sh_info, dstsec); else diff --git a/arch/arm64/mm/numa.c b/arch/arm64/mm/numa.c index 4f241cc7cc3b..4decf1659700 100644 --- a/arch/arm64/mm/numa.c +++ b/arch/arm64/mm/numa.c @@ -29,7 +29,7 @@ static __init int numa_parse_early_param(char *opt) { if (!opt) return -EINVAL; - if (!strncmp(opt, "off", 3)) + if (str_has_prefix(opt, "off")) numa_off = true; return 0; -- cgit v1.2.3-58-ga151 From 332e5281a4e8269b96233a7babc98b03596b7e6d Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Tue, 16 Jul 2019 08:14:19 +0100 Subject: arm64: esr: Add ESR exception class encoding for trapped ERET The ESR.EC encoding of 0b011010 (0x1a) describes an exception generated by an ERET, ERETAA or ERETAB instruction as a result of a nested virtualisation trap to EL2. Add an encoding for this EC and a string description so that we identify it correctly if we take one unexpectedly. 
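For readers unfamiliar with the encoding, the exception class is the top six bits of ESR_ELx, so the new entry is selected roughly as in the sketch below (an illustrative helper, not the kernel's actual esr_get_class_string()):

	/* EC lives in ESR_ELx[31:26]; 0x1a is the new ESR_ELx_EC_ERET class */
	static const char *esr_class(unsigned long esr)
	{
		unsigned int ec = (esr >> 26) & 0x3f;

		if (ec == 0x1a)
			return "ERET/ERETAA/ERETAB";
		return "other";
	}

With the table entry in place, an unexpected trapped ERET is reported with a meaningful class string rather than as an unrecognised exception class.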
Acked-by: Mark Rutland Acked-by: Marc Zyngier Signed-off-by: Will Deacon --- arch/arm64/include/asm/esr.h | 3 ++- arch/arm64/kernel/traps.c | 1 + 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/arch/arm64/include/asm/esr.h b/arch/arm64/include/asm/esr.h index 65ac18400979..cb29253ae86b 100644 --- a/arch/arm64/include/asm/esr.h +++ b/arch/arm64/include/asm/esr.h @@ -34,7 +34,8 @@ #define ESR_ELx_EC_SMC64 (0x17) /* EL2 and above */ #define ESR_ELx_EC_SYS64 (0x18) #define ESR_ELx_EC_SVE (0x19) -/* Unallocated EC: 0x1A - 0x1E */ +#define ESR_ELx_EC_ERET (0x1a) /* EL2 only */ +/* Unallocated EC: 0x1b - 0x1E */ #define ESR_ELx_EC_IMP_DEF (0x1f) /* EL3 only */ #define ESR_ELx_EC_IABT_LOW (0x20) #define ESR_ELx_EC_IABT_CUR (0x21) diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c index d3313797cca9..42c8422cdf4a 100644 --- a/arch/arm64/kernel/traps.c +++ b/arch/arm64/kernel/traps.c @@ -743,6 +743,7 @@ static const char *esr_class_str[] = { [ESR_ELx_EC_SMC64] = "SMC (AArch64)", [ESR_ELx_EC_SYS64] = "MSR/MRS (AArch64)", [ESR_ELx_EC_SVE] = "SVE", + [ESR_ELx_EC_ERET] = "ERET/ERETAA/ERETAB", [ESR_ELx_EC_IMP_DEF] = "EL3 IMP DEF", [ESR_ELx_EC_IABT_LOW] = "IABT (lower EL)", [ESR_ELx_EC_IABT_CUR] = "IABT (current EL)", -- cgit v1.2.3-58-ga151 From 73961dc1182ed7e1eb80a90ee28871ab3547af7e Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Mon, 15 Jul 2019 14:28:17 +0100 Subject: arm64: sysreg: Remove unused and rotting SCTLR_ELx field definitions Our SCTLR_ELx field definitions are somewhat over-engineered in that they carefully define masks describing the RES0/RES1 bits and then use these to construct further masks representing bits to be set/cleared for the _EL1 and _EL2 registers. However, most of the resulting definitions aren't actually used by anybody and have subsequently started to bit-rot when new fields have been added by the architecture, resulting in fields being part of the RES0 mask despite being defined and used elsewhere. Rather than fix up these masks, simply remove the unused parts entirely so that we can drop the maintenance burden. We can always add things back if we need them in the future. Acked-by: Mark Rutland Signed-off-by: Will Deacon --- arch/arm64/include/asm/sysreg.h | 29 ----------------------------- 1 file changed, 29 deletions(-) diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index 06ebcfef73df..1df45c7ffcf7 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -499,28 +499,11 @@ #define SCTLR_EL2_RES1 ((BIT(4)) | (BIT(5)) | (BIT(11)) | (BIT(16)) | \ (BIT(18)) | (BIT(22)) | (BIT(23)) | (BIT(28)) | \ (BIT(29))) -#define SCTLR_EL2_RES0 ((BIT(6)) | (BIT(7)) | (BIT(8)) | (BIT(9)) | \ - (BIT(10)) | (BIT(13)) | (BIT(14)) | (BIT(15)) | \ - (BIT(17)) | (BIT(20)) | (BIT(24)) | (BIT(26)) | \ - (BIT(27)) | (BIT(30)) | (BIT(31)) | \ - (0xffffefffUL << 32)) #ifdef CONFIG_CPU_BIG_ENDIAN #define ENDIAN_SET_EL2 SCTLR_ELx_EE -#define ENDIAN_CLEAR_EL2 0 #else #define ENDIAN_SET_EL2 0 -#define ENDIAN_CLEAR_EL2 SCTLR_ELx_EE -#endif - -/* SCTLR_EL2 value used for the hyp-stub */ -#define SCTLR_EL2_SET (SCTLR_ELx_IESB | ENDIAN_SET_EL2 | SCTLR_EL2_RES1) -#define SCTLR_EL2_CLEAR (SCTLR_ELx_M | SCTLR_ELx_A | SCTLR_ELx_C | \ - SCTLR_ELx_SA | SCTLR_ELx_I | SCTLR_ELx_WXN | \ - SCTLR_ELx_DSSBS | ENDIAN_CLEAR_EL2 | SCTLR_EL2_RES0) - -#if (SCTLR_EL2_SET ^ SCTLR_EL2_CLEAR) != 0xffffffffffffffffUL -#error "Inconsistent SCTLR_EL2 set/clear bits" #endif /* SCTLR_EL1 specific flags. 
*/ @@ -539,16 +522,11 @@ #define SCTLR_EL1_RES1 ((BIT(11)) | (BIT(20)) | (BIT(22)) | (BIT(28)) | \ (BIT(29))) -#define SCTLR_EL1_RES0 ((BIT(6)) | (BIT(10)) | (BIT(13)) | (BIT(17)) | \ - (BIT(27)) | (BIT(30)) | (BIT(31)) | \ - (0xffffefffUL << 32)) #ifdef CONFIG_CPU_BIG_ENDIAN #define ENDIAN_SET_EL1 (SCTLR_EL1_E0E | SCTLR_ELx_EE) -#define ENDIAN_CLEAR_EL1 0 #else #define ENDIAN_SET_EL1 0 -#define ENDIAN_CLEAR_EL1 (SCTLR_EL1_E0E | SCTLR_ELx_EE) #endif #define SCTLR_EL1_SET (SCTLR_ELx_M | SCTLR_ELx_C | SCTLR_ELx_SA |\ @@ -556,13 +534,6 @@ SCTLR_EL1_DZE | SCTLR_EL1_UCT |\ SCTLR_EL1_NTWE | SCTLR_ELx_IESB | SCTLR_EL1_SPAN |\ ENDIAN_SET_EL1 | SCTLR_EL1_UCI | SCTLR_EL1_RES1) -#define SCTLR_EL1_CLEAR (SCTLR_ELx_A | SCTLR_EL1_CP15BEN | SCTLR_EL1_ITD |\ - SCTLR_EL1_UMA | SCTLR_ELx_WXN | ENDIAN_CLEAR_EL1 |\ - SCTLR_ELx_DSSBS | SCTLR_EL1_NTWI | SCTLR_EL1_RES0) - -#if (SCTLR_EL1_SET ^ SCTLR_EL1_CLEAR) != 0xffffffffffffffffUL -#error "Inconsistent SCTLR_EL1 set/clear bits" -#endif /* id_aa64isar0 */ #define ID_AA64ISAR0_TS_SHIFT 52 -- cgit v1.2.3-58-ga151 From 2f8f180b3ceed7a16a92cc3c164368c26e1f9320 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Tue, 30 Jul 2019 11:26:31 +0100 Subject: arm64: Remove unused cpucap_multi_entry_cap_cpu_enable() The function cpucap_multi_entry_cap_cpu_enable() is unused, remove it to avoid any confusion reading the code and potential for bit rot. Signed-off-by: Mark Brown Signed-off-by: Will Deacon --- arch/arm64/include/asm/cpufeature.h | 15 --------------- 1 file changed, 15 deletions(-) diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h index c96ffa4722d3..cf65a47ee6b4 100644 --- a/arch/arm64/include/asm/cpufeature.h +++ b/arch/arm64/include/asm/cpufeature.h @@ -363,21 +363,6 @@ cpucap_multi_entry_cap_matches(const struct arm64_cpu_capabilities *entry, return false; } -/* - * Take appropriate action for all matching entries in the shared capability - * entry. - */ -static inline void -cpucap_multi_entry_cap_cpu_enable(const struct arm64_cpu_capabilities *entry) -{ - const struct arm64_cpu_capabilities *caps; - - for (caps = entry->match_list; caps->matches; caps++) - if (caps->matches(caps, SCOPE_LOCAL_CPU) && - caps->cpu_enable) - caps->cpu_enable(caps); -} - extern DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS); extern struct static_key_false cpu_hwcap_keys[ARM64_NCAPS]; extern struct static_key_false arm64_const_caps_ready; -- cgit v1.2.3-58-ga151 From 22ec71615d824f4f11d38d0e55a88d8956b7e45f Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Fri, 7 Jun 2019 15:48:58 +0100 Subject: arm64: io: Relax implicit barriers in default I/O accessors The arm64 implementation of the default I/O accessors requires barrier instructions to satisfy the memory ordering requirements documented in memory-barriers.txt [1], which are largely derived from the behaviour of I/O accesses on x86. Of particular interest are the requirements that a write to a device must be ordered against prior writes to memory, and a read from a device must be ordered against subsequent reads from memory. We satisfy these requirements using various flavours of DSB: the most expensive barrier we have, since it implies completion of prior accesses. This was deemed necessary when we first implemented the accessors, since accesses to different endpoints could propagate independently and therefore the only way to enforce order is to rely on completion guarantees [2]. 
Since then, the Armv8 memory model has been retrospectively strengthened to require "other-multi-copy atomicity", a property that requires memory accesses from an observer to become visible to all other observers simultaneously [3]. In other words, propagation of accesses is limited to transitioning from locally observed to globally observed. It recently became apparent that this change also has a subtle impact on our I/O accessors for shared peripherals, allowing us to use the cheaper DMB instruction instead. As a concrete example, consider the following: memcpy(dma_buffer, data, bufsz); writel(DMA_START, dev->ctrl_reg); A DMB ST instruction between the final write to the DMA buffer and the write to the control register will ensure that the writes to the DMA buffer are observed before the write to the control register by all observers. Put another way, if an observer can see the write to the control register, it can also see the writes to memory. This has always been the case and is not sufficient to provide the ordering required by Linux, since there is no guarantee that the master interface of the DMA-capable device has observed either of the accesses. However, in an other-multi-copy atomic world, we can infer two things: 1. A write arriving at an endpoint shared between multiple CPUs is visible to all CPUs 2. A write that is visible to all CPUs is also visible to all other observers in the shareability domain Pieced together, this allows us to use DMB OSHST for our default I/O write accessors and DMB OSHLD for our default I/O read accessors (the outer-shareability is for handling non-cacheable mappings) for shared devices. Memory-mapped, DMA-capable peripherals that are private to a CPU (i.e. inaccessible to other CPUs) still require the DSB, however these are few and far between and typically require special treatment anyway which is outside of the scope of the portable driver API (e.g. GIC, page-table walker, SPE profiler). Note that our mandatory barriers remain as DSBs, since there are cases where they are used to flush the store buffer of the CPU, e.g. when publishing page table updates to the SMMU. [1] https://git.kernel.org/linus/4614bbdee357 [2] https://www.youtube.com/watch?v=i6DayghhA8Q [3] https://www.cl.cam.ac.uk/~pes20/armv8-mca/ Reviewed-by: Catalin Marinas Signed-off-by: Will Deacon --- arch/arm64/include/asm/io.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/arm64/include/asm/io.h b/arch/arm64/include/asm/io.h index 179a4c65c0d4..e9763831186a 100644 --- a/arch/arm64/include/asm/io.h +++ b/arch/arm64/include/asm/io.h @@ -95,7 +95,7 @@ static inline u64 __raw_readq(const volatile void __iomem *addr) ({ \ unsigned long tmp; \ \ - rmb(); \ + dma_rmb(); \ \ /* \ * Create a dummy control dependency from the IO read to any \ @@ -109,7 +109,7 @@ static inline u64 __raw_readq(const volatile void __iomem *addr) }) #define __io_par(v) __iormb(v) -#define __iowmb() wmb() +#define __iowmb() dma_wmb() /* * Relaxed I/O memory access primitives. These follow the Device memory -- cgit v1.2.3-58-ga151 From 13776f9d40a028a245bb766269e360f5b7a62721 Mon Sep 17 00:00:00 2001 From: Junhua Huang Date: Sat, 6 Jul 2019 14:41:15 +0800 Subject: arm64: mm: free the initrd reserved memblock in a aligned manner We should free the initrd reserved memblock in an aligned manner, because the initrd reserves the memblock in an aligned manner in arm64_memblock_init(). Otherwise there are some fragments in memblock_reserved regions after free_initrd_mem(). 
e.g.: /sys/kernel/debug/memblock # cat reserved 0: 0x0000000080080000..0x00000000817fafff 1: 0x0000000083400000..0x0000000083ffffff 2: 0x0000000090000000..0x000000009000407f 3: 0x00000000b0000000..0x00000000b000003f 4: 0x00000000b26184ea..0x00000000b2618fff The fragments like the ranges from b0000000 to b000003f and from b26184ea to b2618fff should be freed. And we can do free_reserved_area() after memblock_free(), as free_reserved_area() calls __free_pages(), once we've done that it could be allocated somewhere else, but memblock and iomem still say this is reserved memory. Fixes: 05c58752f9dc ("arm64: To remove initrd reserved area entry from memblock") Signed-off-by: Junhua Huang Signed-off-by: Will Deacon --- arch/arm64/mm/init.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c index f3c795278def..b1ee6cb4b17f 100644 --- a/arch/arm64/mm/init.c +++ b/arch/arm64/mm/init.c @@ -570,8 +570,12 @@ void free_initmem(void) #ifdef CONFIG_BLK_DEV_INITRD void __init free_initrd_mem(unsigned long start, unsigned long end) { + unsigned long aligned_start, aligned_end; + + aligned_start = __virt_to_phys(start) & PAGE_MASK; + aligned_end = PAGE_ALIGN(__virt_to_phys(end)); + memblock_free(aligned_start, aligned_end - aligned_start); free_reserved_area((void *)start, (void *)end, 0, "initrd"); - memblock_free(__virt_to_phys(start), end - start); } #endif -- cgit v1.2.3-58-ga151 From 66cbdf5d0c96f5fe570b548e764583ea9d793077 Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Wed, 31 Jul 2019 15:35:20 +0200 Subject: arm64: Move TIF_* documentation to individual definitions Some TIF_* flags are documented in the comment block at the top, some next to their definitions, some in both places. Move all documentation to the individual definitions for consistency, and for easy lookup. 
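As a reminder of how these definitions are consumed (a generic pattern, not code added by this patch), each TIF_* value is a bit number with a corresponding _TIF_* mask, and individual flags are tested on the current task with helpers such as test_thread_flag():

	#define _TIF_SIGPENDING		(1 << TIF_SIGPENDING)
	#define _TIF_NEED_RESCHED	(1 << TIF_NEED_RESCHED)

	if (test_thread_flag(TIF_NEED_RESCHED))
		schedule();

Keeping each description next to its definition puts it exactly where readers look the flag up.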
Acked-by: Mark Rutland Signed-off-by: Geert Uytterhoeven Signed-off-by: Will Deacon --- arch/arm64/include/asm/thread_info.h | 25 +++++++------------------ 1 file changed, 7 insertions(+), 18 deletions(-) diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h index e35cd84b102c..2a408dd64f5c 100644 --- a/arch/arm64/include/asm/thread_info.h +++ b/arch/arm64/include/asm/thread_info.h @@ -57,29 +57,18 @@ void arch_release_task_struct(struct task_struct *tsk); #endif -/* - * thread information flags: - * TIF_SYSCALL_TRACE - syscall trace active - * TIF_SYSCALL_TRACEPOINT - syscall tracepoint for ftrace - * TIF_SYSCALL_AUDIT - syscall auditing - * TIF_SECCOMP - syscall secure computing - * TIF_SYSCALL_EMU - syscall emulation active - * TIF_SIGPENDING - signal pending - * TIF_NEED_RESCHED - rescheduling necessary - * TIF_NOTIFY_RESUME - callback before returning to user - */ -#define TIF_SIGPENDING 0 -#define TIF_NEED_RESCHED 1 +#define TIF_SIGPENDING 0 /* signal pending */ +#define TIF_NEED_RESCHED 1 /* rescheduling necessary */ #define TIF_NOTIFY_RESUME 2 /* callback before returning to user */ #define TIF_FOREIGN_FPSTATE 3 /* CPU's FP state is not current's */ #define TIF_UPROBE 4 /* uprobe breakpoint or singlestep */ #define TIF_FSCHECK 5 /* Check FS is USER_DS on return */ #define TIF_NOHZ 7 -#define TIF_SYSCALL_TRACE 8 -#define TIF_SYSCALL_AUDIT 9 -#define TIF_SYSCALL_TRACEPOINT 10 -#define TIF_SECCOMP 11 -#define TIF_SYSCALL_EMU 12 +#define TIF_SYSCALL_TRACE 8 /* syscall trace active */ +#define TIF_SYSCALL_AUDIT 9 /* syscall auditing */ +#define TIF_SYSCALL_TRACEPOINT 10 /* syscall tracepoint for ftrace */ +#define TIF_SECCOMP 11 /* syscall secure computing */ +#define TIF_SYSCALL_EMU 12 /* syscall emulation active */ #define TIF_MEMDIE 18 /* is terminating due to OOM killer */ #define TIF_FREEZE 19 #define TIF_RESTORE_SIGMASK 20 -- cgit v1.2.3-58-ga151 From 5cf896fb6be3effd9aea455b22213e27be8bdb1d Mon Sep 17 00:00:00 2001 From: Peter Collingbourne Date: Wed, 31 Jul 2019 18:18:42 -0700 Subject: arm64: Add support for relocating the kernel with RELR relocations RELR is a relocation packing format for relative relocations. The format is described in a generic-abi proposal: https://groups.google.com/d/topic/generic-abi/bX460iggiKg/discussion The LLD linker can be instructed to pack relocations in the RELR format by passing the flag --pack-dyn-relocs=relr. This patch adds a new config option, CONFIG_RELR. Enabling this option instructs the linker to pack vmlinux's relative relocations in the RELR format, and causes the kernel to apply the relocations at startup along with the RELA relocations. RELA relocations still need to be applied because the linker will emit RELA relative relocations if they are unrepresentable in the RELR format (i.e. address not a multiple of 2). Enabling CONFIG_RELR reduces the size of a defconfig kernel image with CONFIG_RANDOMIZE_BASE by 3.5MB/16% uncompressed, or 550KB/5% compressed (lz4). 
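To make the packing concrete before the assembly below, here is a hedged, host-side C sketch of a decoder for the RELR entry stream (relr_decode() is illustrative only and independent of the __relocate_kernel implementation):

	#include <stddef.h>
	#include <stdint.h>
	#include <stdio.h>

	/* Even entries name a word to relocate; odd entries are bitmaps whose
	 * bits 1..63 cover successive 63-word windows after the last address
	 * entry.
	 */
	static void relr_decode(const uint64_t *relr, size_t count)
	{
		uint64_t base = 0;

		for (size_t i = 0; i < count; i++) {
			uint64_t entry = relr[i];

			if (!(entry & 1)) {		/* address entry */
				printf("reloc at 0x%llx\n",
				       (unsigned long long)entry);
				base = entry + 8;	/* first word a bitmap can cover */
			} else {			/* bitmap entry */
				for (int bit = 1; bit < 64; bit++)
					if (entry & (1ULL << bit))
						printf("reloc at 0x%llx\n",
						       (unsigned long long)(base + (bit - 1) * 8));
				base += 63 * 8;		/* next bitmap covers the next 63 words */
			}
		}
	}

The kernel variant additionally adds the KASLR/physical offset to each located word in place, which is why the implementation below has to remember the displacement it last applied.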
Signed-off-by: Peter Collingbourne Tested-by: Nick Desaulniers Reviewed-by: Nick Desaulniers Signed-off-by: Will Deacon --- Makefile | 4 ++ arch/Kconfig | 14 ++++++ arch/arm64/Kconfig | 1 + arch/arm64/kernel/head.S | 96 ++++++++++++++++++++++++++++++++++++++--- arch/arm64/kernel/vmlinux.lds.S | 9 ++++ init/Kconfig | 3 ++ scripts/tools-support-relr.sh | 16 +++++++ 7 files changed, 137 insertions(+), 6 deletions(-) create mode 100755 scripts/tools-support-relr.sh diff --git a/Makefile b/Makefile index 23cdf1f41364..9e6ec0c9962c 100644 --- a/Makefile +++ b/Makefile @@ -912,6 +912,10 @@ ifeq ($(CONFIG_STRIP_ASM_SYMS),y) LDFLAGS_vmlinux += $(call ld-option, -X,) endif +ifeq ($(CONFIG_RELR),y) +LDFLAGS_vmlinux += --pack-dyn-relocs=relr +endif + # insure the checker run with the right endianness CHECKFLAGS += $(if $(CONFIG_CPU_BIG_ENDIAN),-mbig-endian,-mlittle-endian) diff --git a/arch/Kconfig b/arch/Kconfig index a7b57dd42c26..aa6bdb3df5c1 100644 --- a/arch/Kconfig +++ b/arch/Kconfig @@ -925,6 +925,20 @@ config LOCK_EVENT_COUNTS the chance of application behavior change because of timing differences. The counts are reported via debugfs. +# Select if the architecture has support for applying RELR relocations. +config ARCH_HAS_RELR + bool + +config RELR + bool "Use RELR relocation packing" + depends on ARCH_HAS_RELR && TOOLS_SUPPORT_RELR + default y + help + Store the kernel's dynamic relocations in the RELR relocation packing + format. Requires a compatible linker (LLD supports this feature), as + well as compatible NM and OBJCOPY utilities (llvm-nm and llvm-objcopy + are compatible). + source "kernel/gcov/Kconfig" source "scripts/gcc-plugins/Kconfig" diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 3adcec05b1f6..2681eb79c40b 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -1467,6 +1467,7 @@ endif config RELOCATABLE bool + select ARCH_HAS_RELR help This builds the kernel as a Position Independent Executable (PIE), which retains all relocation metadata required to relocate the diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S index 2cdacd1c141b..cc23302e9d95 100644 --- a/arch/arm64/kernel/head.S +++ b/arch/arm64/kernel/head.S @@ -102,6 +102,8 @@ pe_header: * x23 stext() .. start_kernel() physical misalignment/KASLR offset * x28 __create_page_tables() callee preserved temp register * x19/x20 __primary_switch() callee preserved temp registers + * x24 __primary_switch() .. relocate_kernel() + * current RELR displacement */ ENTRY(stext) bl preserve_boot_args @@ -834,14 +836,93 @@ __relocate_kernel: 0: cmp x9, x10 b.hs 1f - ldp x11, x12, [x9], #24 - ldr x13, [x9, #-8] - cmp w12, #R_AARCH64_RELATIVE + ldp x12, x13, [x9], #24 + ldr x14, [x9, #-8] + cmp w13, #R_AARCH64_RELATIVE b.ne 0b - add x13, x13, x23 // relocate - str x13, [x11, x23] + add x14, x14, x23 // relocate + str x14, [x12, x23] b 0b -1: ret + +1: +#ifdef CONFIG_RELR + /* + * Apply RELR relocations. + * + * RELR is a compressed format for storing relative relocations. The + * encoded sequence of entries looks like: + * [ AAAAAAAA BBBBBBB1 BBBBBBB1 ... AAAAAAAA BBBBBB1 ... ] + * + * i.e. start with an address, followed by any number of bitmaps. The + * address entry encodes 1 relocation. The subsequent bitmap entries + * encode up to 63 relocations each, at subsequent offsets following + * the last address entry. + * + * The bitmap entries must have 1 in the least significant bit. The + * assumption here is that an address cannot have 1 in lsb. Odd + * addresses are not supported. 
Any odd addresses are stored in the RELA + * section, which is handled above. + * + * Excluding the least significant bit in the bitmap, each non-zero + * bit in the bitmap represents a relocation to be applied to + * a corresponding machine word that follows the base address + * word. The second least significant bit represents the machine + * word immediately following the initial address, and each bit + * that follows represents the next word, in linear order. As such, + * a single bitmap can encode up to 63 relocations in a 64-bit object. + * + * In this implementation we store the address of the next RELR table + * entry in x9, the address being relocated by the current address or + * bitmap entry in x13 and the address being relocated by the current + * bit in x14. + * + * Because addends are stored in place in the binary, RELR relocations + * cannot be applied idempotently. We use x24 to keep track of the + * currently applied displacement so that we can correctly relocate if + * __relocate_kernel is called twice with non-zero displacements (i.e. + * if there is both a physical misalignment and a KASLR displacement). + */ + ldr w9, =__relr_offset // offset to reloc table + ldr w10, =__relr_size // size of reloc table + add x9, x9, x11 // __va(.relr) + add x10, x9, x10 // __va(.relr) + sizeof(.relr) + + sub x15, x23, x24 // delta from previous offset + cbz x15, 7f // nothing to do if unchanged + mov x24, x23 // save new offset + +2: cmp x9, x10 + b.hs 7f + ldr x11, [x9], #8 + tbnz x11, #0, 3f // branch to handle bitmaps + add x13, x11, x23 + ldr x12, [x13] // relocate address entry + add x12, x12, x15 + str x12, [x13], #8 // adjust to start of bitmap + b 2b + +3: mov x14, x13 +4: lsr x11, x11, #1 + cbz x11, 6f + tbz x11, #0, 5f // skip bit if not set + ldr x12, [x14] // relocate bit + add x12, x12, x15 + str x12, [x14] + +5: add x14, x14, #8 // move to next bit's address + b 4b + +6: /* + * Move to the next bitmap's address. 8 is the word size, and 63 is the + * number of significant bits in a bitmap entry. + */ + add x13, x13, #(8 * 63) + b 2b + +7: +#endif + ret + ENDPROC(__relocate_kernel) #endif @@ -854,6 +935,9 @@ __primary_switch: adrp x1, init_pg_dir bl __enable_mmu #ifdef CONFIG_RELOCATABLE +#ifdef CONFIG_RELR + mov x24, #0 // no RELR displacement yet +#endif bl __relocate_kernel #ifdef CONFIG_RANDOMIZE_BASE ldr x8, =__primary_switched diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S index 7fa008374907..31716afa30f6 100644 --- a/arch/arm64/kernel/vmlinux.lds.S +++ b/arch/arm64/kernel/vmlinux.lds.S @@ -200,6 +200,15 @@ SECTIONS __rela_offset = ABSOLUTE(ADDR(.rela.dyn) - KIMAGE_VADDR); __rela_size = SIZEOF(.rela.dyn); +#ifdef CONFIG_RELR + .relr.dyn : ALIGN(8) { + *(.relr.dyn) + } + + __relr_offset = ABSOLUTE(ADDR(.relr.dyn) - KIMAGE_VADDR); + __relr_size = SIZEOF(.relr.dyn); +#endif + . 
= ALIGN(SEGMENT_ALIGN); __initdata_end = .; __init_end = .; diff --git a/init/Kconfig b/init/Kconfig index bd7d650d4a99..d96127ebc44e 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -30,6 +30,9 @@ config CC_CAN_LINK config CC_HAS_ASM_GOTO def_bool $(success,$(srctree)/scripts/gcc-goto.sh $(CC)) +config TOOLS_SUPPORT_RELR + def_bool $(success,env "CC=$(CC)" "LD=$(LD)" "NM=$(NM)" "OBJCOPY=$(OBJCOPY)" $(srctree)/scripts/tools-support-relr.sh) + config CC_HAS_WARN_MAYBE_UNINITIALIZED def_bool $(cc-option,-Wmaybe-uninitialized) help diff --git a/scripts/tools-support-relr.sh b/scripts/tools-support-relr.sh new file mode 100755 index 000000000000..97a2c844a95e --- /dev/null +++ b/scripts/tools-support-relr.sh @@ -0,0 +1,16 @@ +#!/bin/sh -eu +# SPDX-License-Identifier: GPL-2.0 + +tmp_file=$(mktemp) +trap "rm -f $tmp_file.o $tmp_file $tmp_file.bin" EXIT + +cat << "END" | "$CC" -c -x c - -o $tmp_file.o >/dev/null 2>&1 +void *p = &p; +END +"$LD" $tmp_file.o -shared -Bsymbolic --pack-dyn-relocs=relr -o $tmp_file + +# Despite printing an error message, GNU nm still exits with exit code 0 if it +# sees a relr section. So we need to check that nothing is printed to stderr. +test -z "$("$NM" $tmp_file 2>&1 >/dev/null)" + +"$OBJCOPY" -O binary $tmp_file $tmp_file.bin -- cgit v1.2.3-58-ga151 From 2b835e24b5c6f9c633ff51973581ee7ca7b3e8ec Mon Sep 17 00:00:00 2001 From: Andrey Konovalov Date: Tue, 23 Jul 2019 19:58:38 +0200 Subject: arm64: untag user pointers in access_ok and __uaccess_mask_ptr This patch is a part of a series that extends kernel ABI to allow to pass tagged user pointers (with the top byte set to something else other than 0x00) as syscall arguments. copy_from_user (and a few other similar functions) are used to copy data from user memory into the kernel memory or vice versa. Since a user can provided a tagged pointer to one of the syscalls that use copy_from_user, we need to correctly handle such pointers. Do this by untagging user pointers in access_ok and in __uaccess_mask_ptr, before performing access validity checks. Note, that this patch only temporarily untags the pointers to perform the checks, but then passes them as is into the kernel internals. Reviewed-by: Vincenzo Frascino Reviewed-by: Kees Cook Reviewed-by: Catalin Marinas Signed-off-by: Andrey Konovalov [will: Add __force to casting in untagged_addr() to kill sparse warning] Signed-off-by: Will Deacon --- arch/arm64/include/asm/memory.h | 2 +- arch/arm64/include/asm/uaccess.h | 10 +++++++--- 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h index fb04f10a78ab..46c4c08a80a9 100644 --- a/arch/arm64/include/asm/memory.h +++ b/arch/arm64/include/asm/memory.h @@ -201,7 +201,7 @@ extern u64 vabits_user; * pass on to access_ok(), for instance. 
*/ #define untagged_addr(addr) \ - ((__typeof__(addr))sign_extend64((u64)(addr), 55)) + ((__typeof__(addr))sign_extend64((__force u64)(addr), 55)) #ifdef CONFIG_KASAN_SW_TAGS #define __tag_shifted(tag) ((u64)(tag) << 56) diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h index 5a1c32260c1f..a138e3b4f717 100644 --- a/arch/arm64/include/asm/uaccess.h +++ b/arch/arm64/include/asm/uaccess.h @@ -62,6 +62,8 @@ static inline unsigned long __range_ok(const void __user *addr, unsigned long si { unsigned long ret, limit = current_thread_info()->addr_limit; + addr = untagged_addr(addr); + __chk_user_ptr(addr); asm volatile( // A + B <= C + 1 for all A,B,C, in four easy steps: @@ -215,7 +217,8 @@ static inline void uaccess_enable_not_uao(void) /* * Sanitise a uaccess pointer such that it becomes NULL if above the - * current addr_limit. + * current addr_limit. In case the pointer is tagged (has the top byte set), + * untag the pointer before checking. */ #define uaccess_mask_ptr(ptr) (__typeof__(ptr))__uaccess_mask_ptr(ptr) static inline void __user *__uaccess_mask_ptr(const void __user *ptr) @@ -223,10 +226,11 @@ static inline void __user *__uaccess_mask_ptr(const void __user *ptr) void __user *safe_ptr; asm volatile( - " bics xzr, %1, %2\n" + " bics xzr, %3, %2\n" " csel %0, %1, xzr, eq\n" : "=&r" (safe_ptr) - : "r" (ptr), "r" (current_thread_info()->addr_limit) + : "r" (ptr), "r" (current_thread_info()->addr_limit), + "r" (untagged_addr(ptr)) : "cc"); csdb(); -- cgit v1.2.3-58-ga151 From 63f0c60379650d82250f22e4cf4137ef3dc4f43d Mon Sep 17 00:00:00 2001 From: Catalin Marinas Date: Tue, 23 Jul 2019 19:58:39 +0200 Subject: arm64: Introduce prctl() options to control the tagged user addresses ABI It is not desirable to relax the ABI to allow tagged user addresses into the kernel indiscriminately. This patch introduces a prctl() interface for enabling or disabling the tagged ABI with a global sysctl control for preventing applications from enabling the relaxed ABI (meant for testing user-space prctl() return error checking without reconfiguring the kernel). The ABI properties are inherited by threads of the same application and fork()'ed children but cleared on execve(). A Kconfig option allows the overall disabling of the relaxed ABI. The PR_SET_TAGGED_ADDR_CTRL will be expanded in the future to handle MTE-specific settings like imprecise vs precise exceptions. Reviewed-by: Kees Cook Signed-off-by: Catalin Marinas Signed-off-by: Andrey Konovalov Signed-off-by: Will Deacon --- arch/arm64/Kconfig | 9 +++++ arch/arm64/include/asm/processor.h | 8 ++++ arch/arm64/include/asm/thread_info.h | 1 + arch/arm64/include/asm/uaccess.h | 4 +- arch/arm64/kernel/process.c | 73 ++++++++++++++++++++++++++++++++++++ include/uapi/linux/prctl.h | 5 +++ kernel/sys.c | 12 ++++++ 7 files changed, 111 insertions(+), 1 deletion(-) diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 3adcec05b1f6..5d254178b9ca 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -1110,6 +1110,15 @@ config ARM64_SW_TTBR0_PAN zeroed area and reserved ASID. The user access routines restore the valid TTBR0_EL1 temporarily. +config ARM64_TAGGED_ADDR_ABI + bool "Enable the tagged user addresses syscall ABI" + default y + help + When this option is enabled, user applications can opt in to a + relaxed ABI via prctl() allowing tagged addresses to be passed + to system calls as pointer arguments. For details, see + Documentation/arm64/tagged-address-abi.txt. 
+ menuconfig COMPAT bool "Kernel support for 32-bit EL0" depends on ARM64_4K_PAGES || EXPERT diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h index 844e2964b0f5..28eed40ffc12 100644 --- a/arch/arm64/include/asm/processor.h +++ b/arch/arm64/include/asm/processor.h @@ -306,6 +306,14 @@ extern void __init minsigstksz_setup(void); /* PR_PAC_RESET_KEYS prctl */ #define PAC_RESET_KEYS(tsk, arg) ptrauth_prctl_reset_keys(tsk, arg) +#ifdef CONFIG_ARM64_TAGGED_ADDR_ABI +/* PR_{SET,GET}_TAGGED_ADDR_CTRL prctl */ +long set_tagged_addr_ctrl(unsigned long arg); +long get_tagged_addr_ctrl(void); +#define SET_TAGGED_ADDR_CTRL(arg) set_tagged_addr_ctrl(arg) +#define GET_TAGGED_ADDR_CTRL() get_tagged_addr_ctrl() +#endif + /* * For CONFIG_GCC_PLUGIN_STACKLEAK * diff --git a/arch/arm64/include/asm/thread_info.h b/arch/arm64/include/asm/thread_info.h index 180b34ec5965..012238d8e58d 100644 --- a/arch/arm64/include/asm/thread_info.h +++ b/arch/arm64/include/asm/thread_info.h @@ -90,6 +90,7 @@ void arch_release_task_struct(struct task_struct *tsk); #define TIF_SVE 23 /* Scalable Vector Extension in use */ #define TIF_SVE_VL_INHERIT 24 /* Inherit sve_vl_onexec across exec */ #define TIF_SSBD 25 /* Wants SSB mitigation */ +#define TIF_TAGGED_ADDR 26 /* Allow tagged user addresses */ #define _TIF_SIGPENDING (1 << TIF_SIGPENDING) #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED) diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h index a138e3b4f717..097d6bfac0b7 100644 --- a/arch/arm64/include/asm/uaccess.h +++ b/arch/arm64/include/asm/uaccess.h @@ -62,7 +62,9 @@ static inline unsigned long __range_ok(const void __user *addr, unsigned long si { unsigned long ret, limit = current_thread_info()->addr_limit; - addr = untagged_addr(addr); + if (IS_ENABLED(CONFIG_ARM64_TAGGED_ADDR_ABI) && + test_thread_flag(TIF_TAGGED_ADDR)) + addr = untagged_addr(addr); __chk_user_ptr(addr); asm volatile( diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c index f674f28df663..76b7c55026aa 100644 --- a/arch/arm64/kernel/process.c +++ b/arch/arm64/kernel/process.c @@ -19,6 +19,7 @@ #include #include #include +#include #include #include #include @@ -38,6 +39,7 @@ #include #include #include +#include #include #include @@ -307,11 +309,18 @@ static void tls_thread_flush(void) } } +static void flush_tagged_addr_state(void) +{ + if (IS_ENABLED(CONFIG_ARM64_TAGGED_ADDR_ABI)) + clear_thread_flag(TIF_TAGGED_ADDR); +} + void flush_thread(void) { fpsimd_flush_thread(); tls_thread_flush(); flush_ptrace_hw_breakpoint(current); + flush_tagged_addr_state(); } void release_thread(struct task_struct *dead_task) @@ -565,3 +574,67 @@ void arch_setup_new_exec(void) ptrauth_thread_init_user(current); } + +#ifdef CONFIG_ARM64_TAGGED_ADDR_ABI +/* + * Control the relaxed ABI allowing tagged user addresses into the kernel. + */ +static unsigned int tagged_addr_prctl_allowed = 1; + +long set_tagged_addr_ctrl(unsigned long arg) +{ + if (!tagged_addr_prctl_allowed) + return -EINVAL; + if (is_compat_task()) + return -EINVAL; + if (arg & ~PR_TAGGED_ADDR_ENABLE) + return -EINVAL; + + update_thread_flag(TIF_TAGGED_ADDR, arg & PR_TAGGED_ADDR_ENABLE); + + return 0; +} + +long get_tagged_addr_ctrl(void) +{ + if (!tagged_addr_prctl_allowed) + return -EINVAL; + if (is_compat_task()) + return -EINVAL; + + if (test_thread_flag(TIF_TAGGED_ADDR)) + return PR_TAGGED_ADDR_ENABLE; + + return 0; +} + +/* + * Global sysctl to disable the tagged user addresses support. 
This control + * only prevents the tagged address ABI enabling via prctl() and does not + * disable it for tasks that already opted in to the relaxed ABI. + */ +static int zero; +static int one = 1; + +static struct ctl_table tagged_addr_sysctl_table[] = { + { + .procname = "tagged_addr", + .mode = 0644, + .data = &tagged_addr_prctl_allowed, + .maxlen = sizeof(int), + .proc_handler = proc_dointvec_minmax, + .extra1 = &zero, + .extra2 = &one, + }, + { } +}; + +static int __init tagged_addr_init(void) +{ + if (!register_sysctl("abi", tagged_addr_sysctl_table)) + return -EINVAL; + return 0; +} + +core_initcall(tagged_addr_init); +#endif /* CONFIG_ARM64_TAGGED_ADDR_ABI */ diff --git a/include/uapi/linux/prctl.h b/include/uapi/linux/prctl.h index 094bb03b9cc2..2e927b3e9d6c 100644 --- a/include/uapi/linux/prctl.h +++ b/include/uapi/linux/prctl.h @@ -229,4 +229,9 @@ struct prctl_mm_map { # define PR_PAC_APDBKEY (1UL << 3) # define PR_PAC_APGAKEY (1UL << 4) +/* Tagged user address controls for arm64 */ +#define PR_SET_TAGGED_ADDR_CTRL 55 +#define PR_GET_TAGGED_ADDR_CTRL 56 +# define PR_TAGGED_ADDR_ENABLE (1UL << 0) + #endif /* _LINUX_PRCTL_H */ diff --git a/kernel/sys.c b/kernel/sys.c index 2969304c29fe..c6c4d5358bd3 100644 --- a/kernel/sys.c +++ b/kernel/sys.c @@ -124,6 +124,12 @@ #ifndef PAC_RESET_KEYS # define PAC_RESET_KEYS(a, b) (-EINVAL) #endif +#ifndef SET_TAGGED_ADDR_CTRL +# define SET_TAGGED_ADDR_CTRL(a) (-EINVAL) +#endif +#ifndef GET_TAGGED_ADDR_CTRL +# define GET_TAGGED_ADDR_CTRL() (-EINVAL) +#endif /* * this is where the system-wide overflow UID and GID are defined, for @@ -2492,6 +2498,12 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3, return -EINVAL; error = PAC_RESET_KEYS(me, arg2); break; + case PR_SET_TAGGED_ADDR_CTRL: + error = SET_TAGGED_ADDR_CTRL(arg2); + break; + case PR_GET_TAGGED_ADDR_CTRL: + error = GET_TAGGED_ADDR_CTRL(); + break; default: error = -EINVAL; break; -- cgit v1.2.3-58-ga151 From 9ce1263033cd2ad393e2ff0df4a1c4ab4992c9df Mon Sep 17 00:00:00 2001 From: Andrey Konovalov Date: Tue, 23 Jul 2019 19:58:52 +0200 Subject: selftests, arm64: add a selftest for passing tagged pointers to kernel This patch is a part of a series that extends kernel ABI to allow to pass tagged user pointers (with the top byte set to something else other than 0x00) as syscall arguments. This patch adds a simple test, that calls the uname syscall with a tagged user pointer as an argument. Without the kernel accepting tagged user pointers the test fails with EFAULT. 
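For completeness, a hedged sketch of how an application might probe for the relaxed ABI before relying on tags (tagged_addr_abi_enable() is an illustrative helper; it mirrors what the test below does, with explicit errno handling added):

	#include <errno.h>
	#include <stdio.h>
	#include <sys/prctl.h>

	#ifndef PR_SET_TAGGED_ADDR_CTRL
	#define PR_SET_TAGGED_ADDR_CTRL	55
	#define PR_TAGGED_ADDR_ENABLE	(1UL << 0)
	#endif

	static int tagged_addr_abi_enable(void)
	{
		if (prctl(PR_SET_TAGGED_ADDR_CTRL, PR_TAGGED_ADDR_ENABLE, 0, 0, 0) == 0)
			return 1;	/* tagged pointers accepted by syscalls */
		if (errno == EINVAL)
			fprintf(stderr, "tagged address ABI unavailable or disabled\n");
		return 0;
	}

Untagged pointers keep working either way, so callers can fall back transparently when the prctl() is rejected.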
Reviewed-by: Catalin Marinas Acked-by: Kees Cook Signed-off-by: Andrey Konovalov Signed-off-by: Will Deacon --- tools/testing/selftests/arm64/.gitignore | 1 + tools/testing/selftests/arm64/Makefile | 11 ++++++++++ tools/testing/selftests/arm64/run_tags_test.sh | 12 +++++++++++ tools/testing/selftests/arm64/tags_test.c | 29 ++++++++++++++++++++++++++ 4 files changed, 53 insertions(+) create mode 100644 tools/testing/selftests/arm64/.gitignore create mode 100644 tools/testing/selftests/arm64/Makefile create mode 100755 tools/testing/selftests/arm64/run_tags_test.sh create mode 100644 tools/testing/selftests/arm64/tags_test.c diff --git a/tools/testing/selftests/arm64/.gitignore b/tools/testing/selftests/arm64/.gitignore new file mode 100644 index 000000000000..e8fae8d61ed6 --- /dev/null +++ b/tools/testing/selftests/arm64/.gitignore @@ -0,0 +1 @@ +tags_test diff --git a/tools/testing/selftests/arm64/Makefile b/tools/testing/selftests/arm64/Makefile new file mode 100644 index 000000000000..a61b2e743e99 --- /dev/null +++ b/tools/testing/selftests/arm64/Makefile @@ -0,0 +1,11 @@ +# SPDX-License-Identifier: GPL-2.0 + +# ARCH can be overridden by the user for cross compiling +ARCH ?= $(shell uname -m 2>/dev/null || echo not) + +ifneq (,$(filter $(ARCH),aarch64 arm64)) +TEST_GEN_PROGS := tags_test +TEST_PROGS := run_tags_test.sh +endif + +include ../lib.mk diff --git a/tools/testing/selftests/arm64/run_tags_test.sh b/tools/testing/selftests/arm64/run_tags_test.sh new file mode 100755 index 000000000000..745f11379930 --- /dev/null +++ b/tools/testing/selftests/arm64/run_tags_test.sh @@ -0,0 +1,12 @@ +#!/bin/sh +# SPDX-License-Identifier: GPL-2.0 + +echo "--------------------" +echo "running tags test" +echo "--------------------" +./tags_test +if [ $? -ne 0 ]; then + echo "[FAIL]" +else + echo "[PASS]" +fi diff --git a/tools/testing/selftests/arm64/tags_test.c b/tools/testing/selftests/arm64/tags_test.c new file mode 100644 index 000000000000..22a1b266e373 --- /dev/null +++ b/tools/testing/selftests/arm64/tags_test.c @@ -0,0 +1,29 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include +#include +#include +#include +#include +#include + +#define SHIFT_TAG(tag) ((uint64_t)(tag) << 56) +#define SET_TAG(ptr, tag) (((uint64_t)(ptr) & ~SHIFT_TAG(0xff)) | \ + SHIFT_TAG(tag)) + +int main(void) +{ + static int tbi_enabled = 0; + struct utsname *ptr, *tagged_ptr; + int err; + + if (prctl(PR_SET_TAGGED_ADDR_CTRL, PR_TAGGED_ADDR_ENABLE, 0, 0, 0) == 0) + tbi_enabled = 1; + ptr = (struct utsname *)malloc(sizeof(*ptr)); + if (tbi_enabled) + tagged_ptr = (struct utsname *)SET_TAG(ptr, 0x42); + err = uname(tagged_ptr); + free(ptr); + + return err; +} -- cgit v1.2.3-58-ga151 From 45880f7b7b19e043ce0aaa4cb7d05369425c82fa Mon Sep 17 00:00:00 2001 From: Leo Yan Date: Tue, 6 Aug 2019 18:00:13 +0800 Subject: error-injection: Consolidate override function definition The function override_function_with_return() is defined separately for each architecture and every architecture's definition is almost same with each other. E.g. x86 and powerpc both define function in its own asm/error-injection.h header and override_function_with_return() has the same definition, the only difference is that x86 defines an extra function just_return_func() but it is specific for x86 and is only used by x86's override_function_with_return(), so don't need to export this function. This patch consolidates override_function_with_return() definition into asm-generic/error-injection.h header, thus all architectures can use the common definition. 
As result, the architecture specific headers are removed; the include/linux/error-injection.h header also changes to include asm-generic/error-injection.h header rather than architecture header, furthermore, it includes linux/compiler.h for successful compilation. Reviewed-by: Masami Hiramatsu Signed-off-by: Leo Yan Signed-off-by: Will Deacon --- arch/powerpc/include/asm/error-injection.h | 13 ------------- arch/x86/include/asm/error-injection.h | 13 ------------- include/asm-generic/error-injection.h | 6 ++++++ include/linux/error-injection.h | 6 +++--- 4 files changed, 9 insertions(+), 29 deletions(-) delete mode 100644 arch/powerpc/include/asm/error-injection.h delete mode 100644 arch/x86/include/asm/error-injection.h diff --git a/arch/powerpc/include/asm/error-injection.h b/arch/powerpc/include/asm/error-injection.h deleted file mode 100644 index 62fd24739852..000000000000 --- a/arch/powerpc/include/asm/error-injection.h +++ /dev/null @@ -1,13 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0+ */ - -#ifndef _ASM_ERROR_INJECTION_H -#define _ASM_ERROR_INJECTION_H - -#include -#include -#include -#include - -void override_function_with_return(struct pt_regs *regs); - -#endif /* _ASM_ERROR_INJECTION_H */ diff --git a/arch/x86/include/asm/error-injection.h b/arch/x86/include/asm/error-injection.h deleted file mode 100644 index 47b7a1296245..000000000000 --- a/arch/x86/include/asm/error-injection.h +++ /dev/null @@ -1,13 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef _ASM_ERROR_INJECTION_H -#define _ASM_ERROR_INJECTION_H - -#include -#include -#include -#include - -asmlinkage void just_return_func(void); -void override_function_with_return(struct pt_regs *regs); - -#endif /* _ASM_ERROR_INJECTION_H */ diff --git a/include/asm-generic/error-injection.h b/include/asm-generic/error-injection.h index 95a159a4137f..80ca61058dd2 100644 --- a/include/asm-generic/error-injection.h +++ b/include/asm-generic/error-injection.h @@ -16,6 +16,8 @@ struct error_injection_entry { int etype; }; +struct pt_regs; + #ifdef CONFIG_FUNCTION_ERROR_INJECTION /* * Whitelist ganerating macro. Specify functions which can be @@ -28,8 +30,12 @@ static struct error_injection_entry __used \ .addr = (unsigned long)fname, \ .etype = EI_ETYPE_##_etype, \ }; + +void override_function_with_return(struct pt_regs *regs); #else #define ALLOW_ERROR_INJECTION(fname, _etype) + +static inline void override_function_with_return(struct pt_regs *regs) { } #endif #endif diff --git a/include/linux/error-injection.h b/include/linux/error-injection.h index 280c61ecbf20..635a95caf29f 100644 --- a/include/linux/error-injection.h +++ b/include/linux/error-injection.h @@ -2,16 +2,16 @@ #ifndef _LINUX_ERROR_INJECTION_H #define _LINUX_ERROR_INJECTION_H -#ifdef CONFIG_FUNCTION_ERROR_INJECTION +#include +#include -#include +#ifdef CONFIG_FUNCTION_ERROR_INJECTION extern bool within_error_injection_list(unsigned long addr); extern int get_injectable_error_type(unsigned long addr); #else /* !CONFIG_FUNCTION_ERROR_INJECTION */ -#include static inline bool within_error_injection_list(unsigned long addr) { return false; -- cgit v1.2.3-58-ga151 From 42d038c4fb00f1ec1a4c4616784da4561385b628 Mon Sep 17 00:00:00 2001 From: Leo Yan Date: Tue, 6 Aug 2019 18:00:14 +0800 Subject: arm64: Add support for function error injection Inspired by the commit 7cd01b08d35f ("powerpc: Add support for function error injection"), this patch supports function error injection for Arm64. 
This patch mainly support two functions: one is regs_set_return_value() which is used to overwrite the return value; the another function is override_function_with_return() which is to override the probed function returning and jump to its caller. Reviewed-by: Masami Hiramatsu Signed-off-by: Leo Yan Signed-off-by: Will Deacon --- arch/arm64/Kconfig | 1 + arch/arm64/include/asm/ptrace.h | 5 +++++ arch/arm64/lib/Makefile | 2 ++ arch/arm64/lib/error-inject.c | 18 ++++++++++++++++++ 4 files changed, 26 insertions(+) create mode 100644 arch/arm64/lib/error-inject.c diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 3adcec05b1f6..b15803afb2a0 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -148,6 +148,7 @@ config ARM64 select HAVE_FAST_GUP select HAVE_FTRACE_MCOUNT_RECORD select HAVE_FUNCTION_TRACER + select HAVE_FUNCTION_ERROR_INJECTION select HAVE_FUNCTION_GRAPH_TRACER select HAVE_GCC_PLUGINS select HAVE_HW_BREAKPOINT if PERF_EVENTS diff --git a/arch/arm64/include/asm/ptrace.h b/arch/arm64/include/asm/ptrace.h index 1dcf63a9ac1f..fbebb411ae20 100644 --- a/arch/arm64/include/asm/ptrace.h +++ b/arch/arm64/include/asm/ptrace.h @@ -301,6 +301,11 @@ static inline unsigned long regs_return_value(struct pt_regs *regs) return regs->regs[0]; } +static inline void regs_set_return_value(struct pt_regs *regs, unsigned long rc) +{ + regs->regs[0] = rc; +} + /** * regs_get_kernel_argument() - get Nth function argument in kernel * @regs: pt_regs of that context diff --git a/arch/arm64/lib/Makefile b/arch/arm64/lib/Makefile index 33c2a4abda04..f182ccb0438e 100644 --- a/arch/arm64/lib/Makefile +++ b/arch/arm64/lib/Makefile @@ -33,3 +33,5 @@ UBSAN_SANITIZE_atomic_ll_sc.o := n lib-$(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE) += uaccess_flushcache.o obj-$(CONFIG_CRC32) += crc32.o + +obj-$(CONFIG_FUNCTION_ERROR_INJECTION) += error-inject.o diff --git a/arch/arm64/lib/error-inject.c b/arch/arm64/lib/error-inject.c new file mode 100644 index 000000000000..ed15021da3ed --- /dev/null +++ b/arch/arm64/lib/error-inject.c @@ -0,0 +1,18 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include +#include + +void override_function_with_return(struct pt_regs *regs) +{ + /* + * 'regs' represents the state on entry of a predefined function in + * the kernel/module and which is captured on a kprobe. + * + * When kprobe returns back from exception it will override the end + * of probed function and directly return to the predefined + * function's caller. + */ + instruction_pointer_set(regs, procedure_link_pointer(regs)); +} +NOKPROBE_SYMBOL(override_function_with_return); -- cgit v1.2.3-58-ga151 From 71c67a31f09fa8fdd1495dffd96a5f0d4cef2ede Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Wed, 7 Aug 2019 12:48:33 +0100 Subject: init/Kconfig: Fix infinite Kconfig recursion on PPC Commit 5cf896fb6be3 ("arm64: Add support for relocating the kernel with RELR relocations") introduced CONFIG_TOOLS_SUPPORT_RELR, which checks for RELR support in the toolchain as part of the kernel configuration. During this procedure, "$(NM)" is invoked to see if it supports the new relocation format, however PowerPC conditionally overrides this variable in the architecture Makefile in order to pass '--synthetic' when targetting PPC64. 
This conditional override causes Kconfig to recurse forever, since CONFIG_TOOLS_SUPPORT_RELR cannot be determined without $(NM) being defined, but that in turn depends on CONFIG_PPC64: $ make ARCH=powerpc CROSS_COMPILE=powerpc-linux-gnu- scripts/kconfig/conf --syncconfig Kconfig scripts/kconfig/conf --syncconfig Kconfig scripts/kconfig/conf --syncconfig Kconfig [...] In this particular case, it looks like PowerPC may be able to pass '--synthetic' unconditionally to nm or even drop it altogether. While that is being resolved, let's just bodge the RELR check by picking up $(NM) directly from the environment in whatever state it happens to be in. Cc: Peter Collingbourne Reported-by: Stephen Rothwell Suggested-by: Masahiro Yamada Signed-off-by: Will Deacon --- init/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/init/Kconfig b/init/Kconfig index d96127ebc44e..8b4596edda4e 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -31,7 +31,7 @@ config CC_HAS_ASM_GOTO def_bool $(success,$(srctree)/scripts/gcc-goto.sh $(CC)) config TOOLS_SUPPORT_RELR - def_bool $(success,env "CC=$(CC)" "LD=$(LD)" "NM=$(NM)" "OBJCOPY=$(OBJCOPY)" $(srctree)/scripts/tools-support-relr.sh) + def_bool $(success,env "CC=$(CC)" "LD=$(LD)" "OBJCOPY=$(OBJCOPY)" $(srctree)/scripts/tools-support-relr.sh) config CC_HAS_WARN_MAYBE_UNINITIALIZED def_bool $(cc-option,-Wmaybe-uninitialized) -- cgit v1.2.3-58-ga151 From b99286b088ea843b935dcfb29f187697359fe5cd Mon Sep 17 00:00:00 2001 From: Qian Cai Date: Mon, 5 Aug 2019 23:05:03 -0400 Subject: arm64/prefetch: fix a -Wtype-limits warning The commit d5370f754875 ("arm64: prefetch: add alternative pattern for CPUs without a prefetcher") introduced MIDR_IS_CPU_MODEL_RANGE() to be used in has_no_hw_prefetch() with rv_min=0 which generates a compilation warning from GCC, In file included from ./arch/arm64/include/asm/cache.h:8, from ./include/linux/cache.h:6, from ./include/linux/printk.h:9, from ./include/linux/kernel.h:15, from ./include/linux/cpumask.h:10, from arch/arm64/kernel/cpufeature.c:11: arch/arm64/kernel/cpufeature.c: In function 'has_no_hw_prefetch': ./arch/arm64/include/asm/cputype.h:59:26: warning: comparison of unsigned expression >= 0 is always true [-Wtype-limits] _model == (model) && rv >= (rv_min) && rv <= (rv_max); \ ^~ arch/arm64/kernel/cpufeature.c:889:9: note: in expansion of macro 'MIDR_IS_CPU_MODEL_RANGE' return MIDR_IS_CPU_MODEL_RANGE(midr, MIDR_THUNDERX, ^~~~~~~~~~~~~~~~~~~~~~~ Fix it by converting MIDR_IS_CPU_MODEL_RANGE to a static inline function. 
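A stand-alone sketch of the difference (not kernel code; compile with -Wtype-limits to see it):

  #include <stdbool.h>
  #include <stdint.h>

  typedef uint32_t u32;

  /* Macro form: with rv_min == 0 the expansion literally contains "rv >= 0"
   * on an unsigned value, which GCC's -Wtype-limits flags as always true.
   */
  #define IN_RANGE_MACRO(rv, rv_min, rv_max) \
          ((rv) >= (rv_min) && (rv) <= (rv_max))

  /* Inline form: rv_min is an ordinary parameter, so the comparison is no
   * longer a textual tautology at the call site and the warning goes away.
   */
  static inline bool in_range(u32 rv, u32 rv_min, u32 rv_max)
  {
          return rv >= rv_min && rv <= rv_max;
  }

  bool check_macro(u32 rv)
  {
          return IN_RANGE_MACRO(rv, 0, 0xf);      /* warns under -Wtype-limits */
  }

  bool check_inline(u32 rv)
  {
          return in_range(rv, 0, 0xf);            /* no warning */
  }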
Signed-off-by: Qian Cai Signed-off-by: Will Deacon --- arch/arm64/include/asm/cputype.h | 21 +++++++++++---------- arch/arm64/kernel/cpufeature.c | 2 +- 2 files changed, 12 insertions(+), 11 deletions(-) diff --git a/arch/arm64/include/asm/cputype.h b/arch/arm64/include/asm/cputype.h index e7d46631cc42..b1454d117cd2 100644 --- a/arch/arm64/include/asm/cputype.h +++ b/arch/arm64/include/asm/cputype.h @@ -51,14 +51,6 @@ #define MIDR_CPU_MODEL_MASK (MIDR_IMPLEMENTOR_MASK | MIDR_PARTNUM_MASK | \ MIDR_ARCHITECTURE_MASK) -#define MIDR_IS_CPU_MODEL_RANGE(midr, model, rv_min, rv_max) \ -({ \ - u32 _model = (midr) & MIDR_CPU_MODEL_MASK; \ - u32 rv = (midr) & (MIDR_REVISION_MASK | MIDR_VARIANT_MASK); \ - \ - _model == (model) && rv >= (rv_min) && rv <= (rv_max); \ - }) - #define ARM_CPU_IMP_ARM 0x41 #define ARM_CPU_IMP_APM 0x50 #define ARM_CPU_IMP_CAVIUM 0x43 @@ -159,10 +151,19 @@ struct midr_range { #define MIDR_REV(m, v, r) MIDR_RANGE(m, v, r, v, r) #define MIDR_ALL_VERSIONS(m) MIDR_RANGE(m, 0, 0, 0xf, 0xf) +static inline bool midr_is_cpu_model_range(u32 midr, u32 model, u32 rv_min, + u32 rv_max) +{ + u32 _model = midr & MIDR_CPU_MODEL_MASK; + u32 rv = midr & (MIDR_REVISION_MASK | MIDR_VARIANT_MASK); + + return _model == model && rv >= rv_min && rv <= rv_max; +} + static inline bool is_midr_in_range(u32 midr, struct midr_range const *range) { - return MIDR_IS_CPU_MODEL_RANGE(midr, range->model, - range->rv_min, range->rv_max); + return midr_is_cpu_model_range(midr, range->model, + range->rv_min, range->rv_max); } static inline bool diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c index d19d14ba9ae4..95201e5ff5e1 100644 --- a/arch/arm64/kernel/cpufeature.c +++ b/arch/arm64/kernel/cpufeature.c @@ -886,7 +886,7 @@ static bool has_no_hw_prefetch(const struct arm64_cpu_capabilities *entry, int _ u32 midr = read_cpuid_id(); /* Cavium ThunderX pass 1.x and 2.x */ - return MIDR_IS_CPU_MODEL_RANGE(midr, MIDR_THUNDERX, + return midr_is_cpu_model_range(midr, MIDR_THUNDERX, MIDR_CPU_VAR_REV(0, 0), MIDR_CPU_VAR_REV(1, MIDR_REVISION_MASK)); } -- cgit v1.2.3-58-ga151 From 2951d5efaf8b67cc4851373de92a91a60899611c Mon Sep 17 00:00:00 2001 From: Miles Chen Date: Wed, 7 Aug 2019 08:33:36 +0800 Subject: arm64: mm: print hexadecimal EC value in mem_abort_decode() This change prints the hexadecimal EC value in mem_abort_decode(), which makes it easier to lookup the corresponding EC in the ARM Architecture Reference Manual. The commit 1f9b8936f36f ("arm64: Decode information from ESR upon mem faults") prints useful information when memory abort occurs. It would be easier to lookup "0x25" instead of "DABT" in the document. Then we can check the corresponding ISS. For example: Current info Document EC Exception class "CP15 MCR/MRC" 0x3 "MCR or MRC access to CP15a..." "ASIMD" 0x7 "Access to SIMD or floating-point..." "DABT (current EL)" 0x25 "Data Abort taken without..." ... 
Before: Unable to handle kernel paging request at virtual address 000000000000c000 Mem abort info: ESR = 0x96000046 Exception class = DABT (current EL), IL = 32 bits SET = 0, FnV = 0 EA = 0, S1PTW = 0 Data abort info: ISV = 0, ISS = 0x00000046 CM = 0, WnR = 1 After: Unable to handle kernel paging request at virtual address 000000000000c000 Mem abort info: ESR = 0x96000046 EC = 0x25: DABT (current EL), IL = 32 bits SET = 0, FnV = 0 EA = 0, S1PTW = 0 Data abort info: ISV = 0, ISS = 0x00000046 CM = 0, WnR = 1 Cc: Anshuman Khandual Cc: James Morse Acked-by: Mark Rutland Signed-off-by: Miles Chen Signed-off-by: Will Deacon --- arch/arm64/mm/fault.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c index cfd65b63f36f..ad4980a27edb 100644 --- a/arch/arm64/mm/fault.c +++ b/arch/arm64/mm/fault.c @@ -86,8 +86,8 @@ static void mem_abort_decode(unsigned int esr) pr_alert("Mem abort info:\n"); pr_alert(" ESR = 0x%08x\n", esr); - pr_alert(" Exception class = %s, IL = %u bits\n", - esr_get_class_string(esr), + pr_alert(" EC = 0x%02lx: %s, IL = %u bits\n", + ESR_ELx_EC(esr), esr_get_class_string(esr), (esr & ESR_ELx_IL) ? 32 : 16); pr_alert(" SET = %lu, FnV = %lu\n", (esr & ESR_ELx_SET_MASK) >> ESR_ELx_SET_SHIFT, -- cgit v1.2.3-58-ga151 From 08f103b9a9502974109fab47ea35ca8542c4e57a Mon Sep 17 00:00:00 2001 From: Julien Grall Date: Wed, 7 Aug 2019 11:34:45 +0100 Subject: arm64/ptrace: Fix typoes in sve_set() comment The ptrace trace SVE flags are prefixed with SVE_PT_*. Update the comment accordingly. Reviewed-by: Dave Martin Signed-off-by: Julien Grall Signed-off-by: Will Deacon --- arch/arm64/kernel/ptrace.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c index 3cf3b135027e..21176d02e21a 100644 --- a/arch/arm64/kernel/ptrace.c +++ b/arch/arm64/kernel/ptrace.c @@ -870,7 +870,7 @@ static int sve_set(struct task_struct *target, goto out; /* - * Apart from PT_SVE_REGS_MASK, all PT_SVE_* flags are consumed by + * Apart from SVE_PT_REGS_MASK, all SVE_PT_* flags are consumed by * sve_set_vector_length(), which will also validate them for us: */ ret = sve_set_vector_length(target, header.vl, -- cgit v1.2.3-58-ga151 From 9cb1c5ddd2c432834dd4f40c0170d6b639e8e5c3 Mon Sep 17 00:00:00 2001 From: Steve Capper Date: Wed, 7 Aug 2019 16:55:13 +0100 Subject: arm64: mm: Remove bit-masking optimisations for PAGE_OFFSET and VMEMMAP_START Currently there are assumptions about the alignment of VMEMMAP_START and PAGE_OFFSET that won't be valid after this series is applied. These assumptions are in the form of bitwise operators being used instead of addition and subtraction when calculating addresses. This patch replaces these bitwise operators with addition/subtraction. 
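As a stand-alone illustration of the assumption being dropped (toy constants, not the real layout): masking with ~BASE only recovers the offset when BASE has no bits set within the span of the region, which the reworked VMEMMAP_START/PAGE_OFFSET values no longer guarantee, whereas subtraction always does:

  #include <assert.h>
  #include <stdint.h>

  int main(void)
  {
          /* "Nice" base: contiguous ones then zeroes, as the old code assumed. */
          uint64_t nice_base    = 0xffff800000000000UL;
          /* Base with low bits set, e.g. because a guard region sits below it. */
          uint64_t shifted_base = 0xfffffffffe600000UL;
          uint64_t off = 0xa00000;

          uint64_t a = nice_base + off;
          assert((a & ~nice_base) == (a - nice_base));    /* both yield off */

          uint64_t b = shifted_base + off;
          assert((b - shifted_base) == off);              /* still correct  */
          assert((b & ~shifted_base) != off);             /* mask now wrong */
          return 0;
  }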
Signed-off-by: Steve Capper Reviewed-by: Catalin Marinas Signed-off-by: Will Deacon --- arch/arm64/include/asm/memory.h | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h index fb04f10a78ab..98fda92a2612 100644 --- a/arch/arm64/include/asm/memory.h +++ b/arch/arm64/include/asm/memory.h @@ -299,21 +299,20 @@ static inline void *phys_to_virt(phys_addr_t x) #define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT) #define _virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT) #else -#define __virt_to_pgoff(kaddr) (((u64)(kaddr) & ~PAGE_OFFSET) / PAGE_SIZE * sizeof(struct page)) -#define __page_to_voff(kaddr) (((u64)(kaddr) & ~VMEMMAP_START) * PAGE_SIZE / sizeof(struct page)) +#define __virt_to_pgoff(kaddr) (((u64)(kaddr) - PAGE_OFFSET) / PAGE_SIZE * sizeof(struct page)) +#define __page_to_voff(kaddr) (((u64)(kaddr) - VMEMMAP_START) * PAGE_SIZE / sizeof(struct page)) #define page_to_virt(page) ({ \ unsigned long __addr = \ - ((__page_to_voff(page)) | PAGE_OFFSET); \ + ((__page_to_voff(page)) + PAGE_OFFSET); \ const void *__addr_tag = \ __tag_set((void *)__addr, page_kasan_tag(page)); \ ((void *)__addr_tag); \ }) -#define virt_to_page(vaddr) ((struct page *)((__virt_to_pgoff(vaddr)) | VMEMMAP_START)) +#define virt_to_page(vaddr) ((struct page *)((__virt_to_pgoff(vaddr)) + VMEMMAP_START)) -#define _virt_addr_valid(kaddr) pfn_valid((((u64)(kaddr) & ~PAGE_OFFSET) \ - + PHYS_OFFSET) >> PAGE_SHIFT) +#define _virt_addr_valid(kaddr) pfn_valid(__virt_to_phys((u64)(kaddr)) >> PAGE_SHIFT) #endif #endif -- cgit v1.2.3-58-ga151 From 14c127c957c1c6070647c171e72f06e0db275ebf Mon Sep 17 00:00:00 2001 From: Steve Capper Date: Wed, 7 Aug 2019 16:55:14 +0100 Subject: arm64: mm: Flip kernel VA space In order to allow for a KASAN shadow that changes size at boot time, one must fix the KASAN_SHADOW_END for both 48 & 52-bit VAs and "grow" the start address. Also, it is highly desirable to maintain the same function addresses in the kernel .text between VA sizes. Both of these requirements necessitate us to flip the kernel address space halves s.t. the direct linear map occupies the lower addresses. This patch puts the direct linear map in the lower addresses of the kernel VA range and everything else in the higher ranges. We need to adjust: *) KASAN shadow region placement logic, *) KASAN_SHADOW_OFFSET computation logic, *) virt_to_phys, phys_to_virt checks, *) page table dumper. These are all small changes, that need to take place atomically, so they are bundled into this commit. As part of the re-arrangement, a guard region of 2MB (to preserve alignment for fixed map) is added after the vmemmap. Otherwise the vmemmap could intersect with IS_ERR pointers. 
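A rough sketch of the flipped __is_lm_address() test (48-bit constants for illustration; the real macro operates on untagged kernel VAs):

  #include <stdbool.h>
  #include <stdint.h>

  #define VA_BITS 48

  /* Before the flip: the linear map was the upper half of the kernel VA
   * range, so a *set* bit (VA_BITS - 1) meant "linear map address".
   */
  bool is_lm_address_old(uint64_t addr)
  {
          return addr & (1UL << (VA_BITS - 1));
  }

  /* After the flip: the linear map is the lower half, so the same bit must
   * now be *clear*; vmalloc, modules and the vmemmap all sit above it.
   */
  bool is_lm_address_new(uint64_t addr)
  {
          return !(addr & (1UL << (VA_BITS - 1)));
  }

  int main(void)
  {
          uint64_t lm      = 0xffff000012340000UL;  /* linear map, new layout */
          uint64_t vmalloc = 0xffff900010000000UL;  /* kernel/vmalloc half    */

          return !(is_lm_address_new(lm) && !is_lm_address_new(vmalloc));
  }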
Reviewed-by: Catalin Marinas Signed-off-by: Steve Capper Signed-off-by: Will Deacon --- arch/arm64/Makefile | 2 +- arch/arm64/include/asm/memory.h | 8 ++++---- arch/arm64/include/asm/pgtable.h | 2 +- arch/arm64/kernel/hibernate.c | 2 +- arch/arm64/mm/dump.c | 5 +++-- arch/arm64/mm/init.c | 9 +-------- arch/arm64/mm/kasan_init.c | 6 +++--- arch/arm64/mm/mmu.c | 4 ++-- 8 files changed, 16 insertions(+), 22 deletions(-) diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile index 61de992bbea3..61f7926fdeca 100644 --- a/arch/arm64/Makefile +++ b/arch/arm64/Makefile @@ -130,7 +130,7 @@ KBUILD_AFLAGS += -DKASAN_SHADOW_SCALE_SHIFT=$(KASAN_SHADOW_SCALE_SHIFT) # - (1 << (64 - KASAN_SHADOW_SCALE_SHIFT)) # in 32-bit arithmetic KASAN_SHADOW_OFFSET := $(shell printf "0x%08x00000000\n" $$(( \ - (0xffffffff & (-1 << ($(CONFIG_ARM64_VA_BITS) - 32))) \ + (0xffffffff & (-1 << ($(CONFIG_ARM64_VA_BITS) - 1 - 32))) \ + (1 << ($(CONFIG_ARM64_VA_BITS) - 32 - $(KASAN_SHADOW_SCALE_SHIFT))) \ - (1 << (64 - 32 - $(KASAN_SHADOW_SCALE_SHIFT))) )) ) diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h index 98fda92a2612..380594b1a0ba 100644 --- a/arch/arm64/include/asm/memory.h +++ b/arch/arm64/include/asm/memory.h @@ -38,9 +38,9 @@ */ #define VA_BITS (CONFIG_ARM64_VA_BITS) #define VA_START (UL(0xffffffffffffffff) - \ - (UL(1) << VA_BITS) + 1) -#define PAGE_OFFSET (UL(0xffffffffffffffff) - \ (UL(1) << (VA_BITS - 1)) + 1) +#define PAGE_OFFSET (UL(0xffffffffffffffff) - \ + (UL(1) << VA_BITS) + 1) #define KIMAGE_VADDR (MODULES_END) #define BPF_JIT_REGION_START (VA_START + KASAN_SHADOW_SIZE) #define BPF_JIT_REGION_SIZE (SZ_128M) @@ -48,7 +48,7 @@ #define MODULES_END (MODULES_VADDR + MODULES_VSIZE) #define MODULES_VADDR (BPF_JIT_REGION_END) #define MODULES_VSIZE (SZ_128M) -#define VMEMMAP_START (PAGE_OFFSET - VMEMMAP_SIZE) +#define VMEMMAP_START (-VMEMMAP_SIZE - SZ_2M) #define PCI_IO_END (VMEMMAP_START - SZ_2M) #define PCI_IO_START (PCI_IO_END - PCI_IO_SIZE) #define FIXADDR_TOP (PCI_IO_START - SZ_2M) @@ -231,7 +231,7 @@ static inline const void *__tag_set(const void *addr, u8 tag) * space. Testing the top bit for the start of the region is a * sufficient check. 
*/ -#define __is_lm_address(addr) (!!((addr) & BIT(VA_BITS - 1))) +#define __is_lm_address(addr) (!((addr) & BIT(VA_BITS - 1))) #define __lm_to_phys(addr) (((addr) & ~PAGE_OFFSET) + PHYS_OFFSET) #define __kimg_to_phys(addr) ((addr) - kimage_voffset) diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h index 5fdcfe237338..046b811309bb 100644 --- a/arch/arm64/include/asm/pgtable.h +++ b/arch/arm64/include/asm/pgtable.h @@ -21,7 +21,7 @@ * and fixed mappings */ #define VMALLOC_START (MODULES_END) -#define VMALLOC_END (PAGE_OFFSET - PUD_SIZE - VMEMMAP_SIZE - SZ_64K) +#define VMALLOC_END (- PUD_SIZE - VMEMMAP_SIZE - SZ_64K) #define vmemmap ((struct page *)VMEMMAP_START - (memstart_addr >> PAGE_SHIFT)) diff --git a/arch/arm64/kernel/hibernate.c b/arch/arm64/kernel/hibernate.c index 9341fcc6e809..e130db05d932 100644 --- a/arch/arm64/kernel/hibernate.c +++ b/arch/arm64/kernel/hibernate.c @@ -496,7 +496,7 @@ int swsusp_arch_resume(void) rc = -ENOMEM; goto out; } - rc = copy_page_tables(tmp_pg_dir, PAGE_OFFSET, 0); + rc = copy_page_tables(tmp_pg_dir, PAGE_OFFSET, VA_START); if (rc) goto out; diff --git a/arch/arm64/mm/dump.c b/arch/arm64/mm/dump.c index 82b3a7fdb4a6..beec87488e97 100644 --- a/arch/arm64/mm/dump.c +++ b/arch/arm64/mm/dump.c @@ -26,6 +26,8 @@ #include static const struct addr_marker address_markers[] = { + { PAGE_OFFSET, "Linear Mapping start" }, + { VA_START, "Linear Mapping end" }, #ifdef CONFIG_KASAN { KASAN_SHADOW_START, "Kasan shadow start" }, { KASAN_SHADOW_END, "Kasan shadow end" }, @@ -42,7 +44,6 @@ static const struct addr_marker address_markers[] = { { VMEMMAP_START, "vmemmap start" }, { VMEMMAP_START + VMEMMAP_SIZE, "vmemmap end" }, #endif - { PAGE_OFFSET, "Linear mapping" }, { -1, NULL }, }; @@ -376,7 +377,7 @@ static void ptdump_initialize(void) static struct ptdump_info kernel_ptdump_info = { .mm = &init_mm, .markers = address_markers, - .base_addr = VA_START, + .base_addr = PAGE_OFFSET, }; void ptdump_check_wx(void) diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c index f3c795278def..62927ed02229 100644 --- a/arch/arm64/mm/init.c +++ b/arch/arm64/mm/init.c @@ -301,7 +301,7 @@ static void __init fdt_enforce_memory_region(void) void __init arm64_memblock_init(void) { - const s64 linear_region_size = -(s64)PAGE_OFFSET; + const s64 linear_region_size = BIT(VA_BITS - 1); /* Handle linux,usable-memory-range property */ fdt_enforce_memory_region(); @@ -309,13 +309,6 @@ void __init arm64_memblock_init(void) /* Remove memory above our supported physical address size */ memblock_remove(1ULL << PHYS_MASK_SHIFT, ULLONG_MAX); - /* - * Ensure that the linear region takes up exactly half of the kernel - * virtual address space. This way, we can distinguish a linear address - * from a kernel/module/vmalloc address by testing a single bit. - */ - BUILD_BUG_ON(linear_region_size != BIT(VA_BITS - 1)); - /* * Select a suitable value for the base of physical memory. 
*/ diff --git a/arch/arm64/mm/kasan_init.c b/arch/arm64/mm/kasan_init.c index 6cf97b904ebb..05edfe9b02e4 100644 --- a/arch/arm64/mm/kasan_init.c +++ b/arch/arm64/mm/kasan_init.c @@ -225,10 +225,10 @@ void __init kasan_init(void) kasan_map_populate(kimg_shadow_start, kimg_shadow_end, early_pfn_to_nid(virt_to_pfn(lm_alias(_text)))); - kasan_populate_early_shadow((void *)KASAN_SHADOW_START, - (void *)mod_shadow_start); + kasan_populate_early_shadow(kasan_mem_to_shadow((void *) VA_START), + (void *)mod_shadow_start); kasan_populate_early_shadow((void *)kimg_shadow_end, - kasan_mem_to_shadow((void *)PAGE_OFFSET)); + (void *)KASAN_SHADOW_END); if (kimg_shadow_start > mod_shadow_end) kasan_populate_early_shadow((void *)mod_shadow_end, diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c index 750a69dde39b..1d4247f9a496 100644 --- a/arch/arm64/mm/mmu.c +++ b/arch/arm64/mm/mmu.c @@ -398,7 +398,7 @@ static phys_addr_t pgd_pgtable_alloc(int shift) static void __init create_mapping_noalloc(phys_addr_t phys, unsigned long virt, phys_addr_t size, pgprot_t prot) { - if (virt < VMALLOC_START) { + if ((virt >= VA_START) && (virt < VMALLOC_START)) { pr_warn("BUG: not creating mapping for %pa at 0x%016lx - outside kernel range\n", &phys, virt); return; @@ -425,7 +425,7 @@ void __init create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys, static void update_mapping_prot(phys_addr_t phys, unsigned long virt, phys_addr_t size, pgprot_t prot) { - if (virt < VMALLOC_START) { + if ((virt >= VA_START) && (virt < VMALLOC_START)) { pr_warn("BUG: not updating mapping for %pa at 0x%016lx - outside kernel range\n", &phys, virt); return; -- cgit v1.2.3-58-ga151 From 6bd1d0be0e97936d15cdacc71f5c232fbf71293e Mon Sep 17 00:00:00 2001 From: Steve Capper Date: Wed, 7 Aug 2019 16:55:15 +0100 Subject: arm64: kasan: Switch to using KASAN_SHADOW_OFFSET KASAN_SHADOW_OFFSET is a constant that is supplied to gcc as a command line argument and affects the codegen of the inline address sanetiser. Essentially, for an example memory access: *ptr1 = val; The compiler will insert logic similar to the below: shadowValue = *(ptr1 >> KASAN_SHADOW_SCALE_SHIFT + KASAN_SHADOW_OFFSET) if (somethingWrong(shadowValue)) flagAnError(); This code sequence is inserted into many places, thus KASAN_SHADOW_OFFSET is essentially baked into many places in the kernel text. If we want to run a single kernel binary with multiple address spaces, then we need to do this with KASAN_SHADOW_OFFSET fixed. Thankfully, due to the way the KASAN_SHADOW_OFFSET is used to provide shadow addresses we know that the end of the shadow region is constant w.r.t. VA space size: KASAN_SHADOW_END = ~0 >> KASAN_SHADOW_SCALE_SHIFT + KASAN_SHADOW_OFFSET This means that if we increase the size of the VA space, the start of the KASAN region expands into lower addresses whilst the end of the KASAN region is fixed. Currently the arm64 code computes KASAN_SHADOW_OFFSET at build time via build scripts with the VA size used as a parameter. (There are build time checks in the C code too to ensure that expected values are being derived). It is sufficient, and indeed is a simplification, to remove the build scripts (and build time checks) entirely and instead provide KASAN_SHADOW_OFFSET values. This patch removes the logic to compute the KASAN_SHADOW_OFFSET in the arm64 Makefile, and instead we adopt the approach used by x86 to supply offset values in kConfig. 
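To make the "fixed end, growing start" property concrete, here is a small user-space check using the generic-KASAN, 48-bit offset from the Kconfig defaults below (illustrative only):

  #include <assert.h>
  #include <stdint.h>

  #define KASAN_SHADOW_SCALE_SHIFT 3                      /* generic KASAN     */
  #define KASAN_SHADOW_OFFSET      0xdfffa00000000000UL   /* 48-bit VA default */

  int main(void)
  {
          /* The end of the shadow is independent of the VA size in use. */
          uint64_t shadow_end = (1UL << (64 - KASAN_SHADOW_SCALE_SHIFT))
                                + KASAN_SHADOW_OFFSET;

          /* The start "grows" downwards as more VA bits become available. */
          uint64_t start_48 = shadow_end - (1UL << (48 - KASAN_SHADOW_SCALE_SHIFT));
          uint64_t start_52 = shadow_end - (1UL << (52 - KASAN_SHADOW_SCALE_SHIFT));

          assert(shadow_end == 0xffffa00000000000UL);
          assert(start_48 == 0xffff800000000000UL);  /* mid-point of 48-bit VAs   */
          assert(start_52 < start_48);               /* more VA bits, lower start */
          return 0;
  }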
To help debug/develop future VA space changes, the Makefile logic has been preserved in a script file in the arm64 Documentation folder. Reviewed-by: Catalin Marinas Signed-off-by: Steve Capper Signed-off-by: Will Deacon --- Documentation/arm64/kasan-offsets.sh | 27 +++++++++++++++++++++++++++ arch/arm64/Kconfig | 15 +++++++++++++++ arch/arm64/Makefile | 8 -------- arch/arm64/include/asm/kasan.h | 11 ++++------- arch/arm64/include/asm/memory.h | 8 +++++--- 5 files changed, 51 insertions(+), 18 deletions(-) create mode 100644 Documentation/arm64/kasan-offsets.sh diff --git a/Documentation/arm64/kasan-offsets.sh b/Documentation/arm64/kasan-offsets.sh new file mode 100644 index 000000000000..2b7a021db363 --- /dev/null +++ b/Documentation/arm64/kasan-offsets.sh @@ -0,0 +1,27 @@ +#!/bin/sh + +# Print out the KASAN_SHADOW_OFFSETS required to place the KASAN SHADOW +# start address at the mid-point of the kernel VA space + +print_kasan_offset () { + printf "%02d\t" $1 + printf "0x%08x00000000\n" $(( (0xffffffff & (-1 << ($1 - 1 - 32))) \ + + (1 << ($1 - 32 - $2)) \ + - (1 << (64 - 32 - $2)) )) +} + +echo KASAN_SHADOW_SCALE_SHIFT = 3 +printf "VABITS\tKASAN_SHADOW_OFFSET\n" +print_kasan_offset 48 3 +print_kasan_offset 47 3 +print_kasan_offset 42 3 +print_kasan_offset 39 3 +print_kasan_offset 36 3 +echo +echo KASAN_SHADOW_SCALE_SHIFT = 4 +printf "VABITS\tKASAN_SHADOW_OFFSET\n" +print_kasan_offset 48 4 +print_kasan_offset 47 4 +print_kasan_offset 42 4 +print_kasan_offset 39 4 +print_kasan_offset 36 4 diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 3adcec05b1f6..f7f23e47c28f 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -297,6 +297,21 @@ config ARCH_SUPPORTS_UPROBES config ARCH_PROC_KCORE_TEXT def_bool y +config KASAN_SHADOW_OFFSET + hex + depends on KASAN + default 0xdfffa00000000000 if (ARM64_VA_BITS_48 || ARM64_USER_VA_BITS_52) && !KASAN_SW_TAGS + default 0xdfffd00000000000 if ARM64_VA_BITS_47 && !KASAN_SW_TAGS + default 0xdffffe8000000000 if ARM64_VA_BITS_42 && !KASAN_SW_TAGS + default 0xdfffffd000000000 if ARM64_VA_BITS_39 && !KASAN_SW_TAGS + default 0xdffffffa00000000 if ARM64_VA_BITS_36 && !KASAN_SW_TAGS + default 0xefff900000000000 if (ARM64_VA_BITS_48 || ARM64_USER_VA_BITS_52) && KASAN_SW_TAGS + default 0xefffc80000000000 if ARM64_VA_BITS_47 && KASAN_SW_TAGS + default 0xeffffe4000000000 if ARM64_VA_BITS_42 && KASAN_SW_TAGS + default 0xefffffc800000000 if ARM64_VA_BITS_39 && KASAN_SW_TAGS + default 0xeffffff900000000 if ARM64_VA_BITS_36 && KASAN_SW_TAGS + default 0xffffffffffffffff + source "arch/arm64/Kconfig.platforms" menu "Kernel Features" diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile index 61f7926fdeca..a8d2a241ac58 100644 --- a/arch/arm64/Makefile +++ b/arch/arm64/Makefile @@ -126,14 +126,6 @@ KBUILD_CFLAGS += -DKASAN_SHADOW_SCALE_SHIFT=$(KASAN_SHADOW_SCALE_SHIFT) KBUILD_CPPFLAGS += -DKASAN_SHADOW_SCALE_SHIFT=$(KASAN_SHADOW_SCALE_SHIFT) KBUILD_AFLAGS += -DKASAN_SHADOW_SCALE_SHIFT=$(KASAN_SHADOW_SCALE_SHIFT) -# KASAN_SHADOW_OFFSET = VA_START + (1 << (VA_BITS - KASAN_SHADOW_SCALE_SHIFT)) -# - (1 << (64 - KASAN_SHADOW_SCALE_SHIFT)) -# in 32-bit arithmetic -KASAN_SHADOW_OFFSET := $(shell printf "0x%08x00000000\n" $$(( \ - (0xffffffff & (-1 << ($(CONFIG_ARM64_VA_BITS) - 1 - 32))) \ - + (1 << ($(CONFIG_ARM64_VA_BITS) - 32 - $(KASAN_SHADOW_SCALE_SHIFT))) \ - - (1 << (64 - 32 - $(KASAN_SHADOW_SCALE_SHIFT))) )) ) - export TEXT_OFFSET GZFLAGS core-y += arch/arm64/kernel/ arch/arm64/mm/ diff --git a/arch/arm64/include/asm/kasan.h b/arch/arm64/include/asm/kasan.h 
index b52aacd2c526..10d2add842da 100644 --- a/arch/arm64/include/asm/kasan.h +++ b/arch/arm64/include/asm/kasan.h @@ -18,11 +18,8 @@ * KASAN_SHADOW_START: beginning of the kernel virtual addresses. * KASAN_SHADOW_END: KASAN_SHADOW_START + 1/N of kernel virtual addresses, * where N = (1 << KASAN_SHADOW_SCALE_SHIFT). - */ -#define KASAN_SHADOW_START (VA_START) -#define KASAN_SHADOW_END (KASAN_SHADOW_START + KASAN_SHADOW_SIZE) - -/* + * + * KASAN_SHADOW_OFFSET: * This value is used to map an address to the corresponding shadow * address by the following formula: * shadow_addr = (address >> KASAN_SHADOW_SCALE_SHIFT) + KASAN_SHADOW_OFFSET @@ -33,8 +30,8 @@ * KASAN_SHADOW_OFFSET = KASAN_SHADOW_END - * (1ULL << (64 - KASAN_SHADOW_SCALE_SHIFT)) */ -#define KASAN_SHADOW_OFFSET (KASAN_SHADOW_END - (1ULL << \ - (64 - KASAN_SHADOW_SCALE_SHIFT))) +#define _KASAN_SHADOW_START(va) (KASAN_SHADOW_END - (1UL << ((va) - KASAN_SHADOW_SCALE_SHIFT))) +#define KASAN_SHADOW_START _KASAN_SHADOW_START(VA_BITS) void kasan_init(void); void kasan_copy_shadow(pgd_t *pgdir); diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h index 380594b1a0ba..968659c90f5c 100644 --- a/arch/arm64/include/asm/memory.h +++ b/arch/arm64/include/asm/memory.h @@ -42,7 +42,7 @@ #define PAGE_OFFSET (UL(0xffffffffffffffff) - \ (UL(1) << VA_BITS) + 1) #define KIMAGE_VADDR (MODULES_END) -#define BPF_JIT_REGION_START (VA_START + KASAN_SHADOW_SIZE) +#define BPF_JIT_REGION_START (KASAN_SHADOW_END) #define BPF_JIT_REGION_SIZE (SZ_128M) #define BPF_JIT_REGION_END (BPF_JIT_REGION_START + BPF_JIT_REGION_SIZE) #define MODULES_END (MODULES_VADDR + MODULES_VSIZE) @@ -68,11 +68,13 @@ * significantly, so double the (minimum) stack size when they are in use. */ #ifdef CONFIG_KASAN -#define KASAN_SHADOW_SIZE (UL(1) << (VA_BITS - KASAN_SHADOW_SCALE_SHIFT)) +#define KASAN_SHADOW_OFFSET _AC(CONFIG_KASAN_SHADOW_OFFSET, UL) +#define KASAN_SHADOW_END ((UL(1) << (64 - KASAN_SHADOW_SCALE_SHIFT)) \ + + KASAN_SHADOW_OFFSET) #define KASAN_THREAD_SHIFT 1 #else -#define KASAN_SHADOW_SIZE (0) #define KASAN_THREAD_SHIFT 0 +#define KASAN_SHADOW_END (VA_START) #endif #define MIN_THREAD_SHIFT (14 + KASAN_THREAD_SHIFT) -- cgit v1.2.3-58-ga151 From 99426e5e8c9f11b9de65e7c1200868e8a9ceaa47 Mon Sep 17 00:00:00 2001 From: Steve Capper Date: Wed, 7 Aug 2019 16:55:16 +0100 Subject: arm64: dump: De-constify VA_START and KASAN_SHADOW_START The kernel page table dumper assumes that the placement of VA regions is constant and determined at compile time. As we are about to introduce variable VA logic, we need to be able to determine certain regions at boot time. Specifically the VA_START and KASAN_SHADOW_START will depend on whether or not the system is booted with 52-bit kernel VAs. This patch adds logic to the kernel page table dumper s.t. these regions can be computed at boot time. 
Signed-off-by: Steve Capper Reviewed-by: Catalin Marinas Signed-off-by: Will Deacon --- arch/arm64/mm/dump.c | 19 ++++++++++++++++--- 1 file changed, 16 insertions(+), 3 deletions(-) diff --git a/arch/arm64/mm/dump.c b/arch/arm64/mm/dump.c index beec87488e97..6ec75305828e 100644 --- a/arch/arm64/mm/dump.c +++ b/arch/arm64/mm/dump.c @@ -25,11 +25,20 @@ #include #include -static const struct addr_marker address_markers[] = { + +enum address_markers_idx { + PAGE_OFFSET_NR = 0, + VA_START_NR, +#ifdef CONFIG_KASAN + KASAN_START_NR, +#endif +}; + +static struct addr_marker address_markers[] = { { PAGE_OFFSET, "Linear Mapping start" }, - { VA_START, "Linear Mapping end" }, + { 0 /* VA_START */, "Linear Mapping end" }, #ifdef CONFIG_KASAN - { KASAN_SHADOW_START, "Kasan shadow start" }, + { 0 /* KASAN_SHADOW_START */, "Kasan shadow start" }, { KASAN_SHADOW_END, "Kasan shadow end" }, #endif { MODULES_VADDR, "Modules start" }, @@ -402,6 +411,10 @@ void ptdump_check_wx(void) static int ptdump_init(void) { + address_markers[VA_START_NR].start_address = VA_START; +#ifdef CONFIG_KASAN + address_markers[KASAN_START_NR].start_address = KASAN_SHADOW_START; +#endif ptdump_initialize(); ptdump_debugfs_register(&kernel_ptdump_info, "kernel_page_tables"); return 0; -- cgit v1.2.3-58-ga151 From 90ec95cda91a021d82351c976896a63aa364ebf1 Mon Sep 17 00:00:00 2001 From: Steve Capper Date: Wed, 7 Aug 2019 16:55:17 +0100 Subject: arm64: mm: Introduce VA_BITS_MIN In order to support 52-bit kernel addresses detectable at boot time, the kernel needs to know the most conservative VA_BITS possible should it need to fall back to this quantity due to lack of hardware support. A new compile time constant VA_BITS_MIN is introduced in this patch and it is employed in the KASAN end address, KASLR, and EFI stub. For Arm, if 52-bit VA support is unavailable the fallback is to 48-bits. In other words: VA_BITS_MIN = min (48, VA_BITS) Reviewed-by: Catalin Marinas Signed-off-by: Steve Capper Signed-off-by: Will Deacon --- arch/arm64/include/asm/efi.h | 4 ++-- arch/arm64/include/asm/memory.h | 9 ++++++++- arch/arm64/include/asm/processor.h | 2 +- arch/arm64/kernel/head.S | 2 +- arch/arm64/kernel/kaslr.c | 6 +++--- arch/arm64/mm/kasan_init.c | 3 ++- 6 files changed, 17 insertions(+), 9 deletions(-) diff --git a/arch/arm64/include/asm/efi.h b/arch/arm64/include/asm/efi.h index 76a144702586..b54d3a86c444 100644 --- a/arch/arm64/include/asm/efi.h +++ b/arch/arm64/include/asm/efi.h @@ -79,7 +79,7 @@ static inline unsigned long efi_get_max_fdt_addr(unsigned long dram_base) /* * On arm64, we have to ensure that the initrd ends up in the linear region, - * which is a 1 GB aligned region of size '1UL << (VA_BITS - 1)' that is + * which is a 1 GB aligned region of size '1UL << (VA_BITS_MIN - 1)' that is * guaranteed to cover the kernel Image. * * Since the EFI stub is part of the kernel Image, we can relax the @@ -90,7 +90,7 @@ static inline unsigned long efi_get_max_fdt_addr(unsigned long dram_base) static inline unsigned long efi_get_max_initrd_addr(unsigned long dram_base, unsigned long image_addr) { - return (image_addr & ~(SZ_1G - 1UL)) + (1UL << (VA_BITS - 1)); + return (image_addr & ~(SZ_1G - 1UL)) + (1UL << (VA_BITS_MIN - 1)); } #define efi_call_early(f, ...) 
sys_table_arg->boottime->f(__VA_ARGS__) diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h index 968659c90f5c..79e75e45d560 100644 --- a/arch/arm64/include/asm/memory.h +++ b/arch/arm64/include/asm/memory.h @@ -52,6 +52,13 @@ #define PCI_IO_END (VMEMMAP_START - SZ_2M) #define PCI_IO_START (PCI_IO_END - PCI_IO_SIZE) #define FIXADDR_TOP (PCI_IO_START - SZ_2M) +#if VA_BITS > 48 +#define VA_BITS_MIN (48) +#else +#define VA_BITS_MIN (VA_BITS) +#endif +#define _VA_START(va) (UL(0xffffffffffffffff) - \ + (UL(1) << ((va) - 1)) + 1) #define KERNEL_START _text #define KERNEL_END _end @@ -74,7 +81,7 @@ #define KASAN_THREAD_SHIFT 1 #else #define KASAN_THREAD_SHIFT 0 -#define KASAN_SHADOW_END (VA_START) +#define KASAN_SHADOW_END (_VA_START(VA_BITS_MIN)) #endif #define MIN_THREAD_SHIFT (14 + KASAN_THREAD_SHIFT) diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h index 844e2964b0f5..0e1f2770192a 100644 --- a/arch/arm64/include/asm/processor.h +++ b/arch/arm64/include/asm/processor.h @@ -42,7 +42,7 @@ * TASK_UNMAPPED_BASE - the lower boundary of the mmap VM area. */ -#define DEFAULT_MAP_WINDOW_64 (UL(1) << VA_BITS) +#define DEFAULT_MAP_WINDOW_64 (UL(1) << VA_BITS_MIN) #define TASK_SIZE_64 (UL(1) << vabits_user) #ifdef CONFIG_COMPAT diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S index 2cdacd1c141b..ac58c69993ec 100644 --- a/arch/arm64/kernel/head.S +++ b/arch/arm64/kernel/head.S @@ -314,7 +314,7 @@ __create_page_tables: mov x5, #52 cbnz x6, 1f #endif - mov x5, #VA_BITS + mov x5, #VA_BITS_MIN 1: adr_l x6, vabits_user str x5, [x6] diff --git a/arch/arm64/kernel/kaslr.c b/arch/arm64/kernel/kaslr.c index 708051655ad9..5a59f7567f9c 100644 --- a/arch/arm64/kernel/kaslr.c +++ b/arch/arm64/kernel/kaslr.c @@ -116,15 +116,15 @@ u64 __init kaslr_early_init(u64 dt_phys) /* * OK, so we are proceeding with KASLR enabled. Calculate a suitable * kernel image offset from the seed. Let's place the kernel in the - * middle half of the VMALLOC area (VA_BITS - 2), and stay clear of + * middle half of the VMALLOC area (VA_BITS_MIN - 2), and stay clear of * the lower and upper quarters to avoid colliding with other * allocations. 
* Even if we could randomize at page granularity for 16k and 64k pages, * let's always round to 2 MB so we don't interfere with the ability to * map using contiguous PTEs */ - mask = ((1UL << (VA_BITS - 2)) - 1) & ~(SZ_2M - 1); - offset = BIT(VA_BITS - 3) + (seed & mask); + mask = ((1UL << (VA_BITS_MIN - 2)) - 1) & ~(SZ_2M - 1); + offset = BIT(VA_BITS_MIN - 3) + (seed & mask); /* use the top 16 bits to randomize the linear region */ memstart_offset_seed = seed >> 48; diff --git a/arch/arm64/mm/kasan_init.c b/arch/arm64/mm/kasan_init.c index 05edfe9b02e4..725222271474 100644 --- a/arch/arm64/mm/kasan_init.c +++ b/arch/arm64/mm/kasan_init.c @@ -156,7 +156,8 @@ asmlinkage void __init kasan_early_init(void) { BUILD_BUG_ON(KASAN_SHADOW_OFFSET != KASAN_SHADOW_END - (1UL << (64 - KASAN_SHADOW_SCALE_SHIFT))); - BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_START, PGDIR_SIZE)); + BUILD_BUG_ON(!IS_ALIGNED(_KASAN_SHADOW_START(VA_BITS), PGDIR_SIZE)); + BUILD_BUG_ON(!IS_ALIGNED(_KASAN_SHADOW_START(VA_BITS_MIN), PGDIR_SIZE)); BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_END, PGDIR_SIZE)); kasan_pgd_populate(KASAN_SHADOW_START, KASAN_SHADOW_END, NUMA_NO_NODE, true); -- cgit v1.2.3-58-ga151 From 5383cc6efed13784ddb3cff2cc183b6b8c50c8db Mon Sep 17 00:00:00 2001 From: Steve Capper Date: Wed, 7 Aug 2019 16:55:18 +0100 Subject: arm64: mm: Introduce vabits_actual In order to support 52-bit kernel addresses detectable at boot time, one needs to know the actual VA_BITS detected. A new variable vabits_actual is introduced in this commit and employed for the KVM hypervisor layout, KASAN, fault handling and phys-to/from-virt translation where there would normally be compile time constants. In order to maintain performance in phys_to_virt, another variable physvirt_offset is introduced. Reviewed-by: Catalin Marinas Signed-off-by: Steve Capper Signed-off-by: Will Deacon --- arch/arm64/include/asm/kasan.h | 2 +- arch/arm64/include/asm/memory.h | 11 ++++++----- arch/arm64/include/asm/mmu_context.h | 2 +- arch/arm64/kernel/head.S | 5 +++++ arch/arm64/kvm/va_layout.c | 14 +++++++------- arch/arm64/mm/fault.c | 4 ++-- arch/arm64/mm/init.c | 7 ++++++- arch/arm64/mm/mmu.c | 3 +++ 8 files changed, 31 insertions(+), 17 deletions(-) diff --git a/arch/arm64/include/asm/kasan.h b/arch/arm64/include/asm/kasan.h index 10d2add842da..b0dc4abc3589 100644 --- a/arch/arm64/include/asm/kasan.h +++ b/arch/arm64/include/asm/kasan.h @@ -31,7 +31,7 @@ * (1ULL << (64 - KASAN_SHADOW_SCALE_SHIFT)) */ #define _KASAN_SHADOW_START(va) (KASAN_SHADOW_END - (1UL << ((va) - KASAN_SHADOW_SCALE_SHIFT))) -#define KASAN_SHADOW_START _KASAN_SHADOW_START(VA_BITS) +#define KASAN_SHADOW_START _KASAN_SHADOW_START(vabits_actual) void kasan_init(void); void kasan_copy_shadow(pgd_t *pgdir); diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h index 79e75e45d560..364635b8370a 100644 --- a/arch/arm64/include/asm/memory.h +++ b/arch/arm64/include/asm/memory.h @@ -37,8 +37,6 @@ * VA_START - the first kernel virtual address. */ #define VA_BITS (CONFIG_ARM64_VA_BITS) -#define VA_START (UL(0xffffffffffffffff) - \ - (UL(1) << (VA_BITS - 1)) + 1) #define PAGE_OFFSET (UL(0xffffffffffffffff) - \ (UL(1) << VA_BITS) + 1) #define KIMAGE_VADDR (MODULES_END) @@ -166,10 +164,13 @@ #endif #ifndef __ASSEMBLY__ +extern u64 vabits_actual; +#define VA_START (_VA_START(vabits_actual)) #include #include +extern s64 physvirt_offset; extern s64 memstart_addr; /* PHYS_OFFSET - the physical address of the start of memory. 
*/ #define PHYS_OFFSET ({ VM_BUG_ON(memstart_addr & 1); memstart_addr; }) @@ -240,9 +241,9 @@ static inline const void *__tag_set(const void *addr, u8 tag) * space. Testing the top bit for the start of the region is a * sufficient check. */ -#define __is_lm_address(addr) (!((addr) & BIT(VA_BITS - 1))) +#define __is_lm_address(addr) (!((addr) & BIT(vabits_actual - 1))) -#define __lm_to_phys(addr) (((addr) & ~PAGE_OFFSET) + PHYS_OFFSET) +#define __lm_to_phys(addr) (((addr) + physvirt_offset)) #define __kimg_to_phys(addr) ((addr) - kimage_voffset) #define __virt_to_phys_nodebug(x) ({ \ @@ -261,7 +262,7 @@ extern phys_addr_t __phys_addr_symbol(unsigned long x); #define __phys_addr_symbol(x) __pa_symbol_nodebug(x) #endif -#define __phys_to_virt(x) ((unsigned long)((x) - PHYS_OFFSET) | PAGE_OFFSET) +#define __phys_to_virt(x) ((unsigned long)((x) - physvirt_offset)) #define __phys_to_kimg(x) ((unsigned long)((x) + kimage_voffset)) /* diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h index 7ed0adb187a8..670003a55d28 100644 --- a/arch/arm64/include/asm/mmu_context.h +++ b/arch/arm64/include/asm/mmu_context.h @@ -95,7 +95,7 @@ static inline void __cpu_set_tcr_t0sz(unsigned long t0sz) isb(); } -#define cpu_set_default_tcr_t0sz() __cpu_set_tcr_t0sz(TCR_T0SZ(VA_BITS)) +#define cpu_set_default_tcr_t0sz() __cpu_set_tcr_t0sz(TCR_T0SZ(vabits_actual)) #define cpu_set_idmap_tcr_t0sz() __cpu_set_tcr_t0sz(idmap_t0sz) /* diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S index ac58c69993ec..6dc7349868d9 100644 --- a/arch/arm64/kernel/head.S +++ b/arch/arm64/kernel/head.S @@ -321,6 +321,11 @@ __create_page_tables: dmb sy dc ivac, x6 // Invalidate potentially stale cache line + adr_l x6, vabits_actual + str x5, [x6] + dmb sy + dc ivac, x6 // Invalidate potentially stale cache line + /* * VA_BITS may be too small to allow for an ID mapping to be created * that covers system RAM if that is located sufficiently high in the diff --git a/arch/arm64/kvm/va_layout.c b/arch/arm64/kvm/va_layout.c index acd8084f1f2c..2cf7d4b606c3 100644 --- a/arch/arm64/kvm/va_layout.c +++ b/arch/arm64/kvm/va_layout.c @@ -29,25 +29,25 @@ static void compute_layout(void) int kva_msb; /* Where is my RAM region? */ - hyp_va_msb = idmap_addr & BIT(VA_BITS - 1); - hyp_va_msb ^= BIT(VA_BITS - 1); + hyp_va_msb = idmap_addr & BIT(vabits_actual - 1); + hyp_va_msb ^= BIT(vabits_actual - 1); kva_msb = fls64((u64)phys_to_virt(memblock_start_of_DRAM()) ^ (u64)(high_memory - 1)); - if (kva_msb == (VA_BITS - 1)) { + if (kva_msb == (vabits_actual - 1)) { /* * No space in the address, let's compute the mask so - * that it covers (VA_BITS - 1) bits, and the region + * that it covers (vabits_actual - 1) bits, and the region * bit. The tag stays set to zero. */ - va_mask = BIT(VA_BITS - 1) - 1; + va_mask = BIT(vabits_actual - 1) - 1; va_mask |= hyp_va_msb; } else { /* * We do have some free bits to insert a random tag. * Hyp VAs are now created from kernel linear map VAs - * using the following formula (with V == VA_BITS): + * using the following formula (with V == vabits_actual): * * 63 ... V | V-1 | V-2 .. tag_lsb | tag_lsb - 1 .. 
0 * --------------------------------------------------------- @@ -55,7 +55,7 @@ static void compute_layout(void) */ tag_lsb = kva_msb; va_mask = GENMASK_ULL(tag_lsb - 1, 0); - tag_val = get_random_long() & GENMASK_ULL(VA_BITS - 2, tag_lsb); + tag_val = get_random_long() & GENMASK_ULL(vabits_actual - 2, tag_lsb); tag_val |= hyp_va_msb; tag_val >>= tag_lsb; } diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c index cfd65b63f36f..6b195871769a 100644 --- a/arch/arm64/mm/fault.c +++ b/arch/arm64/mm/fault.c @@ -138,9 +138,9 @@ static void show_pte(unsigned long addr) return; } - pr_alert("%s pgtable: %luk pages, %u-bit VAs, pgdp=%016lx\n", + pr_alert("%s pgtable: %luk pages, %llu-bit VAs, pgdp=%016lx\n", mm == &init_mm ? "swapper" : "user", PAGE_SIZE / SZ_1K, - mm == &init_mm ? VA_BITS : (int)vabits_user, + mm == &init_mm ? vabits_actual : (int)vabits_user, (unsigned long)virt_to_phys(mm->pgd)); pgdp = pgd_offset(mm, addr); pgd = READ_ONCE(*pgdp); diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c index 62927ed02229..e752f46d430e 100644 --- a/arch/arm64/mm/init.c +++ b/arch/arm64/mm/init.c @@ -50,6 +50,9 @@ s64 memstart_addr __ro_after_init = -1; EXPORT_SYMBOL(memstart_addr); +s64 physvirt_offset __ro_after_init; +EXPORT_SYMBOL(physvirt_offset); + phys_addr_t arm64_dma_phys_limit __ro_after_init; #ifdef CONFIG_KEXEC_CORE @@ -301,7 +304,7 @@ static void __init fdt_enforce_memory_region(void) void __init arm64_memblock_init(void) { - const s64 linear_region_size = BIT(VA_BITS - 1); + const s64 linear_region_size = BIT(vabits_actual - 1); /* Handle linux,usable-memory-range property */ fdt_enforce_memory_region(); @@ -315,6 +318,8 @@ void __init arm64_memblock_init(void) memstart_addr = round_down(memblock_start_of_DRAM(), ARM64_MEMSTART_ALIGN); + physvirt_offset = PHYS_OFFSET - PAGE_OFFSET; + /* * Remove the memory that we will not be able to cover with the * linear mapping. Take care not to clip the kernel which may be diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c index 1d4247f9a496..07b30e6d17f8 100644 --- a/arch/arm64/mm/mmu.c +++ b/arch/arm64/mm/mmu.c @@ -43,6 +43,9 @@ u64 idmap_ptrs_per_pgd = PTRS_PER_PGD; u64 vabits_user __ro_after_init; EXPORT_SYMBOL(vabits_user); +u64 __section(".mmuoff.data.write") vabits_actual; +EXPORT_SYMBOL(vabits_actual); + u64 kimage_voffset __ro_after_init; EXPORT_SYMBOL(kimage_voffset); -- cgit v1.2.3-58-ga151 From c812026c54cfaec23fa1d78cdbfd0e56e787470a Mon Sep 17 00:00:00 2001 From: Steve Capper Date: Wed, 7 Aug 2019 16:55:19 +0100 Subject: arm64: mm: Logic to make offset_ttbr1 conditional When running with a 52-bit userspace VA and a 48-bit kernel VA we offset ttbr1_el1 to allow the kernel pagetables with a 52-bit PTRS_PER_PGD to be used for both userspace and kernel. Moving on to a 52-bit kernel VA we no longer require this offset to ttbr1_el1 should we be running on a system with HW support for 52-bit VAs. This patch introduces conditional logic to offset_ttbr1 to query SYS_ID_AA64MMFR2_EL1 whenever 52-bit VAs are selected. If there is HW support for 52-bit VAs then the ttbr1 offset is skipped. We choose to read a system register rather than vabits_actual because offset_ttbr1 can be called in places where the kernel data is not actually mapped. Calls to offset_ttbr1 appear to be made from rarely called code paths so this extra logic is not expected to adversely affect performance. 
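A C-level sketch of the same decision (hypothetical helper; the series implements it in the offset_ttbr1 assembly macro because it can run before the kernel data is mapped):

  #include <asm/cpufeature.h>
  #include <asm/pgtable-hwdef.h>
  #include <asm/sysreg.h>

  #ifdef CONFIG_ARM64_VA_BITS_52
  /* Only apply the 48/52-bit baddr offset when the CPU lacks hardware
   * 52-bit VA (LVA) support, mirroring the macro's ID register check.
   */
  static u64 maybe_offset_ttbr1(u64 ttbr)
  {
          u64 mmfr2 = read_sysreg_s(SYS_ID_AA64MMFR2_EL1);

          if (!cpuid_feature_extract_unsigned_field(mmfr2,
                                                    ID_AA64MMFR2_LVA_SHIFT))
                  ttbr |= TTBR1_BADDR_4852_OFFSET;

          return ttbr;
  }
  #endif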
Signed-off-by: Steve Capper Reviewed-by: Catalin Marinas Signed-off-by: Will Deacon --- arch/arm64/include/asm/assembler.h | 12 ++++++++++-- arch/arm64/kernel/head.S | 2 +- arch/arm64/kernel/hibernate-asm.S | 8 ++++---- arch/arm64/mm/proc.S | 6 +++--- 4 files changed, 18 insertions(+), 10 deletions(-) diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h index e3a15c751b13..ede368bafa2c 100644 --- a/arch/arm64/include/asm/assembler.h +++ b/arch/arm64/include/asm/assembler.h @@ -538,9 +538,17 @@ USER(\label, ic ivau, \tmp2) // invalidate I line PoU * In future this may be nop'ed out when dealing with 52-bit kernel VAs. * ttbr: Value of ttbr to set, modified. */ - .macro offset_ttbr1, ttbr + .macro offset_ttbr1, ttbr, tmp #ifdef CONFIG_ARM64_USER_VA_BITS_52 orr \ttbr, \ttbr, #TTBR1_BADDR_4852_OFFSET +#endif + +#ifdef CONFIG_ARM64_VA_BITS_52 + mrs_s \tmp, SYS_ID_AA64MMFR2_EL1 + and \tmp, \tmp, #(0xf << ID_AA64MMFR2_LVA_SHIFT) + cbnz \tmp, .Lskipoffs_\@ + orr \ttbr, \ttbr, #TTBR1_BADDR_4852_OFFSET +.Lskipoffs_\@ : #endif .endm @@ -550,7 +558,7 @@ USER(\label, ic ivau, \tmp2) // invalidate I line PoU * to be nop'ed out when dealing with 52-bit kernel VAs. */ .macro restore_ttbr1, ttbr -#ifdef CONFIG_ARM64_USER_VA_BITS_52 +#if defined(CONFIG_ARM64_USER_VA_BITS_52) || defined(CONFIG_ARM64_VA_BITS_52) bic \ttbr, \ttbr, #TTBR1_BADDR_4852_OFFSET #endif .endm diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S index 6dc7349868d9..a96dc4386c7c 100644 --- a/arch/arm64/kernel/head.S +++ b/arch/arm64/kernel/head.S @@ -777,7 +777,7 @@ ENTRY(__enable_mmu) phys_to_ttbr x1, x1 phys_to_ttbr x2, x2 msr ttbr0_el1, x2 // load TTBR0 - offset_ttbr1 x1 + offset_ttbr1 x1, x3 msr ttbr1_el1, x1 // load TTBR1 isb msr sctlr_el1, x0 diff --git a/arch/arm64/kernel/hibernate-asm.S b/arch/arm64/kernel/hibernate-asm.S index 2f4a2ce7264b..38bcd4d4e43b 100644 --- a/arch/arm64/kernel/hibernate-asm.S +++ b/arch/arm64/kernel/hibernate-asm.S @@ -22,14 +22,14 @@ * Even switching to our copied tables will cause a changed output address at * each stage of the walk. 
*/ -.macro break_before_make_ttbr_switch zero_page, page_table, tmp +.macro break_before_make_ttbr_switch zero_page, page_table, tmp, tmp2 phys_to_ttbr \tmp, \zero_page msr ttbr1_el1, \tmp isb tlbi vmalle1 dsb nsh phys_to_ttbr \tmp, \page_table - offset_ttbr1 \tmp + offset_ttbr1 \tmp, \tmp2 msr ttbr1_el1, \tmp isb .endm @@ -70,7 +70,7 @@ ENTRY(swsusp_arch_suspend_exit) * We execute from ttbr0, change ttbr1 to our copied linear map tables * with a break-before-make via the zero page */ - break_before_make_ttbr_switch x5, x0, x6 + break_before_make_ttbr_switch x5, x0, x6, x8 mov x21, x1 mov x30, x2 @@ -101,7 +101,7 @@ ENTRY(swsusp_arch_suspend_exit) dsb ish /* wait for PoU cleaning to finish */ /* switch to the restored kernels page tables */ - break_before_make_ttbr_switch x25, x21, x6 + break_before_make_ttbr_switch x25, x21, x6, x8 ic ialluis dsb ish diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S index 7dbf2be470f6..8d289ff7584d 100644 --- a/arch/arm64/mm/proc.S +++ b/arch/arm64/mm/proc.S @@ -168,7 +168,7 @@ ENDPROC(cpu_do_switch_mm) .macro __idmap_cpu_set_reserved_ttbr1, tmp1, tmp2 adrp \tmp1, empty_zero_page phys_to_ttbr \tmp2, \tmp1 - offset_ttbr1 \tmp2 + offset_ttbr1 \tmp2, \tmp1 msr ttbr1_el1, \tmp2 isb tlbi vmalle1 @@ -187,7 +187,7 @@ ENTRY(idmap_cpu_replace_ttbr1) __idmap_cpu_set_reserved_ttbr1 x1, x3 - offset_ttbr1 x0 + offset_ttbr1 x0, x3 msr ttbr1_el1, x0 isb @@ -362,7 +362,7 @@ __idmap_kpti_secondary: cbnz w18, 1b /* All done, act like nothing happened */ - offset_ttbr1 swapper_ttb + offset_ttbr1 swapper_ttb, x18 msr ttbr1_el1, swapper_ttb isb ret -- cgit v1.2.3-58-ga151 From c8b6d2ccf9b10ce872cdea037f9685804440bb7e Mon Sep 17 00:00:00 2001 From: Steve Capper Date: Wed, 7 Aug 2019 16:55:20 +0100 Subject: arm64: mm: Separate out vmemmap vmemmap is a preprocessor definition that depends on a variable, memstart_addr. In a later patch we will need to expand the size of the VMEMMAP region and optionally modify vmemmap depending upon whether or not hardware support is available for 52-bit virtual addresses. This patch changes vmemmap to be a variable. As the old definition depended on a variable load, this should not affect performance noticeably. 
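A toy user-space model of the identity being preserved (stand-in types and constants, not the kernel's): biasing vmemmap by the first valid PFN keeps pfn_to_page() a single addition, whether vmemmap is a #define or a variable.

  #include <assert.h>
  #include <stdint.h>

  struct page { char pad[64]; };              /* stand-in for struct page */

  #define PAGE_SHIFT     12
  #define VMEMMAP_START  0xfffffdffffe00000UL /* illustrative value only  */

  int main(void)
  {
          uint64_t memstart_addr = 0x80000000UL;     /* first RAM address (toy) */
          struct page *vmemmap =
                  (struct page *)VMEMMAP_START - (memstart_addr >> PAGE_SHIFT);

          /* pfn_to_page(pfn) is just "vmemmap + pfn"... */
          uint64_t pfn0 = memstart_addr >> PAGE_SHIFT;
          assert((uintptr_t)(vmemmap + pfn0) == VMEMMAP_START);

          /* ...and the Nth frame of RAM lands N struct pages into the array. */
          assert((uintptr_t)(vmemmap + pfn0 + 5) ==
                 VMEMMAP_START + 5 * sizeof(struct page));
          return 0;
  }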
Signed-off-by: Steve Capper Reviewed-by: Catalin Marinas Signed-off-by: Will Deacon --- arch/arm64/include/asm/pgtable.h | 4 ++-- arch/arm64/mm/init.c | 5 +++++ 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h index 046b811309bb..4a695b9ee0f0 100644 --- a/arch/arm64/include/asm/pgtable.h +++ b/arch/arm64/include/asm/pgtable.h @@ -23,8 +23,6 @@ #define VMALLOC_START (MODULES_END) #define VMALLOC_END (- PUD_SIZE - VMEMMAP_SIZE - SZ_64K) -#define vmemmap ((struct page *)VMEMMAP_START - (memstart_addr >> PAGE_SHIFT)) - #define FIRST_USER_ADDRESS 0UL #ifndef __ASSEMBLY__ @@ -35,6 +33,8 @@ #include #include +extern struct page *vmemmap; + extern void __pte_error(const char *file, int line, unsigned long val); extern void __pmd_error(const char *file, int line, unsigned long val); extern void __pud_error(const char *file, int line, unsigned long val); diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c index e752f46d430e..2940221e5519 100644 --- a/arch/arm64/mm/init.c +++ b/arch/arm64/mm/init.c @@ -53,6 +53,9 @@ EXPORT_SYMBOL(memstart_addr); s64 physvirt_offset __ro_after_init; EXPORT_SYMBOL(physvirt_offset); +struct page *vmemmap __ro_after_init; +EXPORT_SYMBOL(vmemmap); + phys_addr_t arm64_dma_phys_limit __ro_after_init; #ifdef CONFIG_KEXEC_CORE @@ -320,6 +323,8 @@ void __init arm64_memblock_init(void) physvirt_offset = PHYS_OFFSET - PAGE_OFFSET; + vmemmap = ((struct page *)VMEMMAP_START - (memstart_addr >> PAGE_SHIFT)); + /* * Remove the memory that we will not be able to cover with the * linear mapping. Take care not to clip the kernel which may be -- cgit v1.2.3-58-ga151 From ce3aaed87344c83c77135f80e7b76e1da9c92ee6 Mon Sep 17 00:00:00 2001 From: Steve Capper Date: Wed, 7 Aug 2019 16:55:21 +0100 Subject: arm64: mm: Modify calculation of VMEMMAP_SIZE In a later patch we will need to have a slightly larger VMEMMAP region to accommodate boot time selection between 48/52-bit kernel VAs. This patch modifies the formula for computing VMEMMAP_SIZE to depend explicitly on the PAGE_OFFSET and start of kernel addressable memory. (This allows for a slightly larger direct linear map in future). Signed-off-by: Steve Capper Reviewed-by: Catalin Marinas Signed-off-by: Will Deacon --- arch/arm64/include/asm/memory.h | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h index 364635b8370a..0204c2006c92 100644 --- a/arch/arm64/include/asm/memory.h +++ b/arch/arm64/include/asm/memory.h @@ -26,8 +26,15 @@ /* * VMEMMAP_SIZE - allows the whole linear region to be covered by * a struct page array + * + * If we are configured with a 52-bit kernel VA then our VMEMMAP_SIZE + * neads to cover the memory region from the beginning of the 52-bit + * PAGE_OFFSET all the way to VA_START for 48-bit. This allows us to + * keep a constant PAGE_OFFSET and "fallback" to using the higher end + * of the VMEMMAP where 52-bit support is not available in hardware. 
*/ -#define VMEMMAP_SIZE (UL(1) << (VA_BITS - PAGE_SHIFT - 1 + STRUCT_PAGE_MAX_SHIFT)) +#define VMEMMAP_SIZE ((_VA_START(VA_BITS_MIN) - PAGE_OFFSET) \ + >> (PAGE_SHIFT - STRUCT_PAGE_MAX_SHIFT)) /* * PAGE_OFFSET - the virtual address of the start of the linear map (top -- cgit v1.2.3-58-ga151 From b6d00d47e81a49f6cf462518c10408f37a3e6785 Mon Sep 17 00:00:00 2001 From: Steve Capper Date: Wed, 7 Aug 2019 16:55:22 +0100 Subject: arm64: mm: Introduce 52-bit Kernel VAs Most of the machinery is now in place to enable 52-bit kernel VAs that are detectable at boot time. This patch adds a Kconfig option for 52-bit user and kernel addresses and plumbs in the requisite CONFIG_ macros as well as sets TCR.T1SZ, physvirt_offset and vmemmap at early boot. To simplify things this patch also removes the 52-bit user/48-bit kernel kconfig option. Signed-off-by: Steve Capper Reviewed-by: Catalin Marinas Signed-off-by: Will Deacon --- arch/arm64/Kconfig | 20 +++++++++++--------- arch/arm64/include/asm/assembler.h | 13 ++++++++----- arch/arm64/include/asm/memory.h | 7 ++++--- arch/arm64/include/asm/mmu_context.h | 2 +- arch/arm64/include/asm/pgtable-hwdef.h | 2 +- arch/arm64/kernel/head.S | 4 ++-- arch/arm64/mm/init.c | 10 ++++++++++ arch/arm64/mm/proc.S | 3 ++- 8 files changed, 39 insertions(+), 22 deletions(-) diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index f7f23e47c28f..f5f7cb75a698 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -286,7 +286,7 @@ config PGTABLE_LEVELS int default 2 if ARM64_16K_PAGES && ARM64_VA_BITS_36 default 2 if ARM64_64K_PAGES && ARM64_VA_BITS_42 - default 3 if ARM64_64K_PAGES && (ARM64_VA_BITS_48 || ARM64_USER_VA_BITS_52) + default 3 if ARM64_64K_PAGES && (ARM64_VA_BITS_48 || ARM64_VA_BITS_52) default 3 if ARM64_4K_PAGES && ARM64_VA_BITS_39 default 3 if ARM64_16K_PAGES && ARM64_VA_BITS_47 default 4 if !ARM64_64K_PAGES && ARM64_VA_BITS_48 @@ -300,12 +300,12 @@ config ARCH_PROC_KCORE_TEXT config KASAN_SHADOW_OFFSET hex depends on KASAN - default 0xdfffa00000000000 if (ARM64_VA_BITS_48 || ARM64_USER_VA_BITS_52) && !KASAN_SW_TAGS + default 0xdfffa00000000000 if (ARM64_VA_BITS_48 || ARM64_VA_BITS_52) && !KASAN_SW_TAGS default 0xdfffd00000000000 if ARM64_VA_BITS_47 && !KASAN_SW_TAGS default 0xdffffe8000000000 if ARM64_VA_BITS_42 && !KASAN_SW_TAGS default 0xdfffffd000000000 if ARM64_VA_BITS_39 && !KASAN_SW_TAGS default 0xdffffffa00000000 if ARM64_VA_BITS_36 && !KASAN_SW_TAGS - default 0xefff900000000000 if (ARM64_VA_BITS_48 || ARM64_USER_VA_BITS_52) && KASAN_SW_TAGS + default 0xefff900000000000 if (ARM64_VA_BITS_48 || ARM64_VA_BITS_52) && KASAN_SW_TAGS default 0xefffc80000000000 if ARM64_VA_BITS_47 && KASAN_SW_TAGS default 0xeffffe4000000000 if ARM64_VA_BITS_42 && KASAN_SW_TAGS default 0xefffffc800000000 if ARM64_VA_BITS_39 && KASAN_SW_TAGS @@ -759,13 +759,14 @@ config ARM64_VA_BITS_47 config ARM64_VA_BITS_48 bool "48-bit" -config ARM64_USER_VA_BITS_52 - bool "52-bit (user)" +config ARM64_VA_BITS_52 + bool "52-bit" depends on ARM64_64K_PAGES && (ARM64_PAN || !ARM64_SW_TTBR0_PAN) help Enable 52-bit virtual addressing for userspace when explicitly - requested via a hint to mmap(). The kernel will continue to - use 48-bit virtual addresses for its own mappings. + requested via a hint to mmap(). The kernel will also use 52-bit + virtual addresses for its own mappings (provided HW support for + this feature is available, otherwise it reverts to 48-bit). 
NOTE: Enabling 52-bit virtual addressing in conjunction with ARMv8.3 Pointer Authentication will result in the PAC being @@ -778,7 +779,7 @@ endchoice config ARM64_FORCE_52BIT bool "Force 52-bit virtual addresses for userspace" - depends on ARM64_USER_VA_BITS_52 && EXPERT + depends on ARM64_VA_BITS_52 && EXPERT help For systems with 52-bit userspace VAs enabled, the kernel will attempt to maintain compatibility with older software by providing 48-bit VAs @@ -795,7 +796,8 @@ config ARM64_VA_BITS default 39 if ARM64_VA_BITS_39 default 42 if ARM64_VA_BITS_42 default 47 if ARM64_VA_BITS_47 - default 48 if ARM64_VA_BITS_48 || ARM64_USER_VA_BITS_52 + default 48 if ARM64_VA_BITS_48 + default 52 if ARM64_VA_BITS_52 choice prompt "Physical address space size" diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h index ede368bafa2c..c066fc4976cd 100644 --- a/arch/arm64/include/asm/assembler.h +++ b/arch/arm64/include/asm/assembler.h @@ -349,6 +349,13 @@ alternative_endif bfi \valreg, \t0sz, #TCR_T0SZ_OFFSET, #TCR_TxSZ_WIDTH .endm +/* + * tcr_set_t1sz - update TCR.T1SZ + */ + .macro tcr_set_t1sz, valreg, t1sz + bfi \valreg, \t1sz, #TCR_T1SZ_OFFSET, #TCR_TxSZ_WIDTH + .endm + /* * tcr_compute_pa_size - set TCR.(I)PS to the highest supported * ID_AA64MMFR0_EL1.PARange value @@ -539,10 +546,6 @@ USER(\label, ic ivau, \tmp2) // invalidate I line PoU * ttbr: Value of ttbr to set, modified. */ .macro offset_ttbr1, ttbr, tmp -#ifdef CONFIG_ARM64_USER_VA_BITS_52 - orr \ttbr, \ttbr, #TTBR1_BADDR_4852_OFFSET -#endif - #ifdef CONFIG_ARM64_VA_BITS_52 mrs_s \tmp, SYS_ID_AA64MMFR2_EL1 and \tmp, \tmp, #(0xf << ID_AA64MMFR2_LVA_SHIFT) @@ -558,7 +561,7 @@ USER(\label, ic ivau, \tmp2) // invalidate I line PoU * to be nop'ed out when dealing with 52-bit kernel VAs. */ .macro restore_ttbr1, ttbr -#if defined(CONFIG_ARM64_USER_VA_BITS_52) || defined(CONFIG_ARM64_VA_BITS_52) +#ifdef CONFIG_ARM64_VA_BITS_52 bic \ttbr, \ttbr, #TTBR1_BADDR_4852_OFFSET #endif .endm diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h index 0204c2006c92..d911d0573460 100644 --- a/arch/arm64/include/asm/memory.h +++ b/arch/arm64/include/asm/memory.h @@ -44,8 +44,9 @@ * VA_START - the first kernel virtual address. 
*/ #define VA_BITS (CONFIG_ARM64_VA_BITS) -#define PAGE_OFFSET (UL(0xffffffffffffffff) - \ - (UL(1) << VA_BITS) + 1) +#define _PAGE_OFFSET(va) (UL(0xffffffffffffffff) - \ + (UL(1) << (va)) + 1) +#define PAGE_OFFSET (_PAGE_OFFSET(VA_BITS)) #define KIMAGE_VADDR (MODULES_END) #define BPF_JIT_REGION_START (KASAN_SHADOW_END) #define BPF_JIT_REGION_SIZE (SZ_128M) @@ -68,7 +69,7 @@ #define KERNEL_START _text #define KERNEL_END _end -#ifdef CONFIG_ARM64_USER_VA_BITS_52 +#ifdef CONFIG_ARM64_VA_BITS_52 #define MAX_USER_VA_BITS 52 #else #define MAX_USER_VA_BITS VA_BITS diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h index 670003a55d28..3827ff4040a3 100644 --- a/arch/arm64/include/asm/mmu_context.h +++ b/arch/arm64/include/asm/mmu_context.h @@ -63,7 +63,7 @@ extern u64 idmap_ptrs_per_pgd; static inline bool __cpu_uses_extended_idmap(void) { - if (IS_ENABLED(CONFIG_ARM64_USER_VA_BITS_52)) + if (IS_ENABLED(CONFIG_ARM64_VA_BITS_52)) return false; return unlikely(idmap_t0sz != TCR_T0SZ(VA_BITS)); diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h index db92950bb1a0..3df60f97da1f 100644 --- a/arch/arm64/include/asm/pgtable-hwdef.h +++ b/arch/arm64/include/asm/pgtable-hwdef.h @@ -304,7 +304,7 @@ #define TTBR_BADDR_MASK_52 (((UL(1) << 46) - 1) << 2) #endif -#ifdef CONFIG_ARM64_USER_VA_BITS_52 +#ifdef CONFIG_ARM64_VA_BITS_52 /* Must be at least 64-byte aligned to prevent corruption of the TTBR */ #define TTBR1_BADDR_4852_OFFSET (((UL(1) << (52 - PGDIR_SHIFT)) - \ (UL(1) << (48 - PGDIR_SHIFT))) * 8) diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S index a96dc4386c7c..c8446f8c81f5 100644 --- a/arch/arm64/kernel/head.S +++ b/arch/arm64/kernel/head.S @@ -308,7 +308,7 @@ __create_page_tables: adrp x0, idmap_pg_dir adrp x3, __idmap_text_start // __pa(__idmap_text_start) -#ifdef CONFIG_ARM64_USER_VA_BITS_52 +#ifdef CONFIG_ARM64_VA_BITS_52 mrs_s x6, SYS_ID_AA64MMFR2_EL1 and x6, x6, #(0xf << ID_AA64MMFR2_LVA_SHIFT) mov x5, #52 @@ -794,7 +794,7 @@ ENTRY(__enable_mmu) ENDPROC(__enable_mmu) ENTRY(__cpu_secondary_check52bitva) -#ifdef CONFIG_ARM64_USER_VA_BITS_52 +#ifdef CONFIG_ARM64_VA_BITS_52 ldr_l x0, vabits_user cmp x0, #52 b.ne 2f diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c index 2940221e5519..531c497c5758 100644 --- a/arch/arm64/mm/init.c +++ b/arch/arm64/mm/init.c @@ -325,6 +325,16 @@ void __init arm64_memblock_init(void) vmemmap = ((struct page *)VMEMMAP_START - (memstart_addr >> PAGE_SHIFT)); + /* + * If we are running with a 52-bit kernel VA config on a system that + * does not support it, we have to offset our vmemmap and physvirt_offset + * s.t. we avoid the 52-bit portion of the direct linear map + */ + if (IS_ENABLED(CONFIG_ARM64_VA_BITS_52) && (vabits_actual != 52)) { + vmemmap += (_PAGE_OFFSET(48) - _PAGE_OFFSET(52)) >> PAGE_SHIFT; + physvirt_offset = PHYS_OFFSET - _PAGE_OFFSET(48); + } + /* * Remove the memory that we will not be able to cover with the * linear mapping. 
Take care not to clip the kernel which may be diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S index 8d289ff7584d..8b021c5c0884 100644 --- a/arch/arm64/mm/proc.S +++ b/arch/arm64/mm/proc.S @@ -438,10 +438,11 @@ ENTRY(__cpu_setup) TCR_TBI0 | TCR_A1 | TCR_KASAN_FLAGS tcr_clear_errata_bits x10, x9, x5 -#ifdef CONFIG_ARM64_USER_VA_BITS_52 +#ifdef CONFIG_ARM64_VA_BITS_52 ldr_l x9, vabits_user sub x9, xzr, x9 add x9, x9, #64 + tcr_set_t1sz x10, x9 #else ldr_l x9, idmap_t0sz #endif -- cgit v1.2.3-58-ga151 From 2c624fe68715e76eba1a7089f91e122310dc663c Mon Sep 17 00:00:00 2001 From: Steve Capper Date: Wed, 7 Aug 2019 16:55:23 +0100 Subject: arm64: mm: Remove vabits_user Previous patches have enabled 52-bit kernel + user VAs and there is no longer any scenario where user VA != kernel VA size. This patch removes the, now redundant, vabits_user variable and replaces usage with vabits_actual where appropriate. Reviewed-by: Catalin Marinas Signed-off-by: Steve Capper Signed-off-by: Will Deacon --- arch/arm64/include/asm/memory.h | 3 --- arch/arm64/include/asm/pointer_auth.h | 2 +- arch/arm64/include/asm/processor.h | 2 +- arch/arm64/kernel/head.S | 7 +------ arch/arm64/mm/fault.c | 3 +-- arch/arm64/mm/mmu.c | 2 -- arch/arm64/mm/proc.S | 2 +- 7 files changed, 5 insertions(+), 16 deletions(-) diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h index d911d0573460..ecc945ba8607 100644 --- a/arch/arm64/include/asm/memory.h +++ b/arch/arm64/include/asm/memory.h @@ -194,9 +194,6 @@ static inline unsigned long kaslr_offset(void) return kimage_vaddr - KIMAGE_VADDR; } -/* the actual size of a user virtual address */ -extern u64 vabits_user; - /* * Allow all memory at the discovery stage. We will clip it later. */ diff --git a/arch/arm64/include/asm/pointer_auth.h b/arch/arm64/include/asm/pointer_auth.h index d328540cb85e..7a24bad1a58b 100644 --- a/arch/arm64/include/asm/pointer_auth.h +++ b/arch/arm64/include/asm/pointer_auth.h @@ -69,7 +69,7 @@ extern int ptrauth_prctl_reset_keys(struct task_struct *tsk, unsigned long arg); * The EL0 pointer bits used by a pointer authentication code. * This is dependent on TBI0 being enabled, or bits 63:56 would also apply. 
*/ -#define ptrauth_user_pac_mask() GENMASK(54, vabits_user) +#define ptrauth_user_pac_mask() GENMASK(54, vabits_actual) /* Only valid for EL0 TTBR0 instruction pointers */ static inline unsigned long ptrauth_strip_insn_pac(unsigned long ptr) diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h index 0e1f2770192a..e4c93945e477 100644 --- a/arch/arm64/include/asm/processor.h +++ b/arch/arm64/include/asm/processor.h @@ -43,7 +43,7 @@ */ #define DEFAULT_MAP_WINDOW_64 (UL(1) << VA_BITS_MIN) -#define TASK_SIZE_64 (UL(1) << vabits_user) +#define TASK_SIZE_64 (UL(1) << vabits_actual) #ifdef CONFIG_COMPAT #if defined(CONFIG_ARM64_64K_PAGES) && defined(CONFIG_KUSER_HELPERS) diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S index c8446f8c81f5..949b001a73bb 100644 --- a/arch/arm64/kernel/head.S +++ b/arch/arm64/kernel/head.S @@ -316,11 +316,6 @@ __create_page_tables: #endif mov x5, #VA_BITS_MIN 1: - adr_l x6, vabits_user - str x5, [x6] - dmb sy - dc ivac, x6 // Invalidate potentially stale cache line - adr_l x6, vabits_actual str x5, [x6] dmb sy @@ -795,7 +790,7 @@ ENDPROC(__enable_mmu) ENTRY(__cpu_secondary_check52bitva) #ifdef CONFIG_ARM64_VA_BITS_52 - ldr_l x0, vabits_user + ldr_l x0, vabits_actual cmp x0, #52 b.ne 2f diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c index 6b195871769a..75eff57bd9ef 100644 --- a/arch/arm64/mm/fault.c +++ b/arch/arm64/mm/fault.c @@ -140,8 +140,7 @@ static void show_pte(unsigned long addr) pr_alert("%s pgtable: %luk pages, %llu-bit VAs, pgdp=%016lx\n", mm == &init_mm ? "swapper" : "user", PAGE_SIZE / SZ_1K, - mm == &init_mm ? vabits_actual : (int)vabits_user, - (unsigned long)virt_to_phys(mm->pgd)); + vabits_actual, (unsigned long)virt_to_phys(mm->pgd)); pgdp = pgd_offset(mm, addr); pgd = READ_ONCE(*pgdp); pr_alert("[%016lx] pgd=%016llx", addr, pgd_val(pgd)); diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c index 07b30e6d17f8..0c8f7e55f859 100644 --- a/arch/arm64/mm/mmu.c +++ b/arch/arm64/mm/mmu.c @@ -40,8 +40,6 @@ u64 idmap_t0sz = TCR_T0SZ(VA_BITS); u64 idmap_ptrs_per_pgd = PTRS_PER_PGD; -u64 vabits_user __ro_after_init; -EXPORT_SYMBOL(vabits_user); u64 __section(".mmuoff.data.write") vabits_actual; EXPORT_SYMBOL(vabits_actual); diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S index 8b021c5c0884..391f9cabfe60 100644 --- a/arch/arm64/mm/proc.S +++ b/arch/arm64/mm/proc.S @@ -439,7 +439,7 @@ ENTRY(__cpu_setup) tcr_clear_errata_bits x10, x9, x5 #ifdef CONFIG_ARM64_VA_BITS_52 - ldr_l x9, vabits_user + ldr_l x9, vabits_actual sub x9, xzr, x9 add x9, x9, #64 tcr_set_t1sz x10, x9 -- cgit v1.2.3-58-ga151 From d2c68de192cfb90f607a80c6b10c41ebd8a3de6a Mon Sep 17 00:00:00 2001 From: Steve Capper Date: Wed, 7 Aug 2019 16:55:24 +0100 Subject: docs: arm64: Add layout and 52-bit info to memory document As the kernel no longer prints out the memory layout on boot, this patch adds this information back to the memory document. Also, as the 52-bit support introduces some subtle changes to the arm64 memory, the rationale behind these changes are also added to the memory document. 
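For context, the 52-bit opt-in described by the new documentation looks roughly like this from userspace (a sketch only, mirroring the mmap() hint example the document adds below; alloc_maybe_high() is an invented helper name):

	#include <stddef.h>
	#include <sys/mman.h>

	/* An address hint above the 48-bit boundary opts this mapping in to
	 * 52-bit VAs; without hardware or kernel support the hint is simply
	 * ignored and a 48-bit address is returned as before. */
	static void *alloc_maybe_high(size_t size)
	{
		return mmap((void *)~0UL, size, PROT_READ | PROT_WRITE,
			    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	}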
Signed-off-by: Steve Capper Reviewed-by: Catalin Marinas Signed-off-by: Will Deacon --- Documentation/arm64/memory.rst | 123 +++++++++++++++++++++++++++++++---------- 1 file changed, 95 insertions(+), 28 deletions(-) diff --git a/Documentation/arm64/memory.rst b/Documentation/arm64/memory.rst index 464b880fc4b7..b040909e45f8 100644 --- a/Documentation/arm64/memory.rst +++ b/Documentation/arm64/memory.rst @@ -14,6 +14,10 @@ with the 4KB page configuration, allowing 39-bit (512GB) or 48-bit 64KB pages, only 2 levels of translation tables, allowing 42-bit (4TB) virtual address, are used but the memory layout is the same. +ARMv8.2 adds optional support for Large Virtual Address space. This is +only available when running with a 64KB page size and expands the +number of descriptors in the first level of translation. + User addresses have bits 63:48 set to 0 while the kernel addresses have the same bits set to 1. TTBRx selection is given by bit 63 of the virtual address. The swapper_pg_dir contains only kernel (global) @@ -22,40 +26,43 @@ The swapper_pg_dir address is written to TTBR1 and never written to TTBR0. -AArch64 Linux memory layout with 4KB pages + 3 levels:: - - Start End Size Use - ----------------------------------------------------------------------- - 0000000000000000 0000007fffffffff 512GB user - ffffff8000000000 ffffffffffffffff 512GB kernel - - -AArch64 Linux memory layout with 4KB pages + 4 levels:: +AArch64 Linux memory layout with 4KB pages + 4 levels (48-bit):: Start End Size Use ----------------------------------------------------------------------- 0000000000000000 0000ffffffffffff 256TB user - ffff000000000000 ffffffffffffffff 256TB kernel - - -AArch64 Linux memory layout with 64KB pages + 2 levels:: + ffff000000000000 ffff7fffffffffff 128TB kernel logical memory map + ffff800000000000 ffff9fffffffffff 32TB kasan shadow region + ffffa00000000000 ffffa00007ffffff 128MB bpf jit region + ffffa00008000000 ffffa0000fffffff 128MB modules + ffffa00010000000 fffffdffbffeffff ~93TB vmalloc + fffffdffbfff0000 fffffdfffe5f8fff ~998MB [guard region] + fffffdfffe5f9000 fffffdfffe9fffff 4124KB fixed mappings + fffffdfffea00000 fffffdfffebfffff 2MB [guard region] + fffffdfffec00000 fffffdffffbfffff 16MB PCI I/O space + fffffdffffc00000 fffffdffffdfffff 2MB [guard region] + fffffdffffe00000 ffffffffffdfffff 2TB vmemmap + ffffffffffe00000 ffffffffffffffff 2MB [guard region] + + +AArch64 Linux memory layout with 64KB pages + 3 levels (52-bit with HW support):: Start End Size Use ----------------------------------------------------------------------- - 0000000000000000 000003ffffffffff 4TB user - fffffc0000000000 ffffffffffffffff 4TB kernel - - -AArch64 Linux memory layout with 64KB pages + 3 levels:: - - Start End Size Use - ----------------------------------------------------------------------- - 0000000000000000 0000ffffffffffff 256TB user - ffff000000000000 ffffffffffffffff 256TB kernel - - -For details of the virtual kernel memory layout please see the kernel -booting log. 
+ 0000000000000000 000fffffffffffff 4PB user + fff0000000000000 fff7ffffffffffff 2PB kernel logical memory map + fff8000000000000 fffd9fffffffffff 1440TB [gap] + fffda00000000000 ffff9fffffffffff 512TB kasan shadow region + ffffa00000000000 ffffa00007ffffff 128MB bpf jit region + ffffa00008000000 ffffa0000fffffff 128MB modules + ffffa00010000000 fffff81ffffeffff ~88TB vmalloc + fffff81fffff0000 fffffc1ffe58ffff ~3TB [guard region] + fffffc1ffe590000 fffffc1ffe9fffff 4544KB fixed mappings + fffffc1ffea00000 fffffc1ffebfffff 2MB [guard region] + fffffc1ffec00000 fffffc1fffbfffff 16MB PCI I/O space + fffffc1fffc00000 fffffc1fffdfffff 2MB [guard region] + fffffc1fffe00000 ffffffffffdfffff 3968GB vmemmap + ffffffffffe00000 ffffffffffffffff 2MB [guard region] Translation table lookup with 4KB pages:: @@ -83,7 +90,8 @@ Translation table lookup with 64KB pages:: | | | | [15:0] in-page offset | | | +----------> [28:16] L3 index | | +--------------------------> [41:29] L2 index - | +-------------------------------> [47:42] L1 index + | +-------------------------------> [47:42] L1 index (48-bit) + | [51:42] L1 index (52-bit) +-------------------------------------------------> [63] TTBR0/1 @@ -96,3 +104,62 @@ ARM64_HARDEN_EL2_VECTORS is selected for particular CPUs. When using KVM with the Virtualization Host Extensions, no additional mappings are created, since the host kernel runs directly in EL2. + +52-bit VA support in the kernel +------------------------------- +If the ARMv8.2-LVA optional feature is present, and we are running +with a 64KB page size; then it is possible to use 52-bits of address +space for both userspace and kernel addresses. However, any kernel +binary that supports 52-bit must also be able to fall back to 48-bit +at early boot time if the hardware feature is not present. + +This fallback mechanism necessitates the kernel .text to be in the +higher addresses such that they are invariant to 48/52-bit VAs. Due +to the kasan shadow being a fraction of the entire kernel VA space, +the end of the kasan shadow must also be in the higher half of the +kernel VA space for both 48/52-bit. (Switching from 48-bit to 52-bit, +the end of the kasan shadow is invariant and dependent on ~0UL, +whilst the start address will "grow" towards the lower addresses). + +In order to optimise phys_to_virt and virt_to_phys, the PAGE_OFFSET +is kept constant at 0xFFF0000000000000 (corresponding to 52-bit), +this obviates the need for an extra variable read. The physvirt +offset and vmemmap offsets are computed at early boot to enable +this logic. + +As a single binary will need to support both 48-bit and 52-bit VA +spaces, the VMEMMAP must be sized large enough for 52-bit VAs and +also must be sized large enought to accommodate a fixed PAGE_OFFSET. + +Most code in the kernel should not need to consider the VA_BITS, for +code that does need to know the VA size the variables are +defined as follows: + +VA_BITS constant the *maximum* VA space size + +VA_BITS_MIN constant the *minimum* VA space size + +vabits_actual variable the *actual* VA space size + + +Maximum and minimum sizes can be useful to ensure that buffers are +sized large enough or that addresses are positioned close enough for +the "worst" case. + +52-bit userspace VAs +-------------------- +To maintain compatibility with software that relies on the ARMv8.0 +VA space maximum size of 48-bits, the kernel will, by default, +return virtual addresses to userspace from a 48-bit range. 
+ +Software can "opt-in" to receiving VAs from a 52-bit space by +specifying an mmap hint parameter that is larger than 48-bit. +For example: + maybe_high_address = mmap(~0UL, size, prot, flags,...); + +It is also possible to build a debug kernel that returns addresses +from a 52-bit space by enabling the following kernel config options: + CONFIG_EXPERT=y && CONFIG_ARM64_FORCE_52BIT=y + +Note that this option is only intended for debugging applications +and should not be used in production. -- cgit v1.2.3-58-ga151 From d2d73d2fef421ca0d447946cc430fdf5c4c5b06a Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Fri, 9 Aug 2019 15:27:32 +0100 Subject: arm64: mm: Simplify definition of virt_addr_valid() _virt_addr_valid() is defined as the same value in two places and rolls its own version of virt_to_pfn() in both cases. Consolidate these definitions by inlining a simplified version directly into virt_addr_valid(). Signed-off-by: Will Deacon --- arch/arm64/include/asm/memory.h | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h index ecc945ba8607..2c3c4b145e95 100644 --- a/arch/arm64/include/asm/memory.h +++ b/arch/arm64/include/asm/memory.h @@ -312,7 +312,6 @@ static inline void *phys_to_virt(phys_addr_t x) #if !defined(CONFIG_SPARSEMEM_VMEMMAP) || defined(CONFIG_DEBUG_VIRTUAL) #define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT) -#define _virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT) #else #define __virt_to_pgoff(kaddr) (((u64)(kaddr) - PAGE_OFFSET) / PAGE_SIZE * sizeof(struct page)) #define __page_to_voff(kaddr) (((u64)(kaddr) - VMEMMAP_START) * PAGE_SIZE / sizeof(struct page)) @@ -326,15 +325,14 @@ static inline void *phys_to_virt(phys_addr_t x) }) #define virt_to_page(vaddr) ((struct page *)((__virt_to_pgoff(vaddr)) + VMEMMAP_START)) - -#define _virt_addr_valid(kaddr) pfn_valid(__virt_to_phys((u64)(kaddr)) >> PAGE_SHIFT) #endif #endif #define _virt_addr_is_linear(kaddr) \ (__tag_reset((u64)(kaddr)) >= PAGE_OFFSET) + #define virt_addr_valid(kaddr) \ - (_virt_addr_is_linear(kaddr) && _virt_addr_valid(kaddr)) + (_virt_addr_is_linear(kaddr) && pfn_valid(virt_to_pfn(kaddr))) /* * Given that the GIC architecture permits ITS implementations that can only be -- cgit v1.2.3-58-ga151 From 9c1cac424c93d2b3122014e07a54b003ddedc168 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Fri, 9 Aug 2019 15:39:37 +0100 Subject: arm64: mm: Really fix sparse warning in untagged_addr() untagged_addr() can be called with a '__user' pointer parameter and must therefore use '__force' casts both when passing this parameter through to sign_extend64() as a 'u64', but also when casting the 's64' return value back to the '__user' pointer type. Signed-off-by: Will Deacon --- arch/arm64/include/asm/memory.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h index 46c4c08a80a9..76e0b232a473 100644 --- a/arch/arm64/include/asm/memory.h +++ b/arch/arm64/include/asm/memory.h @@ -201,7 +201,7 @@ extern u64 vabits_user; * pass on to access_ok(), for instance. 
*/ #define untagged_addr(addr) \ - ((__typeof__(addr))sign_extend64((__force u64)(addr), 55)) + ((__force __typeof__(addr))sign_extend64((__force u64)(addr), 55)) #ifdef CONFIG_KASAN_SW_TAGS #define __tag_shifted(tag) ((u64)(tag) << 56) -- cgit v1.2.3-58-ga151 From 63e3ee61549595719145d23fdacd1d8744051108 Mon Sep 17 00:00:00 2001 From: Lorenzo Pieralisi Date: Fri, 9 Aug 2019 12:03:07 +0100 Subject: ARM: cpuidle: Remove useless header include The generic ARM CPUidle driver includes by mistake. Remove the topology header include. Signed-off-by: Lorenzo Pieralisi Acked-by: Daniel Lezcano Reviewed-by: Ulf Hansson Reviewed-by: Sudeep Holla Cc: Ulf Hansson Cc: Sudeep Holla Cc: Daniel Lezcano Cc: "Rafael J. Wysocki" Signed-off-by: Will Deacon --- drivers/cpuidle/cpuidle-arm.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/cpuidle/cpuidle-arm.c b/drivers/cpuidle/cpuidle-arm.c index 5bcd82c35dcf..dc33b3d2954f 100644 --- a/drivers/cpuidle/cpuidle-arm.c +++ b/drivers/cpuidle/cpuidle-arm.c @@ -15,7 +15,6 @@ #include #include #include -#include #include -- cgit v1.2.3-58-ga151 From 6460d7ba488419dd5bc2cbfd989d5d7930ac1f9f Mon Sep 17 00:00:00 2001 From: Lorenzo Pieralisi Date: Fri, 9 Aug 2019 12:03:08 +0100 Subject: ARM: cpuidle: Remove overzealous error logging CPUidle back-end operations are not implemented in some platforms but this should not be considered an error serious enough to be logged. Check the arm_cpuidle_init() return value to detect whether the failure must be reported or not in the kernel log and do not log it if the platform does not support CPUidle operations. Signed-off-by: Lorenzo Pieralisi Acked-by: Daniel Lezcano Reviewed-by: Ulf Hansson Reviewed-by: Sudeep Holla Cc: Ulf Hansson Cc: Sudeep Holla Cc: Daniel Lezcano Cc: "Rafael J. Wysocki" Signed-off-by: Will Deacon --- drivers/cpuidle/cpuidle-arm.c | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/drivers/cpuidle/cpuidle-arm.c b/drivers/cpuidle/cpuidle-arm.c index dc33b3d2954f..9e5156d39627 100644 --- a/drivers/cpuidle/cpuidle-arm.c +++ b/drivers/cpuidle/cpuidle-arm.c @@ -105,11 +105,17 @@ static int __init arm_idle_init_cpu(int cpu) ret = arm_cpuidle_init(cpu); /* - * Allow the initialization to continue for other CPUs, if the reported - * failure is a HW misconfiguration/breakage (-ENXIO). + * Allow the initialization to continue for other CPUs, if the + * reported failure is a HW misconfiguration/breakage (-ENXIO). + * + * Some platforms do not support idle operations + * (arm_cpuidle_init() returning -EOPNOTSUPP), we should + * not flag this case as an error, it is a valid + * configuration. */ if (ret) { - pr_err("CPU %d failed to init idle CPU ops\n", cpu); + if (ret != -EOPNOTSUPP) + pr_err("CPU %d failed to init idle CPU ops\n", cpu); ret = ret == -ENXIO ? 0 : ret; goto out_kfree_drv; } -- cgit v1.2.3-58-ga151 From e76d8b7027d9451cfdcb8cd605148ff5e0a1bf5d Mon Sep 17 00:00:00 2001 From: Lorenzo Pieralisi Date: Fri, 9 Aug 2019 12:03:09 +0100 Subject: drivers: firmware: psci: Decouple checker from generic ARM CPUidle The PSCI checker currently relies on the generic ARM CPUidle infrastructure to enter an idle state, which in turn creates a dependency that is not really needed. 
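At the call site the decoupling reduces to the change sketched here (suspend_cpu_sketch() is an illustrative stand-in for the checker's suspend_cpu(); the full patch follows):

	#include <linux/cpuidle.h>

	static int suspend_cpu_sketch(struct cpuidle_device *dev,
				      struct cpuidle_driver *drv, int index)
	{
		struct cpuidle_state *state = &drv->states[index];

		/* Previously the checker replicated the generic ARM CPUidle
		 * entry path:
		 *	ret = CPU_PM_CPU_IDLE_ENTER(arm_cpuidle_suspend, index);
		 * Now it simply invokes the enter() callback installed by
		 * whichever idle driver registered the states. */
		return state->enter(dev, drv, index);
	}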
The PSCI checker code to test PSCI CPU suspend is built on top of the CPUidle framework and can easily reuse the struct cpuidle_state.enter() function (previously initialized by an idle driver, with a PSCI back-end) to trigger an entry into an idle state, decoupling the PSCI checker from the generic ARM CPUidle infrastructure and simplyfing the code in the process. Convert the PSCI checker suspend entry function to use the struct cpuidle_state.enter() function callback. Signed-off-by: Lorenzo Pieralisi Acked-by: Daniel Lezcano Reviewed-by: Ulf Hansson Reviewed-by: Sudeep Holla Cc: Sudeep Holla Cc: Mark Rutland Signed-off-by: Will Deacon --- drivers/firmware/psci/psci_checker.c | 16 +++++++--------- 1 file changed, 7 insertions(+), 9 deletions(-) diff --git a/drivers/firmware/psci/psci_checker.c b/drivers/firmware/psci/psci_checker.c index f3659443f8c2..6a445397771c 100644 --- a/drivers/firmware/psci/psci_checker.c +++ b/drivers/firmware/psci/psci_checker.c @@ -228,8 +228,11 @@ out_free_cpus: static void dummy_callback(struct timer_list *unused) {} -static int suspend_cpu(int index, bool broadcast) +static int suspend_cpu(struct cpuidle_device *dev, + struct cpuidle_driver *drv, int index) { + struct cpuidle_state *state = &drv->states[index]; + bool broadcast = state->flags & CPUIDLE_FLAG_TIMER_STOP; int ret; arch_cpu_idle_enter(); @@ -254,11 +257,7 @@ static int suspend_cpu(int index, bool broadcast) } } - /* - * Replicate the common ARM cpuidle enter function - * (arm_enter_idle_state). - */ - ret = CPU_PM_CPU_IDLE_ENTER(arm_cpuidle_suspend, index); + ret = state->enter(dev, drv, index); if (broadcast) tick_broadcast_exit(); @@ -301,9 +300,8 @@ static int suspend_test_thread(void *arg) * doesn't use PSCI). */ for (index = 1; index < drv->state_count; ++index) { - struct cpuidle_state *state = &drv->states[index]; - bool broadcast = state->flags & CPUIDLE_FLAG_TIMER_STOP; int ret; + struct cpuidle_state *state = &drv->states[index]; /* * Set the timer to wake this CPU up in some time (which @@ -318,7 +316,7 @@ static int suspend_test_thread(void *arg) /* IRQs must be disabled during suspend operations. */ local_irq_disable(); - ret = suspend_cpu(index, broadcast); + ret = suspend_cpu(dev, drv, index); /* * We have woken up. Re-enable IRQs to handle any -- cgit v1.2.3-58-ga151 From 81d549e0c810773bf003a25f59fa5509857bf9b2 Mon Sep 17 00:00:00 2001 From: Lorenzo Pieralisi Date: Fri, 9 Aug 2019 12:03:10 +0100 Subject: ARM: psci: cpuidle: Introduce PSCI CPUidle driver PSCI firmware is the standard power management control for all ARM64 based platforms and it is also deployed on some ARM 32 bit platforms to date. Idle state entry in PSCI is currently achieved by calling arm_cpuidle_init() and arm_cpuidle_suspend() in a generic idle driver, which in turn relies on ARM/ARM64 CPUidle back-end to relay the call into PSCI firmware if PSCI is the boot method. Given that PSCI is the standard idle entry method on ARM64 systems (which means that no other CPUidle driver are expected on ARM64 platforms - so PSCI is already a generic idle driver), in order to simplify idle entry and code maintenance, it makes sense to have a PSCI specific idle driver so that idle code that it is currently living in drivers/firmware directory can be hoisted out of it and moved where it belongs, into a full-fledged PSCI driver, leaving PSCI code in drivers/firmware as a pure firmware interface, as it should be. Implement a PSCI CPUidle driver. 
By default it is a silent Kconfig entry which is left unselected, since it selection would clash with the generic ARM CPUidle driver that provides a PSCI based idle driver through the arm/arm64 arches back-ends CPU operations. Signed-off-by: Lorenzo Pieralisi Acked-by: Daniel Lezcano Reviewed-by: Ulf Hansson Reviewed-by: Sudeep Holla Cc: Ulf Hansson Cc: Sudeep Holla Cc: Daniel Lezcano Cc: Mark Rutland Cc: "Rafael J. Wysocki" Signed-off-by: Will Deacon --- MAINTAINERS | 8 +++ drivers/cpuidle/Kconfig.arm | 10 +++ drivers/cpuidle/Makefile | 1 + drivers/cpuidle/cpuidle-psci.c | 151 +++++++++++++++++++++++++++++++++++++++++ 4 files changed, 170 insertions(+) create mode 100644 drivers/cpuidle/cpuidle-psci.c diff --git a/MAINTAINERS b/MAINTAINERS index a2c343ee3b2c..b6c2d21103fb 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -4290,6 +4290,14 @@ S: Supported F: drivers/cpuidle/cpuidle-exynos.c F: arch/arm/mach-exynos/pm.c +CPUIDLE DRIVER - ARM PSCI +M: Lorenzo Pieralisi +M: Sudeep Holla +L: linux-pm@vger.kernel.org +L: linux-arm-kernel@lists.infradead.org +S: Supported +F: drivers/cpuidle/cpuidle-psci.c + CPU IDLE TIME MANAGEMENT FRAMEWORK M: "Rafael J. Wysocki" M: Daniel Lezcano diff --git a/drivers/cpuidle/Kconfig.arm b/drivers/cpuidle/Kconfig.arm index 48cb3d4bb7d1..eb014aa5ce6b 100644 --- a/drivers/cpuidle/Kconfig.arm +++ b/drivers/cpuidle/Kconfig.arm @@ -13,6 +13,16 @@ config ARM_CPUIDLE initialized by calling the CPU operations init idle hook provided by architecture code. +config ARM_PSCI_CPUIDLE + bool + depends on ARM_PSCI_FW + select DT_IDLE_STATES + select CPU_IDLE_MULTIPLE_DRIVERS + help + Select this to enable PSCI firmware based CPUidle driver for ARM. + It provides an idle driver that is capable of detecting and + managing idle states through the PSCI firmware interface. + config ARM_BIG_LITTLE_CPUIDLE bool "Support for ARM big.LITTLE processors" depends on ARCH_VEXPRESS_TC2_PM || ARCH_EXYNOS diff --git a/drivers/cpuidle/Makefile b/drivers/cpuidle/Makefile index 9d7176cee3d3..40d016339b29 100644 --- a/drivers/cpuidle/Makefile +++ b/drivers/cpuidle/Makefile @@ -20,6 +20,7 @@ obj-$(CONFIG_ARM_U8500_CPUIDLE) += cpuidle-ux500.o obj-$(CONFIG_ARM_AT91_CPUIDLE) += cpuidle-at91.o obj-$(CONFIG_ARM_EXYNOS_CPUIDLE) += cpuidle-exynos.o obj-$(CONFIG_ARM_CPUIDLE) += cpuidle-arm.o +obj-$(CONFIG_ARM_PSCI_CPUIDLE) += cpuidle-psci.o ############################################################################### # MIPS drivers diff --git a/drivers/cpuidle/cpuidle-psci.c b/drivers/cpuidle/cpuidle-psci.c new file mode 100644 index 000000000000..ab1dea918ea3 --- /dev/null +++ b/drivers/cpuidle/cpuidle-psci.c @@ -0,0 +1,151 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * PSCI CPU idle driver. + * + * Copyright (C) 2019 ARM Ltd. + * Author: Lorenzo Pieralisi + */ + +#define pr_fmt(fmt) "CPUidle PSCI: " fmt + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include "dt_idle_states.h" + +static int psci_enter_idle_state(struct cpuidle_device *dev, + struct cpuidle_driver *drv, int idx) +{ + return CPU_PM_CPU_IDLE_ENTER(psci_cpu_suspend_enter, idx); +} + +static struct cpuidle_driver psci_idle_driver __initdata = { + .name = "psci_idle", + .owner = THIS_MODULE, + /* + * PSCI idle states relies on architectural WFI to + * be represented as state index 0. 
+ */ + .states[0] = { + .enter = psci_enter_idle_state, + .exit_latency = 1, + .target_residency = 1, + .power_usage = UINT_MAX, + .name = "WFI", + .desc = "ARM WFI", + } +}; + +static const struct of_device_id psci_idle_state_match[] __initconst = { + { .compatible = "arm,idle-state", + .data = psci_enter_idle_state }, + { }, +}; + +static int __init psci_idle_init_cpu(int cpu) +{ + struct cpuidle_driver *drv; + struct device_node *cpu_node; + const char *enable_method; + int ret = 0; + + cpu_node = of_cpu_device_node_get(cpu); + if (!cpu_node) + return -ENODEV; + + /* + * Check whether the enable-method for the cpu is PSCI, fail + * if it is not. + */ + enable_method = of_get_property(cpu_node, "enable-method", NULL); + if (!enable_method || (strcmp(enable_method, "psci"))) + ret = -ENODEV; + + of_node_put(cpu_node); + if (ret) + return ret; + + drv = kmemdup(&psci_idle_driver, sizeof(*drv), GFP_KERNEL); + if (!drv) + return -ENOMEM; + + drv->cpumask = (struct cpumask *)cpumask_of(cpu); + + /* + * Initialize idle states data, starting at index 1, since + * by default idle state 0 is the quiescent state reached + * by the cpu by executing the wfi instruction. + * + * If no DT idle states are detected (ret == 0) let the driver + * initialization fail accordingly since there is no reason to + * initialize the idle driver if only wfi is supported, the + * default archictectural back-end already executes wfi + * on idle entry. + */ + ret = dt_init_idle_driver(drv, psci_idle_state_match, 1); + if (ret <= 0) { + ret = ret ? : -ENODEV; + goto out_kfree_drv; + } + + /* + * Initialize PSCI idle states. + */ + ret = psci_cpu_init_idle(cpu); + if (ret) { + pr_err("CPU %d failed to PSCI idle\n", cpu); + goto out_kfree_drv; + } + + ret = cpuidle_register(drv, NULL); + if (ret) + goto out_kfree_drv; + + return 0; + +out_kfree_drv: + kfree(drv); + return ret; +} + +/* + * psci_idle_init - Initializes PSCI cpuidle driver + * + * Initializes PSCI cpuidle driver for all CPUs, if any CPU fails + * to register cpuidle driver then rollback to cancel all CPUs + * registration. + */ +static int __init psci_idle_init(void) +{ + int cpu, ret; + struct cpuidle_driver *drv; + struct cpuidle_device *dev; + + for_each_possible_cpu(cpu) { + ret = psci_idle_init_cpu(cpu); + if (ret) + goto out_fail; + } + + return 0; + +out_fail: + while (--cpu >= 0) { + dev = per_cpu(cpuidle_devices, cpu); + drv = cpuidle_get_cpu_driver(dev); + cpuidle_unregister(drv); + kfree(drv); + } + + return ret; +} +device_initcall(psci_idle_init); -- cgit v1.2.3-58-ga151 From 788961462f3471617749edf10d0fcafad410d2bb Mon Sep 17 00:00:00 2001 From: Lorenzo Pieralisi Date: Fri, 9 Aug 2019 12:03:11 +0100 Subject: ARM: psci: cpuidle: Enable PSCI CPUidle driver Allow selection of the PSCI CPUidle in the kernel by updating the respective Kconfig entry. Remove PSCI callbacks from ARM/ARM64 generic CPU ops to prevent the PSCI idle driver from clashing with the generic ARM CPUidle driver initialization, that relies on CPU ops to initialize and enter idle states. Signed-off-by: Lorenzo Pieralisi Reviewed-by: Ulf Hansson Cc: Will Deacon Cc: Ulf Hansson Cc: Sudeep Holla Cc: Daniel Lezcano Cc: Catalin Marinas Cc: Mark Rutland Cc: "Rafael J. 
Wysocki" Signed-off-by: Will Deacon --- arch/arm64/kernel/cpuidle.c | 7 ++++--- arch/arm64/kernel/psci.c | 4 ---- drivers/cpuidle/Kconfig.arm | 2 +- drivers/firmware/psci/psci.c | 10 ---------- 4 files changed, 5 insertions(+), 18 deletions(-) diff --git a/arch/arm64/kernel/cpuidle.c b/arch/arm64/kernel/cpuidle.c index d1048173fd8a..619e0ebb8399 100644 --- a/arch/arm64/kernel/cpuidle.c +++ b/arch/arm64/kernel/cpuidle.c @@ -11,6 +11,7 @@ #include #include #include +#include #include #include @@ -48,15 +49,15 @@ int arm_cpuidle_suspend(int index) int acpi_processor_ffh_lpi_probe(unsigned int cpu) { - return arm_cpuidle_init(cpu); + return psci_cpu_init_idle(cpu); } int acpi_processor_ffh_lpi_enter(struct acpi_lpi_state *lpi) { if (ARM64_LPI_IS_RETENTION_STATE(lpi->arch_flags)) - return CPU_PM_CPU_IDLE_ENTER_RETENTION(arm_cpuidle_suspend, + return CPU_PM_CPU_IDLE_ENTER_RETENTION(psci_cpu_suspend_enter, lpi->index); else - return CPU_PM_CPU_IDLE_ENTER(arm_cpuidle_suspend, lpi->index); + return CPU_PM_CPU_IDLE_ENTER(psci_cpu_suspend_enter, lpi->index); } #endif diff --git a/arch/arm64/kernel/psci.c b/arch/arm64/kernel/psci.c index 85ee7d07889e..a543ab7e007c 100644 --- a/arch/arm64/kernel/psci.c +++ b/arch/arm64/kernel/psci.c @@ -105,10 +105,6 @@ static int cpu_psci_cpu_kill(unsigned int cpu) const struct cpu_operations cpu_psci_ops = { .name = "psci", -#ifdef CONFIG_CPU_IDLE - .cpu_init_idle = psci_cpu_init_idle, - .cpu_suspend = psci_cpu_suspend_enter, -#endif .cpu_init = cpu_psci_cpu_init, .cpu_prepare = cpu_psci_cpu_prepare, .cpu_boot = cpu_psci_cpu_boot, diff --git a/drivers/cpuidle/Kconfig.arm b/drivers/cpuidle/Kconfig.arm index eb014aa5ce6b..d8530475493c 100644 --- a/drivers/cpuidle/Kconfig.arm +++ b/drivers/cpuidle/Kconfig.arm @@ -14,7 +14,7 @@ config ARM_CPUIDLE provided by architecture code. config ARM_PSCI_CPUIDLE - bool + bool "PSCI CPU idle Driver" depends on ARM_PSCI_FW select DT_IDLE_STATES select CPU_IDLE_MULTIPLE_DRIVERS diff --git a/drivers/firmware/psci/psci.c b/drivers/firmware/psci/psci.c index f82ccd39a913..b343f8a34c6a 100644 --- a/drivers/firmware/psci/psci.c +++ b/drivers/firmware/psci/psci.c @@ -436,16 +436,6 @@ int psci_cpu_suspend_enter(unsigned long index) return ret; } - -/* ARM specific CPU idle operations */ -#ifdef CONFIG_ARM -static const struct cpuidle_ops psci_cpuidle_ops __initconst = { - .suspend = psci_cpu_suspend_enter, - .init = psci_dt_cpu_init_idle, -}; - -CPUIDLE_METHOD_OF_DECLARE(psci, "psci", &psci_cpuidle_ops); -#endif #endif static int psci_system_suspend(unsigned long unused) -- cgit v1.2.3-58-ga151 From 9ffeb6d08c3a4bbd7b1e33711b241f511e2ded79 Mon Sep 17 00:00:00 2001 From: Lorenzo Pieralisi Date: Fri, 9 Aug 2019 12:03:12 +0100 Subject: PSCI: cpuidle: Refactor CPU suspend power_state parameter handling Current PSCI code handles idle state entry through the psci_cpu_suspend_enter() API, that takes an idle state index as a parameter and convert the index into a previously initialized power_state parameter before calling the PSCI.CPU_SUSPEND() with it. This is unwieldly, since it forces the PSCI firmware layer to keep track of power_state parameter for every idle state so that the index->power_state conversion can be made in the PSCI firmware layer instead of the CPUidle driver implementations. 
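Concretely, after the refactor the DT back-end owns the per-cpu power_state table and hands the value straight to the firmware call, as in this extract from the cpuidle-psci.c hunk further down:

	static int psci_enter_idle_state(struct cpuidle_device *dev,
					 struct cpuidle_driver *drv, int idx)
	{
		u32 *state = __this_cpu_read(psci_power_state);

		return CPU_PM_CPU_IDLE_ENTER_PARAM(psci_cpu_suspend_enter,
						   idx, state[idx - 1]);
	}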
Move the power_state handling out of drivers/firmware/psci into the respective ACPI/DT PSCI CPUidle backends and convert the psci_cpu_suspend_enter() API to get the power_state parameter as input, which makes it closer to its firmware interface PSCI.CPU_SUSPEND() API. A notable side effect is that the PSCI ACPI/DT CPUidle backends now can directly handle (and if needed update) power_state parameters before handing them over to the PSCI firmware interface to trigger PSCI.CPU_SUSPEND() calls. Signed-off-by: Lorenzo Pieralisi Acked-by: Daniel Lezcano Reviewed-by: Ulf Hansson Reviewed-by: Sudeep Holla Cc: Will Deacon Cc: Ulf Hansson Cc: Sudeep Holla Cc: Daniel Lezcano Cc: Catalin Marinas Cc: Mark Rutland Cc: "Rafael J. Wysocki" Signed-off-by: Will Deacon --- arch/arm64/kernel/cpuidle.c | 49 +++++++++++-- drivers/cpuidle/cpuidle-psci.c | 87 ++++++++++++++++++++++- drivers/firmware/psci/psci.c | 157 +++-------------------------------------- include/linux/cpuidle.h | 17 +++-- include/linux/psci.h | 4 +- 5 files changed, 154 insertions(+), 160 deletions(-) diff --git a/arch/arm64/kernel/cpuidle.c b/arch/arm64/kernel/cpuidle.c index 619e0ebb8399..e4d6af2fdec7 100644 --- a/arch/arm64/kernel/cpuidle.c +++ b/arch/arm64/kernel/cpuidle.c @@ -47,17 +47,58 @@ int arm_cpuidle_suspend(int index) #define ARM64_LPI_IS_RETENTION_STATE(arch_flags) (!(arch_flags)) +static int psci_acpi_cpu_init_idle(unsigned int cpu) +{ + int i, count; + struct acpi_lpi_state *lpi; + struct acpi_processor *pr = per_cpu(processors, cpu); + + /* + * If the PSCI cpu_suspend function hook has not been initialized + * idle states must not be enabled, so bail out + */ + if (!psci_ops.cpu_suspend) + return -EOPNOTSUPP; + + if (unlikely(!pr || !pr->flags.has_lpi)) + return -EINVAL; + + count = pr->power.count - 1; + if (count <= 0) + return -ENODEV; + + for (i = 0; i < count; i++) { + u32 state; + + lpi = &pr->power.lpi_states[i + 1]; + /* + * Only bits[31:0] represent a PSCI power_state while + * bits[63:32] must be 0x0 as per ARM ACPI FFH Specification + */ + state = lpi->address; + if (!psci_power_state_is_valid(state)) { + pr_warn("Invalid PSCI power state %#x\n", state); + return -EINVAL; + } + } + + return 0; +} + int acpi_processor_ffh_lpi_probe(unsigned int cpu) { - return psci_cpu_init_idle(cpu); + return psci_acpi_cpu_init_idle(cpu); } int acpi_processor_ffh_lpi_enter(struct acpi_lpi_state *lpi) { + u32 state = lpi->address; + if (ARM64_LPI_IS_RETENTION_STATE(lpi->arch_flags)) - return CPU_PM_CPU_IDLE_ENTER_RETENTION(psci_cpu_suspend_enter, - lpi->index); + return CPU_PM_CPU_IDLE_ENTER_RETENTION_PARAM(psci_cpu_suspend_enter, + lpi->index, state); else - return CPU_PM_CPU_IDLE_ENTER(psci_cpu_suspend_enter, lpi->index); + return CPU_PM_CPU_IDLE_ENTER_PARAM(psci_cpu_suspend_enter, + lpi->index, state); } #endif diff --git a/drivers/cpuidle/cpuidle-psci.c b/drivers/cpuidle/cpuidle-psci.c index ab1dea918ea3..f3c1a2396f98 100644 --- a/drivers/cpuidle/cpuidle-psci.c +++ b/drivers/cpuidle/cpuidle-psci.c @@ -22,10 +22,15 @@ #include "dt_idle_states.h" +static DEFINE_PER_CPU_READ_MOSTLY(u32 *, psci_power_state); + static int psci_enter_idle_state(struct cpuidle_device *dev, struct cpuidle_driver *drv, int idx) { - return CPU_PM_CPU_IDLE_ENTER(psci_cpu_suspend_enter, idx); + u32 *state = __this_cpu_read(psci_power_state); + + return CPU_PM_CPU_IDLE_ENTER_PARAM(psci_cpu_suspend_enter, + idx, state[idx - 1]); } static struct cpuidle_driver psci_idle_driver __initdata = { @@ -51,6 +56,86 @@ static const struct of_device_id 
psci_idle_state_match[] __initconst = { { }, }; +static int __init psci_dt_parse_state_node(struct device_node *np, u32 *state) +{ + int err = of_property_read_u32(np, "arm,psci-suspend-param", state); + + if (err) { + pr_warn("%pOF missing arm,psci-suspend-param property\n", np); + return err; + } + + if (!psci_power_state_is_valid(*state)) { + pr_warn("Invalid PSCI power state %#x\n", *state); + return -EINVAL; + } + + return 0; +} + +static int __init psci_dt_cpu_init_idle(struct device_node *cpu_node, int cpu) +{ + int i, ret = 0, count = 0; + u32 *psci_states; + struct device_node *state_node; + + /* Count idle states */ + while ((state_node = of_parse_phandle(cpu_node, "cpu-idle-states", + count))) { + count++; + of_node_put(state_node); + } + + if (!count) + return -ENODEV; + + psci_states = kcalloc(count, sizeof(*psci_states), GFP_KERNEL); + if (!psci_states) + return -ENOMEM; + + for (i = 0; i < count; i++) { + state_node = of_parse_phandle(cpu_node, "cpu-idle-states", i); + ret = psci_dt_parse_state_node(state_node, &psci_states[i]); + of_node_put(state_node); + + if (ret) + goto free_mem; + + pr_debug("psci-power-state %#x index %d\n", psci_states[i], i); + } + + /* Idle states parsed correctly, initialize per-cpu pointer */ + per_cpu(psci_power_state, cpu) = psci_states; + return 0; + +free_mem: + kfree(psci_states); + return ret; +} + +static __init int psci_cpu_init_idle(unsigned int cpu) +{ + struct device_node *cpu_node; + int ret; + + /* + * If the PSCI cpu_suspend function hook has not been initialized + * idle states must not be enabled, so bail out + */ + if (!psci_ops.cpu_suspend) + return -EOPNOTSUPP; + + cpu_node = of_cpu_device_node_get(cpu); + if (!cpu_node) + return -ENODEV; + + ret = psci_dt_cpu_init_idle(cpu_node, cpu); + + of_node_put(cpu_node); + + return ret; +} + static int __init psci_idle_init_cpu(int cpu) { struct cpuidle_driver *drv; diff --git a/drivers/firmware/psci/psci.c b/drivers/firmware/psci/psci.c index b343f8a34c6a..84f4ff351c62 100644 --- a/drivers/firmware/psci/psci.c +++ b/drivers/firmware/psci/psci.c @@ -103,7 +103,7 @@ static inline bool psci_power_state_loses_context(u32 state) return state & mask; } -static inline bool psci_power_state_is_valid(u32 state) +bool psci_power_state_is_valid(u32 state) { const u32 valid_mask = psci_has_ext_power_state() ? 
PSCI_1_0_EXT_POWER_STATE_MASK : @@ -277,162 +277,21 @@ static int __init psci_features(u32 psci_func_id) } #ifdef CONFIG_CPU_IDLE -static DEFINE_PER_CPU_READ_MOSTLY(u32 *, psci_power_state); - -static int psci_dt_parse_state_node(struct device_node *np, u32 *state) -{ - int err = of_property_read_u32(np, "arm,psci-suspend-param", state); - - if (err) { - pr_warn("%pOF missing arm,psci-suspend-param property\n", np); - return err; - } - - if (!psci_power_state_is_valid(*state)) { - pr_warn("Invalid PSCI power state %#x\n", *state); - return -EINVAL; - } - - return 0; -} - -static int psci_dt_cpu_init_idle(struct device_node *cpu_node, int cpu) -{ - int i, ret = 0, count = 0; - u32 *psci_states; - struct device_node *state_node; - - /* Count idle states */ - while ((state_node = of_parse_phandle(cpu_node, "cpu-idle-states", - count))) { - count++; - of_node_put(state_node); - } - - if (!count) - return -ENODEV; - - psci_states = kcalloc(count, sizeof(*psci_states), GFP_KERNEL); - if (!psci_states) - return -ENOMEM; - - for (i = 0; i < count; i++) { - state_node = of_parse_phandle(cpu_node, "cpu-idle-states", i); - ret = psci_dt_parse_state_node(state_node, &psci_states[i]); - of_node_put(state_node); - - if (ret) - goto free_mem; - - pr_debug("psci-power-state %#x index %d\n", psci_states[i], i); - } - - /* Idle states parsed correctly, initialize per-cpu pointer */ - per_cpu(psci_power_state, cpu) = psci_states; - return 0; - -free_mem: - kfree(psci_states); - return ret; -} - -#ifdef CONFIG_ACPI -#include - -static int __maybe_unused psci_acpi_cpu_init_idle(unsigned int cpu) -{ - int i, count; - u32 *psci_states; - struct acpi_lpi_state *lpi; - struct acpi_processor *pr = per_cpu(processors, cpu); - - if (unlikely(!pr || !pr->flags.has_lpi)) - return -EINVAL; - - count = pr->power.count - 1; - if (count <= 0) - return -ENODEV; - - psci_states = kcalloc(count, sizeof(*psci_states), GFP_KERNEL); - if (!psci_states) - return -ENOMEM; - - for (i = 0; i < count; i++) { - u32 state; - - lpi = &pr->power.lpi_states[i + 1]; - /* - * Only bits[31:0] represent a PSCI power_state while - * bits[63:32] must be 0x0 as per ARM ACPI FFH Specification - */ - state = lpi->address; - if (!psci_power_state_is_valid(state)) { - pr_warn("Invalid PSCI power state %#x\n", state); - kfree(psci_states); - return -EINVAL; - } - psci_states[i] = state; - } - /* Idle states parsed correctly, initialize per-cpu pointer */ - per_cpu(psci_power_state, cpu) = psci_states; - return 0; -} -#else -static int __maybe_unused psci_acpi_cpu_init_idle(unsigned int cpu) -{ - return -EINVAL; -} -#endif - -int psci_cpu_init_idle(unsigned int cpu) -{ - struct device_node *cpu_node; - int ret; - - /* - * If the PSCI cpu_suspend function hook has not been initialized - * idle states must not be enabled, so bail out - */ - if (!psci_ops.cpu_suspend) - return -EOPNOTSUPP; - - if (!acpi_disabled) - return psci_acpi_cpu_init_idle(cpu); - - cpu_node = of_get_cpu_node(cpu, NULL); - if (!cpu_node) - return -ENODEV; - - ret = psci_dt_cpu_init_idle(cpu_node, cpu); - - of_node_put(cpu_node); - - return ret; -} - -static int psci_suspend_finisher(unsigned long index) +static int psci_suspend_finisher(unsigned long state) { - u32 *state = __this_cpu_read(psci_power_state); + u32 power_state = state; - return psci_ops.cpu_suspend(state[index - 1], - __pa_symbol(cpu_resume)); + return psci_ops.cpu_suspend(power_state, __pa_symbol(cpu_resume)); } -int psci_cpu_suspend_enter(unsigned long index) +int psci_cpu_suspend_enter(u32 state) { int ret; - u32 
*state = __this_cpu_read(psci_power_state); - /* - * idle state index 0 corresponds to wfi, should never be called - * from the cpu_suspend operations - */ - if (WARN_ON_ONCE(!index)) - return -EINVAL; - if (!psci_power_state_loses_context(state[index - 1])) - ret = psci_ops.cpu_suspend(state[index - 1], 0); + if (!psci_power_state_loses_context(state)) + ret = psci_ops.cpu_suspend(state, 0); else - ret = cpu_suspend(index, psci_suspend_finisher); + ret = cpu_suspend(state, psci_suspend_finisher); return ret; } diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h index bb9a0db89f1a..12ae4b87494e 100644 --- a/include/linux/cpuidle.h +++ b/include/linux/cpuidle.h @@ -256,7 +256,10 @@ static inline int cpuidle_register_governor(struct cpuidle_governor *gov) {return 0;} #endif -#define __CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx, is_retention) \ +#define __CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, \ + idx, \ + state, \ + is_retention) \ ({ \ int __ret = 0; \ \ @@ -268,7 +271,7 @@ static inline int cpuidle_register_governor(struct cpuidle_governor *gov) if (!is_retention) \ __ret = cpu_pm_enter(); \ if (!__ret) { \ - __ret = low_level_idle_enter(idx); \ + __ret = low_level_idle_enter(state); \ if (!is_retention) \ cpu_pm_exit(); \ } \ @@ -277,9 +280,15 @@ static inline int cpuidle_register_governor(struct cpuidle_governor *gov) }) #define CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx) \ - __CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx, 0) + __CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx, idx, 0) #define CPU_PM_CPU_IDLE_ENTER_RETENTION(low_level_idle_enter, idx) \ - __CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx, 1) + __CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx, idx, 1) + +#define CPU_PM_CPU_IDLE_ENTER_PARAM(low_level_idle_enter, idx, state) \ + __CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx, state, 0) + +#define CPU_PM_CPU_IDLE_ENTER_RETENTION_PARAM(low_level_idle_enter, idx, state) \ + __CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx, state, 1) #endif /* _LINUX_CPUIDLE_H */ diff --git a/include/linux/psci.h b/include/linux/psci.h index a8a15613c157..e2bacc6fd2f2 100644 --- a/include/linux/psci.h +++ b/include/linux/psci.h @@ -15,8 +15,8 @@ bool psci_tos_resident_on(int cpu); -int psci_cpu_init_idle(unsigned int cpu); -int psci_cpu_suspend_enter(unsigned long index); +int psci_cpu_suspend_enter(u32 state); +bool psci_power_state_is_valid(u32 state); enum psci_conduit { PSCI_CONDUIT_NONE, -- cgit v1.2.3-58-ga151 From bbd1b70639f785a970d998f35155c713f975e3ac Mon Sep 17 00:00:00 2001 From: Jeremy Linton Date: Thu, 8 Aug 2019 15:40:06 -0500 Subject: ACPI/PPTT: Add support for ACPI 6.3 thread flag ACPI 6.3 adds a flag to the CPU node to indicate whether the given PE is a thread. Add a function to return that information for a given linux logical CPU. Signed-off-by: Jeremy Linton Reviewed-by: Sudeep Holla Reviewed-by: Robert Richter Acked-by: Rafael J. 
Wysocki Signed-off-by: Will Deacon --- drivers/acpi/pptt.c | 53 +++++++++++++++++++++++++++++++++++++++++++++++++++- include/linux/acpi.h | 5 +++++ 2 files changed, 57 insertions(+), 1 deletion(-) diff --git a/drivers/acpi/pptt.c b/drivers/acpi/pptt.c index 1e7ac0bd0d3a..f31544d3656e 100644 --- a/drivers/acpi/pptt.c +++ b/drivers/acpi/pptt.c @@ -540,6 +540,44 @@ static int find_acpi_cpu_topology_tag(unsigned int cpu, int level, int flag) return retval; } +/** + * check_acpi_cpu_flag() - Determine if CPU node has a flag set + * @cpu: Kernel logical CPU number + * @rev: The minimum PPTT revision defining the flag + * @flag: The flag itself + * + * Check the node representing a CPU for a given flag. + * + * Return: -ENOENT if the PPTT doesn't exist, the CPU cannot be found or + * the table revision isn't new enough. + * 1, any passed flag set + * 0, flag unset + */ +static int check_acpi_cpu_flag(unsigned int cpu, int rev, u32 flag) +{ + struct acpi_table_header *table; + acpi_status status; + u32 acpi_cpu_id = get_acpi_id_for_cpu(cpu); + struct acpi_pptt_processor *cpu_node = NULL; + int ret = -ENOENT; + + status = acpi_get_table(ACPI_SIG_PPTT, 0, &table); + if (ACPI_FAILURE(status)) { + acpi_pptt_warn_missing(); + return ret; + } + + if (table->revision >= rev) + cpu_node = acpi_find_processor_node(table, acpi_cpu_id); + + if (cpu_node) + ret = (cpu_node->flags & flag) != 0; + + acpi_put_table(table); + + return ret; +} + /** * acpi_find_last_cache_level() - Determines the number of cache levels for a PE * @cpu: Kernel logical CPU number @@ -604,6 +642,20 @@ int cache_setup_acpi(unsigned int cpu) return status; } +/** + * acpi_pptt_cpu_is_thread() - Determine if CPU is a thread + * @cpu: Kernel logical CPU number + * + * Return: 1, a thread + * 0, not a thread + * -ENOENT ,if the PPTT doesn't exist, the CPU cannot be found or + * the table revision isn't new enough. + */ +int acpi_pptt_cpu_is_thread(unsigned int cpu) +{ + return check_acpi_cpu_flag(cpu, 2, ACPI_PPTT_ACPI_PROCESSOR_IS_THREAD); +} + /** * find_acpi_cpu_topology() - Determine a unique topology value for a given CPU * @cpu: Kernel logical CPU number @@ -664,7 +716,6 @@ int find_acpi_cpu_cache_topology(unsigned int cpu, int level) return ret; } - /** * find_acpi_cpu_topology_package() - Determine a unique CPU package value * @cpu: Kernel logical CPU number diff --git a/include/linux/acpi.h b/include/linux/acpi.h index 9426b9aaed86..9d0e20a2ac83 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@ -1302,11 +1302,16 @@ static inline int lpit_read_residency_count_address(u64 *address) #endif #ifdef CONFIG_ACPI_PPTT +int acpi_pptt_cpu_is_thread(unsigned int cpu); int find_acpi_cpu_topology(unsigned int cpu, int level); int find_acpi_cpu_topology_package(unsigned int cpu); int find_acpi_cpu_topology_hetero_id(unsigned int cpu); int find_acpi_cpu_cache_topology(unsigned int cpu, int level); #else +static inline int acpi_pptt_cpu_is_thread(unsigned int cpu) +{ + return -EINVAL; +} static inline int find_acpi_cpu_topology(unsigned int cpu, int level) { return -EINVAL; -- cgit v1.2.3-58-ga151 From 98dc19902a0b2e5348e43d6a2c39a0a7d0fc639e Mon Sep 17 00:00:00 2001 From: Jeremy Linton Date: Thu, 8 Aug 2019 15:40:07 -0500 Subject: arm64: topology: Use PPTT to determine if PE is a thread ACPI 6.3 adds a thread flag to represent if a CPU/PE is actually a thread. Given that the MPIDR_MT bit may not represent this information consistently on homogeneous machines we should prefer the PPTT flag if its available. 
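The preference logic is small enough to quote in isolation; it is essentially the acpi_cpu_is_threaded() helper added by the diff that follows:

	static bool __init acpi_cpu_is_threaded(int cpu)
	{
		int is_threaded = acpi_pptt_cpu_is_thread(cpu);

		/*
		 * If the PPTT carries no thread information, assume a
		 * homogeneous machine and fall back to this CPU's MPIDR.MT bit.
		 */
		if (is_threaded < 0)
			is_threaded = read_cpuid_mpidr() & MPIDR_MT_BITMASK;

		return !!is_threaded;
	}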
Signed-off-by: Jeremy Linton Reviewed-by: Sudeep Holla Reviewed-by: Robert Richter [will: made acpi_cpu_is_threaded() return 'bool'] Signed-off-by: Will Deacon --- arch/arm64/kernel/topology.c | 19 +++++++++++++++---- 1 file changed, 15 insertions(+), 4 deletions(-) diff --git a/arch/arm64/kernel/topology.c b/arch/arm64/kernel/topology.c index 0825c4a856e3..6106c49f84bc 100644 --- a/arch/arm64/kernel/topology.c +++ b/arch/arm64/kernel/topology.c @@ -340,17 +340,28 @@ void remove_cpu_topology(unsigned int cpu) } #ifdef CONFIG_ACPI +static bool __init acpi_cpu_is_threaded(int cpu) +{ + int is_threaded = acpi_pptt_cpu_is_thread(cpu); + + /* + * if the PPTT doesn't have thread information, assume a homogeneous + * machine and return the current CPU's thread state. + */ + if (is_threaded < 0) + is_threaded = read_cpuid_mpidr() & MPIDR_MT_BITMASK; + + return !!is_threaded; +} + /* * Propagate the topology information of the processor_topology_node tree to the * cpu_topology array. */ static int __init parse_acpi_topology(void) { - bool is_threaded; int cpu, topology_id; - is_threaded = read_cpuid_mpidr() & MPIDR_MT_BITMASK; - for_each_possible_cpu(cpu) { int i, cache_id; @@ -358,7 +369,7 @@ static int __init parse_acpi_topology(void) if (topology_id < 0) return topology_id; - if (is_threaded) { + if (acpi_cpu_is_threaded(cpu)) { cpu_topology[cpu].thread_id = topology_id; topology_id = find_acpi_cpu_topology(cpu, 1); cpu_topology[cpu].core_id = topology_id; -- cgit v1.2.3-58-ga151 From 80d838122643a09a9f99824adea4b4261e4451e6 Mon Sep 17 00:00:00 2001 From: Nick Desaulniers Date: Mon, 12 Aug 2019 14:50:45 -0700 Subject: arm64: prefer __section from compiler_attributes.h GCC unescapes escaped string section names while Clang does not. Because __section uses the `#` stringification operator for the section name, it doesn't need to be escaped. This antipattern was found with: $ grep -e __section\(\" -e __section__\(\" -r Reported-by: Sedat Dilek Suggested-by: Josh Poimboeuf Signed-off-by: Nick Desaulniers Signed-off-by: Will Deacon --- arch/arm64/include/asm/cache.h | 2 +- arch/arm64/kernel/smp_spin_table.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/arm64/include/asm/cache.h b/arch/arm64/include/asm/cache.h index 64eeaa41e7ca..43da6dd29592 100644 --- a/arch/arm64/include/asm/cache.h +++ b/arch/arm64/include/asm/cache.h @@ -78,7 +78,7 @@ static inline u32 cache_type_cwg(void) return (read_cpuid_cachetype() >> CTR_CWG_SHIFT) & CTR_CWG_MASK; } -#define __read_mostly __attribute__((__section__(".data..read_mostly"))) +#define __read_mostly __section(.data..read_mostly) static inline int cache_line_size_of_cpu(void) { diff --git a/arch/arm64/kernel/smp_spin_table.c b/arch/arm64/kernel/smp_spin_table.c index 76c2739ba8a4..c8a3fee00c11 100644 --- a/arch/arm64/kernel/smp_spin_table.c +++ b/arch/arm64/kernel/smp_spin_table.c @@ -19,7 +19,7 @@ #include extern void secondary_holding_pen(void); -volatile unsigned long __section(".mmuoff.data.read") +volatile unsigned long __section(.mmuoff.data.read) secondary_holding_pen_release = INVALID_HWID; static phys_addr_t cpu_release_addr[NR_CPUS]; -- cgit v1.2.3-58-ga151 From 0bf136a8cf11502643c27c8de1b1db8f161a31ed Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Tue, 13 Aug 2019 15:16:37 +0100 Subject: arm64: constify icache_policy_str[] The icache_policy_str[] array contains compile-time constant data, and is never intentionally modified, so let's mark it as const. 
Signed-off-by: Mark Rutland Cc: Catalin Marinas Cc: Will Deacon Signed-off-by: Will Deacon --- arch/arm64/kernel/cpuinfo.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c index 876055e37352..05933c065732 100644 --- a/arch/arm64/kernel/cpuinfo.c +++ b/arch/arm64/kernel/cpuinfo.c @@ -33,7 +33,7 @@ DEFINE_PER_CPU(struct cpuinfo_arm64, cpu_data); static struct cpuinfo_arm64 boot_cpu_data; -static char *icache_policy_str[] = { +static const char *icache_policy_str[] = { [0 ... ICACHE_POLICY_PIPT] = "RESERVED/UNKNOWN", [ICACHE_POLICY_VIPT] = "VIPT", [ICACHE_POLICY_PIPT] = "PIPT", -- cgit v1.2.3-58-ga151 From 0da23df2ff043c8d39b389e32ee68af64b5f408e Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Tue, 13 Aug 2019 15:16:38 +0100 Subject: arm64: constify aarch64_insn_encoding_class[] The aarch64_insn_encoding_class[] array contains compile-time constant data, and is never intentionally modified, so let's mark it as const. Signed-off-by: Mark Rutland Cc: Catalin Marinas Cc: Will Deacon Signed-off-by: Will Deacon --- arch/arm64/kernel/insn.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/arm64/kernel/insn.c b/arch/arm64/kernel/insn.c index 84b059ed04fc..d801a7094076 100644 --- a/arch/arm64/kernel/insn.c +++ b/arch/arm64/kernel/insn.c @@ -26,7 +26,7 @@ #define AARCH64_INSN_N_BIT BIT(22) #define AARCH64_INSN_LSL_12 BIT(22) -static int aarch64_insn_encoding_class[] = { +static const int aarch64_insn_encoding_class[] = { AARCH64_INSN_CLS_UNKNOWN, AARCH64_INSN_CLS_UNKNOWN, AARCH64_INSN_CLS_UNKNOWN, -- cgit v1.2.3-58-ga151 From 37143dcc44f8f3348d47616598137b1580468973 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Tue, 13 Aug 2019 15:16:39 +0100 Subject: arm64: constify sys64_hook instances All instances of struct sys64_hook contain compile-time constant data, and are never inentionally modified, so let's make them all const. 
Signed-off-by: Mark Rutland Cc: Catalin Marinas Cc: Will Deacon Signed-off-by: Will Deacon --- arch/arm64/kernel/traps.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c index 42c8422cdf4a..a5d7ce4297b0 100644 --- a/arch/arm64/kernel/traps.c +++ b/arch/arm64/kernel/traps.c @@ -511,7 +511,7 @@ struct sys64_hook { void (*handler)(unsigned int esr, struct pt_regs *regs); }; -static struct sys64_hook sys64_hooks[] = { +static const struct sys64_hook sys64_hooks[] = { { .esr_mask = ESR_ELx_SYS64_ISS_EL0_CACHE_OP_MASK, .esr_val = ESR_ELx_SYS64_ISS_EL0_CACHE_OP_VAL, @@ -636,7 +636,7 @@ static void compat_cntfrq_read_handler(unsigned int esr, struct pt_regs *regs) arm64_compat_skip_faulting_instruction(regs, 4); } -static struct sys64_hook cp15_32_hooks[] = { +static const struct sys64_hook cp15_32_hooks[] = { { .esr_mask = ESR_ELx_CP15_32_ISS_SYS_MASK, .esr_val = ESR_ELx_CP15_32_ISS_SYS_CNTFRQ, @@ -656,7 +656,7 @@ static void compat_cntvct_read_handler(unsigned int esr, struct pt_regs *regs) arm64_compat_skip_faulting_instruction(regs, 4); } -static struct sys64_hook cp15_64_hooks[] = { +static const struct sys64_hook cp15_64_hooks[] = { { .esr_mask = ESR_ELx_CP15_64_ISS_SYS_MASK, .esr_val = ESR_ELx_CP15_64_ISS_SYS_CNTVCT, @@ -667,7 +667,7 @@ static struct sys64_hook cp15_64_hooks[] = { asmlinkage void __exception do_cp15instr(unsigned int esr, struct pt_regs *regs) { - struct sys64_hook *hook, *hook_base; + const struct sys64_hook *hook, *hook_base; if (!cp15_cond_valid(esr, regs)) { /* @@ -707,7 +707,7 @@ asmlinkage void __exception do_cp15instr(unsigned int esr, struct pt_regs *regs) asmlinkage void __exception do_sysinstr(unsigned int esr, struct pt_regs *regs) { - struct sys64_hook *hook; + const struct sys64_hook *hook; for (hook = sys64_hooks; hook->handler; hook++) if ((hook->esr_mask & esr) == hook->esr_val) { -- cgit v1.2.3-58-ga151 From 68dd8ef321626f14ae9ef2039b7a03c707149489 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Tue, 13 Aug 2019 15:52:23 +0100 Subject: arm64: memory: Fix virt_addr_valid() using __is_lm_address() virt_addr_valid() is intended to test whether or not the passed address is a valid linear map address. Unfortunately, it relies on _virt_addr_is_linear() which is broken because it assumes the linear map is at the top of the address space, which it no longer is. Reimplement virt_addr_valid() using __is_lm_address() and remove _virt_addr_is_linear() entirely. At the same time, ensure we evaluate the macro parameter only once and move it within the __ASSEMBLY__ block. Reported-by: Qian Cai Reported-by: Geert Uytterhoeven Tested-by: Steve Capper Reviewed-by: Steve Capper Tested-by: Geert Uytterhoeven Reviewed-by: Catalin Marinas Reviewed-by: Mark Rutland Fixes: 14c127c957c1 ("arm64: mm: Flip kernel VA space") Signed-off-by: Will Deacon --- arch/arm64/include/asm/memory.h | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h index 2c3c4b145e95..93ef8e5c6971 100644 --- a/arch/arm64/include/asm/memory.h +++ b/arch/arm64/include/asm/memory.h @@ -242,11 +242,11 @@ static inline const void *__tag_set(const void *addr, u8 tag) /* - * The linear kernel range starts in the middle of the virtual adddress + * The linear kernel range starts at the bottom of the virtual address * space. Testing the top bit for the start of the region is a - * sufficient check. 
+ * sufficient check and avoids having to worry about the tag. */ -#define __is_lm_address(addr) (!((addr) & BIT(vabits_actual - 1))) +#define __is_lm_address(addr) (!(((u64)addr) & BIT(vabits_actual - 1))) #define __lm_to_phys(addr) (((addr) + physvirt_offset)) #define __kimg_to_phys(addr) ((addr) - kimage_voffset) @@ -326,13 +326,13 @@ static inline void *phys_to_virt(phys_addr_t x) #define virt_to_page(vaddr) ((struct page *)((__virt_to_pgoff(vaddr)) + VMEMMAP_START)) #endif -#endif -#define _virt_addr_is_linear(kaddr) \ - (__tag_reset((u64)(kaddr)) >= PAGE_OFFSET) +#define virt_addr_valid(addr) ({ \ + __typeof__(addr) __addr = addr; \ + __is_lm_address(__addr) && pfn_valid(virt_to_pfn(__addr)); \ +}) -#define virt_addr_valid(kaddr) \ - (_virt_addr_is_linear(kaddr) && pfn_valid(virt_to_pfn(kaddr))) +#endif /* * Given that the GIC architecture permits ITS implementations that can only be -- cgit v1.2.3-58-ga151 From 577c2b35283fbadcc9ce4b56304ccea3ec8a5ca1 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Tue, 13 Aug 2019 16:26:54 +0100 Subject: arm64: memory: Ensure address tag is masked in conversion macros When converting a linear virtual address to a physical address, pfn or struct page *, we must make sure that the tag bits are masked before the calculation otherwise we end up with corrupt pointers when running with CONFIG_KASAN_SW_TAGS=y: | Unable to handle kernel paging request at virtual address 0037fe0007580d08 | [0037fe0007580d08] address between user and kernel address ranges Mask out the tag in __virt_to_phys_nodebug() and virt_to_page(). Reported-by: Qian Cai Reported-by: Geert Uytterhoeven Tested-by: Steve Capper Reviewed-by: Steve Capper Tested-by: Geert Uytterhoeven Reviewed-by: Catalin Marinas Reviewed-by: Mark Rutland Fixes: 9cb1c5ddd2c4 ("arm64: mm: Remove bit-masking optimisations for PAGE_OFFSET and VMEMMAP_START") Signed-off-by: Will Deacon --- arch/arm64/include/asm/memory.h | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h index 93ef8e5c6971..243e05ad4a67 100644 --- a/arch/arm64/include/asm/memory.h +++ b/arch/arm64/include/asm/memory.h @@ -252,7 +252,7 @@ static inline const void *__tag_set(const void *addr, u8 tag) #define __kimg_to_phys(addr) ((addr) - kimage_voffset) #define __virt_to_phys_nodebug(x) ({ \ - phys_addr_t __x = (phys_addr_t)(x); \ + phys_addr_t __x = (phys_addr_t)(__tag_reset(x)); \ __is_lm_address(__x) ? __lm_to_phys(__x) : \ __kimg_to_phys(__x); \ }) @@ -324,7 +324,8 @@ static inline void *phys_to_virt(phys_addr_t x) ((void *)__addr_tag); \ }) -#define virt_to_page(vaddr) ((struct page *)((__virt_to_pgoff(vaddr)) + VMEMMAP_START)) +#define virt_to_page(vaddr) \ + ((struct page *)((__virt_to_pgoff(__tag_reset(vaddr))) + VMEMMAP_START)) #endif #define virt_addr_valid(addr) ({ \ -- cgit v1.2.3-58-ga151 From 96628f0fb18080a4166fc9eab8f7fd062d860667 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Tue, 13 Aug 2019 16:46:11 +0100 Subject: arm64: memory: Rewrite default page_to_virt()/virt_to_page() The default implementations of page_to_virt() and virt_to_page() are fairly confusing to read and the former evaluates its 'page' parameter twice in the macro Rewrite them so that the computation is expressed as 'base + index' in both cases and the parameter is always evaluated exactly once. 
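(Editorial aside, not part of the patch: the "evaluated exactly once" point is the classic GNU C statement-expression idiom. A minimal, self-contained sketch — with a made-up sq() macro, not kernel code — shows why the local __typeof__ temporary matters:)

  /* Editor's illustration of the single-evaluation idiom (GNU C statement
   * expression plus __typeof__); 'sq' is a hypothetical macro. */
  #include <assert.h>

  #define sq(x) ({                        \
          __typeof__(x) __x = (x);        \
          __x * __x;                      \
  })

  int main(void)
  {
          int i = 3;

          /* The argument is evaluated once: i becomes 4, result is 9.
           * A naive ((x) * (x)) expansion would evaluate i++ twice. */
          assert(sq(i++) == 9 && i == 4);
          return 0;
  }

The rewritten page_to_virt()/virt_to_page() in the hunk below use the same technique for their macro parameter.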
Tested-by: Steve Capper Reviewed-by: Steve Capper Tested-by: Geert Uytterhoeven Reviewed-by: Catalin Marinas Signed-off-by: Will Deacon --- arch/arm64/include/asm/memory.h | 21 ++++++++++----------- 1 file changed, 10 insertions(+), 11 deletions(-) diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h index 243e05ad4a67..636d414608cb 100644 --- a/arch/arm64/include/asm/memory.h +++ b/arch/arm64/include/asm/memory.h @@ -313,19 +313,18 @@ static inline void *phys_to_virt(phys_addr_t x) #if !defined(CONFIG_SPARSEMEM_VMEMMAP) || defined(CONFIG_DEBUG_VIRTUAL) #define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT) #else -#define __virt_to_pgoff(kaddr) (((u64)(kaddr) - PAGE_OFFSET) / PAGE_SIZE * sizeof(struct page)) -#define __page_to_voff(kaddr) (((u64)(kaddr) - VMEMMAP_START) * PAGE_SIZE / sizeof(struct page)) - -#define page_to_virt(page) ({ \ - unsigned long __addr = \ - ((__page_to_voff(page)) + PAGE_OFFSET); \ - const void *__addr_tag = \ - __tag_set((void *)__addr, page_kasan_tag(page)); \ - ((void *)__addr_tag); \ +#define page_to_virt(x) ({ \ + __typeof__(x) __page = x; \ + u64 __idx = ((u64)__page - VMEMMAP_START) / sizeof(struct page);\ + u64 __addr = PAGE_OFFSET + (__idx * PAGE_SIZE); \ + (void *)__tag_set((const void *)__addr, page_kasan_tag(__page));\ }) -#define virt_to_page(vaddr) \ - ((struct page *)((__virt_to_pgoff(__tag_reset(vaddr))) + VMEMMAP_START)) +#define virt_to_page(x) ({ \ + u64 __idx = (__tag_reset((u64)x) - PAGE_OFFSET) / PAGE_SIZE; \ + u64 __addr = VMEMMAP_START + (__idx * sizeof(struct page)); \ + (struct page *)__addr; \ +}) #endif #define virt_addr_valid(addr) ({ \ -- cgit v1.2.3-58-ga151 From 9ba33dcc6bef4e56c762b446f0000f27ee737b8b Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Tue, 13 Aug 2019 16:34:32 +0100 Subject: arm64: memory: Simplify virt_to_page() implementation Build virt_to_page() on top of virt_to_pfn() so we can avoid the need for explicit shifting. Tested-by: Steve Capper Reviewed-by: Steve Capper Tested-by: Geert Uytterhoeven Reviewed-by: Catalin Marinas Reviewed-by: Mark Rutland Signed-off-by: Will Deacon --- arch/arm64/include/asm/memory.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h index 636d414608cb..e6353d1a75fa 100644 --- a/arch/arm64/include/asm/memory.h +++ b/arch/arm64/include/asm/memory.h @@ -311,7 +311,7 @@ static inline void *phys_to_virt(phys_addr_t x) #define ARCH_PFN_OFFSET ((unsigned long)PHYS_PFN_OFFSET) #if !defined(CONFIG_SPARSEMEM_VMEMMAP) || defined(CONFIG_DEBUG_VIRTUAL) -#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT) +#define virt_to_page(kaddr) pfn_to_page(virt_to_pfn(kaddr)) #else #define page_to_virt(x) ({ \ __typeof__(x) __page = x; \ -- cgit v1.2.3-58-ga151 From a5ac40f53bfa5e43d9c76e3c23415ee4afd71932 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Tue, 13 Aug 2019 16:58:36 +0100 Subject: arm64: memory: Simplify _VA_START and _PAGE_OFFSET definitions Rather than subtracting from -1 and then adding 1, we can simply subtract from 0. 
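(Editorial worked example: the equivalence relied on here is plain modular arithmetic — subtracting 2^va from 2^64 - 1 and then adding 1 gives the same 64-bit value as negating 2^va. A quick stand-alone check, not kernel code, for va = 48:)

  #include <assert.h>
  #include <stdint.h>

  int main(void)
  {
          uint64_t old_form = UINT64_MAX - (UINT64_C(1) << 48) + 1;
          uint64_t new_form = -(UINT64_C(1) << 48);  /* unsigned negation wraps mod 2^64 */

          /* Both forms yield the familiar 48-bit start-of-kernel-VA constant. */
          assert(old_form == new_form);
          assert(old_form == UINT64_C(0xffff000000000000));
          return 0;
  }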
Tested-by: Steve Capper Reviewed-by: Steve Capper Tested-by: Geert Uytterhoeven Reviewed-by: Catalin Marinas Signed-off-by: Will Deacon --- arch/arm64/include/asm/memory.h | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h index e6353d1a75fa..a7a985602cba 100644 --- a/arch/arm64/include/asm/memory.h +++ b/arch/arm64/include/asm/memory.h @@ -44,8 +44,7 @@ * VA_START - the first kernel virtual address. */ #define VA_BITS (CONFIG_ARM64_VA_BITS) -#define _PAGE_OFFSET(va) (UL(0xffffffffffffffff) - \ - (UL(1) << (va)) + 1) +#define _PAGE_OFFSET(va) (-(UL(1) << (va))) #define PAGE_OFFSET (_PAGE_OFFSET(VA_BITS)) #define KIMAGE_VADDR (MODULES_END) #define BPF_JIT_REGION_START (KASAN_SHADOW_END) @@ -63,8 +62,7 @@ #else #define VA_BITS_MIN (VA_BITS) #endif -#define _VA_START(va) (UL(0xffffffffffffffff) - \ - (UL(1) << ((va) - 1)) + 1) +#define _VA_START(va) (-(UL(1) << ((va) - 1))) #define KERNEL_START _text #define KERNEL_END _end -- cgit v1.2.3-58-ga151 From 6bbd497f027332b14cf2a6792c418c32286b66c2 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Tue, 13 Aug 2019 17:01:05 +0100 Subject: arm64: memory: Implement __tag_set() as common function There's no need for __tag_set() to be a complicated macro when CONFIG_KASAN_SW_TAGS=y and a simple static inline otherwise. Rewrite the thing as a common static inline function. Tested-by: Steve Capper Reviewed-by: Steve Capper Tested-by: Geert Uytterhoeven Reviewed-by: Catalin Marinas Reviewed-by: Mark Rutland Signed-off-by: Will Deacon --- arch/arm64/include/asm/memory.h | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h index a7a985602cba..fb0062555305 100644 --- a/arch/arm64/include/asm/memory.h +++ b/arch/arm64/include/asm/memory.h @@ -218,20 +218,20 @@ static inline unsigned long kaslr_offset(void) #ifdef CONFIG_KASAN_SW_TAGS #define __tag_shifted(tag) ((u64)(tag) << 56) -#define __tag_set(addr, tag) (__typeof__(addr))( \ - ((u64)(addr) & ~__tag_shifted(0xff)) | __tag_shifted(tag)) #define __tag_reset(addr) untagged_addr(addr) #define __tag_get(addr) (__u8)((u64)(addr) >> 56) #else -static inline const void *__tag_set(const void *addr, u8 tag) -{ - return addr; -} - +#define __tag_shifted(tag) 0UL #define __tag_reset(addr) (addr) #define __tag_get(addr) 0 #endif +static inline const void *__tag_set(const void *addr, u8 tag) +{ + u64 __addr = (u64)addr & ~__tag_shifted(0xff); + return (const void *)(__addr | __tag_shifted(tag)); +} + /* * Physical vs virtual RAM address space conversion. These are * private definitions which should NOT be used outside memory.h -- cgit v1.2.3-58-ga151 From 68933aa973740796895e297e7dbf7baf3e9c51b1 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Tue, 13 Aug 2019 17:06:29 +0100 Subject: arm64: memory: Add comments to end of non-trivial #ifdef blocks Commenting the #endif of a multi-statement #ifdef block with the condition which guards it is useful and can save having to scroll back through the file to figure out which set of Kconfig options apply to a particular piece of code. 
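(For illustration — editor's addition with a hypothetical CONFIG symbol — the convention being adopted in the hunks below is simply:)

  #ifdef CONFIG_EXAMPLE_FEATURE
  /* ... many lines of declarations ... */
  #endif /* CONFIG_EXAMPLE_FEATURE */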
Reviewed-by: Steve Capper Acked-by: Catalin Marinas Reviewed-by: Mark Rutland Signed-off-by: Will Deacon --- arch/arm64/include/asm/memory.h | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h index fb0062555305..27f35ce2e2ed 100644 --- a/arch/arm64/include/asm/memory.h +++ b/arch/arm64/include/asm/memory.h @@ -57,11 +57,13 @@ #define PCI_IO_END (VMEMMAP_START - SZ_2M) #define PCI_IO_START (PCI_IO_END - PCI_IO_SIZE) #define FIXADDR_TOP (PCI_IO_START - SZ_2M) + #if VA_BITS > 48 #define VA_BITS_MIN (48) #else #define VA_BITS_MIN (VA_BITS) #endif + #define _VA_START(va) (-(UL(1) << ((va) - 1))) #define KERNEL_START _text @@ -86,7 +88,7 @@ #else #define KASAN_THREAD_SHIFT 0 #define KASAN_SHADOW_END (_VA_START(VA_BITS_MIN)) -#endif +#endif /* CONFIG_KASAN */ #define MIN_THREAD_SHIFT (14 + KASAN_THREAD_SHIFT) @@ -224,7 +226,7 @@ static inline unsigned long kaslr_offset(void) #define __tag_shifted(tag) 0UL #define __tag_reset(addr) (addr) #define __tag_get(addr) 0 -#endif +#endif /* CONFIG_KASAN_SW_TAGS */ static inline const void *__tag_set(const void *addr, u8 tag) { @@ -263,7 +265,7 @@ extern phys_addr_t __phys_addr_symbol(unsigned long x); #else #define __virt_to_phys(x) __virt_to_phys_nodebug(x) #define __phys_addr_symbol(x) __pa_symbol_nodebug(x) -#endif +#endif /* CONFIG_DEBUG_VIRTUAL */ #define __phys_to_virt(x) ((unsigned long)((x) - physvirt_offset)) #define __phys_to_kimg(x) ((unsigned long)((x) + kimage_voffset)) @@ -323,14 +325,14 @@ static inline void *phys_to_virt(phys_addr_t x) u64 __addr = VMEMMAP_START + (__idx * sizeof(struct page)); \ (struct page *)__addr; \ }) -#endif +#endif /* !CONFIG_SPARSEMEM_VMEMMAP || CONFIG_DEBUG_VIRTUAL */ #define virt_addr_valid(addr) ({ \ __typeof__(addr) __addr = addr; \ __is_lm_address(__addr) && pfn_valid(virt_to_pfn(__addr)); \ }) -#endif +#endif /* !ASSEMBLY */ /* * Given that the GIC architecture permits ITS implementations that can only be @@ -345,4 +347,4 @@ static inline void *phys_to_virt(phys_addr_t x) #include -#endif +#endif /* __ASM_MEMORY_H */ -- cgit v1.2.3-58-ga151 From d0b3c32ed9220616548ff63808751cf2f6608df1 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Tue, 13 Aug 2019 17:22:51 +0100 Subject: arm64: memory: Cosmetic cleanups Cleanup memory.h so that the indentation is consistent, remove pointless line-wrapping and use consistent parameter names for different versions of the same macro. Reviewed-by: Steve Capper Acked-by: Catalin Marinas Reviewed-by: Mark Rutland Signed-off-by: Will Deacon --- arch/arm64/include/asm/memory.h | 23 +++++++++++------------ 1 file changed, 11 insertions(+), 12 deletions(-) diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h index 27f35ce2e2ed..d69c2865ae40 100644 --- a/arch/arm64/include/asm/memory.h +++ b/arch/arm64/include/asm/memory.h @@ -12,10 +12,10 @@ #include #include +#include #include #include #include -#include /* * Size of the PCI I/O space. 
This must remain a power of two so that @@ -66,8 +66,8 @@ #define _VA_START(va) (-(UL(1) << ((va) - 1))) -#define KERNEL_START _text -#define KERNEL_END _end +#define KERNEL_START _text +#define KERNEL_END _end #ifdef CONFIG_ARM64_VA_BITS_52 #define MAX_USER_VA_BITS 52 @@ -132,14 +132,14 @@ * 16 KB granule: 128 level 3 entries, with contiguous bit * 64 KB granule: 32 level 3 entries, with contiguous bit */ -#define SEGMENT_ALIGN SZ_2M +#define SEGMENT_ALIGN SZ_2M #else /* * 4 KB granule: 16 level 3 entries, with contiguous bit * 16 KB granule: 4 level 3 entries, without contiguous bit * 64 KB granule: 1 level 3 entry */ -#define SEGMENT_ALIGN SZ_64K +#define SEGMENT_ALIGN SZ_64K #endif /* @@ -253,8 +253,7 @@ static inline const void *__tag_set(const void *addr, u8 tag) #define __virt_to_phys_nodebug(x) ({ \ phys_addr_t __x = (phys_addr_t)(__tag_reset(x)); \ - __is_lm_address(__x) ? __lm_to_phys(__x) : \ - __kimg_to_phys(__x); \ + __is_lm_address(__x) ? __lm_to_phys(__x) : __kimg_to_phys(__x); \ }) #define __pa_symbol_nodebug(x) __kimg_to_phys((phys_addr_t)(x)) @@ -301,17 +300,17 @@ static inline void *phys_to_virt(phys_addr_t x) #define __pa_nodebug(x) __virt_to_phys_nodebug((unsigned long)(x)) #define __va(x) ((void *)__phys_to_virt((phys_addr_t)(x))) #define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT) -#define virt_to_pfn(x) __phys_to_pfn(__virt_to_phys((unsigned long)(x))) -#define sym_to_pfn(x) __phys_to_pfn(__pa_symbol(x)) +#define virt_to_pfn(x) __phys_to_pfn(__virt_to_phys((unsigned long)(x))) +#define sym_to_pfn(x) __phys_to_pfn(__pa_symbol(x)) /* - * virt_to_page(k) convert a _valid_ virtual address to struct page * - * virt_addr_valid(k) indicates whether a virtual address is valid + * virt_to_page(x) convert a _valid_ virtual address to struct page * + * virt_addr_valid(x) indicates whether a virtual address is valid */ #define ARCH_PFN_OFFSET ((unsigned long)PHYS_PFN_OFFSET) #if !defined(CONFIG_SPARSEMEM_VMEMMAP) || defined(CONFIG_DEBUG_VIRTUAL) -#define virt_to_page(kaddr) pfn_to_page(virt_to_pfn(kaddr)) +#define virt_to_page(x) pfn_to_page(virt_to_pfn(x)) #else #define page_to_virt(x) ({ \ __typeof__(x) __page = x; \ -- cgit v1.2.3-58-ga151 From 233947ef16a18952d22786770dab1ddafa1ac377 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Wed, 14 Aug 2019 14:28:47 +0100 Subject: arm64: memory: fix flipped VA space fallout VA_START used to be the start of the TTBR1 address space, but now it's a point midway though. In a couple of places we still use VA_START to get the start of the TTBR1 address space, so let's fix these up to use PAGE_OFFSET instead. 
Fixes: 14c127c957c1c607 ("arm64: mm: Flip kernel VA space") Signed-off-by: Mark Rutland Cc: Catalin Marinas Tested-by: Steve Capper Reviewed-by: Steve Capper Signed-off-by: Will Deacon --- arch/arm64/mm/dump.c | 2 +- arch/arm64/mm/fault.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/arm64/mm/dump.c b/arch/arm64/mm/dump.c index 6ec75305828e..8e10b4ba215a 100644 --- a/arch/arm64/mm/dump.c +++ b/arch/arm64/mm/dump.c @@ -400,7 +400,7 @@ void ptdump_check_wx(void) .check_wx = true, }; - walk_pgd(&st, &init_mm, VA_START); + walk_pgd(&st, &init_mm, PAGE_OFFSET); note_page(&st, 0, 0, 0); if (st.wx_pages || st.uxn_pages) pr_warn("Checked W+X mappings: FAILED, %lu W+X pages found, %lu non-UXN pages found\n", diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c index 75eff57bd9ef..bb4e4f3fffd8 100644 --- a/arch/arm64/mm/fault.c +++ b/arch/arm64/mm/fault.c @@ -109,7 +109,7 @@ static inline bool is_ttbr0_addr(unsigned long addr) static inline bool is_ttbr1_addr(unsigned long addr) { /* TTBR1 addresses may have a tag if KASAN_SW_TAGS is in use */ - return arch_kasan_reset_tag(addr) >= VA_START; + return arch_kasan_reset_tag(addr) >= PAGE_OFFSET; } /* -- cgit v1.2.3-58-ga151 From 77ad4ce69321abbe26ec92b2a2691a66531eb688 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Wed, 14 Aug 2019 14:28:48 +0100 Subject: arm64: memory: rename VA_START to PAGE_END Prior to commit: 14c127c957c1c607 ("arm64: mm: Flip kernel VA space") ... VA_START described the start of the TTBR1 address space for a given VA size described by VA_BITS, where all kernel mappings began. Since that commit, VA_START described a portion midway through the address space, where the linear map ends and other kernel mappings begin. To avoid confusion, let's rename VA_START to PAGE_END, making it clear that it's not the start of the TTBR1 address space and implying that it's related to PAGE_OFFSET. Comments and other mnemonics are updated accordingly, along with a typo fix in the decription of VMEMMAP_SIZE. There should be no functional change as a result of this patch. Signed-off-by: Mark Rutland Cc: Catalin Marinas Tested-by: Steve Capper Reviewed-by: Steve Capper Signed-off-by: Will Deacon --- arch/arm64/include/asm/memory.h | 20 ++++++++++---------- arch/arm64/include/asm/pgtable.h | 4 ++-- arch/arm64/kernel/hibernate.c | 2 +- arch/arm64/mm/dump.c | 6 +++--- arch/arm64/mm/kasan_init.c | 2 +- arch/arm64/mm/mmu.c | 4 ++-- 6 files changed, 19 insertions(+), 19 deletions(-) diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h index d69c2865ae40..a713bad71db5 100644 --- a/arch/arm64/include/asm/memory.h +++ b/arch/arm64/include/asm/memory.h @@ -28,20 +28,20 @@ * a struct page array * * If we are configured with a 52-bit kernel VA then our VMEMMAP_SIZE - * neads to cover the memory region from the beginning of the 52-bit - * PAGE_OFFSET all the way to VA_START for 48-bit. This allows us to + * needs to cover the memory region from the beginning of the 52-bit + * PAGE_OFFSET all the way to PAGE_END for 48-bit. This allows us to * keep a constant PAGE_OFFSET and "fallback" to using the higher end * of the VMEMMAP where 52-bit support is not available in hardware. 
*/ -#define VMEMMAP_SIZE ((_VA_START(VA_BITS_MIN) - PAGE_OFFSET) \ +#define VMEMMAP_SIZE ((_PAGE_END(VA_BITS_MIN) - PAGE_OFFSET) \ >> (PAGE_SHIFT - STRUCT_PAGE_MAX_SHIFT)) /* - * PAGE_OFFSET - the virtual address of the start of the linear map (top - * (VA_BITS - 1)) - * KIMAGE_VADDR - the virtual address of the start of the kernel image + * PAGE_OFFSET - the virtual address of the start of the linear map, at the + * start of the TTBR1 address space. + * PAGE_END - the end of the linear map, where all other kernel mappings begin. + * KIMAGE_VADDR - the virtual address of the start of the kernel image. * VA_BITS - the maximum number of bits for virtual addresses. - * VA_START - the first kernel virtual address. */ #define VA_BITS (CONFIG_ARM64_VA_BITS) #define _PAGE_OFFSET(va) (-(UL(1) << (va))) @@ -64,7 +64,7 @@ #define VA_BITS_MIN (VA_BITS) #endif -#define _VA_START(va) (-(UL(1) << ((va) - 1))) +#define _PAGE_END(va) (-(UL(1) << ((va) - 1))) #define KERNEL_START _text #define KERNEL_END _end @@ -87,7 +87,7 @@ #define KASAN_THREAD_SHIFT 1 #else #define KASAN_THREAD_SHIFT 0 -#define KASAN_SHADOW_END (_VA_START(VA_BITS_MIN)) +#define KASAN_SHADOW_END (_PAGE_END(VA_BITS_MIN)) #endif /* CONFIG_KASAN */ #define MIN_THREAD_SHIFT (14 + KASAN_THREAD_SHIFT) @@ -173,7 +173,7 @@ #ifndef __ASSEMBLY__ extern u64 vabits_actual; -#define VA_START (_VA_START(vabits_actual)) +#define PAGE_END (_PAGE_END(vabits_actual)) #include #include diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h index 4a695b9ee0f0..979e24fadf35 100644 --- a/arch/arm64/include/asm/pgtable.h +++ b/arch/arm64/include/asm/pgtable.h @@ -856,8 +856,8 @@ static inline void update_mmu_cache(struct vm_area_struct *vma, #define update_mmu_cache_pmd(vma, address, pmd) do { } while (0) -#define kc_vaddr_to_offset(v) ((v) & ~VA_START) -#define kc_offset_to_vaddr(o) ((o) | VA_START) +#define kc_vaddr_to_offset(v) ((v) & ~PAGE_END) +#define kc_offset_to_vaddr(o) ((o) | PAGE_END) #ifdef CONFIG_ARM64_PA_BITS_52 #define phys_to_ttbr(addr) (((addr) | ((addr) >> 46)) & TTBR_BADDR_MASK_52) diff --git a/arch/arm64/kernel/hibernate.c b/arch/arm64/kernel/hibernate.c index e130db05d932..e0a7fce0e01c 100644 --- a/arch/arm64/kernel/hibernate.c +++ b/arch/arm64/kernel/hibernate.c @@ -496,7 +496,7 @@ int swsusp_arch_resume(void) rc = -ENOMEM; goto out; } - rc = copy_page_tables(tmp_pg_dir, PAGE_OFFSET, VA_START); + rc = copy_page_tables(tmp_pg_dir, PAGE_OFFSET, PAGE_END); if (rc) goto out; diff --git a/arch/arm64/mm/dump.c b/arch/arm64/mm/dump.c index 8e10b4ba215a..93f9f77582ae 100644 --- a/arch/arm64/mm/dump.c +++ b/arch/arm64/mm/dump.c @@ -28,7 +28,7 @@ enum address_markers_idx { PAGE_OFFSET_NR = 0, - VA_START_NR, + PAGE_END_NR, #ifdef CONFIG_KASAN KASAN_START_NR, #endif @@ -36,7 +36,7 @@ enum address_markers_idx { static struct addr_marker address_markers[] = { { PAGE_OFFSET, "Linear Mapping start" }, - { 0 /* VA_START */, "Linear Mapping end" }, + { 0 /* PAGE_END */, "Linear Mapping end" }, #ifdef CONFIG_KASAN { 0 /* KASAN_SHADOW_START */, "Kasan shadow start" }, { KASAN_SHADOW_END, "Kasan shadow end" }, @@ -411,7 +411,7 @@ void ptdump_check_wx(void) static int ptdump_init(void) { - address_markers[VA_START_NR].start_address = VA_START; + address_markers[PAGE_END_NR].start_address = PAGE_END; #ifdef CONFIG_KASAN address_markers[KASAN_START_NR].start_address = KASAN_SHADOW_START; #endif diff --git a/arch/arm64/mm/kasan_init.c b/arch/arm64/mm/kasan_init.c index 725222271474..f87a32484ea8 100644 --- a/arch/arm64/mm/kasan_init.c 
+++ b/arch/arm64/mm/kasan_init.c @@ -226,7 +226,7 @@ void __init kasan_init(void) kasan_map_populate(kimg_shadow_start, kimg_shadow_end, early_pfn_to_nid(virt_to_pfn(lm_alias(_text)))); - kasan_populate_early_shadow(kasan_mem_to_shadow((void *) VA_START), + kasan_populate_early_shadow(kasan_mem_to_shadow((void *)PAGE_END), (void *)mod_shadow_start); kasan_populate_early_shadow((void *)kimg_shadow_end, (void *)KASAN_SHADOW_END); diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c index 0c8f7e55f859..8e4b7eaff8ce 100644 --- a/arch/arm64/mm/mmu.c +++ b/arch/arm64/mm/mmu.c @@ -399,7 +399,7 @@ static phys_addr_t pgd_pgtable_alloc(int shift) static void __init create_mapping_noalloc(phys_addr_t phys, unsigned long virt, phys_addr_t size, pgprot_t prot) { - if ((virt >= VA_START) && (virt < VMALLOC_START)) { + if ((virt >= PAGE_END) && (virt < VMALLOC_START)) { pr_warn("BUG: not creating mapping for %pa at 0x%016lx - outside kernel range\n", &phys, virt); return; @@ -426,7 +426,7 @@ void __init create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys, static void update_mapping_prot(phys_addr_t phys, unsigned long virt, phys_addr_t size, pgprot_t prot) { - if ((virt >= VA_START) && (virt < VMALLOC_START)) { + if ((virt >= PAGE_END) && (virt < VMALLOC_START)) { pr_warn("BUG: not updating mapping for %pa at 0x%016lx - outside kernel range\n", &phys, virt); return; -- cgit v1.2.3-58-ga151 From 38d16667604e31f30a715baefec2fe3aa88024f0 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Thu, 8 Aug 2019 15:05:54 +0100 Subject: arm64: Clarify when cpu_enable() is called Strengthen the wording in the documentation for cpu_enable() to make it more obvious to readers not already familiar with the code when the core will call this callback and that this is intentional. Signed-off-by: Mark Brown Reviewed-by: Suzuki K Poulose [will: minor tweak to emphasis in the comment] Signed-off-by: Will Deacon --- arch/arm64/include/asm/cpufeature.h | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h index cf65a47ee6b4..9cde5d2e768f 100644 --- a/arch/arm64/include/asm/cpufeature.h +++ b/arch/arm64/include/asm/cpufeature.h @@ -289,9 +289,16 @@ struct arm64_cpu_capabilities { u16 type; bool (*matches)(const struct arm64_cpu_capabilities *caps, int scope); /* - * Take the appropriate actions to enable this capability for this CPU. - * For each successfully booted CPU, this method is called for each - * globally detected capability. + * Take the appropriate actions to configure this capability + * for this CPU. If the capability is detected by the kernel + * this will be called on all the CPUs in the system, + * including the hotplugged CPUs, regardless of whether the + * capability is available on that specific CPU. This is + * useful for some capabilities (e.g, working around CPU + * errata), where all the CPUs must take some action (e.g, + * changing system control/configuration). Thus, if an action + * is required only if the CPU has the capability, then the + * routine must check it before taking any action. 
*/ void (*cpu_enable)(const struct arm64_cpu_capabilities *cap); union { -- cgit v1.2.3-58-ga151 From 90776dd1c427cbb4d381aa4b13338f1fb1d20f5e Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Tue, 13 Aug 2019 16:04:50 -0700 Subject: arm64/efi: Move variable assignments after SECTIONS It seems that LLVM's linker does not correctly handle variable assignments involving section positions that are updated during the SECTIONS parsing. Commit aa69fb62bea1 ("arm64/efi: Mark __efistub_stext_offset as an absolute symbol explicitly") ran into this too, but found a different workaround. However, this was not enough, as other variables were also miscalculated which manifested as boot failures under UEFI where __efistub__end was not taking the correct _end value (they should be the same): $ ld.lld -EL -maarch64elf --no-undefined -X -shared \ -Bsymbolic -z notext -z norelro --no-apply-dynamic-relocs \ -o vmlinux.lld -T poc.lds --whole-archive vmlinux.o && \ readelf -Ws vmlinux.lld | egrep '\b(__efistub_|)_end\b' 368272: ffff000002218000 0 NOTYPE LOCAL HIDDEN 38 __efistub__end 368322: ffff000012318000 0 NOTYPE GLOBAL DEFAULT 38 _end $ aarch64-linux-gnu-ld.bfd -EL -maarch64elf --no-undefined -X -shared \ -Bsymbolic -z notext -z norelro --no-apply-dynamic-relocs \ -o vmlinux.bfd -T poc.lds --whole-archive vmlinux.o && \ readelf -Ws vmlinux.bfd | egrep '\b(__efistub_|)_end\b' 338124: ffff000012318000 0 NOTYPE LOCAL DEFAULT ABS __efistub__end 383812: ffff000012318000 0 NOTYPE GLOBAL DEFAULT 15325 _end To work around this, all of the __efistub_-prefixed variable assignments need to be moved after the linker script's SECTIONS entry. As it turns out, this also solves the problem fixed in commit aa69fb62bea1, so those changes are reverted here. Link: https://github.com/ClangBuiltLinux/linux/issues/634 Link: https://bugs.llvm.org/show_bug.cgi?id=42990 Acked-by: Ard Biesheuvel Signed-off-by: Kees Cook Signed-off-by: Will Deacon --- arch/arm64/kernel/image-vars.h | 51 +++++++++++++++++++++++++++++++++++++++++ arch/arm64/kernel/image.h | 42 --------------------------------- arch/arm64/kernel/vmlinux.lds.S | 2 ++ 3 files changed, 53 insertions(+), 42 deletions(-) create mode 100644 arch/arm64/kernel/image-vars.h diff --git a/arch/arm64/kernel/image-vars.h b/arch/arm64/kernel/image-vars.h new file mode 100644 index 000000000000..25a2a9b479c2 --- /dev/null +++ b/arch/arm64/kernel/image-vars.h @@ -0,0 +1,51 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Linker script variables to be set after section resolution, as + * ld.lld does not like variables assigned before SECTIONS is processed. + */ +#ifndef __ARM64_KERNEL_IMAGE_VARS_H +#define __ARM64_KERNEL_IMAGE_VARS_H + +#ifndef LINKER_SCRIPT +#error This file should only be included in vmlinux.lds.S +#endif + +#ifdef CONFIG_EFI + +__efistub_stext_offset = stext - _text; + +/* + * The EFI stub has its own symbol namespace prefixed by __efistub_, to + * isolate it from the kernel proper. The following symbols are legally + * accessed by the stub, so provide some aliases to make them accessible. + * Only include data symbols here, or text symbols of functions that are + * guaranteed to be safe when executed at another offset than they were + * linked at. 
The routines below are all implemented in assembler in a + * position independent manner + */ +__efistub_memcmp = __pi_memcmp; +__efistub_memchr = __pi_memchr; +__efistub_memcpy = __pi_memcpy; +__efistub_memmove = __pi_memmove; +__efistub_memset = __pi_memset; +__efistub_strlen = __pi_strlen; +__efistub_strnlen = __pi_strnlen; +__efistub_strcmp = __pi_strcmp; +__efistub_strncmp = __pi_strncmp; +__efistub_strrchr = __pi_strrchr; +__efistub___flush_dcache_area = __pi___flush_dcache_area; + +#ifdef CONFIG_KASAN +__efistub___memcpy = __pi_memcpy; +__efistub___memmove = __pi_memmove; +__efistub___memset = __pi_memset; +#endif + +__efistub__text = _text; +__efistub__end = _end; +__efistub__edata = _edata; +__efistub_screen_info = screen_info; + +#endif + +#endif /* __ARM64_KERNEL_IMAGE_VARS_H */ diff --git a/arch/arm64/kernel/image.h b/arch/arm64/kernel/image.h index 2b85c0d6fa3d..c7d38c660372 100644 --- a/arch/arm64/kernel/image.h +++ b/arch/arm64/kernel/image.h @@ -65,46 +65,4 @@ DEFINE_IMAGE_LE64(_kernel_offset_le, TEXT_OFFSET); \ DEFINE_IMAGE_LE64(_kernel_flags_le, __HEAD_FLAGS); -#ifdef CONFIG_EFI - -/* - * Use ABSOLUTE() to avoid ld.lld treating this as a relative symbol: - * https://github.com/ClangBuiltLinux/linux/issues/561 - */ -__efistub_stext_offset = ABSOLUTE(stext - _text); - -/* - * The EFI stub has its own symbol namespace prefixed by __efistub_, to - * isolate it from the kernel proper. The following symbols are legally - * accessed by the stub, so provide some aliases to make them accessible. - * Only include data symbols here, or text symbols of functions that are - * guaranteed to be safe when executed at another offset than they were - * linked at. The routines below are all implemented in assembler in a - * position independent manner - */ -__efistub_memcmp = __pi_memcmp; -__efistub_memchr = __pi_memchr; -__efistub_memcpy = __pi_memcpy; -__efistub_memmove = __pi_memmove; -__efistub_memset = __pi_memset; -__efistub_strlen = __pi_strlen; -__efistub_strnlen = __pi_strnlen; -__efistub_strcmp = __pi_strcmp; -__efistub_strncmp = __pi_strncmp; -__efistub_strrchr = __pi_strrchr; -__efistub___flush_dcache_area = __pi___flush_dcache_area; - -#ifdef CONFIG_KASAN -__efistub___memcpy = __pi_memcpy; -__efistub___memmove = __pi_memmove; -__efistub___memset = __pi_memset; -#endif - -__efistub__text = _text; -__efistub__end = _end; -__efistub__edata = _edata; -__efistub_screen_info = screen_info; - -#endif - #endif /* __ARM64_KERNEL_IMAGE_H */ diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S index 31716afa30f6..aa76f7259668 100644 --- a/arch/arm64/kernel/vmlinux.lds.S +++ b/arch/arm64/kernel/vmlinux.lds.S @@ -254,6 +254,8 @@ SECTIONS HEAD_SYMBOLS } +#include "image-vars.h" + /* * The HYP init code and ID map text can't be longer than a page each, * and should not cross a page boundary. -- cgit v1.2.3-58-ga151 From d55c5f28afafb6b1f0a6978916b23338b383faab Mon Sep 17 00:00:00 2001 From: Sudeep Holla Date: Wed, 12 Jun 2019 13:51:37 +0100 Subject: arm64: smp: disable hotplug on trusted OS resident CPU The trusted OS may reject CPU_OFF calls to its resident CPU, so we must avoid issuing those. We never migrate a Trusted OS and we already take care to prevent CPU_OFF PSCI call. However, this is not reflected explicitly to the userspace. Any user can attempt to hotplug trusted OS resident CPU. The entire motion of going through the various state transitions in the CPU hotplug state machine gets executed and the PSCI layer finally refuses to make CPU_OFF call. 
This results is unnecessary unwinding of CPU hotplug state machine in the kernel. Instead we can mark the trusted OS resident CPU as not available for hotplug, so that the user attempt or request to do the same will get immediately rejected. Cc: Mark Rutland Cc: Catalin Marinas Cc: Will Deacon Signed-off-by: Sudeep Holla Signed-off-by: Will Deacon --- arch/arm64/include/asm/cpu_ops.h | 3 +++ arch/arm64/kernel/psci.c | 6 ++++++ arch/arm64/kernel/setup.c | 11 ++++++++++- 3 files changed, 19 insertions(+), 1 deletion(-) diff --git a/arch/arm64/include/asm/cpu_ops.h b/arch/arm64/include/asm/cpu_ops.h index c09d633c3109..86aabf1e0199 100644 --- a/arch/arm64/include/asm/cpu_ops.h +++ b/arch/arm64/include/asm/cpu_ops.h @@ -23,6 +23,8 @@ * @cpu_boot: Boots a cpu into the kernel. * @cpu_postboot: Optionally, perform any post-boot cleanup or necesary * synchronisation. Called from the cpu being booted. + * @cpu_can_disable: Determines whether a CPU can be disabled based on + * mechanism-specific information. * @cpu_disable: Prepares a cpu to die. May fail for some mechanism-specific * reason, which will cause the hot unplug to be aborted. Called * from the cpu to be killed. @@ -42,6 +44,7 @@ struct cpu_operations { int (*cpu_boot)(unsigned int); void (*cpu_postboot)(void); #ifdef CONFIG_HOTPLUG_CPU + bool (*cpu_can_disable)(unsigned int cpu); int (*cpu_disable)(unsigned int cpu); void (*cpu_die)(unsigned int cpu); int (*cpu_kill)(unsigned int cpu); diff --git a/arch/arm64/kernel/psci.c b/arch/arm64/kernel/psci.c index a543ab7e007c..c9f72b2665f1 100644 --- a/arch/arm64/kernel/psci.c +++ b/arch/arm64/kernel/psci.c @@ -46,6 +46,11 @@ static int cpu_psci_cpu_boot(unsigned int cpu) } #ifdef CONFIG_HOTPLUG_CPU +static bool cpu_psci_cpu_can_disable(unsigned int cpu) +{ + return !psci_tos_resident_on(cpu); +} + static int cpu_psci_cpu_disable(unsigned int cpu) { /* Fail early if we don't have CPU_OFF support */ @@ -109,6 +114,7 @@ const struct cpu_operations cpu_psci_ops = { .cpu_prepare = cpu_psci_cpu_prepare, .cpu_boot = cpu_psci_cpu_boot, #ifdef CONFIG_HOTPLUG_CPU + .cpu_can_disable = cpu_psci_cpu_can_disable, .cpu_disable = cpu_psci_cpu_disable, .cpu_die = cpu_psci_cpu_die, .cpu_kill = cpu_psci_cpu_kill, diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c index 9c4bad7d7131..57ff38600828 100644 --- a/arch/arm64/kernel/setup.c +++ b/arch/arm64/kernel/setup.c @@ -357,6 +357,15 @@ void __init setup_arch(char **cmdline_p) } } +static inline bool cpu_can_disable(unsigned int cpu) +{ +#ifdef CONFIG_HOTPLUG_CPU + if (cpu_ops[cpu] && cpu_ops[cpu]->cpu_can_disable) + return cpu_ops[cpu]->cpu_can_disable(cpu); +#endif + return false; +} + static int __init topology_init(void) { int i; @@ -366,7 +375,7 @@ static int __init topology_init(void) for_each_possible_cpu(i) { struct cpu *cpu = &per_cpu(cpu_data.cpu, i); - cpu->hotpluggable = 1; + cpu->hotpluggable = cpu_can_disable(i); register_cpu(cpu, i); } -- cgit v1.2.3-58-ga151 From d225bb8d8a897d35c7beedcaba6caf57b3a4d292 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 13 Aug 2019 11:01:41 +0200 Subject: arm64: unexport set_memory_x and set_memory_nx No module currently messed with clearing or setting the execute permission of kernel memory, and none really should. 
Signed-off-by: Christoph Hellwig Signed-off-by: Will Deacon --- arch/arm64/mm/pageattr.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/arch/arm64/mm/pageattr.c b/arch/arm64/mm/pageattr.c index 03c53f16ee77..9ce7bd9d4d9c 100644 --- a/arch/arm64/mm/pageattr.c +++ b/arch/arm64/mm/pageattr.c @@ -128,7 +128,6 @@ int set_memory_nx(unsigned long addr, int numpages) __pgprot(PTE_PXN), __pgprot(0)); } -EXPORT_SYMBOL_GPL(set_memory_nx); int set_memory_x(unsigned long addr, int numpages) { @@ -136,7 +135,6 @@ int set_memory_x(unsigned long addr, int numpages) __pgprot(0), __pgprot(PTE_PXN)); } -EXPORT_SYMBOL_GPL(set_memory_x); int set_memory_valid(unsigned long addr, int numpages, int enable) { -- cgit v1.2.3-58-ga151 From 34b5560db40d2941cfbe82eca1641353d5aed1a9 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Wed, 14 Aug 2019 15:31:57 +0100 Subject: kasan/arm64: fix CONFIG_KASAN_SW_TAGS && KASAN_INLINE The generic Makefile.kasan propagates CONFIG_KASAN_SHADOW_OFFSET into KASAN_SHADOW_OFFSET, but only does so for CONFIG_KASAN_GENERIC. Since commit: 6bd1d0be0e97936d ("arm64: kasan: Switch to using KASAN_SHADOW_OFFSET") ... arm64 defines CONFIG_KASAN_SHADOW_OFFSET in Kconfig rather than defining KASAN_SHADOW_OFFSET in a Makefile. Thus, if CONFIG_KASAN_SW_TAGS && KASAN_INLINE are selected, we get build time splats due to KASAN_SHADOW_OFFSET not being set: | [mark@lakrids:~/src/linux]% usellvm 8.0.1 usekorg 8.1.0 make ARCH=arm64 CROSS_COMPILE=aarch64-linux- CC=clang | scripts/kconfig/conf --syncconfig Kconfig | CC scripts/mod/empty.o | clang (LLVM option parsing): for the -hwasan-mapping-offset option: '' value invalid for uint argument! | scripts/Makefile.build:273: recipe for target 'scripts/mod/empty.o' failed | make[1]: *** [scripts/mod/empty.o] Error 1 | Makefile:1123: recipe for target 'prepare0' failed | make: *** [prepare0] Error 2 Let's fix this by always propagating CONFIG_KASAN_SHADOW_OFFSET into KASAN_SHADOW_OFFSET if CONFIG_KASAN is selected, moving the existing common definition of +CFLAGS_KASAN_NOSANITIZE to the top of Makefile.kasan. Cc: Catalin Marinas Signed-off-by: Mark Rutland Acked-by: Andrey Ryabinin Tested-by Steve Capper Signed-off-by: Will Deacon --- scripts/Makefile.kasan | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/scripts/Makefile.kasan b/scripts/Makefile.kasan index 6410bd22fe38..03757cc60e06 100644 --- a/scripts/Makefile.kasan +++ b/scripts/Makefile.kasan @@ -1,4 +1,9 @@ # SPDX-License-Identifier: GPL-2.0 +ifdef CONFIG_KASAN +CFLAGS_KASAN_NOSANITIZE := -fno-builtin +KASAN_SHADOW_OFFSET ?= $(CONFIG_KASAN_SHADOW_OFFSET) +endif + ifdef CONFIG_KASAN_GENERIC ifdef CONFIG_KASAN_INLINE @@ -7,8 +12,6 @@ else call_threshold := 0 endif -KASAN_SHADOW_OFFSET ?= $(CONFIG_KASAN_SHADOW_OFFSET) - CFLAGS_KASAN_MINIMAL := -fsanitize=kernel-address cc-param = $(call cc-option, -mllvm -$(1), $(call cc-option, --param $(1))) @@ -45,7 +48,3 @@ CFLAGS_KASAN := -fsanitize=kernel-hwaddress \ $(instrumentation_flags) endif # CONFIG_KASAN_SW_TAGS - -ifdef CONFIG_KASAN -CFLAGS_KASAN_NOSANITIZE := -fno-builtin -endif -- cgit v1.2.3-58-ga151 From 74585fcb7b3ccb35135e2418dd66251022a916e5 Mon Sep 17 00:00:00 2001 From: Andrey Konovalov Date: Mon, 19 Aug 2019 15:14:42 +0200 Subject: selftests, arm64: fix uninitialized symbol in tags_test.c Fix tagged_ptr not being initialized when TBI is not enabled. 
Link: https://www.spinics.net/lists/linux-kselftest/msg09446.html Reported-by: Dan Carpenter Signed-off-by: Andrey Konovalov Signed-off-by: Will Deacon --- tools/testing/selftests/arm64/tags_test.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/tools/testing/selftests/arm64/tags_test.c b/tools/testing/selftests/arm64/tags_test.c index 22a1b266e373..5701163460ef 100644 --- a/tools/testing/selftests/arm64/tags_test.c +++ b/tools/testing/selftests/arm64/tags_test.c @@ -14,15 +14,17 @@ int main(void) { static int tbi_enabled = 0; - struct utsname *ptr, *tagged_ptr; + unsigned long tag = 0; + struct utsname *ptr; int err; if (prctl(PR_SET_TAGGED_ADDR_CTRL, PR_TAGGED_ADDR_ENABLE, 0, 0, 0) == 0) tbi_enabled = 1; ptr = (struct utsname *)malloc(sizeof(*ptr)); if (tbi_enabled) - tagged_ptr = (struct utsname *)SET_TAG(ptr, 0x42); - err = uname(tagged_ptr); + tag = 0x42; + ptr = (struct utsname *)SET_TAG(ptr, tag); + err = uname(ptr); free(ptr); return err; -- cgit v1.2.3-58-ga151 From 117acf5c29dd89e4c86761c365b9724dba0d9763 Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Tue, 20 Aug 2019 17:23:19 +1000 Subject: powerpc/Makefile: Always pass --synthetic to nm if supported Back in 2004 we added logic to arch/ppc64/Makefile to pass the --synthetic option to nm, if it was supported by nm. Then in 2005 when arch/ppc64 and arch/ppc were merged, the logic to add --synthetic was moved inside an #ifdef CONFIG_PPC64 block within arch/powerpc/Makefile, and has remained there since. That was fine, though crufty, until recently when a change to init/Kconfig added a config time check that uses $(NM). On powerpc that leads to an infinite loop because Kconfig uses $(NM) to calculate some values, then the powerpc Makefile changes $(NM), which Kconfig notices and restarts. The original commit that added --synthetic simply said: On new toolchains we need to use nm --synthetic or we miss code symbols. And the nm man page says that the --synthetic option causes nm to: Include synthetic symbols in the output. These are special symbols created by the linker for various purposes. So it seems safe to always pass --synthetic if nm supports it, ie. on 32-bit and 64-bit, it just means 32-bit kernels might have more symbols reported (and in practice I see no extra symbols). Making it unconditional avoids the #ifdef CONFIG_PPC64, which in turn avoids the infinite loop. Debugged-by: Peter Collingbourne Signed-off-by: Michael Ellerman Signed-off-by: Will Deacon --- arch/powerpc/Makefile | 2 -- 1 file changed, 2 deletions(-) diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile index c345b79414a9..403f7e193833 100644 --- a/arch/powerpc/Makefile +++ b/arch/powerpc/Makefile @@ -39,13 +39,11 @@ endif uname := $(shell uname -m) KBUILD_DEFCONFIG := $(if $(filter ppc%,$(uname)),$(uname),ppc64)_defconfig -ifdef CONFIG_PPC64 new_nm := $(shell if $(NM) --help 2>&1 | grep -- '--synthetic' > /dev/null; then echo y; else echo n; fi) ifeq ($(new_nm),y) NM := $(NM) --synthetic endif -endif # BITS is used as extension for files which are available in a 32 bit # and a 64 bit version to simplify shared Makefiles. -- cgit v1.2.3-58-ga151 From 2d122942484c20bde308bdf694459800b63211d1 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Tue, 20 Aug 2019 10:11:54 +0100 Subject: Revert "init/Kconfig: Fix infinite Kconfig recursion on PPC" This reverts commit 71c67a31f09fa8fdd1495dffd96a5f0d4cef2ede. 
Commit 117acf5c29dd ("powerpc/Makefile: Always pass --synthetic to nm if supported") removed the only conditional definition of $(NM), so we can revert our temporary bodge to avoid Kconfig recursion and go back to passing $(NM) through to the 'tools-support-relr.sh' when detecting support for RELR relocations. Signed-off-by: Will Deacon --- init/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/init/Kconfig b/init/Kconfig index 8b4596edda4e..d96127ebc44e 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -31,7 +31,7 @@ config CC_HAS_ASM_GOTO def_bool $(success,$(srctree)/scripts/gcc-goto.sh $(CC)) config TOOLS_SUPPORT_RELR - def_bool $(success,env "CC=$(CC)" "LD=$(LD)" "OBJCOPY=$(OBJCOPY)" $(srctree)/scripts/tools-support-relr.sh) + def_bool $(success,env "CC=$(CC)" "LD=$(LD)" "NM=$(NM)" "OBJCOPY=$(OBJCOPY)" $(srctree)/scripts/tools-support-relr.sh) config CC_HAS_WARN_MAYBE_UNINITIALIZED def_bool $(cc-option,-Wmaybe-uninitialized) -- cgit v1.2.3-58-ga151 From d91cc2f46ad5bd8020902093385fedf7e9e9d755 Mon Sep 17 00:00:00 2001 From: Raphael Gault Date: Tue, 20 Aug 2019 16:57:45 +0100 Subject: arm64: perf_event: Add missing header needed for smp_processor_id() In perf_event.c we use smp_processor_id(), but we haven't included where it is defined, and rely on this being pulled in via a transitive include. Let's make this more robust by including explicitly. Acked-by: Mark Rutland Signed-off-by: Raphael Gault Signed-off-by: Will Deacon --- arch/arm64/kernel/perf_event.c | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c index 2d3bdebdf6df..a0b4f1bca491 100644 --- a/arch/arm64/kernel/perf_event.c +++ b/arch/arm64/kernel/perf_event.c @@ -19,6 +19,7 @@ #include #include #include +#include /* ARMv8 Cortex-A53 specific event types. */ #define ARMV8_A53_PERFCTR_PREF_LINEFILL 0xC2 -- cgit v1.2.3-58-ga151 From 3e91ec89f527b9870fe42dcbdb74fd389d123a95 Mon Sep 17 00:00:00 2001 From: Catalin Marinas Date: Thu, 15 Aug 2019 16:44:00 +0100 Subject: arm64: Tighten the PR_{SET, GET}_TAGGED_ADDR_CTRL prctl() unused arguments Require that arg{3,4,5} of the PR_{SET,GET}_TAGGED_ADDR_CTRL prctl and arg2 of the PR_GET_TAGGED_ADDR_CTRL prctl() are zero rather than ignored for future extensions. Acked-by: Andrey Konovalov Signed-off-by: Catalin Marinas Signed-off-by: Will Deacon --- kernel/sys.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/kernel/sys.c b/kernel/sys.c index c6c4d5358bd3..ec48396b4943 100644 --- a/kernel/sys.c +++ b/kernel/sys.c @@ -2499,9 +2499,13 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3, error = PAC_RESET_KEYS(me, arg2); break; case PR_SET_TAGGED_ADDR_CTRL: + if (arg3 || arg4 || arg5) + return -EINVAL; error = SET_TAGGED_ADDR_CTRL(arg2); break; case PR_GET_TAGGED_ADDR_CTRL: + if (arg2 || arg3 || arg4 || arg5) + return -EINVAL; error = GET_TAGGED_ADDR_CTRL(); break; default: -- cgit v1.2.3-58-ga151 From 413235fcedc7f61e925fe9818bc3f5eff8ad2494 Mon Sep 17 00:00:00 2001 From: Catalin Marinas Date: Thu, 15 Aug 2019 16:44:01 +0100 Subject: arm64: Change the tagged_addr sysctl control semantics to only prevent the opt-in First rename the sysctl control to abi.tagged_addr_disabled and make it default off (zero). When abi.tagged_addr_disabled == 1, only block the enabling of the TBI ABI via prctl(PR_SET_TAGGED_ADDR_CTRL, PR_TAGGED_ADDR_ENABLE). Getting the status of the ABI or disabling it is still allowed. 
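(Editor's sketch of the new semantics from a userspace point of view; the PR_* constants are taken from the uapi header introduced by this series and are guarded below in case older toolchain headers lack them. With abi.tagged_addr_disabled=1 only the enable path fails, while querying and disabling keep working:)

  #include <stdio.h>
  #include <sys/prctl.h>

  #ifndef PR_SET_TAGGED_ADDR_CTRL
  #define PR_SET_TAGGED_ADDR_CTRL 55
  #define PR_GET_TAGGED_ADDR_CTRL 56
  #define PR_TAGGED_ADDR_ENABLE   (1UL << 0)
  #endif

  int main(void)
  {
          /* Fails with EINVAL if the admin set abi.tagged_addr_disabled=1 ... */
          if (prctl(PR_SET_TAGGED_ADDR_CTRL, PR_TAGGED_ADDR_ENABLE, 0, 0, 0))
                  perror("PR_SET_TAGGED_ADDR_CTRL");

          /* ... but reading the status and disabling remain permitted. */
          printf("tagged addr ABI: %d\n", prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0));
          return prctl(PR_SET_TAGGED_ADDR_CTRL, 0, 0, 0, 0);
  }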
Acked-by: Andrey Konovalov Signed-off-by: Catalin Marinas Signed-off-by: Will Deacon --- arch/arm64/kernel/process.c | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c index 76b7c55026aa..03689c0beb34 100644 --- a/arch/arm64/kernel/process.c +++ b/arch/arm64/kernel/process.c @@ -579,17 +579,22 @@ void arch_setup_new_exec(void) /* * Control the relaxed ABI allowing tagged user addresses into the kernel. */ -static unsigned int tagged_addr_prctl_allowed = 1; +static unsigned int tagged_addr_disabled; long set_tagged_addr_ctrl(unsigned long arg) { - if (!tagged_addr_prctl_allowed) - return -EINVAL; if (is_compat_task()) return -EINVAL; if (arg & ~PR_TAGGED_ADDR_ENABLE) return -EINVAL; + /* + * Do not allow the enabling of the tagged address ABI if globally + * disabled via sysctl abi.tagged_addr_disabled. + */ + if (arg & PR_TAGGED_ADDR_ENABLE && tagged_addr_disabled) + return -EINVAL; + update_thread_flag(TIF_TAGGED_ADDR, arg & PR_TAGGED_ADDR_ENABLE); return 0; @@ -597,8 +602,6 @@ long set_tagged_addr_ctrl(unsigned long arg) long get_tagged_addr_ctrl(void) { - if (!tagged_addr_prctl_allowed) - return -EINVAL; if (is_compat_task()) return -EINVAL; @@ -618,9 +621,9 @@ static int one = 1; static struct ctl_table tagged_addr_sysctl_table[] = { { - .procname = "tagged_addr", + .procname = "tagged_addr_disabled", .mode = 0644, - .data = &tagged_addr_prctl_allowed, + .data = &tagged_addr_disabled, .maxlen = sizeof(int), .proc_handler = proc_dointvec_minmax, .extra1 = &zero, -- cgit v1.2.3-58-ga151 From 2671828c3ff4ffadf777f793a1f3232d6e51394a Mon Sep 17 00:00:00 2001 From: James Morse Date: Tue, 20 Aug 2019 18:45:57 +0100 Subject: arm64: entry: Move ct_user_exit before any other exception When taking an SError or Debug exception from EL0, we run the C handler for these exceptions before updating the context tracking code and unmasking lower priority interrupts. When booting with nohz_full lockdep tells us we got this wrong: | ============================= | WARNING: suspicious RCU usage | 5.3.0-rc2-00010-gb4b5e9dcb11b-dirty #11271 Not tainted | ----------------------------- | include/linux/rcupdate.h:643 rcu_read_unlock() used illegally wh! | | other info that might help us debug this: | | | RCU used illegally from idle CPU! | rcu_scheduler_active = 2, debug_locks = 1 | RCU used illegally from extended quiescent state! | 1 lock held by a.out/432: | #0: 00000000c7a79515 (rcu_read_lock){....}, at: brk_handler+0x00 | | stack backtrace: | CPU: 1 PID: 432 Comm: a.out Not tainted 5.3.0-rc2-00010-gb4b5e9d1 | Hardware name: ARM LTD ARM Juno Development Platform/ARM Juno De8 | Call trace: | dump_backtrace+0x0/0x140 | show_stack+0x14/0x20 | dump_stack+0xbc/0x104 | lockdep_rcu_suspicious+0xf8/0x108 | brk_handler+0x164/0x1b0 | do_debug_exception+0x11c/0x278 | el0_dbg+0x14/0x20 Moving the ct_user_exit calls to be before do_debug_exception() means they are also before trace_hardirqs_off() has been updated. Add a new ct_user_exit_irqoff macro to avoid the context-tracking code using irqsave/restore before we've updated trace_hardirqs_off(). To be consistent, do this everywhere. The C helper is called enter_from_user_mode() to match x86 in the hope we can merge them into kernel/context_tracking.c later. 
Cc: Masami Hiramatsu Fixes: 6c81fe7925cc4c42 ("arm64: enable context tracking") Signed-off-by: James Morse Signed-off-by: Will Deacon --- arch/arm64/include/asm/exception.h | 2 ++ arch/arm64/kernel/entry.S | 36 +++++++++++++++++++----------------- arch/arm64/kernel/traps.c | 9 +++++++++ 3 files changed, 30 insertions(+), 17 deletions(-) diff --git a/arch/arm64/include/asm/exception.h b/arch/arm64/include/asm/exception.h index ed57b760f38c..a17393ff6677 100644 --- a/arch/arm64/include/asm/exception.h +++ b/arch/arm64/include/asm/exception.h @@ -30,4 +30,6 @@ static inline u32 disr_to_esr(u64 disr) return esr; } +asmlinkage void enter_from_user_mode(void); + #endif /* __ASM_EXCEPTION_H */ diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S index 320a30dbe35e..84a822748c84 100644 --- a/arch/arm64/kernel/entry.S +++ b/arch/arm64/kernel/entry.S @@ -30,9 +30,9 @@ * Context tracking subsystem. Used to instrument transitions * between user and kernel mode. */ - .macro ct_user_exit + .macro ct_user_exit_irqoff #ifdef CONFIG_CONTEXT_TRACKING - bl context_tracking_user_exit + bl enter_from_user_mode #endif .endm @@ -792,8 +792,8 @@ el0_cp15: /* * Trapped CP15 (MRC, MCR, MRRC, MCRR) instructions */ + ct_user_exit_irqoff enable_daif - ct_user_exit mov x0, x25 mov x1, sp bl do_cp15instr @@ -805,8 +805,8 @@ el0_da: * Data abort handling */ mrs x26, far_el1 + ct_user_exit_irqoff enable_daif - ct_user_exit clear_address_tag x0, x26 mov x1, x25 mov x2, sp @@ -818,11 +818,11 @@ el0_ia: */ mrs x26, far_el1 gic_prio_kentry_setup tmp=x0 + ct_user_exit_irqoff enable_da_f #ifdef CONFIG_TRACE_IRQFLAGS bl trace_hardirqs_off #endif - ct_user_exit mov x0, x26 mov x1, x25 mov x2, sp @@ -832,8 +832,8 @@ el0_fpsimd_acc: /* * Floating Point or Advanced SIMD access */ + ct_user_exit_irqoff enable_daif - ct_user_exit mov x0, x25 mov x1, sp bl do_fpsimd_acc @@ -842,8 +842,8 @@ el0_sve_acc: /* * Scalable Vector Extension access */ + ct_user_exit_irqoff enable_daif - ct_user_exit mov x0, x25 mov x1, sp bl do_sve_acc @@ -852,8 +852,8 @@ el0_fpsimd_exc: /* * Floating Point, Advanced SIMD or SVE exception */ + ct_user_exit_irqoff enable_daif - ct_user_exit mov x0, x25 mov x1, sp bl do_fpsimd_exc @@ -868,11 +868,11 @@ el0_sp_pc: * Stack or PC alignment exception handling */ gic_prio_kentry_setup tmp=x0 + ct_user_exit_irqoff enable_da_f #ifdef CONFIG_TRACE_IRQFLAGS bl trace_hardirqs_off #endif - ct_user_exit mov x0, x26 mov x1, x25 mov x2, sp @@ -882,8 +882,8 @@ el0_undef: /* * Undefined instruction */ + ct_user_exit_irqoff enable_daif - ct_user_exit mov x0, sp bl do_undefinstr b ret_to_user @@ -891,8 +891,8 @@ el0_sys: /* * System instructions, for trapped cache maintenance instructions */ + ct_user_exit_irqoff enable_daif - ct_user_exit mov x0, x25 mov x1, sp bl do_sysinstr @@ -902,17 +902,18 @@ el0_dbg: * Debug exception handling */ tbnz x24, #0, el0_inv // EL0 only + mrs x24, far_el1 gic_prio_kentry_setup tmp=x3 - mrs x0, far_el1 + ct_user_exit_irqoff + mov x0, x24 mov x1, x25 mov x2, sp bl do_debug_exception enable_da_f - ct_user_exit b ret_to_user el0_inv: + ct_user_exit_irqoff enable_daif - ct_user_exit mov x0, sp mov x1, #BAD_SYNC mov x2, x25 @@ -925,13 +926,13 @@ el0_irq: kernel_entry 0 el0_irq_naked: gic_prio_irq_setup pmr=x20, tmp=x0 + ct_user_exit_irqoff enable_da_f #ifdef CONFIG_TRACE_IRQFLAGS bl trace_hardirqs_off #endif - ct_user_exit #ifdef CONFIG_HARDEN_BRANCH_PREDICTOR tbz x22, #55, 1f bl do_el0_irq_bp_hardening @@ -958,13 +959,14 @@ ENDPROC(el1_error) el0_error: kernel_entry 0 el0_error_naked: - 
mrs x1, esr_el1 + mrs x25, esr_el1 gic_prio_kentry_setup tmp=x2 + ct_user_exit_irqoff enable_dbg mov x0, sp + mov x1, x25 bl do_serror enable_da_f - ct_user_exit b ret_to_user ENDPROC(el0_error) diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c index a5d7ce4297b0..6e950908eb97 100644 --- a/arch/arm64/kernel/traps.c +++ b/arch/arm64/kernel/traps.c @@ -7,9 +7,11 @@ */ #include +#include #include #include #include +#include #include #include #include @@ -900,6 +902,13 @@ asmlinkage void do_serror(struct pt_regs *regs, unsigned int esr) nmi_exit(); } +asmlinkage void enter_from_user_mode(void) +{ + CT_WARN_ON(ct_state() != CONTEXT_USER); + user_exit_irqoff(); +} +NOKPROBE_SYMBOL(enter_from_user_mode); + void __pte_error(const char *file, int line, unsigned long val) { pr_err("%s:%d: bad pte %016lx.\n", file, line, val); -- cgit v1.2.3-58-ga151 From 6bfa3134bd3a185e98031c8a8dee18e82153df2d Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Wed, 21 Aug 2019 18:11:17 +0900 Subject: arm64: add arch/arm64/Kbuild Use the standard obj-y form to specify the sub-directories under arch/arm64/. No functional change intended. Signed-off-by: Masahiro Yamada Signed-off-by: Will Deacon --- arch/arm64/Kbuild | 6 ++++++ arch/arm64/Makefile | 6 +----- 2 files changed, 7 insertions(+), 5 deletions(-) create mode 100644 arch/arm64/Kbuild diff --git a/arch/arm64/Kbuild b/arch/arm64/Kbuild new file mode 100644 index 000000000000..d6465823b281 --- /dev/null +++ b/arch/arm64/Kbuild @@ -0,0 +1,6 @@ +# SPDX-License-Identifier: GPL-2.0-only +obj-y += kernel/ mm/ +obj-$(CONFIG_NET) += net/ +obj-$(CONFIG_KVM) += kvm/ +obj-$(CONFIG_XEN) += xen/ +obj-$(CONFIG_CRYPTO) += crypto/ diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile index 61de992bbea3..dcbbd53b7f5a 100644 --- a/arch/arm64/Makefile +++ b/arch/arm64/Makefile @@ -136,11 +136,7 @@ KASAN_SHADOW_OFFSET := $(shell printf "0x%08x00000000\n" $$(( \ export TEXT_OFFSET GZFLAGS -core-y += arch/arm64/kernel/ arch/arm64/mm/ -core-$(CONFIG_NET) += arch/arm64/net/ -core-$(CONFIG_KVM) += arch/arm64/kvm/ -core-$(CONFIG_XEN) += arch/arm64/xen/ -core-$(CONFIG_CRYPTO) += arch/arm64/crypto/ +core-y += arch/arm64/ libs-y := arch/arm64/lib/ $(libs-y) core-$(CONFIG_EFI_STUB) += $(objtree)/drivers/firmware/efi/libstub/lib.a -- cgit v1.2.3-58-ga151 From e1b832503e8f29ea6e20c30db9c3176576c0fc78 Mon Sep 17 00:00:00 2001 From: Vincenzo Frascino Date: Wed, 21 Aug 2019 17:47:29 +0100 Subject: arm64: Define Documentation/arm64/tagged-address-abi.rst On AArch64 the TCR_EL1.TBI0 bit is set by default, allowing userspace (EL0) to perform memory accesses through 64-bit pointers with a non-zero top byte. Introduce the document describing the relaxation of the syscall ABI that allows userspace to pass certain tagged pointers to kernel syscalls. 
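As a quick illustration of the ABI described below (a hedged userspace sketch; the prctl constants are those defined by the interface, with PR_GET_TAGGED_ADDR_CTRL assumed to be 56 as in the uapi header), a thread can probe and enable the relaxation like this:

  #include <stdio.h>
  #include <sys/prctl.h>

  /* Fallback definitions in case the libc headers predate the ABI. */
  #ifndef PR_SET_TAGGED_ADDR_CTRL
  #define PR_SET_TAGGED_ADDR_CTRL	55
  #define PR_GET_TAGGED_ADDR_CTRL	56
  #define PR_TAGGED_ADDR_ENABLE		(1UL << 0)
  #endif

  int main(void)
  {
  	/* Enable the relaxed syscall ABI for this thread only. */
  	if (prctl(PR_SET_TAGGED_ADDR_CTRL, PR_TAGGED_ADDR_ENABLE, 0, 0, 0))
  		printf("tagged address ABI unavailable (or disabled via sysctl)\n");

  	/* Read back the per-thread state; returns PR_TAGGED_ADDR_ENABLE or 0. */
  	printf("tagged addr ctrl: %d\n", prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0));
  	return 0;
  }

A complete example that actually tags and dereferences a pointer is included in the document itself (section 4).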
Cc: Will Deacon Cc: Szabolcs Nagy Acked-by: Kevin Brodsky Acked-by: Andrey Konovalov Signed-off-by: Vincenzo Frascino Co-developed-by: Catalin Marinas Signed-off-by: Catalin Marinas Signed-off-by: Will Deacon --- Documentation/arm64/tagged-address-abi.rst | 156 +++++++++++++++++++++++++++++ 1 file changed, 156 insertions(+) create mode 100644 Documentation/arm64/tagged-address-abi.rst diff --git a/Documentation/arm64/tagged-address-abi.rst b/Documentation/arm64/tagged-address-abi.rst new file mode 100644 index 000000000000..d4a85d535bf9 --- /dev/null +++ b/Documentation/arm64/tagged-address-abi.rst @@ -0,0 +1,156 @@ +========================== +AArch64 TAGGED ADDRESS ABI +========================== + +Authors: Vincenzo Frascino + Catalin Marinas + +Date: 21 August 2019 + +This document describes the usage and semantics of the Tagged Address +ABI on AArch64 Linux. + +1. Introduction +--------------- + +On AArch64 the ``TCR_EL1.TBI0`` bit is set by default, allowing +userspace (EL0) to perform memory accesses through 64-bit pointers with +a non-zero top byte. This document describes the relaxation of the +syscall ABI that allows userspace to pass certain tagged pointers to +kernel syscalls. + +2. AArch64 Tagged Address ABI +----------------------------- + +From the kernel syscall interface perspective and for the purposes of +this document, a "valid tagged pointer" is a pointer with a potentially +non-zero top-byte that references an address in the user process address +space obtained in one of the following ways: + +- ``mmap()`` syscall where either: + + - flags have the ``MAP_ANONYMOUS`` bit set or + - the file descriptor refers to a regular file (including those + returned by ``memfd_create()``) or ``/dev/zero`` + +- ``brk()`` syscall (i.e. the heap area between the initial location of + the program break at process creation and its current location). + +- any memory mapped by the kernel in the address space of the process + during creation and with the same restrictions as for ``mmap()`` above + (e.g. data, bss, stack). + +The AArch64 Tagged Address ABI has two stages of relaxation depending +how the user addresses are used by the kernel: + +1. User addresses not accessed by the kernel but used for address space + management (e.g. ``mmap()``, ``mprotect()``, ``madvise()``). The use + of valid tagged pointers in this context is always allowed. + +2. User addresses accessed by the kernel (e.g. ``write()``). This ABI + relaxation is disabled by default and the application thread needs to + explicitly enable it via ``prctl()`` as follows: + + - ``PR_SET_TAGGED_ADDR_CTRL``: enable or disable the AArch64 Tagged + Address ABI for the calling thread. + + The ``(unsigned int) arg2`` argument is a bit mask describing the + control mode used: + + - ``PR_TAGGED_ADDR_ENABLE``: enable AArch64 Tagged Address ABI. + Default status is disabled. + + Arguments ``arg3``, ``arg4``, and ``arg5`` must be 0. + + - ``PR_GET_TAGGED_ADDR_CTRL``: get the status of the AArch64 Tagged + Address ABI for the calling thread. + + Arguments ``arg2``, ``arg3``, ``arg4``, and ``arg5`` must be 0. + + The ABI properties described above are thread-scoped, inherited on + clone() and fork() and cleared on exec(). + + Calling ``prctl(PR_SET_TAGGED_ADDR_CTRL, PR_TAGGED_ADDR_ENABLE, 0, 0, 0)`` + returns ``-EINVAL`` if the AArch64 Tagged Address ABI is globally + disabled by ``sysctl abi.tagged_addr_disabled=1``. The default + ``sysctl abi.tagged_addr_disabled`` configuration is 0. 
+ +When the AArch64 Tagged Address ABI is enabled for a thread, the +following behaviours are guaranteed: + +- All syscalls except the cases mentioned in section 3 can accept any + valid tagged pointer. + +- The syscall behaviour is undefined for invalid tagged pointers: it may + result in an error code being returned, a (fatal) signal being raised, + or other modes of failure. + +- The syscall behaviour for a valid tagged pointer is the same as for + the corresponding untagged pointer. + + +A definition of the meaning of tagged pointers on AArch64 can be found +in Documentation/arm64/tagged-pointers.rst. + +3. AArch64 Tagged Address ABI Exceptions +----------------------------------------- + +The following system call parameters must be untagged regardless of the +ABI relaxation: + +- ``prctl()`` other than pointers to user data either passed directly or + indirectly as arguments to be accessed by the kernel. + +- ``ioctl()`` other than pointers to user data either passed directly or + indirectly as arguments to be accessed by the kernel. + +- ``shmat()`` and ``shmdt()``. + +Any attempt to use non-zero tagged pointers may result in an error code +being returned, a (fatal) signal being raised, or other modes of +failure. + +4. Example of correct usage +--------------------------- +.. code-block:: c + + #include + #include + #include + #include + #include + + #define PR_SET_TAGGED_ADDR_CTRL 55 + #define PR_TAGGED_ADDR_ENABLE (1UL << 0) + + #define TAG_SHIFT 56 + + int main(void) + { + int tbi_enabled = 0; + unsigned long tag = 0; + char *ptr; + + /* check/enable the tagged address ABI */ + if (!prctl(PR_SET_TAGGED_ADDR_CTRL, PR_TAGGED_ADDR_ENABLE, 0, 0, 0)) + tbi_enabled = 1; + + /* memory allocation */ + ptr = mmap(NULL, sysconf(_SC_PAGE_SIZE), PROT_READ | PROT_WRITE, + MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); + if (ptr == MAP_FAILED) + return 1; + + /* set a non-zero tag if the ABI is available */ + if (tbi_enabled) + tag = rand() & 0xff; + ptr = (char *)((unsigned long)ptr | (tag << TAG_SHIFT)); + + /* memory access to a tagged address */ + strcpy(ptr, "tagged pointer\n"); + + /* syscall with a tagged pointer */ + write(1, ptr, strlen(ptr)); + + return 0; + } -- cgit v1.2.3-58-ga151 From 1243cb6a676ffcfa72dd25859edddf66cde0b638 Mon Sep 17 00:00:00 2001 From: Vincenzo Frascino Date: Thu, 22 Aug 2019 15:17:43 +0100 Subject: arm64: Add tagged-address-abi.rst to index.rst Documentation/arm64/tagged-address-abi.rst introduces the relaxation of the syscall ABI that allows userspace to pass certain tagged pointers to kernel syscalls. Add the document to index.rst for a correct generation of the table of content. Cc: Will Deacon Cc: Catalin Marinas Signed-off-by: Vincenzo Frascino Signed-off-by: Will Deacon --- Documentation/arm64/index.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/Documentation/arm64/index.rst b/Documentation/arm64/index.rst index 96b696ba4e6c..5c0c69dc58aa 100644 --- a/Documentation/arm64/index.rst +++ b/Documentation/arm64/index.rst @@ -16,6 +16,7 @@ ARM64 Architecture pointer-authentication silicon-errata sve + tagged-address-abi tagged-pointers .. only:: subproject and html -- cgit v1.2.3-58-ga151 From e112b032a72c78f15d0c803c5dc6be444c2e6c66 Mon Sep 17 00:00:00 2001 From: Hsin-Yi Wang Date: Fri, 23 Aug 2019 14:24:50 +0800 Subject: arm64: map FDT as RW for early_init_dt_scan() Currently in arm64, FDT is mapped to RO before it's passed to early_init_dt_scan(). However, there might be some codes (eg. 
commit "fdt: add support for rng-seed") that need to modify FDT during init. Map FDT to RO after early fixups are done. Signed-off-by: Hsin-Yi Wang Reviewed-by: Stephen Boyd Reviewed-by: Mike Rapoport Signed-off-by: Will Deacon --- arch/arm64/include/asm/mmu.h | 2 +- arch/arm64/kernel/kaslr.c | 5 +---- arch/arm64/kernel/setup.c | 9 ++++++++- arch/arm64/mm/mmu.c | 15 +-------------- 4 files changed, 11 insertions(+), 20 deletions(-) diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h index fd6161336653..f217e3292919 100644 --- a/arch/arm64/include/asm/mmu.h +++ b/arch/arm64/include/asm/mmu.h @@ -126,7 +126,7 @@ extern void init_mem_pgprot(void); extern void create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys, unsigned long virt, phys_addr_t size, pgprot_t prot, bool page_mappings_only); -extern void *fixmap_remap_fdt(phys_addr_t dt_phys); +extern void *fixmap_remap_fdt(phys_addr_t dt_phys, int *size, pgprot_t prot); extern void mark_linear_text_alias_ro(void); #define INIT_MM_CONTEXT(name) \ diff --git a/arch/arm64/kernel/kaslr.c b/arch/arm64/kernel/kaslr.c index 708051655ad9..d94a3e41cef9 100644 --- a/arch/arm64/kernel/kaslr.c +++ b/arch/arm64/kernel/kaslr.c @@ -62,9 +62,6 @@ out: return default_cmdline; } -extern void *__init __fixmap_remap_fdt(phys_addr_t dt_phys, int *size, - pgprot_t prot); - /* * This routine will be executed with the kernel mapped at its default virtual * address, and if it returns successfully, the kernel will be remapped, and @@ -93,7 +90,7 @@ u64 __init kaslr_early_init(u64 dt_phys) * attempt at mapping the FDT in setup_machine() */ early_fixmap_init(); - fdt = __fixmap_remap_fdt(dt_phys, &size, PAGE_KERNEL); + fdt = fixmap_remap_fdt(dt_phys, &size, PAGE_KERNEL); if (!fdt) return 0; diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c index 9c4bad7d7131..25f5127210f8 100644 --- a/arch/arm64/kernel/setup.c +++ b/arch/arm64/kernel/setup.c @@ -170,9 +170,13 @@ static void __init smp_build_mpidr_hash(void) static void __init setup_machine_fdt(phys_addr_t dt_phys) { - void *dt_virt = fixmap_remap_fdt(dt_phys); + int size; + void *dt_virt = fixmap_remap_fdt(dt_phys, &size, PAGE_KERNEL); const char *name; + if (dt_virt) + memblock_reserve(dt_phys, size); + if (!dt_virt || !early_init_dt_scan(dt_virt)) { pr_crit("\n" "Error: invalid device tree blob at physical address %pa (virtual address 0x%p)\n" @@ -184,6 +188,9 @@ static void __init setup_machine_fdt(phys_addr_t dt_phys) cpu_relax(); } + /* Early fixups are done, map the FDT as read-only now */ + fixmap_remap_fdt(dt_phys, &size, PAGE_KERNEL_RO); + name = of_flat_dt_get_machine_name(); if (!name) return; diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c index 750a69dde39b..54e93583085c 100644 --- a/arch/arm64/mm/mmu.c +++ b/arch/arm64/mm/mmu.c @@ -876,7 +876,7 @@ void __set_fixmap(enum fixed_addresses idx, } } -void *__init __fixmap_remap_fdt(phys_addr_t dt_phys, int *size, pgprot_t prot) +void *__init fixmap_remap_fdt(phys_addr_t dt_phys, int *size, pgprot_t prot) { const u64 dt_virt_base = __fix_to_virt(FIX_FDT); int offset; @@ -929,19 +929,6 @@ void *__init __fixmap_remap_fdt(phys_addr_t dt_phys, int *size, pgprot_t prot) return dt_virt; } -void *__init fixmap_remap_fdt(phys_addr_t dt_phys) -{ - void *dt_virt; - int size; - - dt_virt = __fixmap_remap_fdt(dt_phys, &size, PAGE_KERNEL_RO); - if (!dt_virt) - return NULL; - - memblock_reserve(dt_phys, size); - return dt_virt; -} - int __init arch_ioremap_p4d_supported(void) { return 0; -- cgit v1.2.3-58-ga151 From 
428826f5358c922dc378830a1717b682c0823160 Mon Sep 17 00:00:00 2001 From: Hsin-Yi Wang Date: Fri, 23 Aug 2019 14:24:51 +0800 Subject: fdt: add support for rng-seed Introducing a chosen node, rng-seed, which is an entropy that can be passed to kernel called very early to increase initial device randomness. Bootloader should provide this entropy and the value is read from /chosen/rng-seed in DT. Obtain of_fdt_crc32 for CRC check after early_init_dt_scan_nodes(), since early_init_dt_scan_chosen() would modify fdt to erase rng-seed. Add a new interface add_bootloader_randomness() for rng-seed use case. Depends on whether the seed is trustworthy, rng seed would be passed to add_hwgenerator_randomness(). Otherwise it would be passed to add_device_randomness(). Decision is controlled by kernel config RANDOM_TRUST_BOOTLOADER. Signed-off-by: Hsin-Yi Wang Reviewed-by: Stephen Boyd Reviewed-by: Rob Herring Reviewed-by: Theodore Ts'o # drivers/char/random.c Signed-off-by: Will Deacon --- drivers/char/Kconfig | 9 +++++++++ drivers/char/random.c | 14 ++++++++++++++ drivers/of/fdt.c | 14 ++++++++++++-- include/linux/random.h | 1 + 4 files changed, 36 insertions(+), 2 deletions(-) diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig index 3e866885a405..2794f4b3f62d 100644 --- a/drivers/char/Kconfig +++ b/drivers/char/Kconfig @@ -573,3 +573,12 @@ config RANDOM_TRUST_CPU has not installed a hidden back door to compromise the CPU's random number generation facilities. This can also be configured at boot with "random.trust_cpu=on/off". + +config RANDOM_TRUST_BOOTLOADER + bool "Trust the bootloader to initialize Linux's CRNG" + help + Some bootloaders can provide entropy to increase the kernel's initial + device randomness. Say Y here to assume the entropy provided by the + booloader is trustworthy so it will be added to the kernel's entropy + pool. Otherwise, say N here so it will be regarded as device input that + only mixes the entropy pool. \ No newline at end of file diff --git a/drivers/char/random.c b/drivers/char/random.c index 5d5ea4ce1442..566922df4b7b 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -2445,3 +2445,17 @@ void add_hwgenerator_randomness(const char *buffer, size_t count, credit_entropy_bits(poolp, entropy); } EXPORT_SYMBOL_GPL(add_hwgenerator_randomness); + +/* Handle random seed passed by bootloader. + * If the seed is trustworthy, it would be regarded as hardware RNGs. Otherwise + * it would be regarded as device data. + * The decision is controlled by CONFIG_RANDOM_TRUST_BOOTLOADER. 
+ */ +void add_bootloader_randomness(const void *buf, unsigned int size) +{ + if (IS_ENABLED(CONFIG_RANDOM_TRUST_BOOTLOADER)) + add_hwgenerator_randomness(buf, size, size * 8); + else + add_device_randomness(buf, size); +} +EXPORT_SYMBOL_GPL(add_bootloader_randomness); \ No newline at end of file diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c index 9cdf14b9aaab..7d97ab6d0e31 100644 --- a/drivers/of/fdt.c +++ b/drivers/of/fdt.c @@ -24,6 +24,7 @@ #include #include #include +#include #include /* for COMMAND_LINE_SIZE */ #include @@ -1044,6 +1045,7 @@ int __init early_init_dt_scan_chosen(unsigned long node, const char *uname, { int l; const char *p; + const void *rng_seed; pr_debug("search \"chosen\", depth: %d, uname: %s\n", depth, uname); @@ -1078,6 +1080,14 @@ int __init early_init_dt_scan_chosen(unsigned long node, const char *uname, pr_debug("Command line is: %s\n", (char*)data); + rng_seed = of_get_flat_dt_prop(node, "rng-seed", &l); + if (rng_seed && l > 0) { + add_bootloader_randomness(rng_seed, l); + + /* try to clear seed so it won't be found. */ + fdt_nop_property(initial_boot_params, node, "rng-seed"); + } + /* break now */ return 1; } @@ -1166,8 +1176,6 @@ bool __init early_init_dt_verify(void *params) /* Setup flat device-tree pointer */ initial_boot_params = params; - of_fdt_crc32 = crc32_be(~0, initial_boot_params, - fdt_totalsize(initial_boot_params)); return true; } @@ -1197,6 +1205,8 @@ bool __init early_init_dt_scan(void *params) return false; early_init_dt_scan_nodes(); + of_fdt_crc32 = crc32_be(~0, initial_boot_params, + fdt_totalsize(initial_boot_params)); return true; } diff --git a/include/linux/random.h b/include/linux/random.h index 1f7dced2bba6..f189c927fdea 100644 --- a/include/linux/random.h +++ b/include/linux/random.h @@ -19,6 +19,7 @@ struct random_ready_callback { }; extern void add_device_randomness(const void *, unsigned int); +extern void add_bootloader_randomness(const void *, unsigned int); #if defined(LATENT_ENTROPY_PLUGIN) && !defined(__CHECKER__) static inline void add_latent_entropy(void) -- cgit v1.2.3-58-ga151 From 7f591fa7a62d3a3f585fd4ba5c3e7b05f4b931be Mon Sep 17 00:00:00 2001 From: Hsin-Yi Wang Date: Fri, 23 Aug 2019 14:24:52 +0800 Subject: arm64: kexec_file: add rng-seed support Adding "rng-seed" to dtb. It's fine to add this property if original fdt doesn't contain it. Since original seed will be wiped after read, so use a default size 128 bytes here. 
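In outline, the dtb update added below boils down to the following (a simplified sketch of setup_dtb() using the libfdt API; error-code conversion and the existing kaslr-seed handling are omitted):

  #include <linux/libfdt.h>
  #include <linux/random.h>

  #define RNG_SEED_SIZE	128

  static int add_rng_seed(void *dtb, int chosen_off)
  {
  	u8 seed[RNG_SEED_SIZE];

  	if (!rng_is_initialized())
  		return 0;	/* nothing trustworthy to hand over */

  	get_random_bytes(seed, sizeof(seed));
  	/* Creates or replaces /chosen/rng-seed in the new kernel's dtb. */
  	return fdt_setprop(dtb, chosen_off, "rng-seed", seed, sizeof(seed));
  }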
Signed-off-by: Hsin-Yi Wang Reviewed-by: Stephen Boyd Signed-off-by: Will Deacon --- arch/arm64/kernel/machine_kexec_file.c | 18 +++++++++++++++++- 1 file changed, 17 insertions(+), 1 deletion(-) diff --git a/arch/arm64/kernel/machine_kexec_file.c b/arch/arm64/kernel/machine_kexec_file.c index 58871333737a..81b5baad97aa 100644 --- a/arch/arm64/kernel/machine_kexec_file.c +++ b/arch/arm64/kernel/machine_kexec_file.c @@ -27,6 +27,8 @@ #define FDT_PROP_INITRD_END "linux,initrd-end" #define FDT_PROP_BOOTARGS "bootargs" #define FDT_PROP_KASLR_SEED "kaslr-seed" +#define FDT_PROP_RNG_SEED "rng-seed" +#define RNG_SEED_SIZE 128 const struct kexec_file_ops * const kexec_file_loaders[] = { &kexec_image_ops, @@ -102,6 +104,19 @@ static int setup_dtb(struct kimage *image, FDT_PROP_KASLR_SEED); } + /* add rng-seed */ + if (rng_is_initialized()) { + u8 rng_seed[RNG_SEED_SIZE]; + get_random_bytes(rng_seed, RNG_SEED_SIZE); + ret = fdt_setprop(dtb, off, FDT_PROP_RNG_SEED, rng_seed, + RNG_SEED_SIZE); + if (ret) + goto out; + } else { + pr_notice("RNG is not initialised: omitting \"%s\" property\n", + FDT_PROP_RNG_SEED); + } + out: if (ret) return (ret == -FDT_ERR_NOSPACE) ? -ENOMEM : -EINVAL; @@ -110,7 +125,8 @@ out: } /* - * More space needed so that we can add initrd, bootargs and kaslr-seed. + * More space needed so that we can add initrd, bootargs, kaslr-seed, and + * rng-seed. */ #define DTB_EXTRA_SPACE 0x1000 -- cgit v1.2.3-58-ga151 From dd753d961c4844a39f947be115b3d81e10376ee5 Mon Sep 17 00:00:00 2001 From: Hsin-Yi Wang Date: Tue, 27 Aug 2019 18:33:53 +0800 Subject: fdt: Update CRC check for rng-seed Commit 428826f5358c ("fdt: add support for rng-seed") moves of_fdt_crc32 from early_init_dt_verify() to early_init_dt_scan() since early_init_dt_scan_chosen() may modify fdt to erase rng-seed. However, arm and some other arch won't call early_init_dt_scan(), they call early_init_dt_verify() then early_init_dt_scan_nodes(). Restore of_fdt_crc32 to early_init_dt_verify() then update it in early_init_dt_scan_chosen() if fdt if updated. Fixes: 428826f5358c ("fdt: add support for rng-seed") Reported-by: Geert Uytterhoeven Signed-off-by: Hsin-Yi Wang Tested-by: Geert Uytterhoeven Signed-off-by: Will Deacon --- drivers/of/fdt.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c index 7d97ab6d0e31..223d617ecfe1 100644 --- a/drivers/of/fdt.c +++ b/drivers/of/fdt.c @@ -1086,6 +1086,10 @@ int __init early_init_dt_scan_chosen(unsigned long node, const char *uname, /* try to clear seed so it won't be found. */ fdt_nop_property(initial_boot_params, node, "rng-seed"); + + /* update CRC check value */ + of_fdt_crc32 = crc32_be(~0, initial_boot_params, + fdt_totalsize(initial_boot_params)); } /* break now */ @@ -1176,6 +1180,8 @@ bool __init early_init_dt_verify(void *params) /* Setup flat device-tree pointer */ initial_boot_params = params; + of_fdt_crc32 = crc32_be(~0, initial_boot_params, + fdt_totalsize(initial_boot_params)); return true; } @@ -1205,8 +1211,6 @@ bool __init early_init_dt_scan(void *params) return false; early_init_dt_scan_nodes(); - of_fdt_crc32 = crc32_be(~0, initial_boot_params, - fdt_totalsize(initial_boot_params)); return true; } -- cgit v1.2.3-58-ga151 From 0e1645557d19fc6d88d3c40431f63a3c3a4c417b Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Tue, 27 Aug 2019 14:25:44 +0100 Subject: arm64: smp: Increase secondary CPU boot timeout value When many debug options are enabled simultaneously (e.g. 
PROVE_LOCKING, KMEMLEAK, DEBUG_PAGE_ALLOC, KASAN etc), it is possible for us to timeout when attempting to boot a secondary CPU and give up. Unfortunately, the CPU will /eventually/ appear, and sit in the background happily stuck in a recursive exception due to a NULL stack pointer. Increase the timeout to 5s, which will of course be enough for anybody. Reviewed-by: Mark Rutland Signed-off-by: Will Deacon --- arch/arm64/kernel/smp.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c index 018a33e01b0e..63c7a7682e93 100644 --- a/arch/arm64/kernel/smp.c +++ b/arch/arm64/kernel/smp.c @@ -123,7 +123,7 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle) * time out. */ wait_for_completion_timeout(&cpu_running, - msecs_to_jiffies(1000)); + msecs_to_jiffies(5000)); if (!cpu_online(cpu)) { pr_crit("CPU%u: failed to come online\n", cpu); -- cgit v1.2.3-58-ga151 From 5b1cfe3a0ba74c1f2b83b607712a217b9f9463a2 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Tue, 27 Aug 2019 14:36:38 +0100 Subject: arm64: smp: Don't enter kernel with NULL stack pointer or task struct Although SMP bringup is inherently racy, we can significantly reduce the window during which secondary CPUs can unexpectedly enter the kernel by sanity checking the 'stack' and 'task' fields of the 'secondary_data' structure. If the booting CPU gave up waiting for us, then they will have been cleared to NULL and we should spin in a WFE; WFI loop instead. Reviewed-by: Mark Rutland Signed-off-by: Will Deacon --- arch/arm64/kernel/head.S | 8 ++++++++ arch/arm64/kernel/smp.c | 1 + 2 files changed, 9 insertions(+) diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S index 2cdacd1c141b..0baadf335172 100644 --- a/arch/arm64/kernel/head.S +++ b/arch/arm64/kernel/head.S @@ -724,14 +724,22 @@ __secondary_switched: adr_l x0, secondary_data ldr x1, [x0, #CPU_BOOT_STACK] // get secondary_data.stack + cbz x1, __secondary_too_slow mov sp, x1 ldr x2, [x0, #CPU_BOOT_TASK] + cbz x2, __secondary_too_slow msr sp_el0, x2 mov x29, #0 mov x30, #0 b secondary_start_kernel ENDPROC(__secondary_switched) +__secondary_too_slow: + wfe + wfi + b __secondary_too_slow +ENDPROC(__secondary_too_slow) + /* * The booting CPU updates the failed status @__early_cpu_boot_status, * with MMU turned off. diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c index 63c7a7682e93..1f8aeb77cba5 100644 --- a/arch/arm64/kernel/smp.c +++ b/arch/arm64/kernel/smp.c @@ -136,6 +136,7 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle) secondary_data.task = NULL; secondary_data.stack = NULL; + __flush_dcache_area(&secondary_data, sizeof(secondary_data)); status = READ_ONCE(secondary_data.status); if (ret && status) { -- cgit v1.2.3-58-ga151 From ebef746543fd1aa162216b0e484eb9062b65741d Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Tue, 27 Aug 2019 15:54:56 +0100 Subject: arm64: smp: Treat unknown boot failures as being 'stuck in kernel' When we fail to bring a secondary CPU online and it fails in an unknown state, we should assume the worst and increment 'cpus_stuck_in_kernel' so that things like kexec() are disabled. 
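Taken together, the bring-up handshake these three patches harden looks roughly like this (a loose sketch of the __cpu_up() flow, not the exact smp.c code):

  static int bring_up_secondary(unsigned int cpu, struct task_struct *idle)
  {
  	/* Hand the new CPU its idle task and stack via secondary_data. */
  	secondary_data.task = idle;
  	secondary_data.stack = task_stack_page(idle) + THREAD_SIZE;
  	__flush_dcache_area(&secondary_data, sizeof(secondary_data));

  	boot_secondary(cpu, idle);

  	/* 5s instead of 1s: heavy debug options make bring-up very slow. */
  	wait_for_completion_timeout(&cpu_running, msecs_to_jiffies(5000));

  	if (!cpu_online(cpu)) {
  		pr_crit("CPU%u: failed to come online\n", cpu);
  		/*
  		 * Clear the handoff data (and push it to the PoC) so that a
  		 * late CPU spins in WFE/WFI rather than entering the kernel
  		 * with a NULL stack; unknown failures also bump
  		 * cpus_stuck_in_kernel so kexec gets disabled.
  		 */
  		secondary_data.task = NULL;
  		secondary_data.stack = NULL;
  		__flush_dcache_area(&secondary_data, sizeof(secondary_data));
  		return -EIO;
  	}
  	return 0;
  }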
Reviewed-by: Mark Rutland Signed-off-by: Will Deacon --- arch/arm64/kernel/smp.c | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c index 1f8aeb77cba5..dc9fe879c279 100644 --- a/arch/arm64/kernel/smp.c +++ b/arch/arm64/kernel/smp.c @@ -147,6 +147,7 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle) default: pr_err("CPU%u: failed in unknown state : 0x%lx\n", cpu, status); + cpus_stuck_in_kernel++; break; case CPU_KILL_ME: if (!op_cpu_kill(cpu)) { -- cgit v1.2.3-58-ga151 From d0b7a302d58abe24ed0f32a0672dd4c356bb73db Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Thu, 22 Aug 2019 14:58:37 +0100 Subject: Revert "arm64: Remove unnecessary ISBs from set_{pte,pmd,pud}" This reverts commit 24fe1b0efad4fcdd32ce46cffeab297f22581707. Commit 24fe1b0efad4fcdd ("arm64: Remove unnecessary ISBs from set_{pte,pmd,pud}") removed ISB instructions immediately following updates to the page table, on the grounds that they are not required by the architecture and a DSB alone is sufficient to ensure that subsequent data accesses use the new translation: DDI0487E_a, B2-128: | ... no instruction that appears in program order after the DSB | instruction can alter any state of the system or perform any part of | its functionality until the DSB completes other than: | | * Being fetched from memory and decoded | * Reading the general-purpose, SIMD and floating-point, | Special-purpose, or System registers that are directly or indirectly | read without causing side-effects. However, the same document also states the following: DDI0487E_a, B2-125: | DMB and DSB instructions affect reads and writes to the memory system | generated by Load/Store instructions and data or unified cache | maintenance instructions being executed by the PE. Instruction fetches | or accesses caused by a hardware translation table access are not | explicit accesses. which appears to claim that the DSB alone is insufficient. Unfortunately, some CPU designers have followed the second clause above, whereas in Linux we've been relying on the first. This means that our mapping sequence: MOV X0, STR X0, [Xptep] // Store new PTE to page table DSB ISHST LDR X1, [X2] // Translates using the new PTE can actually raise a translation fault on the load instruction because the translation can be performed speculatively before the page table update and then marked as "faulting" by the CPU. For user PTEs, this is ok because we can handle the spurious fault, but for kernel PTEs and intermediate table entries this results in a panic(). Revert the offending commit to reintroduce the missing barriers. Cc: Fixes: 24fe1b0efad4fcdd ("arm64: Remove unnecessary ISBs from set_{pte,pmd,pud}") Reviewed-by: Mark Rutland Signed-off-by: Will Deacon --- arch/arm64/include/asm/pgtable.h | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h index 5fdcfe237338..feda7294320c 100644 --- a/arch/arm64/include/asm/pgtable.h +++ b/arch/arm64/include/asm/pgtable.h @@ -220,8 +220,10 @@ static inline void set_pte(pte_t *ptep, pte_t pte) * Only if the new pte is valid and kernel, otherwise TLB maintenance * or update_mmu_cache() have the necessary barriers. 
*/ - if (pte_valid_not_user(pte)) + if (pte_valid_not_user(pte)) { dsb(ishst); + isb(); + } } extern void __sync_icache_dcache(pte_t pteval); @@ -481,8 +483,10 @@ static inline void set_pmd(pmd_t *pmdp, pmd_t pmd) WRITE_ONCE(*pmdp, pmd); - if (pmd_valid(pmd)) + if (pmd_valid(pmd)) { dsb(ishst); + isb(); + } } static inline void pmd_clear(pmd_t *pmdp) @@ -540,8 +544,10 @@ static inline void set_pud(pud_t *pudp, pud_t pud) WRITE_ONCE(*pudp, pud); - if (pud_valid(pud)) + if (pud_valid(pud)) { dsb(ishst); + isb(); + } } static inline void pud_clear(pud_t *pudp) -- cgit v1.2.3-58-ga151 From 51696d346c49c6cf4f29e9b20d6e15832a2e3408 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Thu, 22 Aug 2019 15:03:45 +0100 Subject: arm64: tlb: Ensure we execute an ISB following walk cache invalidation 05f2d2f83b5a ("arm64: tlbflush: Introduce __flush_tlb_kernel_pgtable") added a new TLB invalidation helper which is used when freeing intermediate levels of page table used for kernel mappings, but is missing the required ISB instruction after completion of the TLBI instruction. Add the missing barrier. Cc: Fixes: 05f2d2f83b5a ("arm64: tlbflush: Introduce __flush_tlb_kernel_pgtable") Reviewed-by: Mark Rutland Signed-off-by: Will Deacon --- arch/arm64/include/asm/tlbflush.h | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/arm64/include/asm/tlbflush.h b/arch/arm64/include/asm/tlbflush.h index 8af7a85f76bd..bc3949064725 100644 --- a/arch/arm64/include/asm/tlbflush.h +++ b/arch/arm64/include/asm/tlbflush.h @@ -251,6 +251,7 @@ static inline void __flush_tlb_kernel_pgtable(unsigned long kaddr) dsb(ishst); __tlbi(vaae1is, addr); dsb(ish); + isb(); } #endif -- cgit v1.2.3-58-ga151 From eb6a4dcce33925ac95023bbe5199474f8db40ba7 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Fri, 23 Aug 2019 13:03:55 +0100 Subject: arm64: mm: Add ISB instruction to set_pgd() Commit 6a4cbd63c25a ("Revert "arm64: Remove unnecessary ISBs from set_{pte,pmd,pud}"") reintroduced ISB instructions to some of our page table setter functions in light of a recent clarification to the Armv8 architecture. Although 'set_pgd()' isn't currently used to update a live page table, add the ISB instruction there too for consistency with the other macros and to provide some future-proofing if we use it on live tables in the future. Reported-by: Mark Rutland Reviewed-by: Mark Rutland Signed-off-by: Will Deacon --- arch/arm64/include/asm/pgtable.h | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h index feda7294320c..2faa77635942 100644 --- a/arch/arm64/include/asm/pgtable.h +++ b/arch/arm64/include/asm/pgtable.h @@ -605,6 +605,7 @@ static inline void set_pgd(pgd_t *pgdp, pgd_t pgd) WRITE_ONCE(*pgdp, pgd); dsb(ishst); + isb(); } static inline void pgd_clear(pgd_t *pgdp) -- cgit v1.2.3-58-ga151 From e8620cff99946ea1f7891d7bec071a23a1fdaef3 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Thu, 22 Aug 2019 17:19:17 +0100 Subject: arm64: sysreg: Add some field definitions for PAR_EL1 PAR_EL1 is a mysterious creature, but sometimes it's necessary to read it when translating addresses in situations where we cannot walk the page table directly. Add a couple of system register definitions for the fault indication field ('F') and the fault status code ('FST'). 
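These fields are typically consumed after issuing an address translation (AT) instruction, as the following patch does. A hedged sketch (the caller is expected to have IRQs masked so a concurrent AT cannot clobber PAR_EL1; FIELD_GET comes from <linux/bitfield.h>):

  #include <linux/bitfield.h>

  /* Translate 'addr' as a stage-1 EL1 read and report whether it faulted. */
  static bool at_s1e1r_faulted(unsigned long addr)
  {
  	u64 par;

  	asm volatile("at s1e1r, %0" :: "r" (addr));
  	isb();
  	par = read_sysreg(par_el1);

  	if (!(par & SYS_PAR_EL1_F))
  		return false;		/* translation succeeded */

  	/* With F set, FST holds the fault status code for the failure. */
  	pr_debug("AT s1e1r failed, FST=%lu\n",
  		 (unsigned long)FIELD_GET(SYS_PAR_EL1_FST, par));
  	return true;
  }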
Reviewed-by: Mark Rutland Signed-off-by: Will Deacon --- arch/arm64/include/asm/sysreg.h | 3 +++ 1 file changed, 3 insertions(+) diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h index 06ebcfef73df..2b229c23f3c1 100644 --- a/arch/arm64/include/asm/sysreg.h +++ b/arch/arm64/include/asm/sysreg.h @@ -212,6 +212,9 @@ #define SYS_FAR_EL1 sys_reg(3, 0, 6, 0, 0) #define SYS_PAR_EL1 sys_reg(3, 0, 7, 4, 0) +#define SYS_PAR_EL1_F BIT(1) +#define SYS_PAR_EL1_FST GENMASK(6, 1) + /*** Statistical Profiling Extension ***/ /* ID registers */ #define SYS_PMSIDR_EL1 sys_reg(3, 0, 9, 9, 7) -- cgit v1.2.3-58-ga151 From 42f91093b043332ad75cea7aeafecda6fe81814c Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Thu, 22 Aug 2019 17:22:14 +0100 Subject: arm64: mm: Ignore spurious translation faults taken from the kernel Thanks to address translation being performed out of order with respect to loads and stores, it is possible for a CPU to take a translation fault when accessing a page that was mapped by a different CPU. For example, in the case that one CPU maps a page and then sets a flag to tell another CPU: CPU 0 ----- MOV X0, STR X0, [Xptep] // Store new PTE to page table DSB ISHST ISB MOV X1, #1 STR X1, [Xflag] // Set the flag CPU 1 ----- loop: LDAR X0, [Xflag] // Poll flag with Acquire semantics CBZ X0, loop LDR X1, [X2] // Translates using the new PTE then the final load on CPU 1 can raise a translation fault because the translation can be performed speculatively before the read of the flag and marked as "faulting" by the CPU. This isn't quite as bad as it sounds since, in reality, code such as: CPU 0 CPU 1 ----- ----- spin_lock(&lock); spin_lock(&lock); *ptr = vmalloc(size); if (*ptr) spin_unlock(&lock); foo = **ptr; spin_unlock(&lock); will not trigger the fault because there is an address dependency on CPU 1 which prevents the speculative translation. However, more exotic code where the virtual address is known ahead of time, such as: CPU 0 CPU 1 ----- ----- spin_lock(&lock); spin_lock(&lock); set_fixmap(0, paddr, prot); if (mapped) mapped = true; foo = *fix_to_virt(0); spin_unlock(&lock); spin_unlock(&lock); could fault. This can be avoided by any of: * Introducing broadcast TLB maintenance on the map path * Adding a DSB;ISB sequence after checking a flag which indicates that a virtual address is now mapped * Handling the spurious fault Given that we have never observed a problem due to this under Linux and future revisions of the architecture are being tightened so that translation table walks are effectively ordered in the same way as explicit memory accesses, we no longer treat spurious kernel faults as fatal if an AT instruction indicates that the access does not trigger a translation fault. 
Reviewed-by: Mark Rutland Signed-off-by: Will Deacon --- arch/arm64/mm/fault.c | 33 +++++++++++++++++++++++++++++++++ 1 file changed, 33 insertions(+) diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c index cfd65b63f36f..9808da29a653 100644 --- a/arch/arm64/mm/fault.c +++ b/arch/arm64/mm/fault.c @@ -8,6 +8,7 @@ */ #include +#include #include #include #include @@ -242,6 +243,34 @@ static inline bool is_el1_permission_fault(unsigned long addr, unsigned int esr, return false; } +static bool __kprobes is_spurious_el1_translation_fault(unsigned long addr, + unsigned int esr, + struct pt_regs *regs) +{ + unsigned long flags; + u64 par, dfsc; + + if (ESR_ELx_EC(esr) != ESR_ELx_EC_DABT_CUR || + (esr & ESR_ELx_FSC_TYPE) != ESR_ELx_FSC_FAULT) + return false; + + local_irq_save(flags); + asm volatile("at s1e1r, %0" :: "r" (addr)); + isb(); + par = read_sysreg(par_el1); + local_irq_restore(flags); + + if (!(par & SYS_PAR_EL1_F)) + return false; + + /* + * If we got a different type of fault from the AT instruction, + * treat the translation fault as spurious. + */ + dfsc = FIELD_PREP(SYS_PAR_EL1_FST, par); + return (dfsc & ESR_ELx_FSC_TYPE) != ESR_ELx_FSC_FAULT; +} + static void die_kernel_fault(const char *msg, unsigned long addr, unsigned int esr, struct pt_regs *regs) { @@ -270,6 +299,10 @@ static void __do_kernel_fault(unsigned long addr, unsigned int esr, if (!is_el1_instruction_abort(esr) && fixup_exception(regs)) return; + if (WARN_RATELIMIT(is_spurious_el1_translation_fault(addr, esr, regs), + "Ignoring spurious kernel translation fault at virtual address %016lx\n", addr)) + return; + if (is_el1_permission_fault(addr, esr, regs)) { if (esr & ESR_ELx_WNR) msg = "write to read-only memory"; -- cgit v1.2.3-58-ga151 From 5c062ef4155b60018c547552ca48823297d00998 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Thu, 22 Aug 2019 17:21:21 +0100 Subject: arm64: kvm: Replace hardcoded '1' with SYS_PAR_EL1_F Now that we have a definition for the 'F' field of PAR_EL1, use that instead of coding the immediate directly. Acked-by: Marc Zyngier Reviewed-by: Mark Rutland Signed-off-by: Will Deacon --- arch/arm64/kvm/hyp/switch.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c index adaf266d8de8..bd978ad71936 100644 --- a/arch/arm64/kvm/hyp/switch.c +++ b/arch/arm64/kvm/hyp/switch.c @@ -264,7 +264,7 @@ static bool __hyp_text __translate_far_to_hpfar(u64 far, u64 *hpfar) tmp = read_sysreg(par_el1); write_sysreg(par, par_el1); - if (unlikely(tmp & 1)) + if (unlikely(tmp & SYS_PAR_EL1_F)) return false; /* Translation failed, back to guest */ /* Convert PAR to HPFAR format */ -- cgit v1.2.3-58-ga151 From 92af2b696119e491a95d77acdd8832b582d300d4 Mon Sep 17 00:00:00 2001 From: Vincenzo Frascino Date: Fri, 23 Aug 2019 17:37:17 +0100 Subject: arm64: Relax Documentation/arm64/tagged-pointers.rst On AArch64 the TCR_EL1.TBI0 bit is set by default, allowing userspace (EL0) to perform memory accesses through 64-bit pointers with a non-zero top byte. However, such pointers were not allowed at the user-kernel syscall ABI boundary. With the Tagged Address ABI patchset, it is now possible to pass tagged pointers to the syscalls. Relax the requirements described in tagged-pointers.rst to be compliant with the behaviours guaranteed by the AArch64 Tagged Address ABI. 
Cc: Will Deacon Cc: Szabolcs Nagy Cc: Kevin Brodsky Acked-by: Andrey Konovalov Signed-off-by: Vincenzo Frascino Co-developed-by: Catalin Marinas Signed-off-by: Catalin Marinas Signed-off-by: Will Deacon --- Documentation/arm64/tagged-pointers.rst | 21 ++++++++++++++------- 1 file changed, 14 insertions(+), 7 deletions(-) diff --git a/Documentation/arm64/tagged-pointers.rst b/Documentation/arm64/tagged-pointers.rst index 2acdec3ebbeb..eab4323609b9 100644 --- a/Documentation/arm64/tagged-pointers.rst +++ b/Documentation/arm64/tagged-pointers.rst @@ -20,7 +20,9 @@ Passing tagged addresses to the kernel -------------------------------------- All interpretation of userspace memory addresses by the kernel assumes -an address tag of 0x00. +an address tag of 0x00, unless the application enables the AArch64 +Tagged Address ABI explicitly +(Documentation/arm64/tagged-address-abi.rst). This includes, but is not limited to, addresses found in: @@ -33,13 +35,15 @@ This includes, but is not limited to, addresses found in: - the frame pointer (x29) and frame records, e.g. when interpreting them to generate a backtrace or call graph. -Using non-zero address tags in any of these locations may result in an -error code being returned, a (fatal) signal being raised, or other modes -of failure. +Using non-zero address tags in any of these locations when the +userspace application did not enable the AArch64 Tagged Address ABI may +result in an error code being returned, a (fatal) signal being raised, +or other modes of failure. -For these reasons, passing non-zero address tags to the kernel via -system calls is forbidden, and using a non-zero address tag for sp is -strongly discouraged. +For these reasons, when the AArch64 Tagged Address ABI is disabled, +passing non-zero address tags to the kernel via system calls is +forbidden, and using a non-zero address tag for sp is strongly +discouraged. Programs maintaining a frame pointer and frame records that use non-zero address tags may suffer impaired or inaccurate debug and profiling @@ -59,6 +63,9 @@ be preserved. The architecture prevents the use of a tagged PC, so the upper byte will be set to a sign-extension of bit 55 on exception return. +This behaviour is maintained when the AArch64 Tagged Address ABI is +enabled. + Other considerations -------------------- -- cgit v1.2.3-58-ga151 From 33e84ea4330da8a16bda8a871d0cd3c872bcd89f Mon Sep 17 00:00:00 2001 From: Robin Murphy Date: Thu, 1 Aug 2019 16:22:44 +0100 Subject: perf/smmuv3: Validate group size Ensure that a group will actually fit into the available counters. 
Signed-off-by: Robin Murphy Signed-off-by: Will Deacon --- drivers/perf/arm_smmuv3_pmu.c | 22 ++++++++++++++-------- 1 file changed, 14 insertions(+), 8 deletions(-) diff --git a/drivers/perf/arm_smmuv3_pmu.c b/drivers/perf/arm_smmuv3_pmu.c index da71c741cb46..c65c197b52a7 100644 --- a/drivers/perf/arm_smmuv3_pmu.c +++ b/drivers/perf/arm_smmuv3_pmu.c @@ -323,6 +323,7 @@ static int smmu_pmu_event_init(struct perf_event *event) struct smmu_pmu *smmu_pmu = to_smmu_pmu(event->pmu); struct device *dev = smmu_pmu->dev; struct perf_event *sibling; + int group_num_events = 1; u16 event_id; if (event->attr.type != event->pmu->type) @@ -347,18 +348,23 @@ static int smmu_pmu_event_init(struct perf_event *event) } /* Don't allow groups with mixed PMUs, except for s/w events */ - if (event->group_leader->pmu != event->pmu && - !is_software_event(event->group_leader)) { - dev_dbg(dev, "Can't create mixed PMU group\n"); - return -EINVAL; + if (!is_software_event(event->group_leader)) { + if (event->group_leader->pmu != event->pmu) + return -EINVAL; + + if (++group_num_events > smmu_pmu->num_counters) + return -EINVAL; } for_each_sibling_event(sibling, event->group_leader) { - if (sibling->pmu != event->pmu && - !is_software_event(sibling)) { - dev_dbg(dev, "Can't create mixed PMU group\n"); + if (is_software_event(sibling)) + continue; + + if (sibling->pmu != event->pmu) + return -EINVAL; + + if (++group_num_events > smmu_pmu->num_counters) return -EINVAL; - } } hwc->idx = -1; -- cgit v1.2.3-58-ga151 From 3c9347351a6ea1234aa647b36f89052de050d2a2 Mon Sep 17 00:00:00 2001 From: Robin Murphy Date: Thu, 1 Aug 2019 16:22:45 +0100 Subject: perf/smmuv3: Validate groups for global filtering With global filtering, it becomes possible for users to construct self-contradictory groups with conflicting filters. Make sure we cover that when initially validating events. 
Signed-off-by: Robin Murphy Signed-off-by: Will Deacon --- drivers/perf/arm_smmuv3_pmu.c | 47 +++++++++++++++++++++++++++++++------------ 1 file changed, 34 insertions(+), 13 deletions(-) diff --git a/drivers/perf/arm_smmuv3_pmu.c b/drivers/perf/arm_smmuv3_pmu.c index c65c197b52a7..abcf54f7d19c 100644 --- a/drivers/perf/arm_smmuv3_pmu.c +++ b/drivers/perf/arm_smmuv3_pmu.c @@ -113,8 +113,6 @@ struct smmu_pmu { u64 counter_mask; u32 options; bool global_filter; - u32 global_filter_span; - u32 global_filter_sid; }; #define to_smmu_pmu(p) (container_of(p, struct smmu_pmu, pmu)) @@ -260,6 +258,19 @@ static void smmu_pmu_set_event_filter(struct perf_event *event, smmu_pmu_set_smr(smmu_pmu, idx, sid); } +static bool smmu_pmu_check_global_filter(struct perf_event *curr, + struct perf_event *new) +{ + if (get_filter_enable(new) != get_filter_enable(curr)) + return false; + + if (!get_filter_enable(new)) + return true; + + return get_filter_span(new) == get_filter_span(curr) && + get_filter_stream_id(new) == get_filter_stream_id(curr); +} + static int smmu_pmu_apply_event_filter(struct smmu_pmu *smmu_pmu, struct perf_event *event, int idx) { @@ -279,17 +290,14 @@ static int smmu_pmu_apply_event_filter(struct smmu_pmu *smmu_pmu, } /* Requested settings same as current global settings*/ - if (span == smmu_pmu->global_filter_span && - sid == smmu_pmu->global_filter_sid) + idx = find_first_bit(smmu_pmu->used_counters, num_ctrs); + if (idx == num_ctrs || + smmu_pmu_check_global_filter(smmu_pmu->events[idx], event)) { + smmu_pmu_set_event_filter(event, 0, span, sid); return 0; + } - if (!bitmap_empty(smmu_pmu->used_counters, num_ctrs)) - return -EAGAIN; - - smmu_pmu_set_event_filter(event, 0, span, sid); - smmu_pmu->global_filter_span = span; - smmu_pmu->global_filter_sid = sid; - return 0; + return -EAGAIN; } static int smmu_pmu_get_event_idx(struct smmu_pmu *smmu_pmu, @@ -312,6 +320,19 @@ static int smmu_pmu_get_event_idx(struct smmu_pmu *smmu_pmu, return idx; } +static bool smmu_pmu_events_compatible(struct perf_event *curr, + struct perf_event *new) +{ + if (new->pmu != curr->pmu) + return false; + + if (to_smmu_pmu(new->pmu)->global_filter && + !smmu_pmu_check_global_filter(curr, new)) + return false; + + return true; +} + /* * Implementation of abstract pmu functionality required by * the core perf events code. @@ -349,7 +370,7 @@ static int smmu_pmu_event_init(struct perf_event *event) /* Don't allow groups with mixed PMUs, except for s/w events */ if (!is_software_event(event->group_leader)) { - if (event->group_leader->pmu != event->pmu) + if (!smmu_pmu_events_compatible(event->group_leader, event)) return -EINVAL; if (++group_num_events > smmu_pmu->num_counters) @@ -360,7 +381,7 @@ static int smmu_pmu_event_init(struct perf_event *event) if (is_software_event(sibling)) continue; - if (sibling->pmu != event->pmu) + if (!smmu_pmu_events_compatible(sibling, event)) return -EINVAL; if (++group_num_events > smmu_pmu->num_counters) -- cgit v1.2.3-58-ga151 From b333b0ba2346f2071390533b8f37f36f191d705d Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Tue, 27 Aug 2019 16:57:08 +0100 Subject: arm64: fix fixmap copy for 16K pages and 48-bit VA With 16K pages and 48-bit VAs, the PGD level of table has two entries, and so the fixmap shares a PGD with the kernel image. Since commit: f9040773b7bbbd9e ("arm64: move kernel image to base of vmalloc area") ... we copy the existing fixmap to the new fine-grained page tables at the PUD level in this case. 
When walking to the new PUD, we forgot to offset the PGD entry and always used the PGD entry at index 0, but this worked as the kernel image and fixmap were in the low half of the TTBR1 address space. As of commit: 14c127c957c1c607 ("arm64: mm: Flip kernel VA space") ... the kernel image and fixmap are in the high half of the TTBR1 address space, and hence use the PGD at index 1, but we didn't update the fixmap copying code to account for this. Thus, we'll erroneously try to copy the fixmap slots into a PUD under the PGD entry at index 0. At the point we do so this PGD entry has not been initialised, and thus we'll try to write a value to a small offset from physical address 0, causing a number of potential problems. Fix this be correctly offsetting the PGD. This is split over a few steps for legibility. Fixes: 14c127c957c1c607 ("arm64: mm: Flip kernel VA space") Reported-by: Anshuman Khandual Cc: Ard Biesheuvel Cc: Catalin Marinas Signed-off-by: Mark Rutland Acked-by: Marc Zyngier Tested-by: Marc Zyngier Acked-by: Steve Capper Tested-by: Steve Capper Tested-by: Anshuman Khandual Signed-off-by: Will Deacon --- arch/arm64/mm/mmu.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c index 8e4b7eaff8ce..3ed44008230e 100644 --- a/arch/arm64/mm/mmu.c +++ b/arch/arm64/mm/mmu.c @@ -647,6 +647,8 @@ static void __init map_kernel(pgd_t *pgdp) set_pgd(pgd_offset_raw(pgdp, FIXADDR_START), READ_ONCE(*pgd_offset_k(FIXADDR_START))); } else if (CONFIG_PGTABLE_LEVELS > 3) { + pgd_t *bm_pgdp; + pud_t *bm_pudp; /* * The fixmap shares its top level pgd entry with the kernel * mapping. This can really only occur when we are running @@ -654,9 +656,9 @@ static void __init map_kernel(pgd_t *pgdp) * entry instead. */ BUG_ON(!IS_ENABLED(CONFIG_ARM64_16K_PAGES)); - pud_populate(&init_mm, - pud_set_fixmap_offset(pgdp, FIXADDR_START), - lm_alias(bm_pmd)); + bm_pgdp = pgd_offset_raw(pgdp, FIXADDR_START); + bm_pudp = pud_set_fixmap_offset(bm_pgdp, FIXADDR_START); + pud_populate(&init_mm, bm_pudp, lm_alias(bm_pmd)); pud_clear_fixmap(); } else { BUG(); -- cgit v1.2.3-58-ga151 From f32c7a8e45105bd0af76872bf6eef0438ff12fb2 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Tue, 27 Aug 2019 18:12:57 +0100 Subject: arm64: kpti: ensure patched kernel text is fetched from PoU While the MMUs is disabled, I-cache speculation can result in instructions being fetched from the PoC. During boot we may patch instructions (e.g. for alternatives and jump labels), and these may be dirty at the PoU (and stale at the PoC). Thus, while the MMU is disabled in the KPTI pagetable fixup code we may load stale instructions into the I-cache, potentially leading to subsequent crashes when executing regions of code which have been modified at runtime. Similarly to commit: 8ec41987436d566f ("arm64: mm: ensure patched kernel text is fetched from PoU") ... we can invalidate the I-cache after enabling the MMU to prevent such issues. The KPTI pagetable fixup code itself should be clean to the PoC per the boot protocol, so no maintenance is required for this code. 
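Expressed in C, the maintenance added at the end of the assembly fixup below is equivalent to the following (a sketch; the patch itself does this directly in proc.S right after restoring SCTLR_EL1):

  /*
   * Discard anything the I-cache speculatively fetched from the PoC while
   * the MMU was off; patched kernel text is only guaranteed clean to the PoU.
   */
  static inline void invalidate_local_icache(void)
  {
  	asm volatile(
  	"	ic	iallu\n"	/* invalidate local I-cache to PoU */
  	"	dsb	nsh\n"		/* wait for the invalidation to complete */
  	"	isb\n"			/* resynchronise the instruction stream */
  	::: "memory");
  }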
Signed-off-by: Mark Rutland Cc: Catalin Marinas Reviewed-by: James Morse Signed-off-by: Will Deacon --- arch/arm64/mm/proc.S | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S index 7dbf2be470f6..28a8f7b87ff0 100644 --- a/arch/arm64/mm/proc.S +++ b/arch/arm64/mm/proc.S @@ -286,6 +286,15 @@ skip_pgd: msr sctlr_el1, x18 isb + /* + * Invalidate the local I-cache so that any instructions fetched + * speculatively from the PoC are discarded, since they may have + * been dynamically patched at the PoU. + */ + ic iallu + dsb nsh + isb + /* Set the flag to zero to indicate that we're all done */ str wzr, [flag_ptr] ret -- cgit v1.2.3-58-ga151 From c12c0288e35a4693b1606e229dab54a62f1ad568 Mon Sep 17 00:00:00 2001 From: Joakim Zhang Date: Wed, 28 Aug 2019 12:07:52 +0000 Subject: perf/imx_ddr: Add support for AXI ID filtering AXI filtering is used by events 0x41 and 0x42 to count reads or writes with an ARID or AWID matching a specified filter. The filter is exposed to userspace as an (ID, MASK) pair, where each set bit in the mask causes the corresponding bit in the ID to be ignored when matching against the ID of memory transactions for the purposes of incrementing the counter. For example: # perf stat -a -e imx8_ddr0/axid-read,axi_mask=0xff,axi_id=0x800/ cmd will count all read transactions from AXI IDs 0x800 - 0x8ff. If the 'axi_mask' is omitted, then it is treated as 0x0 which means that the 'axi_id' will be matched exactly. Signed-off-by: Joakim Zhang Signed-off-by: Will Deacon --- drivers/perf/fsl_imx8_ddr_perf.c | 74 ++++++++++++++++++++++++++++++++++++++-- 1 file changed, 72 insertions(+), 2 deletions(-) diff --git a/drivers/perf/fsl_imx8_ddr_perf.c b/drivers/perf/fsl_imx8_ddr_perf.c index 0e3310dbb145..ce7345745b42 100644 --- a/drivers/perf/fsl_imx8_ddr_perf.c +++ b/drivers/perf/fsl_imx8_ddr_perf.c @@ -35,6 +35,8 @@ #define EVENT_CYCLES_COUNTER 0 #define NUM_COUNTERS 4 +#define AXI_MASKING_REVERT 0xffff0000 /* AXI_MASKING(MSB 16bits) + AXI_ID(LSB 16bits) */ + #define to_ddr_pmu(p) container_of(p, struct ddr_pmu, pmu) #define DDR_PERF_DEV_NAME "imx8_ddr" @@ -42,9 +44,22 @@ static DEFINE_IDA(ddr_ida); +/* DDR Perf hardware feature */ +#define DDR_CAP_AXI_ID_FILTER 0x1 /* support AXI ID filter */ + +struct fsl_ddr_devtype_data { + unsigned int quirks; /* quirks needed for different DDR Perf core */ +}; + +static const struct fsl_ddr_devtype_data imx8_devtype_data; + +static const struct fsl_ddr_devtype_data imx8m_devtype_data = { + .quirks = DDR_CAP_AXI_ID_FILTER, +}; + static const struct of_device_id imx_ddr_pmu_dt_ids[] = { - { .compatible = "fsl,imx8-ddr-pmu",}, - { .compatible = "fsl,imx8m-ddr-pmu",}, + { .compatible = "fsl,imx8-ddr-pmu", .data = &imx8_devtype_data}, + { .compatible = "fsl,imx8m-ddr-pmu", .data = &imx8m_devtype_data}, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, imx_ddr_pmu_dt_ids); @@ -58,6 +73,7 @@ struct ddr_pmu { struct perf_event *events[NUM_COUNTERS]; int active_events; enum cpuhp_state cpuhp_state; + const struct fsl_ddr_devtype_data *devtype_data; int irq; int id; }; @@ -129,6 +145,8 @@ static struct attribute *ddr_perf_events_attrs[] = { IMX8_DDR_PMU_EVENT_ATTR(refresh, 0x37), IMX8_DDR_PMU_EVENT_ATTR(write, 0x38), IMX8_DDR_PMU_EVENT_ATTR(raw-hazard, 0x39), + IMX8_DDR_PMU_EVENT_ATTR(axid-read, 0x41), + IMX8_DDR_PMU_EVENT_ATTR(axid-write, 0x42), NULL, }; @@ -138,9 +156,13 @@ static struct attribute_group ddr_perf_events_attr_group = { }; PMU_FORMAT_ATTR(event, "config:0-7"); +PMU_FORMAT_ATTR(axi_id, 
"config1:0-15"); +PMU_FORMAT_ATTR(axi_mask, "config1:16-31"); static struct attribute *ddr_perf_format_attrs[] = { &format_attr_event.attr, + &format_attr_axi_id.attr, + &format_attr_axi_mask.attr, NULL, }; @@ -190,6 +212,26 @@ static u32 ddr_perf_read_counter(struct ddr_pmu *pmu, int counter) return readl_relaxed(pmu->base + COUNTER_READ + counter * 4); } +static bool ddr_perf_is_filtered(struct perf_event *event) +{ + return event->attr.config == 0x41 || event->attr.config == 0x42; +} + +static u32 ddr_perf_filter_val(struct perf_event *event) +{ + return event->attr.config1; +} + +static bool ddr_perf_filters_compatible(struct perf_event *a, + struct perf_event *b) +{ + if (!ddr_perf_is_filtered(a)) + return true; + if (!ddr_perf_is_filtered(b)) + return true; + return ddr_perf_filter_val(a) == ddr_perf_filter_val(b); +} + static int ddr_perf_event_init(struct perf_event *event) { struct ddr_pmu *pmu = to_ddr_pmu(event->pmu); @@ -216,6 +258,15 @@ static int ddr_perf_event_init(struct perf_event *event) !is_software_event(event->group_leader)) return -EINVAL; + if (pmu->devtype_data->quirks & DDR_CAP_AXI_ID_FILTER) { + if (!ddr_perf_filters_compatible(event, event->group_leader)) + return -EINVAL; + for_each_sibling_event(sibling, event->group_leader) { + if (!ddr_perf_filters_compatible(event, sibling)) + return -EINVAL; + } + } + for_each_sibling_event(sibling, event->group_leader) { if (sibling->pmu != event->pmu && !is_software_event(sibling)) @@ -288,6 +339,23 @@ static int ddr_perf_event_add(struct perf_event *event, int flags) struct hw_perf_event *hwc = &event->hw; int counter; int cfg = event->attr.config; + int cfg1 = event->attr.config1; + + if (pmu->devtype_data->quirks & DDR_CAP_AXI_ID_FILTER) { + int i; + + for (i = 1; i < NUM_COUNTERS; i++) { + if (pmu->events[i] && + !ddr_perf_filters_compatible(event, pmu->events[i])) + return -EINVAL; + } + + if (ddr_perf_is_filtered(event)) { + /* revert axi id masking(axi_mask) value */ + cfg1 ^= AXI_MASKING_REVERT; + writel(cfg1, pmu->base + COUNTER_DPCR1); + } + } counter = ddr_perf_alloc_counter(pmu, cfg); if (counter < 0) { @@ -473,6 +541,8 @@ static int ddr_perf_probe(struct platform_device *pdev) if (!name) return -ENOMEM; + pmu->devtype_data = of_device_get_match_data(&pdev->dev); + pmu->cpu = raw_smp_processor_id(); ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, DDR_CPUHP_CB_NAME, -- cgit v1.2.3-58-ga151 From 3724e186fead350d1446d5202cd92fa6250bffda Mon Sep 17 00:00:00 2001 From: Joakim Zhang Date: Wed, 28 Aug 2019 12:07:56 +0000 Subject: docs/perf: Add documentation for the i.MX8 DDR PMU Add some documentation describing the DDR PMU residing in the Freescale i.MDX SoC and its perf driver implementation in Linux. 
Signed-off-by: Joakim Zhang Signed-off-by: Will Deacon --- Documentation/admin-guide/perf/imx-ddr.rst | 52 ++++++++++++++++++++++++++++++ MAINTAINERS | 1 + 2 files changed, 53 insertions(+) create mode 100644 Documentation/admin-guide/perf/imx-ddr.rst diff --git a/Documentation/admin-guide/perf/imx-ddr.rst b/Documentation/admin-guide/perf/imx-ddr.rst new file mode 100644 index 000000000000..517a205abad6 --- /dev/null +++ b/Documentation/admin-guide/perf/imx-ddr.rst @@ -0,0 +1,52 @@ +===================================================== +Freescale i.MX8 DDR Performance Monitoring Unit (PMU) +===================================================== + +There are no performance counters inside the DRAM controller, so performance +signals are brought out to the edge of the controller where a set of 4 x 32 bit +counters is implemented. This is controlled by the CSV modes programed in counter +control register which causes a large number of PERF signals to be generated. + +Selection of the value for each counter is done via the config registers. There +is one register for each counter. Counter 0 is special in that it always counts +“time” and when expired causes a lock on itself and the other counters and an +interrupt is raised. If any other counter overflows, it continues counting, and +no interrupt is raised. + +The "format" directory describes format of the config (event ID) and config1 +(AXI filtering) fields of the perf_event_attr structure, see /sys/bus/event_source/ +devices/imx8_ddr0/format/. The "events" directory describes the events types +hardware supported that can be used with perf tool, see /sys/bus/event_source/ +devices/imx8_ddr0/events/. + e.g.:: + perf stat -a -e imx8_ddr0/cycles/ cmd + perf stat -a -e imx8_ddr0/read/,imx8_ddr0/write/ cmd + +AXI filtering is only used by CSV modes 0x41 (axid-read) and 0x42 (axid-write) +to count reading or writing matches filter setting. Filter setting is various +from different DRAM controller implementations, which is distinguished by quirks +in the driver. + +* With DDR_CAP_AXI_ID_FILTER quirk. + Filter is defined with two configuration parts: + --AXI_ID defines AxID matching value. + --AXI_MASKING defines which bits of AxID are meaningful for the matching. + 0:corresponding bit is masked. + 1: corresponding bit is not masked, i.e. used to do the matching. + + AXI_ID and AXI_MASKING are mapped on DPCR1 register in performance counter. + When non-masked bits are matching corresponding AXI_ID bits then counter is + incremented. Perf counter is incremented if + AxID && AXI_MASKING == AXI_ID && AXI_MASKING + + This filter doesn't support filter different AXI ID for axid-read and axid-write + event at the same time as this filter is shared between counters. + e.g.:: + perf stat -a -e imx8_ddr0/axid-read,axi_mask=0xMMMM,axi_id=0xDDDD/ cmd + perf stat -a -e imx8_ddr0/axid-write,axi_mask=0xMMMM,axi_id=0xDDDD/ cmd + + NOTE: axi_mask is inverted in userspace(i.e. set bits are bits to mask), and + it will be reverted in driver automatically. so that the user can just specify + axi_id to monitor a specific id, rather than having to specify axi_mask. 
+ e.g.:: + perf stat -a -e imx8_ddr0/axid-read,axi_id=0x12/ cmd, which will monitor ARID=0x12 diff --git a/MAINTAINERS b/MAINTAINERS index 783569e3c4b4..1d442f9e3276 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -6434,6 +6434,7 @@ M: Frank Li L: linux-arm-kernel@lists.infradead.org S: Maintained F: drivers/perf/fsl_imx8_ddr_perf.c +F: Documentation/admin-guide/perf/imx-ddr.rst F: Documentation/devicetree/bindings/perf/fsl-imx-ddr.txt FREESCALE IMX LPI2C DRIVER -- cgit v1.2.3-58-ga151 From 8f35eaa5f2de020073a48ad51112237c5932cfcc Mon Sep 17 00:00:00 2001 From: Andrew Murray Date: Wed, 28 Aug 2019 18:50:05 +0100 Subject: jump_label: Don't warn on __exit jump entries On architectures that discard .exit.* sections at runtime, a warning is printed for each jump label that is used within an in-kernel __exit annotated function: can't patch jump_label at ehci_hcd_cleanup+0x8/0x3c WARNING: CPU: 0 PID: 1 at kernel/jump_label.c:410 __jump_label_update+0x12c/0x138 As these functions will never get executed (they are free'd along with the rest of initmem) - we do not need to patch them and should not display any warnings. The warning is displayed because the test required to satisfy jump_entry_is_init is based on init_section_contains (__init_begin to __init_end) whereas the test in __jump_label_update is based on init_kernel_text (_sinittext to _einittext) via kernel_text_address). Fixes: 19483677684b ("jump_label: Annotate entries that operate on __init code earlier") Signed-off-by: Andrew Murray Acked-by: Peter Zijlstra (Intel) Acked-by: Mark Rutland Signed-off-by: Will Deacon --- kernel/jump_label.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/kernel/jump_label.c b/kernel/jump_label.c index df3008419a1d..cdb3ffab128b 100644 --- a/kernel/jump_label.c +++ b/kernel/jump_label.c @@ -407,7 +407,9 @@ static bool jump_label_can_update(struct jump_entry *entry, bool init) return false; if (!kernel_text_address(jump_entry_code(entry))) { - WARN_ONCE(1, "can't patch jump_label at %pS", (void *)jump_entry_code(entry)); + WARN_ONCE(!jump_entry_is_init(entry), + "can't patch jump_label at %pS", + (void *)jump_entry_code(entry)); return false; } -- cgit v1.2.3-58-ga151 From 580fa1b874711d633f9b145b7777b0e83ebf3787 Mon Sep 17 00:00:00 2001 From: Andrew Murray Date: Wed, 28 Aug 2019 18:50:06 +0100 Subject: arm64: Use correct ll/sc atomic constraints The A64 ISA accepts distinct (but overlapping) ranges of immediates for: * add arithmetic instructions ('I' machine constraint) * sub arithmetic instructions ('J' machine constraint) * 32-bit logical instructions ('K' machine constraint) * 64-bit logical instructions ('L' machine constraint) ... but we currently use the 'I' constraint for many atomic operations using sub or logical instructions, which is not always valid. When CONFIG_ARM64_LSE_ATOMICS is not set, this allows invalid immediates to be passed to instructions, potentially resulting in a build failure. When CONFIG_ARM64_LSE_ATOMICS is selected the out-of-line ll/sc atomics always use a register as they have no visibility of the value passed by the caller. This patch adds a constraint parameter to the ATOMIC_xx and __CMPXCHG_CASE macros so that we can pass appropriate constraints for each case, with uses updated accordingly. Unfortunately prior to GCC 8.1.0 the 'K' constraint erroneously accepted '4294967295', so we must instead force the use of a register. 
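The difference between these immediate ranges can be seen in a small stand-alone illustration (arm64 inline asm; the helper names and the constant 257 are invented for this sketch and are not part of the patch):

  #include <stdint.h>

  /*
   * Illustrative only (arm64, GCC/Clang): 257 (0x101) is a valid ADD/SUB
   * immediate, so the "I" constraint will happily pass it through -- but it
   * is not a valid bitmask immediate for AND/ORR/EOR.  If the logical
   * operation below used "I" instead of "r", the compiler could emit
   * "and x0, x1, #257", which the assembler rejects.  That is the class of
   * failure the per-operation constraints above avoid.
   */
  static inline uint64_t example_add_imm(uint64_t x)
  {
  	uint64_t ret;

  	/* "I": immediate range for ADD -- 257 is fine here */
  	asm("add %0, %1, %2" : "=r" (ret) : "r" (x), "I" (257));
  	return ret;
  }

  static inline uint64_t example_and_reg(uint64_t x)
  {
  	uint64_t ret;

  	/* "L" (64-bit logical immediate) would reject 257, so a register
  	 * operand is the safe choice for an arbitrary value */
  	asm("and %0, %1, %2" : "=r" (ret) : "r" (x), "r" ((uint64_t)257));
  	return ret;
  }

With the constraint parameter added below, each atomic macro instantiation can pick the range that matches its instruction, or no immediate constraint at all where GCC's 'K' handling is suspect.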
Signed-off-by: Andrew Murray Signed-off-by: Will Deacon --- arch/arm64/include/asm/atomic_ll_sc.h | 89 ++++++++++++++++++----------------- 1 file changed, 47 insertions(+), 42 deletions(-) diff --git a/arch/arm64/include/asm/atomic_ll_sc.h b/arch/arm64/include/asm/atomic_ll_sc.h index c8c850bc3dfb..6dd011e0b434 100644 --- a/arch/arm64/include/asm/atomic_ll_sc.h +++ b/arch/arm64/include/asm/atomic_ll_sc.h @@ -26,7 +26,7 @@ * (the optimize attribute silently ignores these options). */ -#define ATOMIC_OP(op, asm_op) \ +#define ATOMIC_OP(op, asm_op, constraint) \ __LL_SC_INLINE void \ __LL_SC_PREFIX(arch_atomic_##op(int i, atomic_t *v)) \ { \ @@ -40,11 +40,11 @@ __LL_SC_PREFIX(arch_atomic_##op(int i, atomic_t *v)) \ " stxr %w1, %w0, %2\n" \ " cbnz %w1, 1b" \ : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \ - : "Ir" (i)); \ + : #constraint "r" (i)); \ } \ __LL_SC_EXPORT(arch_atomic_##op); -#define ATOMIC_OP_RETURN(name, mb, acq, rel, cl, op, asm_op) \ +#define ATOMIC_OP_RETURN(name, mb, acq, rel, cl, op, asm_op, constraint)\ __LL_SC_INLINE int \ __LL_SC_PREFIX(arch_atomic_##op##_return##name(int i, atomic_t *v)) \ { \ @@ -59,14 +59,14 @@ __LL_SC_PREFIX(arch_atomic_##op##_return##name(int i, atomic_t *v)) \ " cbnz %w1, 1b\n" \ " " #mb \ : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \ - : "Ir" (i) \ + : #constraint "r" (i) \ : cl); \ \ return result; \ } \ __LL_SC_EXPORT(arch_atomic_##op##_return##name); -#define ATOMIC_FETCH_OP(name, mb, acq, rel, cl, op, asm_op) \ +#define ATOMIC_FETCH_OP(name, mb, acq, rel, cl, op, asm_op, constraint) \ __LL_SC_INLINE int \ __LL_SC_PREFIX(arch_atomic_fetch_##op##name(int i, atomic_t *v)) \ { \ @@ -81,7 +81,7 @@ __LL_SC_PREFIX(arch_atomic_fetch_##op##name(int i, atomic_t *v)) \ " cbnz %w2, 1b\n" \ " " #mb \ : "=&r" (result), "=&r" (val), "=&r" (tmp), "+Q" (v->counter) \ - : "Ir" (i) \ + : #constraint "r" (i) \ : cl); \ \ return result; \ @@ -99,8 +99,8 @@ __LL_SC_EXPORT(arch_atomic_fetch_##op##name); ATOMIC_FETCH_OP (_acquire, , a, , "memory", __VA_ARGS__)\ ATOMIC_FETCH_OP (_release, , , l, "memory", __VA_ARGS__) -ATOMIC_OPS(add, add) -ATOMIC_OPS(sub, sub) +ATOMIC_OPS(add, add, I) +ATOMIC_OPS(sub, sub, J) #undef ATOMIC_OPS #define ATOMIC_OPS(...) 
\ @@ -110,17 +110,17 @@ ATOMIC_OPS(sub, sub) ATOMIC_FETCH_OP (_acquire, , a, , "memory", __VA_ARGS__)\ ATOMIC_FETCH_OP (_release, , , l, "memory", __VA_ARGS__) -ATOMIC_OPS(and, and) -ATOMIC_OPS(andnot, bic) -ATOMIC_OPS(or, orr) -ATOMIC_OPS(xor, eor) +ATOMIC_OPS(and, and, ) +ATOMIC_OPS(andnot, bic, ) +ATOMIC_OPS(or, orr, ) +ATOMIC_OPS(xor, eor, ) #undef ATOMIC_OPS #undef ATOMIC_FETCH_OP #undef ATOMIC_OP_RETURN #undef ATOMIC_OP -#define ATOMIC64_OP(op, asm_op) \ +#define ATOMIC64_OP(op, asm_op, constraint) \ __LL_SC_INLINE void \ __LL_SC_PREFIX(arch_atomic64_##op(s64 i, atomic64_t *v)) \ { \ @@ -134,11 +134,11 @@ __LL_SC_PREFIX(arch_atomic64_##op(s64 i, atomic64_t *v)) \ " stxr %w1, %0, %2\n" \ " cbnz %w1, 1b" \ : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \ - : "Ir" (i)); \ + : #constraint "r" (i)); \ } \ __LL_SC_EXPORT(arch_atomic64_##op); -#define ATOMIC64_OP_RETURN(name, mb, acq, rel, cl, op, asm_op) \ +#define ATOMIC64_OP_RETURN(name, mb, acq, rel, cl, op, asm_op, constraint)\ __LL_SC_INLINE s64 \ __LL_SC_PREFIX(arch_atomic64_##op##_return##name(s64 i, atomic64_t *v))\ { \ @@ -153,14 +153,14 @@ __LL_SC_PREFIX(arch_atomic64_##op##_return##name(s64 i, atomic64_t *v))\ " cbnz %w1, 1b\n" \ " " #mb \ : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \ - : "Ir" (i) \ + : #constraint "r" (i) \ : cl); \ \ return result; \ } \ __LL_SC_EXPORT(arch_atomic64_##op##_return##name); -#define ATOMIC64_FETCH_OP(name, mb, acq, rel, cl, op, asm_op) \ +#define ATOMIC64_FETCH_OP(name, mb, acq, rel, cl, op, asm_op, constraint)\ __LL_SC_INLINE s64 \ __LL_SC_PREFIX(arch_atomic64_fetch_##op##name(s64 i, atomic64_t *v)) \ { \ @@ -175,7 +175,7 @@ __LL_SC_PREFIX(arch_atomic64_fetch_##op##name(s64 i, atomic64_t *v)) \ " cbnz %w2, 1b\n" \ " " #mb \ : "=&r" (result), "=&r" (val), "=&r" (tmp), "+Q" (v->counter) \ - : "Ir" (i) \ + : #constraint "r" (i) \ : cl); \ \ return result; \ @@ -193,8 +193,8 @@ __LL_SC_EXPORT(arch_atomic64_fetch_##op##name); ATOMIC64_FETCH_OP (_acquire,, a, , "memory", __VA_ARGS__) \ ATOMIC64_FETCH_OP (_release,, , l, "memory", __VA_ARGS__) -ATOMIC64_OPS(add, add) -ATOMIC64_OPS(sub, sub) +ATOMIC64_OPS(add, add, I) +ATOMIC64_OPS(sub, sub, J) #undef ATOMIC64_OPS #define ATOMIC64_OPS(...) 
\ @@ -204,10 +204,10 @@ ATOMIC64_OPS(sub, sub) ATOMIC64_FETCH_OP (_acquire,, a, , "memory", __VA_ARGS__) \ ATOMIC64_FETCH_OP (_release,, , l, "memory", __VA_ARGS__) -ATOMIC64_OPS(and, and) -ATOMIC64_OPS(andnot, bic) -ATOMIC64_OPS(or, orr) -ATOMIC64_OPS(xor, eor) +ATOMIC64_OPS(and, and, L) +ATOMIC64_OPS(andnot, bic, ) +ATOMIC64_OPS(or, orr, L) +ATOMIC64_OPS(xor, eor, L) #undef ATOMIC64_OPS #undef ATOMIC64_FETCH_OP @@ -237,7 +237,7 @@ __LL_SC_PREFIX(arch_atomic64_dec_if_positive(atomic64_t *v)) } __LL_SC_EXPORT(arch_atomic64_dec_if_positive); -#define __CMPXCHG_CASE(w, sfx, name, sz, mb, acq, rel, cl) \ +#define __CMPXCHG_CASE(w, sfx, name, sz, mb, acq, rel, cl, constraint) \ __LL_SC_INLINE u##sz \ __LL_SC_PREFIX(__cmpxchg_case_##name##sz(volatile void *ptr, \ unsigned long old, \ @@ -265,29 +265,34 @@ __LL_SC_PREFIX(__cmpxchg_case_##name##sz(volatile void *ptr, \ "2:" \ : [tmp] "=&r" (tmp), [oldval] "=&r" (oldval), \ [v] "+Q" (*(u##sz *)ptr) \ - : [old] "Kr" (old), [new] "r" (new) \ + : [old] #constraint "r" (old), [new] "r" (new) \ : cl); \ \ return oldval; \ } \ __LL_SC_EXPORT(__cmpxchg_case_##name##sz); -__CMPXCHG_CASE(w, b, , 8, , , , ) -__CMPXCHG_CASE(w, h, , 16, , , , ) -__CMPXCHG_CASE(w, , , 32, , , , ) -__CMPXCHG_CASE( , , , 64, , , , ) -__CMPXCHG_CASE(w, b, acq_, 8, , a, , "memory") -__CMPXCHG_CASE(w, h, acq_, 16, , a, , "memory") -__CMPXCHG_CASE(w, , acq_, 32, , a, , "memory") -__CMPXCHG_CASE( , , acq_, 64, , a, , "memory") -__CMPXCHG_CASE(w, b, rel_, 8, , , l, "memory") -__CMPXCHG_CASE(w, h, rel_, 16, , , l, "memory") -__CMPXCHG_CASE(w, , rel_, 32, , , l, "memory") -__CMPXCHG_CASE( , , rel_, 64, , , l, "memory") -__CMPXCHG_CASE(w, b, mb_, 8, dmb ish, , l, "memory") -__CMPXCHG_CASE(w, h, mb_, 16, dmb ish, , l, "memory") -__CMPXCHG_CASE(w, , mb_, 32, dmb ish, , l, "memory") -__CMPXCHG_CASE( , , mb_, 64, dmb ish, , l, "memory") +/* + * Earlier versions of GCC (no later than 8.1.0) appear to incorrectly + * handle the 'K' constraint for the value 4294967295 - thus we use no + * constraint for 32 bit operations. + */ +__CMPXCHG_CASE(w, b, , 8, , , , , ) +__CMPXCHG_CASE(w, h, , 16, , , , , ) +__CMPXCHG_CASE(w, , , 32, , , , , ) +__CMPXCHG_CASE( , , , 64, , , , , L) +__CMPXCHG_CASE(w, b, acq_, 8, , a, , "memory", ) +__CMPXCHG_CASE(w, h, acq_, 16, , a, , "memory", ) +__CMPXCHG_CASE(w, , acq_, 32, , a, , "memory", ) +__CMPXCHG_CASE( , , acq_, 64, , a, , "memory", L) +__CMPXCHG_CASE(w, b, rel_, 8, , , l, "memory", ) +__CMPXCHG_CASE(w, h, rel_, 16, , , l, "memory", ) +__CMPXCHG_CASE(w, , rel_, 32, , , l, "memory", ) +__CMPXCHG_CASE( , , rel_, 64, , , l, "memory", L) +__CMPXCHG_CASE(w, b, mb_, 8, dmb ish, , l, "memory", ) +__CMPXCHG_CASE(w, h, mb_, 16, dmb ish, , l, "memory", ) +__CMPXCHG_CASE(w, , mb_, 32, dmb ish, , l, "memory", ) +__CMPXCHG_CASE( , , mb_, 64, dmb ish, , l, "memory", L) #undef __CMPXCHG_CASE -- cgit v1.2.3-58-ga151 From addfc38672c73efd5c4e559a2e455b086e3e20c5 Mon Sep 17 00:00:00 2001 From: Andrew Murray Date: Wed, 28 Aug 2019 18:50:07 +0100 Subject: arm64: atomics: avoid out-of-line ll/sc atomics When building for LSE atomics (CONFIG_ARM64_LSE_ATOMICS), if the hardware or toolchain doesn't support it the existing code will fallback to ll/sc atomics. It achieves this by branching from inline assembly to a function that is built with special compile flags. Further this results in the clobbering of registers even when the fallback isn't used increasing register pressure. 
Improve this by providing inline implementations of both LSE and ll/sc and use a static key to select between them, which allows for the compiler to generate better atomics code. Put the LL/SC fallback atomics in their own subsection to improve icache performance. Signed-off-by: Andrew Murray Signed-off-by: Will Deacon --- arch/arm64/include/asm/atomic.h | 11 +- arch/arm64/include/asm/atomic_arch.h | 155 +++++++++++++++ arch/arm64/include/asm/atomic_ll_sc.h | 113 ++++++----- arch/arm64/include/asm/atomic_lse.h | 365 +++++++++++----------------------- arch/arm64/include/asm/cmpxchg.h | 2 +- arch/arm64/include/asm/lse.h | 11 - 6 files changed, 329 insertions(+), 328 deletions(-) create mode 100644 arch/arm64/include/asm/atomic_arch.h diff --git a/arch/arm64/include/asm/atomic.h b/arch/arm64/include/asm/atomic.h index 657b0457d83c..c70d3f389d29 100644 --- a/arch/arm64/include/asm/atomic.h +++ b/arch/arm64/include/asm/atomic.h @@ -17,16 +17,7 @@ #ifdef __KERNEL__ -#define __ARM64_IN_ATOMIC_IMPL - -#if defined(CONFIG_ARM64_LSE_ATOMICS) && defined(CONFIG_AS_LSE) -#include -#else -#include -#endif - -#undef __ARM64_IN_ATOMIC_IMPL - +#include #include #define ATOMIC_INIT(i) { (i) } diff --git a/arch/arm64/include/asm/atomic_arch.h b/arch/arm64/include/asm/atomic_arch.h new file mode 100644 index 000000000000..1aac7fc65084 --- /dev/null +++ b/arch/arm64/include/asm/atomic_arch.h @@ -0,0 +1,155 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Selection between LSE and LL/SC atomics. + * + * Copyright (C) 2018 ARM Ltd. + * Author: Andrew Murray + */ + +#ifndef __ASM_ATOMIC_ARCH_H +#define __ASM_ATOMIC_ARCH_H + + +#include + +#include +#include +#include + +extern struct static_key_false cpu_hwcap_keys[ARM64_NCAPS]; +extern struct static_key_false arm64_const_caps_ready; + +static inline bool system_uses_lse_atomics(void) +{ + return (IS_ENABLED(CONFIG_ARM64_LSE_ATOMICS) && + IS_ENABLED(CONFIG_AS_LSE) && + static_branch_likely(&arm64_const_caps_ready)) && + static_branch_likely(&cpu_hwcap_keys[ARM64_HAS_LSE_ATOMICS]); +} + +#define __lse_ll_sc_body(op, ...) \ +({ \ + system_uses_lse_atomics() ? 
\ + __lse_##op(__VA_ARGS__) : \ + __ll_sc_##op(__VA_ARGS__); \ +}) + +#define ATOMIC_OP(op) \ +static inline void arch_##op(int i, atomic_t *v) \ +{ \ + __lse_ll_sc_body(op, i, v); \ +} + +ATOMIC_OP(atomic_andnot) +ATOMIC_OP(atomic_or) +ATOMIC_OP(atomic_xor) +ATOMIC_OP(atomic_add) +ATOMIC_OP(atomic_and) +ATOMIC_OP(atomic_sub) + + +#define ATOMIC_FETCH_OP(name, op) \ +static inline int arch_##op##name(int i, atomic_t *v) \ +{ \ + return __lse_ll_sc_body(op##name, i, v); \ +} + +#define ATOMIC_FETCH_OPS(op) \ + ATOMIC_FETCH_OP(_relaxed, op) \ + ATOMIC_FETCH_OP(_acquire, op) \ + ATOMIC_FETCH_OP(_release, op) \ + ATOMIC_FETCH_OP( , op) + +ATOMIC_FETCH_OPS(atomic_fetch_andnot) +ATOMIC_FETCH_OPS(atomic_fetch_or) +ATOMIC_FETCH_OPS(atomic_fetch_xor) +ATOMIC_FETCH_OPS(atomic_fetch_add) +ATOMIC_FETCH_OPS(atomic_fetch_and) +ATOMIC_FETCH_OPS(atomic_fetch_sub) +ATOMIC_FETCH_OPS(atomic_add_return) +ATOMIC_FETCH_OPS(atomic_sub_return) + + +#define ATOMIC64_OP(op) \ +static inline void arch_##op(long i, atomic64_t *v) \ +{ \ + __lse_ll_sc_body(op, i, v); \ +} + +ATOMIC64_OP(atomic64_andnot) +ATOMIC64_OP(atomic64_or) +ATOMIC64_OP(atomic64_xor) +ATOMIC64_OP(atomic64_add) +ATOMIC64_OP(atomic64_and) +ATOMIC64_OP(atomic64_sub) + + +#define ATOMIC64_FETCH_OP(name, op) \ +static inline long arch_##op##name(long i, atomic64_t *v) \ +{ \ + return __lse_ll_sc_body(op##name, i, v); \ +} + +#define ATOMIC64_FETCH_OPS(op) \ + ATOMIC64_FETCH_OP(_relaxed, op) \ + ATOMIC64_FETCH_OP(_acquire, op) \ + ATOMIC64_FETCH_OP(_release, op) \ + ATOMIC64_FETCH_OP( , op) + +ATOMIC64_FETCH_OPS(atomic64_fetch_andnot) +ATOMIC64_FETCH_OPS(atomic64_fetch_or) +ATOMIC64_FETCH_OPS(atomic64_fetch_xor) +ATOMIC64_FETCH_OPS(atomic64_fetch_add) +ATOMIC64_FETCH_OPS(atomic64_fetch_and) +ATOMIC64_FETCH_OPS(atomic64_fetch_sub) +ATOMIC64_FETCH_OPS(atomic64_add_return) +ATOMIC64_FETCH_OPS(atomic64_sub_return) + + +static inline long arch_atomic64_dec_if_positive(atomic64_t *v) +{ + return __lse_ll_sc_body(atomic64_dec_if_positive, v); +} + +#define __CMPXCHG_CASE(name, sz) \ +static inline u##sz __cmpxchg_case_##name##sz(volatile void *ptr, \ + u##sz old, \ + u##sz new) \ +{ \ + return __lse_ll_sc_body(_cmpxchg_case_##name##sz, \ + ptr, old, new); \ +} + +__CMPXCHG_CASE( , 8) +__CMPXCHG_CASE( , 16) +__CMPXCHG_CASE( , 32) +__CMPXCHG_CASE( , 64) +__CMPXCHG_CASE(acq_, 8) +__CMPXCHG_CASE(acq_, 16) +__CMPXCHG_CASE(acq_, 32) +__CMPXCHG_CASE(acq_, 64) +__CMPXCHG_CASE(rel_, 8) +__CMPXCHG_CASE(rel_, 16) +__CMPXCHG_CASE(rel_, 32) +__CMPXCHG_CASE(rel_, 64) +__CMPXCHG_CASE(mb_, 8) +__CMPXCHG_CASE(mb_, 16) +__CMPXCHG_CASE(mb_, 32) +__CMPXCHG_CASE(mb_, 64) + + +#define __CMPXCHG_DBL(name) \ +static inline long __cmpxchg_double##name(unsigned long old1, \ + unsigned long old2, \ + unsigned long new1, \ + unsigned long new2, \ + volatile void *ptr) \ +{ \ + return __lse_ll_sc_body(_cmpxchg_double##name, \ + old1, old2, new1, new2, ptr); \ +} + +__CMPXCHG_DBL( ) +__CMPXCHG_DBL(_mb) + +#endif /* __ASM_ATOMIC_LSE_H */ diff --git a/arch/arm64/include/asm/atomic_ll_sc.h b/arch/arm64/include/asm/atomic_ll_sc.h index 6dd011e0b434..95091f72228b 100644 --- a/arch/arm64/include/asm/atomic_ll_sc.h +++ b/arch/arm64/include/asm/atomic_ll_sc.h @@ -10,83 +10,86 @@ #ifndef __ASM_ATOMIC_LL_SC_H #define __ASM_ATOMIC_LL_SC_H -#ifndef __ARM64_IN_ATOMIC_IMPL -#error "please don't include this file directly" +#if IS_ENABLED(CONFIG_ARM64_LSE_ATOMICS) && IS_ENABLED(CONFIG_AS_LSE) +#define __LL_SC_FALLBACK(asm_ops) \ +" b 3f\n" \ +" .subsection 1\n" \ +"3:\n" \ +asm_ops "\n" \ +" b 4f\n" \ 
+" .previous\n" \ +"4:\n" +#else +#define __LL_SC_FALLBACK(asm_ops) asm_ops #endif /* * AArch64 UP and SMP safe atomic ops. We use load exclusive and * store exclusive to ensure that these are atomic. We may loop * to ensure that the update happens. - * - * NOTE: these functions do *not* follow the PCS and must explicitly - * save any clobbered registers other than x0 (regardless of return - * value). This is achieved through -fcall-saved-* compiler flags for - * this file, which unfortunately don't work on a per-function basis - * (the optimize attribute silently ignores these options). */ #define ATOMIC_OP(op, asm_op, constraint) \ -__LL_SC_INLINE void \ -__LL_SC_PREFIX(arch_atomic_##op(int i, atomic_t *v)) \ +static inline void \ +__ll_sc_atomic_##op(int i, atomic_t *v) \ { \ unsigned long tmp; \ int result; \ \ asm volatile("// atomic_" #op "\n" \ + __LL_SC_FALLBACK( \ " prfm pstl1strm, %2\n" \ "1: ldxr %w0, %2\n" \ " " #asm_op " %w0, %w0, %w3\n" \ " stxr %w1, %w0, %2\n" \ -" cbnz %w1, 1b" \ +" cbnz %w1, 1b\n") \ : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \ : #constraint "r" (i)); \ -} \ -__LL_SC_EXPORT(arch_atomic_##op); +} #define ATOMIC_OP_RETURN(name, mb, acq, rel, cl, op, asm_op, constraint)\ -__LL_SC_INLINE int \ -__LL_SC_PREFIX(arch_atomic_##op##_return##name(int i, atomic_t *v)) \ +static inline int \ +__ll_sc_atomic_##op##_return##name(int i, atomic_t *v) \ { \ unsigned long tmp; \ int result; \ \ asm volatile("// atomic_" #op "_return" #name "\n" \ + __LL_SC_FALLBACK( \ " prfm pstl1strm, %2\n" \ "1: ld" #acq "xr %w0, %2\n" \ " " #asm_op " %w0, %w0, %w3\n" \ " st" #rel "xr %w1, %w0, %2\n" \ " cbnz %w1, 1b\n" \ -" " #mb \ +" " #mb ) \ : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \ : #constraint "r" (i) \ : cl); \ \ return result; \ -} \ -__LL_SC_EXPORT(arch_atomic_##op##_return##name); +} -#define ATOMIC_FETCH_OP(name, mb, acq, rel, cl, op, asm_op, constraint) \ -__LL_SC_INLINE int \ -__LL_SC_PREFIX(arch_atomic_fetch_##op##name(int i, atomic_t *v)) \ +#define ATOMIC_FETCH_OP(name, mb, acq, rel, cl, op, asm_op, constraint) \ +static inline int \ +__ll_sc_atomic_fetch_##op##name(int i, atomic_t *v) \ { \ unsigned long tmp; \ int val, result; \ \ asm volatile("// atomic_fetch_" #op #name "\n" \ + __LL_SC_FALLBACK( \ " prfm pstl1strm, %3\n" \ "1: ld" #acq "xr %w0, %3\n" \ " " #asm_op " %w1, %w0, %w4\n" \ " st" #rel "xr %w2, %w1, %3\n" \ " cbnz %w2, 1b\n" \ -" " #mb \ +" " #mb ) \ : "=&r" (result), "=&r" (val), "=&r" (tmp), "+Q" (v->counter) \ : #constraint "r" (i) \ : cl); \ \ return result; \ -} \ -__LL_SC_EXPORT(arch_atomic_fetch_##op##name); +} #define ATOMIC_OPS(...) 
\ ATOMIC_OP(__VA_ARGS__) \ @@ -121,66 +124,66 @@ ATOMIC_OPS(xor, eor, ) #undef ATOMIC_OP #define ATOMIC64_OP(op, asm_op, constraint) \ -__LL_SC_INLINE void \ -__LL_SC_PREFIX(arch_atomic64_##op(s64 i, atomic64_t *v)) \ +static inline void \ +__ll_sc_atomic64_##op(s64 i, atomic64_t *v) \ { \ s64 result; \ unsigned long tmp; \ \ asm volatile("// atomic64_" #op "\n" \ + __LL_SC_FALLBACK( \ " prfm pstl1strm, %2\n" \ "1: ldxr %0, %2\n" \ " " #asm_op " %0, %0, %3\n" \ " stxr %w1, %0, %2\n" \ -" cbnz %w1, 1b" \ +" cbnz %w1, 1b") \ : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \ : #constraint "r" (i)); \ -} \ -__LL_SC_EXPORT(arch_atomic64_##op); +} #define ATOMIC64_OP_RETURN(name, mb, acq, rel, cl, op, asm_op, constraint)\ -__LL_SC_INLINE s64 \ -__LL_SC_PREFIX(arch_atomic64_##op##_return##name(s64 i, atomic64_t *v))\ +static inline long \ +__ll_sc_atomic64_##op##_return##name(s64 i, atomic64_t *v) \ { \ s64 result; \ unsigned long tmp; \ \ asm volatile("// atomic64_" #op "_return" #name "\n" \ + __LL_SC_FALLBACK( \ " prfm pstl1strm, %2\n" \ "1: ld" #acq "xr %0, %2\n" \ " " #asm_op " %0, %0, %3\n" \ " st" #rel "xr %w1, %0, %2\n" \ " cbnz %w1, 1b\n" \ -" " #mb \ +" " #mb ) \ : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \ : #constraint "r" (i) \ : cl); \ \ return result; \ -} \ -__LL_SC_EXPORT(arch_atomic64_##op##_return##name); +} #define ATOMIC64_FETCH_OP(name, mb, acq, rel, cl, op, asm_op, constraint)\ -__LL_SC_INLINE s64 \ -__LL_SC_PREFIX(arch_atomic64_fetch_##op##name(s64 i, atomic64_t *v)) \ +static inline long \ +__ll_sc_atomic64_fetch_##op##name(s64 i, atomic64_t *v) \ { \ s64 result, val; \ unsigned long tmp; \ \ asm volatile("// atomic64_fetch_" #op #name "\n" \ + __LL_SC_FALLBACK( \ " prfm pstl1strm, %3\n" \ "1: ld" #acq "xr %0, %3\n" \ " " #asm_op " %1, %0, %4\n" \ " st" #rel "xr %w2, %1, %3\n" \ " cbnz %w2, 1b\n" \ -" " #mb \ +" " #mb ) \ : "=&r" (result), "=&r" (val), "=&r" (tmp), "+Q" (v->counter) \ : #constraint "r" (i) \ : cl); \ \ return result; \ -} \ -__LL_SC_EXPORT(arch_atomic64_fetch_##op##name); +} #define ATOMIC64_OPS(...) 
\ ATOMIC64_OP(__VA_ARGS__) \ @@ -214,13 +217,14 @@ ATOMIC64_OPS(xor, eor, L) #undef ATOMIC64_OP_RETURN #undef ATOMIC64_OP -__LL_SC_INLINE s64 -__LL_SC_PREFIX(arch_atomic64_dec_if_positive(atomic64_t *v)) +static inline s64 +__ll_sc_atomic64_dec_if_positive(atomic64_t *v) { s64 result; unsigned long tmp; asm volatile("// atomic64_dec_if_positive\n" + __LL_SC_FALLBACK( " prfm pstl1strm, %2\n" "1: ldxr %0, %2\n" " subs %0, %0, #1\n" @@ -228,20 +232,19 @@ __LL_SC_PREFIX(arch_atomic64_dec_if_positive(atomic64_t *v)) " stlxr %w1, %0, %2\n" " cbnz %w1, 1b\n" " dmb ish\n" -"2:" +"2:") : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) : : "cc", "memory"); return result; } -__LL_SC_EXPORT(arch_atomic64_dec_if_positive); #define __CMPXCHG_CASE(w, sfx, name, sz, mb, acq, rel, cl, constraint) \ -__LL_SC_INLINE u##sz \ -__LL_SC_PREFIX(__cmpxchg_case_##name##sz(volatile void *ptr, \ +static inline u##sz \ +__ll_sc__cmpxchg_case_##name##sz(volatile void *ptr, \ unsigned long old, \ - u##sz new)) \ + u##sz new) \ { \ unsigned long tmp; \ u##sz oldval; \ @@ -255,6 +258,7 @@ __LL_SC_PREFIX(__cmpxchg_case_##name##sz(volatile void *ptr, \ old = (u##sz)old; \ \ asm volatile( \ + __LL_SC_FALLBACK( \ " prfm pstl1strm, %[v]\n" \ "1: ld" #acq "xr" #sfx "\t%" #w "[oldval], %[v]\n" \ " eor %" #w "[tmp], %" #w "[oldval], %" #w "[old]\n" \ @@ -262,15 +266,14 @@ __LL_SC_PREFIX(__cmpxchg_case_##name##sz(volatile void *ptr, \ " st" #rel "xr" #sfx "\t%w[tmp], %" #w "[new], %[v]\n" \ " cbnz %w[tmp], 1b\n" \ " " #mb "\n" \ - "2:" \ + "2:") \ : [tmp] "=&r" (tmp), [oldval] "=&r" (oldval), \ [v] "+Q" (*(u##sz *)ptr) \ : [old] #constraint "r" (old), [new] "r" (new) \ : cl); \ \ return oldval; \ -} \ -__LL_SC_EXPORT(__cmpxchg_case_##name##sz); +} /* * Earlier versions of GCC (no later than 8.1.0) appear to incorrectly @@ -297,16 +300,17 @@ __CMPXCHG_CASE( , , mb_, 64, dmb ish, , l, "memory", L) #undef __CMPXCHG_CASE #define __CMPXCHG_DBL(name, mb, rel, cl) \ -__LL_SC_INLINE long \ -__LL_SC_PREFIX(__cmpxchg_double##name(unsigned long old1, \ +static inline long \ +__ll_sc__cmpxchg_double##name(unsigned long old1, \ unsigned long old2, \ unsigned long new1, \ unsigned long new2, \ - volatile void *ptr)) \ + volatile void *ptr) \ { \ unsigned long tmp, ret; \ \ asm volatile("// __cmpxchg_double" #name "\n" \ + __LL_SC_FALLBACK( \ " prfm pstl1strm, %2\n" \ "1: ldxp %0, %1, %2\n" \ " eor %0, %0, %3\n" \ @@ -316,14 +320,13 @@ __LL_SC_PREFIX(__cmpxchg_double##name(unsigned long old1, \ " st" #rel "xp %w0, %5, %6, %2\n" \ " cbnz %w0, 1b\n" \ " " #mb "\n" \ - "2:" \ + "2:") \ : "=&r" (tmp), "=&r" (ret), "+Q" (*(unsigned long *)ptr) \ : "r" (old1), "r" (old2), "r" (new1), "r" (new2) \ : cl); \ \ return ret; \ -} \ -__LL_SC_EXPORT(__cmpxchg_double##name); +} __CMPXCHG_DBL( , , , ) __CMPXCHG_DBL(_mb, dmb ish, l, "memory") diff --git a/arch/arm64/include/asm/atomic_lse.h b/arch/arm64/include/asm/atomic_lse.h index 69acb1c19a15..7dce5e1f074e 100644 --- a/arch/arm64/include/asm/atomic_lse.h +++ b/arch/arm64/include/asm/atomic_lse.h @@ -10,22 +10,13 @@ #ifndef __ASM_ATOMIC_LSE_H #define __ASM_ATOMIC_LSE_H -#ifndef __ARM64_IN_ATOMIC_IMPL -#error "please don't include this file directly" -#endif - -#define __LL_SC_ATOMIC(op) __LL_SC_CALL(arch_atomic_##op) #define ATOMIC_OP(op, asm_op) \ -static inline void arch_atomic_##op(int i, atomic_t *v) \ +static inline void __lse_atomic_##op(int i, atomic_t *v) \ { \ - register int w0 asm ("w0") = i; \ - register atomic_t *x1 asm ("x1") = v; \ - \ - asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC(op), \ -" 
" #asm_op " %w[i], %[v]\n") \ - : [i] "+r" (w0), [v] "+Q" (v->counter) \ - : "r" (x1) \ - : __LL_SC_CLOBBERS); \ + asm volatile( \ +" " #asm_op " %w[i], %[v]\n" \ + : [i] "+r" (i), [v] "+Q" (v->counter) \ + : "r" (v)); \ } ATOMIC_OP(andnot, stclr) @@ -36,21 +27,15 @@ ATOMIC_OP(add, stadd) #undef ATOMIC_OP #define ATOMIC_FETCH_OP(name, mb, op, asm_op, cl...) \ -static inline int arch_atomic_fetch_##op##name(int i, atomic_t *v) \ +static inline int __lse_atomic_fetch_##op##name(int i, atomic_t *v) \ { \ - register int w0 asm ("w0") = i; \ - register atomic_t *x1 asm ("x1") = v; \ - \ - asm volatile(ARM64_LSE_ATOMIC_INSN( \ - /* LL/SC */ \ - __LL_SC_ATOMIC(fetch_##op##name), \ - /* LSE atomics */ \ -" " #asm_op #mb " %w[i], %w[i], %[v]") \ - : [i] "+r" (w0), [v] "+Q" (v->counter) \ - : "r" (x1) \ - : __LL_SC_CLOBBERS, ##cl); \ + asm volatile( \ +" " #asm_op #mb " %w[i], %w[i], %[v]" \ + : [i] "+r" (i), [v] "+Q" (v->counter) \ + : "r" (v) \ + : cl); \ \ - return w0; \ + return i; \ } #define ATOMIC_FETCH_OPS(op, asm_op) \ @@ -68,23 +53,16 @@ ATOMIC_FETCH_OPS(add, ldadd) #undef ATOMIC_FETCH_OPS #define ATOMIC_OP_ADD_RETURN(name, mb, cl...) \ -static inline int arch_atomic_add_return##name(int i, atomic_t *v) \ +static inline int __lse_atomic_add_return##name(int i, atomic_t *v) \ { \ - register int w0 asm ("w0") = i; \ - register atomic_t *x1 asm ("x1") = v; \ - \ - asm volatile(ARM64_LSE_ATOMIC_INSN( \ - /* LL/SC */ \ - __LL_SC_ATOMIC(add_return##name) \ - __nops(1), \ - /* LSE atomics */ \ + asm volatile( \ " ldadd" #mb " %w[i], w30, %[v]\n" \ - " add %w[i], %w[i], w30") \ - : [i] "+r" (w0), [v] "+Q" (v->counter) \ - : "r" (x1) \ - : __LL_SC_CLOBBERS, ##cl); \ + " add %w[i], %w[i], w30" \ + : [i] "+r" (i), [v] "+Q" (v->counter) \ + : "r" (v) \ + : "x30", ##cl); \ \ - return w0; \ + return i; \ } ATOMIC_OP_ADD_RETURN(_relaxed, ) @@ -94,41 +72,26 @@ ATOMIC_OP_ADD_RETURN( , al, "memory") #undef ATOMIC_OP_ADD_RETURN -static inline void arch_atomic_and(int i, atomic_t *v) +static inline void __lse_atomic_and(int i, atomic_t *v) { - register int w0 asm ("w0") = i; - register atomic_t *x1 asm ("x1") = v; - - asm volatile(ARM64_LSE_ATOMIC_INSN( - /* LL/SC */ - __LL_SC_ATOMIC(and) - __nops(1), - /* LSE atomics */ + asm volatile( " mvn %w[i], %w[i]\n" - " stclr %w[i], %[v]") - : [i] "+&r" (w0), [v] "+Q" (v->counter) - : "r" (x1) - : __LL_SC_CLOBBERS); + " stclr %w[i], %[v]" + : [i] "+&r" (i), [v] "+Q" (v->counter) + : "r" (v)); } #define ATOMIC_FETCH_OP_AND(name, mb, cl...) 
\ -static inline int arch_atomic_fetch_and##name(int i, atomic_t *v) \ +static inline int __lse_atomic_fetch_and##name(int i, atomic_t *v) \ { \ - register int w0 asm ("w0") = i; \ - register atomic_t *x1 asm ("x1") = v; \ - \ - asm volatile(ARM64_LSE_ATOMIC_INSN( \ - /* LL/SC */ \ - __LL_SC_ATOMIC(fetch_and##name) \ - __nops(1), \ - /* LSE atomics */ \ + asm volatile( \ " mvn %w[i], %w[i]\n" \ - " ldclr" #mb " %w[i], %w[i], %[v]") \ - : [i] "+&r" (w0), [v] "+Q" (v->counter) \ - : "r" (x1) \ - : __LL_SC_CLOBBERS, ##cl); \ + " ldclr" #mb " %w[i], %w[i], %[v]" \ + : [i] "+&r" (i), [v] "+Q" (v->counter) \ + : "r" (v) \ + : cl); \ \ - return w0; \ + return i; \ } ATOMIC_FETCH_OP_AND(_relaxed, ) @@ -138,42 +101,27 @@ ATOMIC_FETCH_OP_AND( , al, "memory") #undef ATOMIC_FETCH_OP_AND -static inline void arch_atomic_sub(int i, atomic_t *v) +static inline void __lse_atomic_sub(int i, atomic_t *v) { - register int w0 asm ("w0") = i; - register atomic_t *x1 asm ("x1") = v; - - asm volatile(ARM64_LSE_ATOMIC_INSN( - /* LL/SC */ - __LL_SC_ATOMIC(sub) - __nops(1), - /* LSE atomics */ + asm volatile( " neg %w[i], %w[i]\n" - " stadd %w[i], %[v]") - : [i] "+&r" (w0), [v] "+Q" (v->counter) - : "r" (x1) - : __LL_SC_CLOBBERS); + " stadd %w[i], %[v]" + : [i] "+&r" (i), [v] "+Q" (v->counter) + : "r" (v)); } #define ATOMIC_OP_SUB_RETURN(name, mb, cl...) \ -static inline int arch_atomic_sub_return##name(int i, atomic_t *v) \ +static inline int __lse_atomic_sub_return##name(int i, atomic_t *v) \ { \ - register int w0 asm ("w0") = i; \ - register atomic_t *x1 asm ("x1") = v; \ - \ - asm volatile(ARM64_LSE_ATOMIC_INSN( \ - /* LL/SC */ \ - __LL_SC_ATOMIC(sub_return##name) \ - __nops(2), \ - /* LSE atomics */ \ + asm volatile( \ " neg %w[i], %w[i]\n" \ " ldadd" #mb " %w[i], w30, %[v]\n" \ - " add %w[i], %w[i], w30") \ - : [i] "+&r" (w0), [v] "+Q" (v->counter) \ - : "r" (x1) \ - : __LL_SC_CLOBBERS , ##cl); \ + " add %w[i], %w[i], w30" \ + : [i] "+&r" (i), [v] "+Q" (v->counter) \ + : "r" (v) \ + : "x30", ##cl); \ \ - return w0; \ + return i; \ } ATOMIC_OP_SUB_RETURN(_relaxed, ) @@ -184,23 +132,16 @@ ATOMIC_OP_SUB_RETURN( , al, "memory") #undef ATOMIC_OP_SUB_RETURN #define ATOMIC_FETCH_OP_SUB(name, mb, cl...) 
\ -static inline int arch_atomic_fetch_sub##name(int i, atomic_t *v) \ +static inline int __lse_atomic_fetch_sub##name(int i, atomic_t *v) \ { \ - register int w0 asm ("w0") = i; \ - register atomic_t *x1 asm ("x1") = v; \ - \ - asm volatile(ARM64_LSE_ATOMIC_INSN( \ - /* LL/SC */ \ - __LL_SC_ATOMIC(fetch_sub##name) \ - __nops(1), \ - /* LSE atomics */ \ + asm volatile( \ " neg %w[i], %w[i]\n" \ - " ldadd" #mb " %w[i], %w[i], %[v]") \ - : [i] "+&r" (w0), [v] "+Q" (v->counter) \ - : "r" (x1) \ - : __LL_SC_CLOBBERS, ##cl); \ + " ldadd" #mb " %w[i], %w[i], %[v]" \ + : [i] "+&r" (i), [v] "+Q" (v->counter) \ + : "r" (v) \ + : cl); \ \ - return w0; \ + return i; \ } ATOMIC_FETCH_OP_SUB(_relaxed, ) @@ -209,20 +150,14 @@ ATOMIC_FETCH_OP_SUB(_release, l, "memory") ATOMIC_FETCH_OP_SUB( , al, "memory") #undef ATOMIC_FETCH_OP_SUB -#undef __LL_SC_ATOMIC -#define __LL_SC_ATOMIC64(op) __LL_SC_CALL(arch_atomic64_##op) #define ATOMIC64_OP(op, asm_op) \ -static inline void arch_atomic64_##op(s64 i, atomic64_t *v) \ +static inline void __lse_atomic64_##op(s64 i, atomic64_t *v) \ { \ - register s64 x0 asm ("x0") = i; \ - register atomic64_t *x1 asm ("x1") = v; \ - \ - asm volatile(ARM64_LSE_ATOMIC_INSN(__LL_SC_ATOMIC64(op), \ -" " #asm_op " %[i], %[v]\n") \ - : [i] "+r" (x0), [v] "+Q" (v->counter) \ - : "r" (x1) \ - : __LL_SC_CLOBBERS); \ + asm volatile( \ +" " #asm_op " %[i], %[v]\n" \ + : [i] "+r" (i), [v] "+Q" (v->counter) \ + : "r" (v)); \ } ATOMIC64_OP(andnot, stclr) @@ -233,21 +168,15 @@ ATOMIC64_OP(add, stadd) #undef ATOMIC64_OP #define ATOMIC64_FETCH_OP(name, mb, op, asm_op, cl...) \ -static inline s64 arch_atomic64_fetch_##op##name(s64 i, atomic64_t *v) \ +static inline long __lse_atomic64_fetch_##op##name(s64 i, atomic64_t *v)\ { \ - register s64 x0 asm ("x0") = i; \ - register atomic64_t *x1 asm ("x1") = v; \ - \ - asm volatile(ARM64_LSE_ATOMIC_INSN( \ - /* LL/SC */ \ - __LL_SC_ATOMIC64(fetch_##op##name), \ - /* LSE atomics */ \ -" " #asm_op #mb " %[i], %[i], %[v]") \ - : [i] "+r" (x0), [v] "+Q" (v->counter) \ - : "r" (x1) \ - : __LL_SC_CLOBBERS, ##cl); \ + asm volatile( \ +" " #asm_op #mb " %[i], %[i], %[v]" \ + : [i] "+r" (i), [v] "+Q" (v->counter) \ + : "r" (v) \ + : cl); \ \ - return x0; \ + return i; \ } #define ATOMIC64_FETCH_OPS(op, asm_op) \ @@ -265,23 +194,16 @@ ATOMIC64_FETCH_OPS(add, ldadd) #undef ATOMIC64_FETCH_OPS #define ATOMIC64_OP_ADD_RETURN(name, mb, cl...) 
\ -static inline s64 arch_atomic64_add_return##name(s64 i, atomic64_t *v) \ +static inline long __lse_atomic64_add_return##name(s64 i, atomic64_t *v)\ { \ - register s64 x0 asm ("x0") = i; \ - register atomic64_t *x1 asm ("x1") = v; \ - \ - asm volatile(ARM64_LSE_ATOMIC_INSN( \ - /* LL/SC */ \ - __LL_SC_ATOMIC64(add_return##name) \ - __nops(1), \ - /* LSE atomics */ \ + asm volatile( \ " ldadd" #mb " %[i], x30, %[v]\n" \ - " add %[i], %[i], x30") \ - : [i] "+r" (x0), [v] "+Q" (v->counter) \ - : "r" (x1) \ - : __LL_SC_CLOBBERS, ##cl); \ + " add %[i], %[i], x30" \ + : [i] "+r" (i), [v] "+Q" (v->counter) \ + : "r" (v) \ + : "x30", ##cl); \ \ - return x0; \ + return i; \ } ATOMIC64_OP_ADD_RETURN(_relaxed, ) @@ -291,41 +213,26 @@ ATOMIC64_OP_ADD_RETURN( , al, "memory") #undef ATOMIC64_OP_ADD_RETURN -static inline void arch_atomic64_and(s64 i, atomic64_t *v) +static inline void __lse_atomic64_and(s64 i, atomic64_t *v) { - register s64 x0 asm ("x0") = i; - register atomic64_t *x1 asm ("x1") = v; - - asm volatile(ARM64_LSE_ATOMIC_INSN( - /* LL/SC */ - __LL_SC_ATOMIC64(and) - __nops(1), - /* LSE atomics */ + asm volatile( " mvn %[i], %[i]\n" - " stclr %[i], %[v]") - : [i] "+&r" (x0), [v] "+Q" (v->counter) - : "r" (x1) - : __LL_SC_CLOBBERS); + " stclr %[i], %[v]" + : [i] "+&r" (i), [v] "+Q" (v->counter) + : "r" (v)); } #define ATOMIC64_FETCH_OP_AND(name, mb, cl...) \ -static inline s64 arch_atomic64_fetch_and##name(s64 i, atomic64_t *v) \ +static inline long __lse_atomic64_fetch_and##name(s64 i, atomic64_t *v) \ { \ - register s64 x0 asm ("x0") = i; \ - register atomic64_t *x1 asm ("x1") = v; \ - \ - asm volatile(ARM64_LSE_ATOMIC_INSN( \ - /* LL/SC */ \ - __LL_SC_ATOMIC64(fetch_and##name) \ - __nops(1), \ - /* LSE atomics */ \ + asm volatile( \ " mvn %[i], %[i]\n" \ - " ldclr" #mb " %[i], %[i], %[v]") \ - : [i] "+&r" (x0), [v] "+Q" (v->counter) \ - : "r" (x1) \ - : __LL_SC_CLOBBERS, ##cl); \ + " ldclr" #mb " %[i], %[i], %[v]" \ + : [i] "+&r" (i), [v] "+Q" (v->counter) \ + : "r" (v) \ + : cl); \ \ - return x0; \ + return i; \ } ATOMIC64_FETCH_OP_AND(_relaxed, ) @@ -335,42 +242,27 @@ ATOMIC64_FETCH_OP_AND( , al, "memory") #undef ATOMIC64_FETCH_OP_AND -static inline void arch_atomic64_sub(s64 i, atomic64_t *v) +static inline void __lse_atomic64_sub(s64 i, atomic64_t *v) { - register s64 x0 asm ("x0") = i; - register atomic64_t *x1 asm ("x1") = v; - - asm volatile(ARM64_LSE_ATOMIC_INSN( - /* LL/SC */ - __LL_SC_ATOMIC64(sub) - __nops(1), - /* LSE atomics */ + asm volatile( " neg %[i], %[i]\n" - " stadd %[i], %[v]") - : [i] "+&r" (x0), [v] "+Q" (v->counter) - : "r" (x1) - : __LL_SC_CLOBBERS); + " stadd %[i], %[v]" + : [i] "+&r" (i), [v] "+Q" (v->counter) + : "r" (v)); } #define ATOMIC64_OP_SUB_RETURN(name, mb, cl...) 
\ -static inline s64 arch_atomic64_sub_return##name(s64 i, atomic64_t *v) \ +static inline long __lse_atomic64_sub_return##name(s64 i, atomic64_t *v) \ { \ - register s64 x0 asm ("x0") = i; \ - register atomic64_t *x1 asm ("x1") = v; \ - \ - asm volatile(ARM64_LSE_ATOMIC_INSN( \ - /* LL/SC */ \ - __LL_SC_ATOMIC64(sub_return##name) \ - __nops(2), \ - /* LSE atomics */ \ + asm volatile( \ " neg %[i], %[i]\n" \ " ldadd" #mb " %[i], x30, %[v]\n" \ - " add %[i], %[i], x30") \ - : [i] "+&r" (x0), [v] "+Q" (v->counter) \ - : "r" (x1) \ - : __LL_SC_CLOBBERS, ##cl); \ + " add %[i], %[i], x30" \ + : [i] "+&r" (i), [v] "+Q" (v->counter) \ + : "r" (v) \ + : "x30", ##cl); \ \ - return x0; \ + return i; \ } ATOMIC64_OP_SUB_RETURN(_relaxed, ) @@ -381,23 +273,16 @@ ATOMIC64_OP_SUB_RETURN( , al, "memory") #undef ATOMIC64_OP_SUB_RETURN #define ATOMIC64_FETCH_OP_SUB(name, mb, cl...) \ -static inline s64 arch_atomic64_fetch_sub##name(s64 i, atomic64_t *v) \ +static inline long __lse_atomic64_fetch_sub##name(s64 i, atomic64_t *v) \ { \ - register s64 x0 asm ("x0") = i; \ - register atomic64_t *x1 asm ("x1") = v; \ - \ - asm volatile(ARM64_LSE_ATOMIC_INSN( \ - /* LL/SC */ \ - __LL_SC_ATOMIC64(fetch_sub##name) \ - __nops(1), \ - /* LSE atomics */ \ + asm volatile( \ " neg %[i], %[i]\n" \ - " ldadd" #mb " %[i], %[i], %[v]") \ - : [i] "+&r" (x0), [v] "+Q" (v->counter) \ - : "r" (x1) \ - : __LL_SC_CLOBBERS, ##cl); \ + " ldadd" #mb " %[i], %[i], %[v]" \ + : [i] "+&r" (i), [v] "+Q" (v->counter) \ + : "r" (v) \ + : cl); \ \ - return x0; \ + return i; \ } ATOMIC64_FETCH_OP_SUB(_relaxed, ) @@ -407,15 +292,9 @@ ATOMIC64_FETCH_OP_SUB( , al, "memory") #undef ATOMIC64_FETCH_OP_SUB -static inline s64 arch_atomic64_dec_if_positive(atomic64_t *v) +static inline s64 __lse_atomic64_dec_if_positive(atomic64_t *v) { - register long x0 asm ("x0") = (long)v; - - asm volatile(ARM64_LSE_ATOMIC_INSN( - /* LL/SC */ - __LL_SC_ATOMIC64(dec_if_positive) - __nops(6), - /* LSE atomics */ + asm volatile( "1: ldr x30, %[v]\n" " subs %[ret], x30, #1\n" " b.lt 2f\n" @@ -423,20 +302,16 @@ static inline s64 arch_atomic64_dec_if_positive(atomic64_t *v) " sub x30, x30, #1\n" " sub x30, x30, %[ret]\n" " cbnz x30, 1b\n" - "2:") - : [ret] "+&r" (x0), [v] "+Q" (v->counter) + "2:" + : [ret] "+&r" (v), [v] "+Q" (v->counter) : - : __LL_SC_CLOBBERS, "cc", "memory"); + : "x30", "cc", "memory"); - return x0; + return (long)v; } -#undef __LL_SC_ATOMIC64 - -#define __LL_SC_CMPXCHG(op) __LL_SC_CALL(__cmpxchg_case_##op) - #define __CMPXCHG_CASE(w, sfx, name, sz, mb, cl...) 
\ -static inline u##sz __cmpxchg_case_##name##sz(volatile void *ptr, \ +static inline u##sz __lse__cmpxchg_case_##name##sz(volatile void *ptr, \ u##sz old, \ u##sz new) \ { \ @@ -444,17 +319,13 @@ static inline u##sz __cmpxchg_case_##name##sz(volatile void *ptr, \ register u##sz x1 asm ("x1") = old; \ register u##sz x2 asm ("x2") = new; \ \ - asm volatile(ARM64_LSE_ATOMIC_INSN( \ - /* LL/SC */ \ - __LL_SC_CMPXCHG(name##sz) \ - __nops(2), \ - /* LSE atomics */ \ + asm volatile( \ " mov " #w "30, %" #w "[old]\n" \ " cas" #mb #sfx "\t" #w "30, %" #w "[new], %[v]\n" \ - " mov %" #w "[ret], " #w "30") \ + " mov %" #w "[ret], " #w "30" \ : [ret] "+r" (x0), [v] "+Q" (*(unsigned long *)ptr) \ : [old] "r" (x1), [new] "r" (x2) \ - : __LL_SC_CLOBBERS, ##cl); \ + : "x30", ##cl); \ \ return x0; \ } @@ -476,13 +347,10 @@ __CMPXCHG_CASE(w, h, mb_, 16, al, "memory") __CMPXCHG_CASE(w, , mb_, 32, al, "memory") __CMPXCHG_CASE(x, , mb_, 64, al, "memory") -#undef __LL_SC_CMPXCHG #undef __CMPXCHG_CASE -#define __LL_SC_CMPXCHG_DBL(op) __LL_SC_CALL(__cmpxchg_double##op) - #define __CMPXCHG_DBL(name, mb, cl...) \ -static inline long __cmpxchg_double##name(unsigned long old1, \ +static inline long __lse__cmpxchg_double##name(unsigned long old1, \ unsigned long old2, \ unsigned long new1, \ unsigned long new2, \ @@ -496,20 +364,16 @@ static inline long __cmpxchg_double##name(unsigned long old1, \ register unsigned long x3 asm ("x3") = new2; \ register unsigned long x4 asm ("x4") = (unsigned long)ptr; \ \ - asm volatile(ARM64_LSE_ATOMIC_INSN( \ - /* LL/SC */ \ - __LL_SC_CMPXCHG_DBL(name) \ - __nops(3), \ - /* LSE atomics */ \ + asm volatile( \ " casp" #mb "\t%[old1], %[old2], %[new1], %[new2], %[v]\n"\ " eor %[old1], %[old1], %[oldval1]\n" \ " eor %[old2], %[old2], %[oldval2]\n" \ - " orr %[old1], %[old1], %[old2]") \ + " orr %[old1], %[old1], %[old2]" \ : [old1] "+&r" (x0), [old2] "+&r" (x1), \ [v] "+Q" (*(unsigned long *)ptr) \ : [new1] "r" (x2), [new2] "r" (x3), [ptr] "r" (x4), \ [oldval1] "r" (oldval1), [oldval2] "r" (oldval2) \ - : __LL_SC_CLOBBERS, ##cl); \ + : cl); \ \ return x0; \ } @@ -517,7 +381,6 @@ static inline long __cmpxchg_double##name(unsigned long old1, \ __CMPXCHG_DBL( , ) __CMPXCHG_DBL(_mb, al, "memory") -#undef __LL_SC_CMPXCHG_DBL #undef __CMPXCHG_DBL #endif /* __ASM_ATOMIC_LSE_H */ diff --git a/arch/arm64/include/asm/cmpxchg.h b/arch/arm64/include/asm/cmpxchg.h index 7a299a20f6dc..e5fff8cd4904 100644 --- a/arch/arm64/include/asm/cmpxchg.h +++ b/arch/arm64/include/asm/cmpxchg.h @@ -10,7 +10,7 @@ #include #include -#include +#include #include #include diff --git a/arch/arm64/include/asm/lse.h b/arch/arm64/include/asm/lse.h index 8262325e2fc6..52b80846d1b7 100644 --- a/arch/arm64/include/asm/lse.h +++ b/arch/arm64/include/asm/lse.h @@ -22,14 +22,6 @@ __asm__(".arch_extension lse"); -/* Move the ll/sc atomics out-of-line */ -#define __LL_SC_INLINE notrace -#define __LL_SC_PREFIX(x) __ll_sc_##x -#define __LL_SC_EXPORT(x) EXPORT_SYMBOL(__LL_SC_PREFIX(x)) - -/* Macro for constructing calls to out-of-line ll/sc atomics */ -#define __LL_SC_CALL(op) "bl\t" __stringify(__LL_SC_PREFIX(op)) "\n" -#define __LL_SC_CLOBBERS "x16", "x17", "x30" /* In-line patching at runtime */ #define ARM64_LSE_ATOMIC_INSN(llsc, lse) \ @@ -46,9 +38,6 @@ __asm__(".arch_extension lse"); #else /* __ASSEMBLER__ */ -#define __LL_SC_INLINE static inline -#define __LL_SC_PREFIX(x) x -#define __LL_SC_EXPORT(x) #define ARM64_LSE_ATOMIC_INSN(llsc, lse) llsc -- cgit v1.2.3-58-ga151 From 3337cb5aea594e4090a660e3fc3250bb669b1305 Mon Sep 17 
00:00:00 2001 From: Andrew Murray Date: Wed, 28 Aug 2019 18:50:08 +0100 Subject: arm64: avoid using hard-coded registers for LSE atomics Now that we have removed the out-of-line ll/sc atomics we can give the compiler the freedom to choose its own register allocation. Remove the hard-coded use of x30. Signed-off-by: Andrew Murray Signed-off-by: Will Deacon --- arch/arm64/include/asm/atomic_lse.h | 70 ++++++++++++++++++++++--------------- 1 file changed, 41 insertions(+), 29 deletions(-) diff --git a/arch/arm64/include/asm/atomic_lse.h b/arch/arm64/include/asm/atomic_lse.h index 7dce5e1f074e..c6bd87d2915b 100644 --- a/arch/arm64/include/asm/atomic_lse.h +++ b/arch/arm64/include/asm/atomic_lse.h @@ -55,12 +55,14 @@ ATOMIC_FETCH_OPS(add, ldadd) #define ATOMIC_OP_ADD_RETURN(name, mb, cl...) \ static inline int __lse_atomic_add_return##name(int i, atomic_t *v) \ { \ + u32 tmp; \ + \ asm volatile( \ - " ldadd" #mb " %w[i], w30, %[v]\n" \ - " add %w[i], %w[i], w30" \ - : [i] "+r" (i), [v] "+Q" (v->counter) \ + " ldadd" #mb " %w[i], %w[tmp], %[v]\n" \ + " add %w[i], %w[i], %w[tmp]" \ + : [i] "+r" (i), [v] "+Q" (v->counter), [tmp] "=&r" (tmp) \ : "r" (v) \ - : "x30", ##cl); \ + : cl); \ \ return i; \ } @@ -113,13 +115,15 @@ static inline void __lse_atomic_sub(int i, atomic_t *v) #define ATOMIC_OP_SUB_RETURN(name, mb, cl...) \ static inline int __lse_atomic_sub_return##name(int i, atomic_t *v) \ { \ + u32 tmp; \ + \ asm volatile( \ " neg %w[i], %w[i]\n" \ - " ldadd" #mb " %w[i], w30, %[v]\n" \ - " add %w[i], %w[i], w30" \ - : [i] "+&r" (i), [v] "+Q" (v->counter) \ + " ldadd" #mb " %w[i], %w[tmp], %[v]\n" \ + " add %w[i], %w[i], %w[tmp]" \ + : [i] "+&r" (i), [v] "+Q" (v->counter), [tmp] "=&r" (tmp) \ : "r" (v) \ - : "x30", ##cl); \ + : cl); \ \ return i; \ } @@ -196,12 +200,14 @@ ATOMIC64_FETCH_OPS(add, ldadd) #define ATOMIC64_OP_ADD_RETURN(name, mb, cl...) \ static inline long __lse_atomic64_add_return##name(s64 i, atomic64_t *v)\ { \ + unsigned long tmp; \ + \ asm volatile( \ - " ldadd" #mb " %[i], x30, %[v]\n" \ - " add %[i], %[i], x30" \ - : [i] "+r" (i), [v] "+Q" (v->counter) \ + " ldadd" #mb " %[i], %x[tmp], %[v]\n" \ + " add %[i], %[i], %x[tmp]" \ + : [i] "+r" (i), [v] "+Q" (v->counter), [tmp] "=&r" (tmp) \ : "r" (v) \ - : "x30", ##cl); \ + : cl); \ \ return i; \ } @@ -254,13 +260,15 @@ static inline void __lse_atomic64_sub(s64 i, atomic64_t *v) #define ATOMIC64_OP_SUB_RETURN(name, mb, cl...) 
\ static inline long __lse_atomic64_sub_return##name(s64 i, atomic64_t *v) \ { \ + unsigned long tmp; \ + \ asm volatile( \ " neg %[i], %[i]\n" \ - " ldadd" #mb " %[i], x30, %[v]\n" \ - " add %[i], %[i], x30" \ - : [i] "+&r" (i), [v] "+Q" (v->counter) \ + " ldadd" #mb " %[i], %x[tmp], %[v]\n" \ + " add %[i], %[i], %x[tmp]" \ + : [i] "+&r" (i), [v] "+Q" (v->counter), [tmp] "=&r" (tmp) \ : "r" (v) \ - : "x30", ##cl); \ + : cl); \ \ return i; \ } @@ -294,18 +302,20 @@ ATOMIC64_FETCH_OP_SUB( , al, "memory") static inline s64 __lse_atomic64_dec_if_positive(atomic64_t *v) { + unsigned long tmp; + asm volatile( - "1: ldr x30, %[v]\n" - " subs %[ret], x30, #1\n" + "1: ldr %x[tmp], %[v]\n" + " subs %[ret], %x[tmp], #1\n" " b.lt 2f\n" - " casal x30, %[ret], %[v]\n" - " sub x30, x30, #1\n" - " sub x30, x30, %[ret]\n" - " cbnz x30, 1b\n" + " casal %x[tmp], %[ret], %[v]\n" + " sub %x[tmp], %x[tmp], #1\n" + " sub %x[tmp], %x[tmp], %[ret]\n" + " cbnz %x[tmp], 1b\n" "2:" - : [ret] "+&r" (v), [v] "+Q" (v->counter) + : [ret] "+&r" (v), [v] "+Q" (v->counter), [tmp] "=&r" (tmp) : - : "x30", "cc", "memory"); + : "cc", "memory"); return (long)v; } @@ -318,14 +328,16 @@ static inline u##sz __lse__cmpxchg_case_##name##sz(volatile void *ptr, \ register unsigned long x0 asm ("x0") = (unsigned long)ptr; \ register u##sz x1 asm ("x1") = old; \ register u##sz x2 asm ("x2") = new; \ + unsigned long tmp; \ \ asm volatile( \ - " mov " #w "30, %" #w "[old]\n" \ - " cas" #mb #sfx "\t" #w "30, %" #w "[new], %[v]\n" \ - " mov %" #w "[ret], " #w "30" \ - : [ret] "+r" (x0), [v] "+Q" (*(unsigned long *)ptr) \ + " mov %" #w "[tmp], %" #w "[old]\n" \ + " cas" #mb #sfx "\t%" #w "[tmp], %" #w "[new], %[v]\n" \ + " mov %" #w "[ret], %" #w "[tmp]" \ + : [ret] "+r" (x0), [v] "+Q" (*(unsigned long *)ptr), \ + [tmp] "=&r" (tmp) \ : [old] "r" (x1), [new] "r" (x2) \ - : "x30", ##cl); \ + : cl); \ \ return x0; \ } -- cgit v1.2.3-58-ga151 From eb3aabbfbfc203082d06a64517df97a3746ba9ea Mon Sep 17 00:00:00 2001 From: Andrew Murray Date: Wed, 28 Aug 2019 18:50:09 +0100 Subject: arm64: atomics: Remove atomic_ll_sc compilation unit We no longer fall back to out-of-line atomics on systems with CONFIG_ARM64_LSE_ATOMICS where ARM64_HAS_LSE_ATOMICS is not set. Remove the unused compilation unit which provided these symbols. Signed-off-by: Andrew Murray Signed-off-by: Will Deacon --- arch/arm64/lib/Makefile | 19 ------------------- arch/arm64/lib/atomic_ll_sc.c | 3 --- 2 files changed, 22 deletions(-) delete mode 100644 arch/arm64/lib/atomic_ll_sc.c diff --git a/arch/arm64/lib/Makefile b/arch/arm64/lib/Makefile index 33c2a4abda04..f10809ef1690 100644 --- a/arch/arm64/lib/Makefile +++ b/arch/arm64/lib/Makefile @@ -11,25 +11,6 @@ CFLAGS_REMOVE_xor-neon.o += -mgeneral-regs-only CFLAGS_xor-neon.o += -ffreestanding endif -# Tell the compiler to treat all general purpose registers (with the -# exception of the IP registers, which are already handled by the caller -# in case of a PLT) as callee-saved, which allows for efficient runtime -# patching of the bl instruction in the caller with an atomic instruction -# when supported by the CPU. Result and argument registers are handled -# correctly, based on the function prototype. 
-lib-$(CONFIG_ARM64_LSE_ATOMICS) += atomic_ll_sc.o -CFLAGS_atomic_ll_sc.o := -ffixed-x1 -ffixed-x2 \ - -ffixed-x3 -ffixed-x4 -ffixed-x5 -ffixed-x6 \ - -ffixed-x7 -fcall-saved-x8 -fcall-saved-x9 \ - -fcall-saved-x10 -fcall-saved-x11 -fcall-saved-x12 \ - -fcall-saved-x13 -fcall-saved-x14 -fcall-saved-x15 \ - -fcall-saved-x18 -fomit-frame-pointer -CFLAGS_REMOVE_atomic_ll_sc.o := $(CC_FLAGS_FTRACE) -GCOV_PROFILE_atomic_ll_sc.o := n -KASAN_SANITIZE_atomic_ll_sc.o := n -KCOV_INSTRUMENT_atomic_ll_sc.o := n -UBSAN_SANITIZE_atomic_ll_sc.o := n - lib-$(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE) += uaccess_flushcache.o obj-$(CONFIG_CRC32) += crc32.o diff --git a/arch/arm64/lib/atomic_ll_sc.c b/arch/arm64/lib/atomic_ll_sc.c deleted file mode 100644 index b0c538b0da28..000000000000 --- a/arch/arm64/lib/atomic_ll_sc.c +++ /dev/null @@ -1,3 +0,0 @@ -#include -#define __ARM64_IN_ATOMIC_IMPL -#include -- cgit v1.2.3-58-ga151 From 0ca98b2456fbd8a465098fe3735ae2c7645a76e8 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Thu, 29 Aug 2019 11:22:30 +0100 Subject: arm64: lse: Remove unused 'alt_lse' assembly macro The 'alt_lse' assembly macro has been unused since 7c8fc35dfc32 ("locking/atomics/arm64: Replace our atomic/lock bitop implementations with asm-generic"). Remove it. Reviewed-by: Andrew Murray Signed-off-by: Will Deacon --- arch/arm64/include/asm/lse.h | 22 ---------------------- 1 file changed, 22 deletions(-) diff --git a/arch/arm64/include/asm/lse.h b/arch/arm64/include/asm/lse.h index 52b80846d1b7..08e818e53ed7 100644 --- a/arch/arm64/include/asm/lse.h +++ b/arch/arm64/include/asm/lse.h @@ -10,37 +10,15 @@ #include #include -#ifdef __ASSEMBLER__ - -.arch_extension lse - -.macro alt_lse, llsc, lse - alternative_insn "\llsc", "\lse", ARM64_HAS_LSE_ATOMICS -.endm - -#else /* __ASSEMBLER__ */ - __asm__(".arch_extension lse"); - /* In-line patching at runtime */ #define ARM64_LSE_ATOMIC_INSN(llsc, lse) \ ALTERNATIVE(llsc, lse, ARM64_HAS_LSE_ATOMICS) -#endif /* __ASSEMBLER__ */ #else /* CONFIG_AS_LSE && CONFIG_ARM64_LSE_ATOMICS */ -#ifdef __ASSEMBLER__ - -.macro alt_lse, llsc, lse - \llsc -.endm - -#else /* __ASSEMBLER__ */ - - #define ARM64_LSE_ATOMIC_INSN(llsc, lse) llsc -#endif /* __ASSEMBLER__ */ #endif /* CONFIG_AS_LSE && CONFIG_ARM64_LSE_ATOMICS */ #endif /* __ASM_LSE_H */ -- cgit v1.2.3-58-ga151 From 0533f97b4356bfa8af5d4758c6c3fe703bb010d9 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Thu, 29 Aug 2019 11:49:10 +0100 Subject: arm64: asm: Kill 'asm/atomic_arch.h' The contents of 'asm/atomic_arch.h' can be split across some of our other 'asm/' headers. Remove it. 
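The heart of what is being shuffled between headers is the __lse_ll_sc_body() dispatch. A minimal stand-alone sketch of that shape, with the static key replaced by an ordinary boolean so it compiles on any host, and with all example_* names invented for illustration, looks like this:

  #include <stdatomic.h>
  #include <stdbool.h>

  /*
   * Stand-alone illustration of the __lse_ll_sc_body() dispatch shape:
   * a single wrapper chooses between two same-signature implementations
   * based on a runtime predicate.  In the kernel the predicate is a
   * static key (a patched branch); here it is an ordinary boolean.
   */
  static bool example_cpu_has_lse;	/* stand-in for ARM64_HAS_LSE_ATOMICS */

  static int example_add_return_lse(int i, atomic_int *v)
  {
  	return atomic_fetch_add(v, i) + i;	/* would be an LSE ldadd + add */
  }

  static int example_add_return_llsc(int i, atomic_int *v)
  {
  	return atomic_fetch_add(v, i) + i;	/* would be an ldxr/stxr loop */
  }

  #define example_lse_ll_sc_body(op, ...)				\
  	(example_cpu_has_lse ? example_##op##_lse(__VA_ARGS__)		\
  			     : example_##op##_llsc(__VA_ARGS__))

  static int example_arch_atomic_add_return(int i, atomic_int *v)
  {
  	return example_lse_ll_sc_body(add_return, i, v);
  }

In the kernel the boolean becomes a patched static branch, so the implementation that is not selected costs only the out-of-line fallback path described earlier in this series.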
Reviewed-by: Andrew Murray Signed-off-by: Will Deacon --- arch/arm64/include/asm/atomic.h | 77 ++++++++++++++++- arch/arm64/include/asm/atomic_arch.h | 155 ----------------------------------- arch/arm64/include/asm/cmpxchg.h | 41 ++++++++- arch/arm64/include/asm/lse.h | 24 ++++++ 4 files changed, 140 insertions(+), 157 deletions(-) delete mode 100644 arch/arm64/include/asm/atomic_arch.h diff --git a/arch/arm64/include/asm/atomic.h b/arch/arm64/include/asm/atomic.h index c70d3f389d29..7c334337674d 100644 --- a/arch/arm64/include/asm/atomic.h +++ b/arch/arm64/include/asm/atomic.h @@ -17,9 +17,84 @@ #ifdef __KERNEL__ -#include #include +#define ATOMIC_OP(op) \ +static inline void arch_##op(int i, atomic_t *v) \ +{ \ + __lse_ll_sc_body(op, i, v); \ +} + +ATOMIC_OP(atomic_andnot) +ATOMIC_OP(atomic_or) +ATOMIC_OP(atomic_xor) +ATOMIC_OP(atomic_add) +ATOMIC_OP(atomic_and) +ATOMIC_OP(atomic_sub) + + +#define ATOMIC_FETCH_OP(name, op) \ +static inline int arch_##op##name(int i, atomic_t *v) \ +{ \ + return __lse_ll_sc_body(op##name, i, v); \ +} + +#define ATOMIC_FETCH_OPS(op) \ + ATOMIC_FETCH_OP(_relaxed, op) \ + ATOMIC_FETCH_OP(_acquire, op) \ + ATOMIC_FETCH_OP(_release, op) \ + ATOMIC_FETCH_OP( , op) + +ATOMIC_FETCH_OPS(atomic_fetch_andnot) +ATOMIC_FETCH_OPS(atomic_fetch_or) +ATOMIC_FETCH_OPS(atomic_fetch_xor) +ATOMIC_FETCH_OPS(atomic_fetch_add) +ATOMIC_FETCH_OPS(atomic_fetch_and) +ATOMIC_FETCH_OPS(atomic_fetch_sub) +ATOMIC_FETCH_OPS(atomic_add_return) +ATOMIC_FETCH_OPS(atomic_sub_return) + + +#define ATOMIC64_OP(op) \ +static inline void arch_##op(long i, atomic64_t *v) \ +{ \ + __lse_ll_sc_body(op, i, v); \ +} + +ATOMIC64_OP(atomic64_andnot) +ATOMIC64_OP(atomic64_or) +ATOMIC64_OP(atomic64_xor) +ATOMIC64_OP(atomic64_add) +ATOMIC64_OP(atomic64_and) +ATOMIC64_OP(atomic64_sub) + + +#define ATOMIC64_FETCH_OP(name, op) \ +static inline long arch_##op##name(long i, atomic64_t *v) \ +{ \ + return __lse_ll_sc_body(op##name, i, v); \ +} + +#define ATOMIC64_FETCH_OPS(op) \ + ATOMIC64_FETCH_OP(_relaxed, op) \ + ATOMIC64_FETCH_OP(_acquire, op) \ + ATOMIC64_FETCH_OP(_release, op) \ + ATOMIC64_FETCH_OP( , op) + +ATOMIC64_FETCH_OPS(atomic64_fetch_andnot) +ATOMIC64_FETCH_OPS(atomic64_fetch_or) +ATOMIC64_FETCH_OPS(atomic64_fetch_xor) +ATOMIC64_FETCH_OPS(atomic64_fetch_add) +ATOMIC64_FETCH_OPS(atomic64_fetch_and) +ATOMIC64_FETCH_OPS(atomic64_fetch_sub) +ATOMIC64_FETCH_OPS(atomic64_add_return) +ATOMIC64_FETCH_OPS(atomic64_sub_return) + +static inline long arch_atomic64_dec_if_positive(atomic64_t *v) +{ + return __lse_ll_sc_body(atomic64_dec_if_positive, v); +} + #define ATOMIC_INIT(i) { (i) } #define arch_atomic_read(v) READ_ONCE((v)->counter) diff --git a/arch/arm64/include/asm/atomic_arch.h b/arch/arm64/include/asm/atomic_arch.h deleted file mode 100644 index 1aac7fc65084..000000000000 --- a/arch/arm64/include/asm/atomic_arch.h +++ /dev/null @@ -1,155 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * Selection between LSE and LL/SC atomics. - * - * Copyright (C) 2018 ARM Ltd. 
- * Author: Andrew Murray - */ - -#ifndef __ASM_ATOMIC_ARCH_H -#define __ASM_ATOMIC_ARCH_H - - -#include - -#include -#include -#include - -extern struct static_key_false cpu_hwcap_keys[ARM64_NCAPS]; -extern struct static_key_false arm64_const_caps_ready; - -static inline bool system_uses_lse_atomics(void) -{ - return (IS_ENABLED(CONFIG_ARM64_LSE_ATOMICS) && - IS_ENABLED(CONFIG_AS_LSE) && - static_branch_likely(&arm64_const_caps_ready)) && - static_branch_likely(&cpu_hwcap_keys[ARM64_HAS_LSE_ATOMICS]); -} - -#define __lse_ll_sc_body(op, ...) \ -({ \ - system_uses_lse_atomics() ? \ - __lse_##op(__VA_ARGS__) : \ - __ll_sc_##op(__VA_ARGS__); \ -}) - -#define ATOMIC_OP(op) \ -static inline void arch_##op(int i, atomic_t *v) \ -{ \ - __lse_ll_sc_body(op, i, v); \ -} - -ATOMIC_OP(atomic_andnot) -ATOMIC_OP(atomic_or) -ATOMIC_OP(atomic_xor) -ATOMIC_OP(atomic_add) -ATOMIC_OP(atomic_and) -ATOMIC_OP(atomic_sub) - - -#define ATOMIC_FETCH_OP(name, op) \ -static inline int arch_##op##name(int i, atomic_t *v) \ -{ \ - return __lse_ll_sc_body(op##name, i, v); \ -} - -#define ATOMIC_FETCH_OPS(op) \ - ATOMIC_FETCH_OP(_relaxed, op) \ - ATOMIC_FETCH_OP(_acquire, op) \ - ATOMIC_FETCH_OP(_release, op) \ - ATOMIC_FETCH_OP( , op) - -ATOMIC_FETCH_OPS(atomic_fetch_andnot) -ATOMIC_FETCH_OPS(atomic_fetch_or) -ATOMIC_FETCH_OPS(atomic_fetch_xor) -ATOMIC_FETCH_OPS(atomic_fetch_add) -ATOMIC_FETCH_OPS(atomic_fetch_and) -ATOMIC_FETCH_OPS(atomic_fetch_sub) -ATOMIC_FETCH_OPS(atomic_add_return) -ATOMIC_FETCH_OPS(atomic_sub_return) - - -#define ATOMIC64_OP(op) \ -static inline void arch_##op(long i, atomic64_t *v) \ -{ \ - __lse_ll_sc_body(op, i, v); \ -} - -ATOMIC64_OP(atomic64_andnot) -ATOMIC64_OP(atomic64_or) -ATOMIC64_OP(atomic64_xor) -ATOMIC64_OP(atomic64_add) -ATOMIC64_OP(atomic64_and) -ATOMIC64_OP(atomic64_sub) - - -#define ATOMIC64_FETCH_OP(name, op) \ -static inline long arch_##op##name(long i, atomic64_t *v) \ -{ \ - return __lse_ll_sc_body(op##name, i, v); \ -} - -#define ATOMIC64_FETCH_OPS(op) \ - ATOMIC64_FETCH_OP(_relaxed, op) \ - ATOMIC64_FETCH_OP(_acquire, op) \ - ATOMIC64_FETCH_OP(_release, op) \ - ATOMIC64_FETCH_OP( , op) - -ATOMIC64_FETCH_OPS(atomic64_fetch_andnot) -ATOMIC64_FETCH_OPS(atomic64_fetch_or) -ATOMIC64_FETCH_OPS(atomic64_fetch_xor) -ATOMIC64_FETCH_OPS(atomic64_fetch_add) -ATOMIC64_FETCH_OPS(atomic64_fetch_and) -ATOMIC64_FETCH_OPS(atomic64_fetch_sub) -ATOMIC64_FETCH_OPS(atomic64_add_return) -ATOMIC64_FETCH_OPS(atomic64_sub_return) - - -static inline long arch_atomic64_dec_if_positive(atomic64_t *v) -{ - return __lse_ll_sc_body(atomic64_dec_if_positive, v); -} - -#define __CMPXCHG_CASE(name, sz) \ -static inline u##sz __cmpxchg_case_##name##sz(volatile void *ptr, \ - u##sz old, \ - u##sz new) \ -{ \ - return __lse_ll_sc_body(_cmpxchg_case_##name##sz, \ - ptr, old, new); \ -} - -__CMPXCHG_CASE( , 8) -__CMPXCHG_CASE( , 16) -__CMPXCHG_CASE( , 32) -__CMPXCHG_CASE( , 64) -__CMPXCHG_CASE(acq_, 8) -__CMPXCHG_CASE(acq_, 16) -__CMPXCHG_CASE(acq_, 32) -__CMPXCHG_CASE(acq_, 64) -__CMPXCHG_CASE(rel_, 8) -__CMPXCHG_CASE(rel_, 16) -__CMPXCHG_CASE(rel_, 32) -__CMPXCHG_CASE(rel_, 64) -__CMPXCHG_CASE(mb_, 8) -__CMPXCHG_CASE(mb_, 16) -__CMPXCHG_CASE(mb_, 32) -__CMPXCHG_CASE(mb_, 64) - - -#define __CMPXCHG_DBL(name) \ -static inline long __cmpxchg_double##name(unsigned long old1, \ - unsigned long old2, \ - unsigned long new1, \ - unsigned long new2, \ - volatile void *ptr) \ -{ \ - return __lse_ll_sc_body(_cmpxchg_double##name, \ - old1, old2, new1, new2, ptr); \ -} - -__CMPXCHG_DBL( ) -__CMPXCHG_DBL(_mb) - -#endif 
/* __ASM_ATOMIC_LSE_H */ diff --git a/arch/arm64/include/asm/cmpxchg.h b/arch/arm64/include/asm/cmpxchg.h index e5fff8cd4904..afaba73e0b2c 100644 --- a/arch/arm64/include/asm/cmpxchg.h +++ b/arch/arm64/include/asm/cmpxchg.h @@ -10,7 +10,6 @@ #include #include -#include #include #include @@ -104,6 +103,46 @@ __XCHG_GEN(_mb) #define arch_xchg_release(...) __xchg_wrapper(_rel, __VA_ARGS__) #define arch_xchg(...) __xchg_wrapper( _mb, __VA_ARGS__) +#define __CMPXCHG_CASE(name, sz) \ +static inline u##sz __cmpxchg_case_##name##sz(volatile void *ptr, \ + u##sz old, \ + u##sz new) \ +{ \ + return __lse_ll_sc_body(_cmpxchg_case_##name##sz, \ + ptr, old, new); \ +} + +__CMPXCHG_CASE( , 8) +__CMPXCHG_CASE( , 16) +__CMPXCHG_CASE( , 32) +__CMPXCHG_CASE( , 64) +__CMPXCHG_CASE(acq_, 8) +__CMPXCHG_CASE(acq_, 16) +__CMPXCHG_CASE(acq_, 32) +__CMPXCHG_CASE(acq_, 64) +__CMPXCHG_CASE(rel_, 8) +__CMPXCHG_CASE(rel_, 16) +__CMPXCHG_CASE(rel_, 32) +__CMPXCHG_CASE(rel_, 64) +__CMPXCHG_CASE(mb_, 8) +__CMPXCHG_CASE(mb_, 16) +__CMPXCHG_CASE(mb_, 32) +__CMPXCHG_CASE(mb_, 64) + +#define __CMPXCHG_DBL(name) \ +static inline long __cmpxchg_double##name(unsigned long old1, \ + unsigned long old2, \ + unsigned long new1, \ + unsigned long new2, \ + volatile void *ptr) \ +{ \ + return __lse_ll_sc_body(_cmpxchg_double##name, \ + old1, old2, new1, new2, ptr); \ +} + +__CMPXCHG_DBL( ) +__CMPXCHG_DBL(_mb) + #define __CMPXCHG_GEN(sfx) \ static inline unsigned long __cmpxchg##sfx(volatile void *ptr, \ unsigned long old, \ diff --git a/arch/arm64/include/asm/lse.h b/arch/arm64/include/asm/lse.h index 08e818e53ed7..80b388278149 100644 --- a/arch/arm64/include/asm/lse.h +++ b/arch/arm64/include/asm/lse.h @@ -2,22 +2,46 @@ #ifndef __ASM_LSE_H #define __ASM_LSE_H +#include + #if defined(CONFIG_AS_LSE) && defined(CONFIG_ARM64_LSE_ATOMICS) #include #include +#include #include #include +#include #include __asm__(".arch_extension lse"); +extern struct static_key_false cpu_hwcap_keys[ARM64_NCAPS]; +extern struct static_key_false arm64_const_caps_ready; + +static inline bool system_uses_lse_atomics(void) +{ + return (static_branch_likely(&arm64_const_caps_ready)) && + static_branch_likely(&cpu_hwcap_keys[ARM64_HAS_LSE_ATOMICS]); +} + +#define __lse_ll_sc_body(op, ...) \ +({ \ + system_uses_lse_atomics() ? \ + __lse_##op(__VA_ARGS__) : \ + __ll_sc_##op(__VA_ARGS__); \ +}) + /* In-line patching at runtime */ #define ARM64_LSE_ATOMIC_INSN(llsc, lse) \ ALTERNATIVE(llsc, lse, ARM64_HAS_LSE_ATOMICS) #else /* CONFIG_AS_LSE && CONFIG_ARM64_LSE_ATOMICS */ +static inline bool system_uses_lse_atomics(void) { return false; } + +#define __lse_ll_sc_body(op, ...) __ll_sc_##op(__VA_ARGS__) + #define ARM64_LSE_ATOMIC_INSN(llsc, lse) llsc #endif /* CONFIG_AS_LSE && CONFIG_ARM64_LSE_ATOMICS */ -- cgit v1.2.3-58-ga151 From b32baf91f60fb9c7010bff87e68132f2ce31d9a8 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Thu, 29 Aug 2019 11:52:47 +0100 Subject: arm64: lse: Make ARM64_LSE_ATOMICS depend on JUMP_LABEL MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Support for LSE atomic instructions (CONFIG_ARM64_LSE_ATOMICS) relies on a static key to select between the legacy LL/SC implementation which is available on all arm64 CPUs and the super-duper LSE implementation which is available on CPUs implementing v8.1 and later. Unfortunately, when building a kernel with CONFIG_JUMP_LABEL disabled (e.g. because the toolchain doesn't support 'asm goto'), the static key inside the atomics code tries to use atomics itself. 
This results in a mess of circular includes and a build failure: In file included from ./arch/arm64/include/asm/lse.h:11, from ./arch/arm64/include/asm/atomic.h:16, from ./include/linux/atomic.h:7, from ./include/asm-generic/bitops/atomic.h:5, from ./arch/arm64/include/asm/bitops.h:26, from ./include/linux/bitops.h:19, from ./include/linux/kernel.h:12, from ./include/asm-generic/bug.h:18, from ./arch/arm64/include/asm/bug.h:26, from ./include/linux/bug.h:5, from ./include/linux/page-flags.h:10, from kernel/bounds.c:10: ./include/linux/jump_label.h: In function ‘static_key_count’: ./include/linux/jump_label.h:254:9: error: implicit declaration of function ‘atomic_read’ [-Werror=implicit-function-declaration] return atomic_read(&key->enabled); ^~~~~~~~~~~ [ ... more of the same ... ] Since LSE atomic instructions are not critical to the operation of the kernel, make them depend on JUMP_LABEL at compile time. Reviewed-by: Andrew Murray Signed-off-by: Will Deacon --- arch/arm64/Kconfig | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 3adcec05b1f6..27405ac94228 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -1263,6 +1263,7 @@ config ARM64_PAN config ARM64_LSE_ATOMICS bool "Atomic instructions" + depends on JUMP_LABEL default y help As part of the Large System Extensions, ARMv8.1 introduces new -- cgit v1.2.3-58-ga151 From 5aad6cdabbf91fd330bd216fe3c93d90f78bc7e7 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Thu, 29 Aug 2019 14:33:23 +0100 Subject: arm64: atomics: Undefine internal macros after use We use a bunch of internal macros when constructing our atomic and cmpxchg routines in order to save on boilerplate. Avoid exposing these directly to users of the header files. Reviewed-by: Andrew Murray Signed-off-by: Will Deacon --- arch/arm64/include/asm/atomic.h | 7 +++++++ arch/arm64/include/asm/cmpxchg.h | 4 ++++ 2 files changed, 11 insertions(+) diff --git a/arch/arm64/include/asm/atomic.h b/arch/arm64/include/asm/atomic.h index 7c334337674d..916e5a6d5454 100644 --- a/arch/arm64/include/asm/atomic.h +++ b/arch/arm64/include/asm/atomic.h @@ -32,6 +32,7 @@ ATOMIC_OP(atomic_add) ATOMIC_OP(atomic_and) ATOMIC_OP(atomic_sub) +#undef ATOMIC_OP #define ATOMIC_FETCH_OP(name, op) \ static inline int arch_##op##name(int i, atomic_t *v) \ @@ -54,6 +55,8 @@ ATOMIC_FETCH_OPS(atomic_fetch_sub) ATOMIC_FETCH_OPS(atomic_add_return) ATOMIC_FETCH_OPS(atomic_sub_return) +#undef ATOMIC_FETCH_OP +#undef ATOMIC_FETCH_OPS #define ATOMIC64_OP(op) \ static inline void arch_##op(long i, atomic64_t *v) \ @@ -68,6 +71,7 @@ ATOMIC64_OP(atomic64_add) ATOMIC64_OP(atomic64_and) ATOMIC64_OP(atomic64_sub) +#undef ATOMIC64_OP #define ATOMIC64_FETCH_OP(name, op) \ static inline long arch_##op##name(long i, atomic64_t *v) \ @@ -90,6 +94,9 @@ ATOMIC64_FETCH_OPS(atomic64_fetch_sub) ATOMIC64_FETCH_OPS(atomic64_add_return) ATOMIC64_FETCH_OPS(atomic64_sub_return) +#undef ATOMIC64_FETCH_OP +#undef ATOMIC64_FETCH_OPS + static inline long arch_atomic64_dec_if_positive(atomic64_t *v) { return __lse_ll_sc_body(atomic64_dec_if_positive, v); diff --git a/arch/arm64/include/asm/cmpxchg.h b/arch/arm64/include/asm/cmpxchg.h index afaba73e0b2c..a1398f2f9994 100644 --- a/arch/arm64/include/asm/cmpxchg.h +++ b/arch/arm64/include/asm/cmpxchg.h @@ -129,6 +129,8 @@ __CMPXCHG_CASE(mb_, 16) __CMPXCHG_CASE(mb_, 32) __CMPXCHG_CASE(mb_, 64) +#undef __CMPXCHG_CASE + #define __CMPXCHG_DBL(name) \ static inline long __cmpxchg_double##name(unsigned long old1, \ unsigned long old2, \ @@ -143,6 +145,8 
@@ static inline long __cmpxchg_double##name(unsigned long old1, \ __CMPXCHG_DBL( ) __CMPXCHG_DBL(_mb) +#undef __CMPXCHG_DBL + #define __CMPXCHG_GEN(sfx) \ static inline unsigned long __cmpxchg##sfx(volatile void *ptr, \ unsigned long old, \ -- cgit v1.2.3-58-ga151 From 03adcbd996be7ce81cac793b1511406a7a4df117 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Thu, 29 Aug 2019 14:34:42 +0100 Subject: arm64: atomics: Use K constraint when toolchain appears to support it The 'K' constraint is a documented AArch64 machine constraint supported by GCC for matching integer constants that can be used with a 32-bit logical instruction. Unfortunately, some released compilers erroneously accept the immediate '4294967295' for this constraint, which is later refused by GAS at assembly time. This had led us to avoid the use of the 'K' constraint altogether. Instead, detect whether the compiler is up to the job when building the kernel and pass the 'K' constraint to our 32-bit atomic macros when it appears to be supported. Signed-off-by: Will Deacon --- arch/arm64/Makefile | 9 ++++- arch/arm64/include/asm/atomic_ll_sc.h | 63 ++++++++++++++++++++++------------- 2 files changed, 48 insertions(+), 24 deletions(-) diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile index 61de992bbea3..0cef056b5fb1 100644 --- a/arch/arm64/Makefile +++ b/arch/arm64/Makefile @@ -39,6 +39,12 @@ $(warning LSE atomics not supported by binutils) endif endif +cc_has_k_constraint := $(call try-run,echo \ + 'int main(void) { \ + asm volatile("and w0, w0, %w0" :: "K" (4294967295)); \ + return 0; \ + }' | $(CC) -S -x c -o "$$TMP" -,,-DCONFIG_CC_HAS_K_CONSTRAINT=1) + ifeq ($(CONFIG_ARM64), y) brokengasinst := $(call as-instr,1:\n.inst 0\n.rept . - 1b\n\nnop\n.endr\n,,-DCONFIG_BROKEN_GAS_INST=1) @@ -63,7 +69,8 @@ ifeq ($(CONFIG_GENERIC_COMPAT_VDSO), y) endif endif -KBUILD_CFLAGS += -mgeneral-regs-only $(lseinstr) $(brokengasinst) $(compat_vdso) +KBUILD_CFLAGS += -mgeneral-regs-only $(lseinstr) $(brokengasinst) \ + $(compat_vdso) $(cc_has_k_constraint) KBUILD_CFLAGS += -fno-asynchronous-unwind-tables KBUILD_CFLAGS += $(call cc-disable-warning, psabi) KBUILD_AFLAGS += $(lseinstr) $(brokengasinst) $(compat_vdso) diff --git a/arch/arm64/include/asm/atomic_ll_sc.h b/arch/arm64/include/asm/atomic_ll_sc.h index 95091f72228b..7b012148bfd6 100644 --- a/arch/arm64/include/asm/atomic_ll_sc.h +++ b/arch/arm64/include/asm/atomic_ll_sc.h @@ -10,6 +10,8 @@ #ifndef __ASM_ATOMIC_LL_SC_H #define __ASM_ATOMIC_LL_SC_H +#include + #if IS_ENABLED(CONFIG_ARM64_LSE_ATOMICS) && IS_ENABLED(CONFIG_AS_LSE) #define __LL_SC_FALLBACK(asm_ops) \ " b 3f\n" \ @@ -23,6 +25,10 @@ asm_ops "\n" \ #define __LL_SC_FALLBACK(asm_ops) asm_ops #endif +#ifndef CONFIG_CC_HAS_K_CONSTRAINT +#define K +#endif + /* * AArch64 UP and SMP safe atomic ops. We use load exclusive and * store exclusive to ensure that these are atomic. 
We may loop @@ -44,7 +50,7 @@ __ll_sc_atomic_##op(int i, atomic_t *v) \ " stxr %w1, %w0, %2\n" \ " cbnz %w1, 1b\n") \ : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \ - : #constraint "r" (i)); \ + : __stringify(constraint) "r" (i)); \ } #define ATOMIC_OP_RETURN(name, mb, acq, rel, cl, op, asm_op, constraint)\ @@ -63,7 +69,7 @@ __ll_sc_atomic_##op##_return##name(int i, atomic_t *v) \ " cbnz %w1, 1b\n" \ " " #mb ) \ : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \ - : #constraint "r" (i) \ + : __stringify(constraint) "r" (i) \ : cl); \ \ return result; \ @@ -85,7 +91,7 @@ __ll_sc_atomic_fetch_##op##name(int i, atomic_t *v) \ " cbnz %w2, 1b\n" \ " " #mb ) \ : "=&r" (result), "=&r" (val), "=&r" (tmp), "+Q" (v->counter) \ - : #constraint "r" (i) \ + : __stringify(constraint) "r" (i) \ : cl); \ \ return result; \ @@ -113,10 +119,15 @@ ATOMIC_OPS(sub, sub, J) ATOMIC_FETCH_OP (_acquire, , a, , "memory", __VA_ARGS__)\ ATOMIC_FETCH_OP (_release, , , l, "memory", __VA_ARGS__) -ATOMIC_OPS(and, and, ) +ATOMIC_OPS(and, and, K) +ATOMIC_OPS(or, orr, K) +ATOMIC_OPS(xor, eor, K) +/* + * GAS converts the mysterious and undocumented BIC (immediate) alias to + * an AND (immediate) instruction with the immediate inverted. We don't + * have a constraint for this, so fall back to register. + */ ATOMIC_OPS(andnot, bic, ) -ATOMIC_OPS(or, orr, ) -ATOMIC_OPS(xor, eor, ) #undef ATOMIC_OPS #undef ATOMIC_FETCH_OP @@ -138,7 +149,7 @@ __ll_sc_atomic64_##op(s64 i, atomic64_t *v) \ " stxr %w1, %0, %2\n" \ " cbnz %w1, 1b") \ : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \ - : #constraint "r" (i)); \ + : __stringify(constraint) "r" (i)); \ } #define ATOMIC64_OP_RETURN(name, mb, acq, rel, cl, op, asm_op, constraint)\ @@ -157,7 +168,7 @@ __ll_sc_atomic64_##op##_return##name(s64 i, atomic64_t *v) \ " cbnz %w1, 1b\n" \ " " #mb ) \ : "=&r" (result), "=&r" (tmp), "+Q" (v->counter) \ - : #constraint "r" (i) \ + : __stringify(constraint) "r" (i) \ : cl); \ \ return result; \ @@ -179,7 +190,7 @@ __ll_sc_atomic64_fetch_##op##name(s64 i, atomic64_t *v) \ " cbnz %w2, 1b\n" \ " " #mb ) \ : "=&r" (result), "=&r" (val), "=&r" (tmp), "+Q" (v->counter) \ - : #constraint "r" (i) \ + : __stringify(constraint) "r" (i) \ : cl); \ \ return result; \ @@ -208,9 +219,14 @@ ATOMIC64_OPS(sub, sub, J) ATOMIC64_FETCH_OP (_release,, , l, "memory", __VA_ARGS__) ATOMIC64_OPS(and, and, L) -ATOMIC64_OPS(andnot, bic, ) ATOMIC64_OPS(or, orr, L) ATOMIC64_OPS(xor, eor, L) +/* + * GAS converts the mysterious and undocumented BIC (immediate) alias to + * an AND (immediate) instruction with the immediate inverted. We don't + * have a constraint for this, so fall back to register. + */ +ATOMIC64_OPS(andnot, bic, ) #undef ATOMIC64_OPS #undef ATOMIC64_FETCH_OP @@ -269,7 +285,7 @@ __ll_sc__cmpxchg_case_##name##sz(volatile void *ptr, \ "2:") \ : [tmp] "=&r" (tmp), [oldval] "=&r" (oldval), \ [v] "+Q" (*(u##sz *)ptr) \ - : [old] #constraint "r" (old), [new] "r" (new) \ + : [old] __stringify(constraint) "r" (old), [new] "r" (new) \ : cl); \ \ return oldval; \ @@ -280,21 +296,21 @@ __ll_sc__cmpxchg_case_##name##sz(volatile void *ptr, \ * handle the 'K' constraint for the value 4294967295 - thus we use no * constraint for 32 bit operations. 
*/ -__CMPXCHG_CASE(w, b, , 8, , , , , ) -__CMPXCHG_CASE(w, h, , 16, , , , , ) -__CMPXCHG_CASE(w, , , 32, , , , , ) +__CMPXCHG_CASE(w, b, , 8, , , , , K) +__CMPXCHG_CASE(w, h, , 16, , , , , K) +__CMPXCHG_CASE(w, , , 32, , , , , K) __CMPXCHG_CASE( , , , 64, , , , , L) -__CMPXCHG_CASE(w, b, acq_, 8, , a, , "memory", ) -__CMPXCHG_CASE(w, h, acq_, 16, , a, , "memory", ) -__CMPXCHG_CASE(w, , acq_, 32, , a, , "memory", ) +__CMPXCHG_CASE(w, b, acq_, 8, , a, , "memory", K) +__CMPXCHG_CASE(w, h, acq_, 16, , a, , "memory", K) +__CMPXCHG_CASE(w, , acq_, 32, , a, , "memory", K) __CMPXCHG_CASE( , , acq_, 64, , a, , "memory", L) -__CMPXCHG_CASE(w, b, rel_, 8, , , l, "memory", ) -__CMPXCHG_CASE(w, h, rel_, 16, , , l, "memory", ) -__CMPXCHG_CASE(w, , rel_, 32, , , l, "memory", ) +__CMPXCHG_CASE(w, b, rel_, 8, , , l, "memory", K) +__CMPXCHG_CASE(w, h, rel_, 16, , , l, "memory", K) +__CMPXCHG_CASE(w, , rel_, 32, , , l, "memory", K) __CMPXCHG_CASE( , , rel_, 64, , , l, "memory", L) -__CMPXCHG_CASE(w, b, mb_, 8, dmb ish, , l, "memory", ) -__CMPXCHG_CASE(w, h, mb_, 16, dmb ish, , l, "memory", ) -__CMPXCHG_CASE(w, , mb_, 32, dmb ish, , l, "memory", ) +__CMPXCHG_CASE(w, b, mb_, 8, dmb ish, , l, "memory", K) +__CMPXCHG_CASE(w, h, mb_, 16, dmb ish, , l, "memory", K) +__CMPXCHG_CASE(w, , mb_, 32, dmb ish, , l, "memory", K) __CMPXCHG_CASE( , , mb_, 64, dmb ish, , l, "memory", L) #undef __CMPXCHG_CASE @@ -332,5 +348,6 @@ __CMPXCHG_DBL( , , , ) __CMPXCHG_DBL(_mb, dmb ish, l, "memory") #undef __CMPXCHG_DBL +#undef K #endif /* __ASM_ATOMIC_LL_SC_H */ -- cgit v1.2.3-58-ga151 From e376897f424a1c807779a2635f62eb02d7e382f9 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 2 Sep 2019 10:06:23 +0200 Subject: arm64: remove __iounmap No need to indirect iounmap for arm64. Signed-off-by: Christoph Hellwig Signed-off-by: Will Deacon --- arch/arm64/include/asm/io.h | 3 +-- arch/arm64/mm/ioremap.c | 4 ++-- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/arch/arm64/include/asm/io.h b/arch/arm64/include/asm/io.h index e9763831186a..323cb306bd28 100644 --- a/arch/arm64/include/asm/io.h +++ b/arch/arm64/include/asm/io.h @@ -163,14 +163,13 @@ extern void __memset_io(volatile void __iomem *, int, size_t); * I/O memory mapping functions. */ extern void __iomem *__ioremap(phys_addr_t phys_addr, size_t size, pgprot_t prot); -extern void __iounmap(volatile void __iomem *addr); +extern void iounmap(volatile void __iomem *addr); extern void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size); #define ioremap(addr, size) __ioremap((addr), (size), __pgprot(PROT_DEVICE_nGnRE)) #define ioremap_nocache(addr, size) __ioremap((addr), (size), __pgprot(PROT_DEVICE_nGnRE)) #define ioremap_wc(addr, size) __ioremap((addr), (size), __pgprot(PROT_NORMAL_NC)) #define ioremap_wt(addr, size) __ioremap((addr), (size), __pgprot(PROT_DEVICE_nGnRE)) -#define iounmap __iounmap /* * PCI configuration space mapping function. 
diff --git a/arch/arm64/mm/ioremap.c b/arch/arm64/mm/ioremap.c
index fdb595a5d65f..9be71bee902c 100644
--- a/arch/arm64/mm/ioremap.c
+++ b/arch/arm64/mm/ioremap.c
@@ -69,7 +69,7 @@ void __iomem *__ioremap(phys_addr_t phys_addr, size_t size, pgprot_t prot)
 }
 EXPORT_SYMBOL(__ioremap);
 
-void __iounmap(volatile void __iomem *io_addr)
+void iounmap(volatile void __iomem *io_addr)
 {
 	unsigned long addr = (unsigned long)io_addr & PAGE_MASK;
 
@@ -80,7 +80,7 @@ void __iounmap(volatile void __iomem *io_addr)
 	if (is_vmalloc_addr((void *)addr))
 		vunmap((void *)addr);
 }
-EXPORT_SYMBOL(__iounmap);
+EXPORT_SYMBOL(iounmap);
 
 void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size)
 {
-- cgit v1.2.3-58-ga151
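[Editorial aside, not part of the patch above.] For context, iounmap() is the tear-down counterpart of ioremap(); after this change it is a real exported function on arm64 rather than a #define aliasing __iounmap(). A minimal, hypothetical driver-style sketch of the call pair follows; EXAMPLE_PHYS_BASE, EXAMPLE_SIZE and the example_* names are made up for illustration only.

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/module.h>

/* Hypothetical device window; values are illustrative, not from the series. */
#define EXAMPLE_PHYS_BASE	0x40000000UL
#define EXAMPLE_SIZE		0x1000

static void __iomem *example_base;

static int __init example_init(void)
{
	/* Map the device registers; on arm64 this uses Device-nGnRE attributes. */
	example_base = ioremap(EXAMPLE_PHYS_BASE, EXAMPLE_SIZE);
	if (!example_base)
		return -ENOMEM;

	/* ... access registers via readl()/writel() on example_base ... */
	return 0;
}

static void __exit example_exit(void)
{
	/* With this patch applied, this resolves to the renamed iounmap(). */
	iounmap(example_base);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");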