303 files changed, 24179 insertions, 2656 deletions
@@ -390,9 +390,10 @@ Matthias Fuchs <socketcan@esd.eu> <matthias.fuchs@esd.eu> Matthieu Baerts <matttbe@kernel.org> <matthieu.baerts@tessares.net> Matthieu CASTET <castet.matthieu@free.fr> Matti Vaittinen <mazziesaccount@gmail.com> <matti.vaittinen@fi.rohmeurope.com> -Matt Ranostay <matt.ranostay@konsulko.com> <matt@ranostay.consulting> -Matt Ranostay <mranostay@gmail.com> Matthew Ranostay <mranostay@embeddedalley.com> -Matt Ranostay <mranostay@gmail.com> <matt.ranostay@intel.com> +Matt Ranostay <matt@ranostay.sg> <matt.ranostay@konsulko.com> +Matt Ranostay <matt@ranostay.sg> <matt@ranostay.consulting> +Matt Ranostay <matt@ranostay.sg> Matthew Ranostay <mranostay@embeddedalley.com> +Matt Ranostay <matt@ranostay.sg> <matt.ranostay@intel.com> Matt Redfearn <matt.redfearn@mips.com> <matt.redfearn@imgtec.com> Maulik Shah <quic_mkshah@quicinc.com> <mkshah@codeaurora.org> Mauro Carvalho Chehab <mchehab@kernel.org> <maurochehab@gmail.com> diff --git a/Documentation/ABI/testing/sysfs-bus-cdx b/Documentation/ABI/testing/sysfs-bus-cdx index 8c067ff99e54..e84277531414 100644 --- a/Documentation/ABI/testing/sysfs-bus-cdx +++ b/Documentation/ABI/testing/sysfs-bus-cdx @@ -98,6 +98,13 @@ Description: # echo 1 > /sys/bus/cdx/devices/.../remove +What: /sys/bus/cdx/devices/.../resource<N> +Date: July 2023 +Contact: puneet.gupta@amd.com +Description: + The resource binary file contains the content of the memory + regions. These files can be m'maped from userspace. + What: /sys/bus/cdx/devices/.../modalias Date: July 2023 Contact: nipun.gupta@amd.com diff --git a/Documentation/ABI/testing/sysfs-bus-coresight-devices-tmc b/Documentation/ABI/testing/sysfs-bus-coresight-devices-tmc index 6aa527296c71..96aafa66b4a5 100644 --- a/Documentation/ABI/testing/sysfs-bus-coresight-devices-tmc +++ b/Documentation/ABI/testing/sysfs-bus-coresight-devices-tmc @@ -91,3 +91,19 @@ Contact: Mathieu Poirier <mathieu.poirier@linaro.org> Description: (RW) Size of the trace buffer for TMC-ETR when used in SYSFS mode. Writable only for TMC-ETR configurations. The value should be aligned to the kernel pagesize. + +What: /sys/bus/coresight/devices/<memory_map>.tmc/buf_modes_available +Date: August 2023 +KernelVersion: 6.7 +Contact: Anshuman Khandual <anshuman.khandual@arm.com> +Description: (Read) Shows all supported Coresight TMC-ETR buffer modes available + for the users to configure explicitly. This file is avaialble only + for TMC ETR devices. + +What: /sys/bus/coresight/devices/<memory_map>.tmc/buf_mode_preferred +Date: August 2023 +KernelVersion: 6.7 +Contact: Anshuman Khandual <anshuman.khandual@arm.com> +Description: (RW) Current Coresight TMC-ETR buffer mode selected. But user could + only provide a mode which is supported for a given ETR device. This + file is available only for TMC ETR devices. diff --git a/Documentation/ABI/testing/sysfs-bus-coresight-devices-tpdm b/Documentation/ABI/testing/sysfs-bus-coresight-devices-tpdm index 4a58e649550d..4dd49b159543 100644 --- a/Documentation/ABI/testing/sysfs-bus-coresight-devices-tpdm +++ b/Documentation/ABI/testing/sysfs-bus-coresight-devices-tpdm @@ -11,3 +11,162 @@ Description: Accepts only one of the 2 values - 1 or 2. 1 : Generate 64 bits data 2 : Generate 32 bits data + +What: /sys/bus/coresight/devices/<tpdm-name>/reset_dataset +Date: March 2023 +KernelVersion 6.7 +Contact: Jinlong Mao (QUIC) <quic_jinlmao@quicinc.com>, Tao Zhang (QUIC) <quic_taozha@quicinc.com> +Description: + (Write) Reset the dataset of the tpdm. + + Accepts only one value - 1. 
+ 1 : Reset the dataset of the tpdm + +What: /sys/bus/coresight/devices/<tpdm-name>/dsb_trig_type +Date: March 2023 +KernelVersion: 6.7 +Contact: Jinlong Mao (QUIC) <quic_jinlmao@quicinc.com>, Tao Zhang (QUIC) <quic_taozha@quicinc.com> +Description: + (RW) Set/Get the trigger type of the DSB for tpdm. + + Accepts only one of the 2 values - 0 or 1. + 0 : Set the DSB trigger type to false + 1 : Set the DSB trigger type to true + +What: /sys/bus/coresight/devices/<tpdm-name>/dsb_trig_ts +Date: March 2023 +KernelVersion: 6.7 +Contact: Jinlong Mao (QUIC) <quic_jinlmao@quicinc.com>, Tao Zhang (QUIC) <quic_taozha@quicinc.com> +Description: + (RW) Set/Get the trigger timestamp of the DSB for tpdm. + + Accepts only one of the 2 values - 0 or 1. + 0 : Set the DSB trigger timestamp to false + 1 : Set the DSB trigger timestamp to true + +What: /sys/bus/coresight/devices/<tpdm-name>/dsb_mode +Date: March 2023 +KernelVersion: 6.7 +Contact: Jinlong Mao (QUIC) <quic_jinlmao@quicinc.com>, Tao Zhang (QUIC) <quic_taozha@quicinc.com> +Description: + (RW) Set/Get the programming mode of the DSB for tpdm. + + Accepts a value greater than 0. The meaning of the data + bits is listed below. + Bit[0:1] : Test mode control bit for choosing the inputs. + Bit[3] : Set to 0 for low performance mode. Set to 1 for high + performance mode. + Bit[4:8] : Select byte lane for high performance mode. + +What: /sys/bus/coresight/devices/<tpdm-name>/dsb_edge/ctrl_idx +Date: March 2023 +KernelVersion: 6.7 +Contact: Jinlong Mao (QUIC) <quic_jinlmao@quicinc.com>, Tao Zhang (QUIC) <quic_taozha@quicinc.com> +Description: + (RW) Set/Get the index number of the edge detection for the DSB + subunit TPDM. Since there are at most 256 edge detections, this + value ranges from 0 to 255. + +What: /sys/bus/coresight/devices/<tpdm-name>/dsb_edge/ctrl_val +Date: March 2023 +KernelVersion: 6.7 +Contact: Jinlong Mao (QUIC) <quic_jinlmao@quicinc.com>, Tao Zhang (QUIC) <quic_taozha@quicinc.com> +Description: + Write a value to control the edge detection corresponding to + the index number. Before writing data to this sysfs file, + "ctrl_idx" should be written first to configure the index + number of the edge detection which needs to be controlled. + + Accepts only one of the following values. + 0 - Rising edge detection + 1 - Falling edge detection + 2 - Rising and falling edge detection (toggle detection) + + +What: /sys/bus/coresight/devices/<tpdm-name>/dsb_edge/ctrl_mask +Date: March 2023 +KernelVersion: 6.7 +Contact: Jinlong Mao (QUIC) <quic_jinlmao@quicinc.com>, Tao Zhang (QUIC) <quic_taozha@quicinc.com> +Description: + Write a value to mask the edge detection corresponding to the index + number. Before writing data to this sysfs file, "ctrl_idx" should + be written first to configure the index number of the edge detection + which needs to be masked. + + Accepts only one of the 2 values - 0 or 1. + +What: /sys/bus/coresight/devices/<tpdm-name>/dsb_edge/edcr[0:15] +Date: March 2023 +KernelVersion: 6.7 +Contact: Jinlong Mao (QUIC) <quic_jinlmao@quicinc.com>, Tao Zhang (QUIC) <quic_taozha@quicinc.com> +Description: + Read the set of edge control values of the DSB in TPDM. + +What: /sys/bus/coresight/devices/<tpdm-name>/dsb_edge/edcmr[0:7] +Date: March 2023 +KernelVersion: 6.7 +Contact: Jinlong Mao (QUIC) <quic_jinlmao@quicinc.com>, Tao Zhang (QUIC) <quic_taozha@quicinc.com> +Description: + Read the set of edge control masks of the DSB in TPDM.
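The dsb_edge controls above follow an index-then-value protocol: a write to ctrl_idx selects which of the 256 edge detectors the next write to ctrl_val or ctrl_mask applies to. A minimal sketch of that sequence, assuming a hypothetical TPDM named 10608000.tpdm and edge detector 2:

    # Select the edge detector to program (0-255).
    echo 2 > /sys/bus/coresight/devices/10608000.tpdm/dsb_edge/ctrl_idx
    # Program it for toggle (rising and falling) detection.
    echo 2 > /sys/bus/coresight/devices/10608000.tpdm/dsb_edge/ctrl_val
    # Re-select the same index, then write its mask bit (0 or 1).
    echo 2 > /sys/bus/coresight/devices/10608000.tpdm/dsb_edge/ctrl_idx
    echo 1 > /sys/bus/coresight/devices/10608000.tpdm/dsb_edge/ctrl_mask

The resulting state can be read back through the edcr[0:15] and edcmr[0:7] files documented above.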
+ +What: /sys/bus/coresight/devices/<tpdm-name>/dsb_trig_patt/xpr[0:7] +Date: March 2023 +KernelVersion 6.7 +Contact: Jinlong Mao (QUIC) <quic_jinlmao@quicinc.com>, Tao Zhang (QUIC) <quic_taozha@quicinc.com> +Description: + (RW) Set/Get the value of the trigger pattern for the DSB + subunit TPDM. + +What: /sys/bus/coresight/devices/<tpdm-name>/dsb_trig_patt/xpmr[0:7] +Date: March 2023 +KernelVersion 6.7 +Contact: Jinlong Mao (QUIC) <quic_jinlmao@quicinc.com>, Tao Zhang (QUIC) <quic_taozha@quicinc.com> +Description: + (RW) Set/Get the mask of the trigger pattern for the DSB + subunit TPDM. + +What: /sys/bus/coresight/devices/<tpdm-name>/dsb_patt/tpr[0:7] +Date: March 2023 +KernelVersion 6.7 +Contact: Jinlong Mao (QUIC) <quic_jinlmao@quicinc.com>, Tao Zhang (QUIC) <quic_taozha@quicinc.com> +Description: + (RW) Set/Get the value of the pattern for the DSB subunit TPDM. + +What: /sys/bus/coresight/devices/<tpdm-name>/dsb_patt/tpmr[0:7] +Date: March 2023 +KernelVersion 6.7 +Contact: Jinlong Mao (QUIC) <quic_jinlmao@quicinc.com>, Tao Zhang (QUIC) <quic_taozha@quicinc.com> +Description: + (RW) Set/Get the mask of the pattern for the DSB subunit TPDM. + +What: /sys/bus/coresight/devices/<tpdm-name>/dsb_patt/enable_ts +Date: March 2023 +KernelVersion 6.7 +Contact: Jinlong Mao (QUIC) <quic_jinlmao@quicinc.com>, Tao Zhang (QUIC) <quic_taozha@quicinc.com> +Description: + (Write) Set the pattern timestamp of DSB tpdm. Read + the pattern timestamp of DSB tpdm. + + Accepts only one of the 2 values - 0 or 1. + 0 : Disable DSB pattern timestamp. + 1 : Enable DSB pattern timestamp. + +What: /sys/bus/coresight/devices/<tpdm-name>/dsb_patt/set_type +Date: March 2023 +KernelVersion 6.7 +Contact: Jinlong Mao (QUIC) <quic_jinlmao@quicinc.com>, Tao Zhang (QUIC) <quic_taozha@quicinc.com> +Description: + (Write) Set the pattern type of DSB tpdm. Read + the pattern type of DSB tpdm. + + Accepts only one of the 2 values - 0 or 1. + 0 : Set the DSB pattern type to value. + 1 : Set the DSB pattern type to toggle. + +What: /sys/bus/coresight/devices/<tpdm-name>/dsb_msr/msr[0:31] +Date: March 2023 +KernelVersion 6.7 +Contact: Jinlong Mao (QUIC) <quic_jinlmao@quicinc.com>, Tao Zhang (QUIC) <quic_taozha@quicinc.com> +Description: + (RW) Set/Get the MSR(mux select register) for the DSB subunit + TPDM. diff --git a/Documentation/ABI/testing/sysfs-bus-iio b/Documentation/ABI/testing/sysfs-bus-iio index 19cde14f3869..2e6d5ebfd3c7 100644 --- a/Documentation/ABI/testing/sysfs-bus-iio +++ b/Documentation/ABI/testing/sysfs-bus-iio @@ -362,10 +362,21 @@ Description: What: /sys/bus/iio/devices/iio:deviceX/in_accel_x_peak_raw What: /sys/bus/iio/devices/iio:deviceX/in_accel_y_peak_raw What: /sys/bus/iio/devices/iio:deviceX/in_accel_z_peak_raw +What: /sys/bus/iio/devices/iio:deviceX/in_humidityrelative_peak_raw +What: /sys/bus/iio/devices/iio:deviceX/in_temp_peak_raw KernelVersion: 2.6.36 Contact: linux-iio@vger.kernel.org Description: - Highest value since some reset condition. These + Highest value since some reset condition. These + attributes allow access to this and are otherwise + the direct equivalent of the <type>Y[_name]_raw attributes. + +What: /sys/bus/iio/devices/iio:deviceX/in_humidityrelative_trough_raw +What: /sys/bus/iio/devices/iio:deviceX/in_temp_trough_raw +KernelVersion: 6.7 +Contact: linux-iio@vger.kernel.org +Description: + Lowest value since some reset condition. These attributes allow access to this and are otherwise the direct equivalent of the <type>Y[_name]_raw attributes. 
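For drivers that implement these attributes, the peak and trough values are read like any other raw channel. A quick sketch, assuming a hypothetical iio:device0 that exposes temperature and relative humidity:

    cd /sys/bus/iio/devices/iio:device0
    cat in_temp_peak_raw                  # highest raw value since the last reset condition
    cat in_temp_trough_raw                # lowest raw value since the last reset condition
    cat in_humidityrelative_trough_raw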
@@ -618,7 +629,9 @@ KernelVersion: 2.6.35 Contact: linux-iio@vger.kernel.org Description: If a discrete set of scale values is available, they - are listed in this attribute. + are listed in this attribute. Unlike illumination, + multiplying intensity by intensity_scale does not + yield value with any standardized unit. What: /sys/bus/iio/devices/iio:deviceX/out_voltageY_hardwaregain What: /sys/bus/iio/devices/iio:deviceX/in_intensity_hardwaregain @@ -1574,6 +1587,8 @@ What: /sys/.../iio:deviceX/in_intensityY_raw What: /sys/.../iio:deviceX/in_intensityY_ir_raw What: /sys/.../iio:deviceX/in_intensityY_both_raw What: /sys/.../iio:deviceX/in_intensityY_uv_raw +What: /sys/.../iio:deviceX/in_intensityY_uva_raw +What: /sys/.../iio:deviceX/in_intensityY_uvb_raw What: /sys/.../iio:deviceX/in_intensityY_duv_raw KernelVersion: 3.4 Contact: linux-iio@vger.kernel.org @@ -1582,8 +1597,9 @@ Description: that measurements contain visible and infrared light components or just infrared light, respectively. Modifier uv indicates that measurements contain ultraviolet light - components. Modifier duv indicates that measurements - contain deep ultraviolet light components. + components. Modifiers uva, uvb and duv indicate that + measurements contain A, B or deep (C) ultraviolet light + components respectively. What: /sys/.../iio:deviceX/in_uvindex_input KernelVersion: 4.6 @@ -2254,3 +2270,21 @@ Description: If a label is defined for this event add that to the event specific attributes. This is useful for userspace to be able to better identify an individual event. + +What: /sys/.../events/in_accel_gesture_tap_wait_timeout +KernelVersion: 6.7 +Contact: linux-iio@vger.kernel.org +Description: + Enable tap gesture confirmation with timeout. + +What: /sys/.../events/in_accel_gesture_tap_wait_dur +KernelVersion: 6.7 +Contact: linux-iio@vger.kernel.org +Description: + Timeout value in seconds for tap gesture confirmation. + +What: /sys/.../events/in_accel_gesture_tap_wait_dur_available +KernelVersion: 6.7 +Contact: linux-iio@vger.kernel.org +Description: + List of available timeout value for tap gesture confirmation. diff --git a/Documentation/ABI/testing/sysfs-nvmem-cells b/Documentation/ABI/testing/sysfs-nvmem-cells new file mode 100644 index 000000000000..7af70adf3690 --- /dev/null +++ b/Documentation/ABI/testing/sysfs-nvmem-cells @@ -0,0 +1,21 @@ +What: /sys/bus/nvmem/devices/.../cells/<cell-name> +Date: May 2023 +KernelVersion: 6.5 +Contact: Miquel Raynal <miquel.raynal@bootlin.com> +Description: + The "cells" folder contains one file per cell exposed by the + NVMEM device. The name of the file is: <name>@<where>, with + <name> being the cell name and <where> its location in the NVMEM + device, in hexadecimal (without the '0x' prefix, to mimic device + tree node names). The length of the file is the size of the cell + (when known). The content of the file is the binary content of + the cell (may sometimes be ASCII, likely without trailing + character). + Note: This file is only present if CONFIG_NVMEM_SYSFS + is enabled. + + Example:: + + hexdump -C /sys/bus/nvmem/devices/1-00563/cells/product-name@d + 00000000 54 4e 34 38 4d 2d 50 2d 44 4e |TN48M-P-DN| + 0000000a diff --git a/Documentation/arch/arm64/silicon-errata.rst b/Documentation/arch/arm64/silicon-errata.rst index f47f63bcf67c..bfdf236e2af3 100644 --- a/Documentation/arch/arm64/silicon-errata.rst +++ b/Documentation/arch/arm64/silicon-errata.rst @@ -117,6 +117,10 @@ stable kernels. 
+----------------+-----------------+-----------------+-----------------------------+ | ARM | Cortex-A76 | #1463225 | ARM64_ERRATUM_1463225 | +----------------+-----------------+-----------------+-----------------------------+ +| ARM | Cortex-A76 | #1490853 | N/A | ++----------------+-----------------+-----------------+-----------------------------+ +| ARM | Cortex-A77 | #1491015 | N/A | ++----------------+-----------------+-----------------+-----------------------------+ | ARM | Cortex-A77 | #1508412 | ARM64_ERRATUM_1508412 | +----------------+-----------------+-----------------+-----------------------------+ | ARM | Cortex-A710 | #2119858 | ARM64_ERRATUM_2119858 | @@ -127,6 +131,8 @@ stable kernels. +----------------+-----------------+-----------------+-----------------------------+ | ARM | Cortex-A715 | #2645198 | ARM64_ERRATUM_2645198 | +----------------+-----------------+-----------------+-----------------------------+ +| ARM | Cortex-X1 | #1502854 | N/A | ++----------------+-----------------+-----------------+-----------------------------+ | ARM | Cortex-X2 | #2119858 | ARM64_ERRATUM_2119858 | +----------------+-----------------+-----------------+-----------------------------+ | ARM | Cortex-X2 | #2224489 | ARM64_ERRATUM_2224489 | @@ -135,6 +141,8 @@ stable kernels. +----------------+-----------------+-----------------+-----------------------------+ | ARM | Neoverse-N1 | #1349291 | N/A | +----------------+-----------------+-----------------+-----------------------------+ +| ARM | Neoverse-N1 | #1490853 | N/A | ++----------------+-----------------+-----------------+-----------------------------+ | ARM | Neoverse-N1 | #1542419 | ARM64_ERRATUM_1542419 | +----------------+-----------------+-----------------+-----------------------------+ | ARM | Neoverse-N2 | #2139208 | ARM64_ERRATUM_2139208 | @@ -143,6 +151,8 @@ stable kernels. +----------------+-----------------+-----------------+-----------------------------+ | ARM | Neoverse-N2 | #2253138 | ARM64_ERRATUM_2253138 | +----------------+-----------------+-----------------+-----------------------------+ +| ARM | Neoverse-V1 | #1619801 | N/A | ++----------------+-----------------+-----------------+-----------------------------+ | ARM | MMU-500 | #841119,826419 | N/A | +----------------+-----------------+-----------------+-----------------------------+ | ARM | MMU-600 | #1076982,1209401| N/A | diff --git a/Documentation/devicetree/bindings/arm/qcom,coresight-tpdm.yaml b/Documentation/devicetree/bindings/arm/qcom,coresight-tpdm.yaml index 3bad47b7b02b..61ddc3b5b247 100644 --- a/Documentation/devicetree/bindings/arm/qcom,coresight-tpdm.yaml +++ b/Documentation/devicetree/bindings/arm/qcom,coresight-tpdm.yaml @@ -44,6 +44,23 @@ properties: minItems: 1 maxItems: 2 + qcom,dsb-element-size: + description: + Specifies the DSB(Discrete Single Bit) element size supported by + the monitor. The associated aggregator will read this size before it + is enabled. DSB element size currently only supports 32-bit and 64-bit. + $ref: /schemas/types.yaml#/definitions/uint8 + enum: [32, 64] + + qcom,dsb-msrs-num: + description: + Specifies the number of DSB(Discrete Single Bit) MSR(mux select register) + registers supported by the monitor. If this property is not configured + or set to 0, it means this DSB TPDM doesn't support MSR. 
+ $ref: /schemas/types.yaml#/definitions/uint32 + minimum: 0 + maximum: 32 + clocks: maxItems: 1 @@ -77,6 +94,9 @@ examples: compatible = "qcom,coresight-tpdm", "arm,primecell"; reg = <0x0684c000 0x1000>; + qcom,dsb-element-size = /bits/ 8 <32>; + qcom,dsb-msrs-num = <16>; + clocks = <&aoss_qmp>; clock-names = "apb_pclk"; diff --git a/Documentation/devicetree/bindings/iio/adc/adi,ad7091r5.yaml b/Documentation/devicetree/bindings/iio/adc/adi,ad7091r5.yaml index ce7ba634643c..ddec9747436c 100644 --- a/Documentation/devicetree/bindings/iio/adc/adi,ad7091r5.yaml +++ b/Documentation/devicetree/bindings/iio/adc/adi,ad7091r5.yaml @@ -4,36 +4,92 @@ $id: http://devicetree.org/schemas/iio/adc/adi,ad7091r5.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Analog Devices AD7091R5 4-Channel 12-Bit ADC +title: Analog Devices AD7091R-2/-4/-5/-8 Multi-Channel 12-Bit ADCs maintainers: - Michael Hennerich <michael.hennerich@analog.com> + - Marcelo Schmitt <marcelo.schmitt@analog.com> description: | - Analog Devices AD7091R5 4-Channel 12-Bit ADC + Analog Devices AD7091R5 4-Channel 12-Bit ADC supporting I2C interface https://www.analog.com/media/en/technical-documentation/data-sheets/ad7091r-5.pdf + Analog Devices AD7091R-2/AD7091R-4/AD7091R-8 2-/4-/8-Channel 12-Bit ADCs + supporting SPI interface + https://www.analog.com/media/en/technical-documentation/data-sheets/AD7091R-2_7091R-4_7091R-8.pdf properties: compatible: enum: + - adi,ad7091r2 + - adi,ad7091r4 - adi,ad7091r5 + - adi,ad7091r8 reg: maxItems: 1 + vdd-supply: + description: + Provide VDD power to the sensor (VDD range is from 2.7V to 5.25V). + + vdrive-supply: + description: + Determines the voltage level at which the interface logic will operate. + The V_drive voltage range is from 1.8V to 5.25V and must not exceed VDD by + more than 0.3V. + vref-supply: description: Phandle to the vref power supply - interrupts: + convst-gpios: + description: + GPIO connected to the CONVST pin. + This logic input is used to initiate conversions on the analog + input channels. maxItems: 1 + reset-gpios: + maxItems: 1 + + interrupts: + description: + Interrupt for signaling when conversion results exceed the high limit for + ADC readings or fall below the low limit for them. Interrupt source must + be attached to ALERT/BUSY/GPO0 pin. + maxItems: 1 required: - compatible - reg -additionalProperties: false +allOf: + - $ref: /schemas/spi/spi-peripheral-props.yaml# + + # AD7091R-2 does not have ALERT/BUSY/GPO pin + - if: + properties: + compatible: + contains: + enum: + - adi,ad7091r2 + then: + properties: + interrupts: false + + - if: + properties: + compatible: + contains: + enum: + - adi,ad7091r2 + - adi,ad7091r4 + - adi,ad7091r8 + then: + required: + - convst-gpios + +unevaluatedProperties: false examples: - | @@ -51,4 +107,22 @@ examples: interrupt-parent = <&gpio>; }; }; + - | + #include <dt-bindings/gpio/gpio.h> + #include <dt-bindings/interrupt-controller/irq.h> + spi { + #address-cells = <1>; + #size-cells = <0>; + + adc@0 { + compatible = "adi,ad7091r8"; + reg = <0x0>; + spi-max-frequency = <1000000>; + vref-supply = <&adc_vref>; + convst-gpios = <&gpio 25 GPIO_ACTIVE_LOW>; + reset-gpios = <&gpio 27 GPIO_ACTIVE_LOW>; + interrupts = <22 IRQ_TYPE_EDGE_FALLING>; + interrupt-parent = <&gpio>; + }; + }; ... 
diff --git a/Documentation/devicetree/bindings/iio/adc/maxim,max34408.yaml b/Documentation/devicetree/bindings/iio/adc/maxim,max34408.yaml new file mode 100644 index 000000000000..4cba856e8d47 --- /dev/null +++ b/Documentation/devicetree/bindings/iio/adc/maxim,max34408.yaml @@ -0,0 +1,139 @@ +# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/iio/adc/maxim,max34408.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Maxim MAX34408/MAX34409 current monitors with overcurrent control + +maintainers: + - Ivan Mikhaylov <fr0st61te@gmail.com> + +description: | + The MAX34408/MAX34409 are two- and four-channel current monitors that are + configured and monitored with a standard I2C/SMBus serial interface. Each + unidirectional current sensor offers precision high-side operation with a + low full-scale sense voltage. The devices automatically sequence through + two or four channels and collect the current-sense samples and average them + to reduce the effect of impulse noise. The raw ADC samples are compared to + user-programmable digital thresholds to indicate overcurrent conditions. + Overcurrent conditions trigger a hardware output to provide an immediate + indication to shut down any necessary external circuitry. + + Specifications about the devices can be found at: + https://www.analog.com/media/en/technical-documentation/data-sheets/MAX34408-MAX34409.pdf + +properties: + compatible: + enum: + - maxim,max34408 + - maxim,max34409 + + "#address-cells": + const: 1 + + "#size-cells": + const: 0 + + reg: + maxItems: 1 + + interrupts: + maxItems: 1 + + powerdown-gpios: + description: + Shutdown Output. Open-drain output. This output transitions to high impedance + when any of the digital comparator thresholds are exceeded as long as the ENA + pin is high. + maxItems: 1 + + powerdown-status-gpios: + description: + SHTDN Enable Input. CMOS digital input. Connect to GND to clear the latch and + unconditionally deassert (force low) the SHTDN output and reset the shutdown + delay. Connect to VDD to enable normal latch operation of the SHTDN output. + maxItems: 1 + + vdd-supply: true + +patternProperties: + "^channel@[0-3]$": + $ref: adc.yaml + type: object + description: + Represents the internal channels of the ADC. + + properties: + reg: + items: + - minimum: 0 + maximum: 3 + + maxim,rsense-val-micro-ohms: + description: + Adjust the Rsense value to monitor higher or lower current levels for + input. 
+ enum: [250, 500, 1000, 5000, 10000, 50000, 100000, 200000, 500000] + default: 1000 + + required: + - reg + - maxim,rsense-val-micro-ohms + + unevaluatedProperties: false + +required: + - compatible + - reg + +allOf: + - if: + properties: + compatible: + contains: + const: maxim,max34408 + then: + patternProperties: + "^channel@[2-3]$": false + "^channel@[0-1]$": + properties: + reg: + maximum: 1 + else: + patternProperties: + "^channel@[0-3]$": + properties: + reg: + maximum: 3 + +additionalProperties: false + +examples: + - | + #include <dt-bindings/gpio/gpio.h> + + i2c { + #address-cells = <1>; + #size-cells = <0>; + + adc@1e { + compatible = "maxim,max34409"; + reg = <0x1e>; + powerdown-gpios = <&gpio0 1 GPIO_ACTIVE_LOW>; + powerdown-status-gpios = <&gpio0 2 GPIO_ACTIVE_HIGH>; + + #address-cells = <1>; + #size-cells = <0>; + + channel@0 { + reg = <0x0>; + maxim,rsense-val-micro-ohms = <5000>; + }; + + channel@1 { + reg = <0x1>; + maxim,rsense-val-micro-ohms = <10000>; + }; + }; + }; diff --git a/Documentation/devicetree/bindings/iio/adc/qcom,spmi-iadc.yaml b/Documentation/devicetree/bindings/iio/adc/qcom,spmi-iadc.yaml index b6a233cd5f6b..5ed893ef5c18 100644 --- a/Documentation/devicetree/bindings/iio/adc/qcom,spmi-iadc.yaml +++ b/Documentation/devicetree/bindings/iio/adc/qcom,spmi-iadc.yaml @@ -25,7 +25,7 @@ properties: - const: qcom,spmi-iadc reg: - description: IADC base address and length in the SPMI PMIC register map + description: IADC base address in the SPMI PMIC register map maxItems: 1 qcom,external-resistor-micro-ohms: @@ -50,10 +50,12 @@ additionalProperties: false examples: - | #include <dt-bindings/interrupt-controller/irq.h> - spmi { + + pmic { #address-cells = <1>; #size-cells = <0>; - pmic_iadc: adc@3600 { + + adc@3600 { compatible = "qcom,pm8941-iadc", "qcom,spmi-iadc"; reg = <0x3600>; interrupts = <0x0 0x36 0x0 IRQ_TYPE_EDGE_RISING>; diff --git a/Documentation/devicetree/bindings/iio/adc/qcom,spmi-rradc.yaml b/Documentation/devicetree/bindings/iio/adc/qcom,spmi-rradc.yaml index 64abe9a4cd9e..f39bc92c2b99 100644 --- a/Documentation/devicetree/bindings/iio/adc/qcom,spmi-rradc.yaml +++ b/Documentation/devicetree/bindings/iio/adc/qcom,spmi-rradc.yaml @@ -43,7 +43,7 @@ examples: #address-cells = <1>; #size-cells = <0>; - pmic_rradc: adc@4500 { + adc@4500 { compatible = "qcom,pmi8998-rradc"; reg = <0x4500>; #io-channel-cells = <1>; diff --git a/Documentation/devicetree/bindings/iio/adc/qcom,spmi-vadc.yaml b/Documentation/devicetree/bindings/iio/adc/qcom,spmi-vadc.yaml index ad7d6fc49de5..40fa0710f1f0 100644 --- a/Documentation/devicetree/bindings/iio/adc/qcom,spmi-vadc.yaml +++ b/Documentation/devicetree/bindings/iio/adc/qcom,spmi-vadc.yaml @@ -236,11 +236,11 @@ additionalProperties: false examples: - | - spmi { + pmic { #address-cells = <1>; #size-cells = <0>; - /* VADC node */ - pmic_vadc: adc@3100 { + + adc@3100 { compatible = "qcom,spmi-vadc"; reg = <0x3100>; interrupts = <0x0 0x31 0x0 0x1>; @@ -281,9 +281,10 @@ examples: #include <dt-bindings/iio/qcom,spmi-adc7-pm8350.h> #include <dt-bindings/interrupt-controller/irq.h> - spmi { + pmic { #address-cells = <1>; #size-cells = <0>; + adc@3100 { reg = <0x3100>; compatible = "qcom,spmi-adc7"; diff --git a/Documentation/devicetree/bindings/iio/adc/ti,palmas-gpadc.yaml b/Documentation/devicetree/bindings/iio/adc/ti,palmas-gpadc.yaml index 720c16a108d4..f94057d8f605 100644 --- a/Documentation/devicetree/bindings/iio/adc/ti,palmas-gpadc.yaml +++ b/Documentation/devicetree/bindings/iio/adc/ti,palmas-gpadc.yaml @@ -67,19 +67,4 
@@ required: - compatible - "#io-channel-cells" -examples: - - | - #include <dt-bindings/clock/mt8183-clk.h> - pmic { - compatible = "ti,twl6035-pmic", "ti,palmas-pmic"; - adc { - compatible = "ti,palmas-gpadc"; - interrupts = <18 0>, - <16 0>, - <17 0>; - #io-channel-cells = <1>; - ti,channel0-current-microamp = <5>; - ti,channel3-current-microamp = <10>; - }; - }; ... diff --git a/Documentation/devicetree/bindings/iio/amplifiers/adi,hmc425a.yaml b/Documentation/devicetree/bindings/iio/amplifiers/adi,hmc425a.yaml index 2ee6080deac7..67de9d4e3a1d 100644 --- a/Documentation/devicetree/bindings/iio/amplifiers/adi,hmc425a.yaml +++ b/Documentation/devicetree/bindings/iio/amplifiers/adi,hmc425a.yaml @@ -12,6 +12,9 @@ maintainers: description: | Digital Step Attenuator IIO devices with gpio interface. Offer various frequency and attenuation ranges. + ADRF5750 2 dB LSB, 4-Bit, Silicon Digital Attenuator, 10 MHz to 60 GHz + https://www.analog.com/media/en/technical-documentation/data-sheets/adrf5740.pdf + HMC425A 0.5 dB LSB GaAs MMIC 6-BIT DIGITAL POSITIVE CONTROL ATTENUATOR, 2.2 - 8.0 GHz https://www.analog.com/media/en/technical-documentation/data-sheets/hmc425A.pdf @@ -22,6 +25,7 @@ description: | properties: compatible: enum: + - adi,adrf5740 - adi,hmc425a - adi,hmc540s diff --git a/Documentation/devicetree/bindings/iio/chemical/aosong,ags02ma.yaml b/Documentation/devicetree/bindings/iio/chemical/aosong,ags02ma.yaml new file mode 100644 index 000000000000..35e7b094e878 --- /dev/null +++ b/Documentation/devicetree/bindings/iio/chemical/aosong,ags02ma.yaml @@ -0,0 +1,47 @@ +# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/iio/chemical/aosong,ags02ma.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Aosong AGS02MA VOC Sensor + +description: | + AGS02MA is an TVOC (Total Volatile Organic Compounds) i2c sensor with default + address of 0x1a. + + Datasheet: + https://asairsensors.com/wp-content/uploads/2021/09/AGS02MA.pdf + +maintainers: + - Anshul Dalal <anshulusr@gmail.com> + +properties: + compatible: + enum: + - aosong,ags02ma + + reg: + maxItems: 1 + + vdd-supply: true + +required: + - compatible + - reg + - vdd-supply + +additionalProperties: false + +examples: + - | + i2c { + #address-cells = <1>; + #size-cells = <0>; + + voc-sensor@1a { + compatible = "aosong,ags02ma"; + reg = <0x1a>; + vdd-supply = <&vdd_regulator>; + }; + }; diff --git a/Documentation/devicetree/bindings/iio/dac/adi,ad5791.yaml b/Documentation/devicetree/bindings/iio/dac/adi,ad5791.yaml index 3a84739736f6..c81285d84db7 100644 --- a/Documentation/devicetree/bindings/iio/dac/adi,ad5791.yaml +++ b/Documentation/devicetree/bindings/iio/dac/adi,ad5791.yaml @@ -26,6 +26,11 @@ properties: vdd-supply: true vss-supply: true + adi,rbuf-gain2-en: + description: Specify to allow an external amplifier to be connected in a + gain of two configuration. 
+ type: boolean + required: - compatible - reg diff --git a/Documentation/devicetree/bindings/iio/dac/microchip,mcp4821.yaml b/Documentation/devicetree/bindings/iio/dac/microchip,mcp4821.yaml new file mode 100644 index 000000000000..0dc577c33918 --- /dev/null +++ b/Documentation/devicetree/bindings/iio/dac/microchip,mcp4821.yaml @@ -0,0 +1,86 @@ +# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/iio/dac/microchip,mcp4821.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Microchip MCP4821 and similar DACs + +description: | + Supports MCP48x1 (single channel) and MCP48x2 (dual channel) series of DACs. + Device supports simplex communication over SPI in Mode 0 and Mode 3. + + +---------+--------------+-------------+ + | Device | Resolution | Channels | + |---------|--------------|-------------| + | MCP4801 | 8-bit | 1 | + | MCP4802 | 8-bit | 2 | + | MCP4811 | 10-bit | 1 | + | MCP4812 | 10-bit | 2 | + | MCP4821 | 12-bit | 1 | + | MCP4822 | 12-bit | 2 | + +---------+--------------+-------------+ + + Datasheet: + MCP48x1: https://ww1.microchip.com/downloads/en/DeviceDoc/22244B.pdf + MCP48x2: https://ww1.microchip.com/downloads/en/DeviceDoc/20002249B.pdf + +maintainers: + - Anshul Dalal <anshulusr@gmail.com> + +allOf: + - $ref: /schemas/spi/spi-peripheral-props.yaml# + +properties: + compatible: + enum: + - microchip,mcp4801 + - microchip,mcp4802 + - microchip,mcp4811 + - microchip,mcp4812 + - microchip,mcp4821 + - microchip,mcp4822 + + reg: + maxItems: 1 + + vdd-supply: true + + ldac-gpios: + description: | + Active Low LDAC (Latch DAC Input) pin used to update the DAC output. + maxItems: 1 + + powerdown-gpios: + description: | + Active Low SHDN pin used to enter the shutdown mode. + maxItems: 1 + + spi-cpha: true + spi-cpol: true + +required: + - compatible + - reg + - vdd-supply + +additionalProperties: false + +examples: + - | + #include <dt-bindings/gpio/gpio.h> + + spi { + #address-cells = <1>; + #size-cells = <0>; + + dac@0 { + compatible = "microchip,mcp4821"; + reg = <0>; + vdd-supply = <&vdd_regulator>; + ldac-gpios = <&gpio0 1 GPIO_ACTIVE_HIGH>; + powerdown-gpios = <&gpio0 2 GPIO_ACTIVE_HIGH>; + spi-cpha; + spi-cpol; + }; + }; diff --git a/Documentation/devicetree/bindings/iio/humidity/ti,hdc3020.yaml b/Documentation/devicetree/bindings/iio/humidity/ti,hdc3020.yaml new file mode 100644 index 000000000000..7f6d0f9edc75 --- /dev/null +++ b/Documentation/devicetree/bindings/iio/humidity/ti,hdc3020.yaml @@ -0,0 +1,55 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/iio/humidity/ti,hdc3020.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: HDC3020/HDC3021/HDC3022 humidity and temperature iio sensors + +maintainers: + - Li peiyu <579lpy@gmail.com> + - Javier Carrasco <javier.carrasco.cruz@gmail.com> + +description: + https://www.ti.com/lit/ds/symlink/hdc3020.pdf + + The HDC302x is an integrated capacitive based relative humidity (RH) + and temperature sensor. 
+ +properties: + compatible: + oneOf: + - items: + - enum: + - ti,hdc3021 + - ti,hdc3022 + - const: ti,hdc3020 + - const: ti,hdc3020 + + interrupts: + maxItems: 1 + + vdd-supply: true + + reg: + maxItems: 1 + +required: + - compatible + - reg + - vdd-supply + +additionalProperties: false + +examples: + - | + i2c { + #address-cells = <1>; + #size-cells = <0>; + + humidity-sensor@47 { + compatible = "ti,hdc3021", "ti,hdc3020"; + reg = <0x47>; + vdd-supply = <&vcc_3v3>; + }; + }; diff --git a/Documentation/devicetree/bindings/iio/imu/adi,adis16460.yaml b/Documentation/devicetree/bindings/iio/imu/adi,adis16460.yaml index 4e43c80e5119..4cacc9948726 100644 --- a/Documentation/devicetree/bindings/iio/imu/adi,adis16460.yaml +++ b/Documentation/devicetree/bindings/iio/imu/adi,adis16460.yaml @@ -25,6 +25,10 @@ properties: spi-cpol: true + spi-cs-inactive-delay-ns: + minimum: 16000 + default: 16000 + interrupts: maxItems: 1 diff --git a/Documentation/devicetree/bindings/iio/imu/adi,adis16475.yaml b/Documentation/devicetree/bindings/iio/imu/adi,adis16475.yaml index c73533c54588..9b7ad609f7db 100644 --- a/Documentation/devicetree/bindings/iio/imu/adi,adis16475.yaml +++ b/Documentation/devicetree/bindings/iio/imu/adi,adis16475.yaml @@ -47,6 +47,10 @@ properties: spi-max-frequency: maximum: 2000000 + spi-cs-inactive-delay-ns: + minimum: 16000 + default: 16000 + interrupts: maxItems: 1 diff --git a/Documentation/devicetree/bindings/iio/imu/bosch,bmi323.yaml b/Documentation/devicetree/bindings/iio/imu/bosch,bmi323.yaml new file mode 100644 index 000000000000..64ef26e19669 --- /dev/null +++ b/Documentation/devicetree/bindings/iio/imu/bosch,bmi323.yaml @@ -0,0 +1,77 @@ +# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/iio/imu/bosch,bmi323.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Bosch BMI323 6-Axis IMU + +maintainers: + - Jagath Jog J <jagathjog1996@gmail.com> + +description: + BMI323 is a 6-axis inertial measurement unit that supports acceleration and + gyroscopic measurements with hardware fifo buffering. Sensor also provides + events information such as motion, steps, orientation, single and double + tap detection. + +properties: + compatible: + const: bosch,bmi323 + + reg: + maxItems: 1 + + vdd-supply: true + vddio-supply: true + + interrupts: + minItems: 1 + maxItems: 2 + + interrupt-names: + minItems: 1 + maxItems: 2 + items: + enum: + - INT1 + - INT2 + + drive-open-drain: + description: + set if the specified interrupt pin should be configured as + open drain. If not set, defaults to push-pull. + + mount-matrix: + description: + an optional 3x3 mounting rotation matrix. 
+ +required: + - compatible + - reg + - vdd-supply + - vddio-supply + +allOf: + - $ref: /schemas/spi/spi-peripheral-props.yaml# + +unevaluatedProperties: false + +examples: + - | + // Example for I2C + #include <dt-bindings/interrupt-controller/irq.h> + i2c { + #address-cells = <1>; + #size-cells = <0>; + + imu@68 { + compatible = "bosch,bmi323"; + reg = <0x68>; + vddio-supply = <&vddio>; + vdd-supply = <&vdd>; + interrupt-parent = <&gpio1>; + interrupts = <29 IRQ_TYPE_EDGE_RISING>; + interrupt-names = "INT1"; + }; + }; diff --git a/Documentation/devicetree/bindings/iio/light/liteon,ltr390.yaml b/Documentation/devicetree/bindings/iio/light/liteon,ltr390.yaml new file mode 100644 index 000000000000..5d98ef2af74d --- /dev/null +++ b/Documentation/devicetree/bindings/iio/light/liteon,ltr390.yaml @@ -0,0 +1,56 @@ +# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/iio/light/liteon,ltr390.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Lite-On LTR390 ALS and UV Sensor + +description: | + The Lite-On LTR390 is an ALS (Ambient Light Sensor) and a UV sensor in a + single package with i2c address of 0x53. + + Datasheet: + https://optoelectronics.liteon.com/upload/download/DS86-2015-0004/LTR-390UV_Final_%20DS_V1%201.pdf + +maintainers: + - Anshul Dalal <anshulusr@gmail.com> + +properties: + compatible: + enum: + - liteon,ltr390 + + reg: + maxItems: 1 + + interrupts: + maxItems: 1 + description: | + Level interrupt pin with open drain output. + The sensor pulls this pin low when the measured reading is greater than + some configured threshold. + + vdd-supply: true + +required: + - compatible + - reg + +additionalProperties: false + +examples: + - | + #include <dt-bindings/interrupt-controller/irq.h> + + i2c { + #address-cells = <1>; + #size-cells = <0>; + + light-sensor@53 { + compatible = "liteon,ltr390"; + reg = <0x53>; + interrupts = <18 IRQ_TYPE_EDGE_FALLING>; + vdd-supply = <&vdd_regulator>; + }; + }; diff --git a/Documentation/devicetree/bindings/iio/light/vishay,veml6075.yaml b/Documentation/devicetree/bindings/iio/light/vishay,veml6075.yaml new file mode 100644 index 000000000000..abee04cd126e --- /dev/null +++ b/Documentation/devicetree/bindings/iio/light/vishay,veml6075.yaml @@ -0,0 +1,39 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/iio/light/vishay,veml6075.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Vishay VEML6075 UVA and UVB sensor + +maintainers: + - Javier Carrasco <javier.carrasco.cruz@gmail.com> + +properties: + compatible: + const: vishay,veml6075 + + reg: + maxItems: 1 + + vdd-supply: true + +required: + - compatible + - reg + +additionalProperties: false + +examples: + - | + i2c { + #address-cells = <1>; + #size-cells = <0>; + + uv-sensor@10 { + compatible = "vishay,veml6075"; + reg = <0x10>; + vdd-supply = <&vdd_reg>; + }; + }; +... 
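The VEML6075 binding above pairs with the in_intensityY_uva_raw/in_intensityY_uvb_raw ABI additions earlier in this series. A rough userspace check, assuming the sensor probes as iio:device1 (the device index and exact channel naming are driver-dependent):

    cd /sys/bus/iio/devices/iio:device1
    cat in_intensity_uva_raw     # UV-A raw count; some drivers add a channel number (in_intensityY_...)
    cat in_intensity_uvb_raw     # UV-B raw count
    cat in_uvindex_input         # computed UV index, if the driver provides it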
diff --git a/Documentation/devicetree/bindings/iio/pressure/honeywell,hsc030pa.yaml b/Documentation/devicetree/bindings/iio/pressure/honeywell,hsc030pa.yaml new file mode 100644 index 000000000000..65a24ed67b3c --- /dev/null +++ b/Documentation/devicetree/bindings/iio/pressure/honeywell,hsc030pa.yaml @@ -0,0 +1,142 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/iio/pressure/honeywell,hsc030pa.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Honeywell TruStability HSC and SSC pressure sensor series + +description: | + support for Honeywell TruStability HSC and SSC digital pressure sensor + series. + + These sensors have either an I2C, an SPI or an analog interface. Only the + digital versions are supported by this driver. + + There are 118 models with different pressure ranges available in each family. + The vendor calls them "HSC series" and "SSC series". All of them have an + identical programming model but differ in pressure range, unit and transfer + function. + + To support different models one needs to specify the pressure range as well + as the transfer function. Pressure range can either be provided via + pressure-triplet (directly extracted from the part number) or in case it's + a custom chip via numerical range limits converted to pascals. + + The transfer function defines the ranges of raw conversion values delivered + by the sensor. pmin-pascal and pmax-pascal corespond to the minimum and + maximum pressure that can be measured. + + Please note that in case of an SPI-based sensor, the clock signal should not + exceed 800kHz and the MOSI signal is not required. + + Specifications about the devices can be found at: + https://prod-edam.honeywell.com/content/dam/honeywell-edam/sps/siot/en-us/products/sensors/pressure-sensors/board-mount-pressure-sensors/trustability-hsc-series/documents/sps-siot-trustability-hsc-series-high-accuracy-board-mount-pressure-sensors-50099148-a-en-ciid-151133.pdf + https://prod-edam.honeywell.com/content/dam/honeywell-edam/sps/siot/en-us/products/sensors/pressure-sensors/board-mount-pressure-sensors/trustability-ssc-series/documents/sps-siot-trustability-ssc-series-standard-accuracy-board-mount-pressure-sensors-50099533-a-en-ciid-151134.pdf + +maintainers: + - Petre Rodan <petre.rodan@subdimension.ro> + +properties: + compatible: + const: honeywell,hsc030pa + + reg: + maxItems: 1 + + honeywell,transfer-function: + description: | + Transfer function which defines the range of valid values delivered by + the sensor. + 0 - A, 10% to 90% of 2^14 + 1 - B, 5% to 95% of 2^14 + 2 - C, 5% to 85% of 2^14 + 3 - F, 4% to 94% of 2^14 + enum: [0, 1, 2, 3] + $ref: /schemas/types.yaml#/definitions/uint32 + + honeywell,pressure-triplet: + description: | + Case-sensitive five character string that defines pressure range, unit + and type as part of the device nomenclature. In the unlikely case of a + custom chip, set to "NA" and provide pmin-pascal and pmax-pascal. 
+ enum: [001BA, 1.6BA, 2.5BA, 004BA, 006BA, 010BA, 1.6MD, 2.5MD, 004MD, + 006MD, 010MD, 016MD, 025MD, 040MD, 060MD, 100MD, 160MD, 250MD, + 400MD, 600MD, 001BD, 1.6BD, 2.5BD, 004BD, 2.5MG, 004MG, 006MG, + 010MG, 016MG, 025MG, 040MG, 060MG, 100MG, 160MG, 250MG, 400MG, + 600MG, 001BG, 1.6BG, 2.5BG, 004BG, 006BG, 010BG, 100KA, 160KA, + 250KA, 400KA, 600KA, 001GA, 160LD, 250LD, 400LD, 600LD, 001KD, + 1.6KD, 2.5KD, 004KD, 006KD, 010KD, 016KD, 025KD, 040KD, 060KD, + 100KD, 160KD, 250KD, 400KD, 250LG, 400LG, 600LG, 001KG, 1.6KG, + 2.5KG, 004KG, 006KG, 010KG, 016KG, 025KG, 040KG, 060KG, 100KG, + 160KG, 250KG, 400KG, 600KG, 001GG, 015PA, 030PA, 060PA, 100PA, + 150PA, 0.5ND, 001ND, 002ND, 004ND, 005ND, 010ND, 020ND, 030ND, + 001PD, 005PD, 015PD, 030PD, 060PD, 001NG, 002NG, 004NG, 005NG, + 010NG, 020NG, 030NG, 001PG, 005PG, 015PG, 030PG, 060PG, 100PG, + 150PG, NA] + $ref: /schemas/types.yaml#/definitions/string + + honeywell,pmin-pascal: + description: | + Minimum pressure value the sensor can measure in pascal. + To be specified only if honeywell,pressure-triplet is set to "NA". + + honeywell,pmax-pascal: + description: | + Maximum pressure value the sensor can measure in pascal. + To be specified only if honeywell,pressure-triplet is set to "NA". + + vdd-supply: + description: + Provide VDD power to the sensor (either 3.3V or 5V depending on the chip) + + spi-max-frequency: + maximum: 800000 + +required: + - compatible + - reg + - honeywell,transfer-function + - honeywell,pressure-triplet + +additionalProperties: false + +dependentSchemas: + honeywell,pmin-pascal: + properties: + honeywell,pressure-triplet: + const: NA + honeywell,pmax-pascal: + properties: + honeywell,pressure-triplet: + const: NA + +examples: + - | + i2c { + #address-cells = <1>; + #size-cells = <0>; + + pressure@28 { + compatible = "honeywell,hsc030pa"; + reg = <0x28>; + honeywell,transfer-function = <0>; + honeywell,pressure-triplet = "030PA"; + }; + }; + - | + spi { + #address-cells = <1>; + #size-cells = <0>; + + pressure@0 { + compatible = "honeywell,hsc030pa"; + reg = <0>; + spi-max-frequency = <800000>; + honeywell,transfer-function = <0>; + honeywell,pressure-triplet = "NA"; + honeywell,pmin-pascal = <0>; + honeywell,pmax-pascal = <200000>; + }; + }; +... diff --git a/Documentation/devicetree/bindings/iio/pressure/honeywell,mprls0025pa.yaml b/Documentation/devicetree/bindings/iio/pressure/honeywell,mprls0025pa.yaml index b31f8120f14e..d9e903fbfd99 100644 --- a/Documentation/devicetree/bindings/iio/pressure/honeywell,mprls0025pa.yaml +++ b/Documentation/devicetree/bindings/iio/pressure/honeywell,mprls0025pa.yaml @@ -53,12 +53,10 @@ properties: honeywell,pmin-pascal: description: Minimum pressure value the sensor can measure in pascal. - $ref: /schemas/types.yaml#/definitions/uint32 honeywell,pmax-pascal: description: Maximum pressure value the sensor can measure in pascal. 
- $ref: /schemas/types.yaml#/definitions/uint32 honeywell,transfer-function: description: | diff --git a/Documentation/devicetree/bindings/iio/temperature/melexis,mlx90632.yaml b/Documentation/devicetree/bindings/iio/temperature/melexis,mlx90632.yaml index 4a55e7f25ae7..03bb5d4fa8b5 100644 --- a/Documentation/devicetree/bindings/iio/temperature/melexis,mlx90632.yaml +++ b/Documentation/devicetree/bindings/iio/temperature/melexis,mlx90632.yaml @@ -4,7 +4,7 @@ $id: http://devicetree.org/schemas/iio/temperature/melexis,mlx90632.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: Melexis MLX90632 contactless Infra Red temperature sensor +title: Melexis MLX90632 and MLX90635 contactless Infra Red temperature sensor maintainers: - Crt Mori <cmo@melexis.com> @@ -27,9 +27,24 @@ description: | Since measured object emissivity effects Infra Red energy emitted, emissivity should be set before requesting the object temperature. + https://www.melexis.com/en/documents/documentation/datasheets/datasheet-mlx90635 + + MLX90635 is most suitable for consumer applications where + measured object temperature is in range between -20 to 100 degrees + Celsius with relative error of measurement 2 degree Celsius in + object temperature range for industrial applications, while just 0.2 + degree Celsius for human body measurement applications. Since it can + operate and measure ambient temperature in range of -20 to 85 degrees + Celsius it is suitable also for outdoor use. + + Since measured object emissivity effects Infra Red energy emitted, + emissivity should be set before requesting the object temperature. + properties: compatible: - const: melexis,mlx90632 + enum: + - melexis,mlx90632 + - melexis,mlx90635 reg: maxItems: 1 diff --git a/Documentation/devicetree/bindings/iio/temperature/microchip,mcp9600.yaml b/Documentation/devicetree/bindings/iio/temperature/microchip,mcp9600.yaml new file mode 100644 index 000000000000..d2cafa38a544 --- /dev/null +++ b/Documentation/devicetree/bindings/iio/temperature/microchip,mcp9600.yaml @@ -0,0 +1,70 @@ +# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/iio/temperature/microchip,mcp9600.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Microchip MCP9600 thermocouple EMF converter + +maintainers: + - Andrew Hepp <andrew.hepp@ahepp.dev> + +description: + https://ww1.microchip.com/downloads/en/DeviceDoc/MCP960X-Data-Sheet-20005426.pdf + +properties: + compatible: + const: microchip,mcp9600 + + reg: + maxItems: 1 + + interrupts: + minItems: 1 + maxItems: 6 + + interrupt-names: + minItems: 1 + maxItems: 6 + items: + enum: + - open-circuit + - short-circuit + - alert1 + - alert2 + - alert3 + - alert4 + + thermocouple-type: + $ref: /schemas/types.yaml#/definitions/uint32 + description: + Type of thermocouple (THERMOCOUPLE_TYPE_K if omitted). + Use defines in dt-bindings/iio/temperature/thermocouple.h. + Supported types are B, E, J, K, N, R, S, T. 
+ + vdd-supply: true + +required: + - compatible + - reg + +additionalProperties: false + +examples: + - | + #include <dt-bindings/iio/temperature/thermocouple.h> + #include <dt-bindings/interrupt-controller/irq.h> + i2c { + #address-cells = <1>; + #size-cells = <0>; + + temperature-sensor@60 { + compatible = "microchip,mcp9600"; + reg = <0x60>; + interrupt-parent = <&gpio>; + interrupts = <25 IRQ_TYPE_EDGE_RISING>; + interrupt-names = "open-circuit"; + thermocouple-type = <THERMOCOUPLE_TYPE_K>; + vdd-supply = <&vdd>; + }; + }; diff --git a/Documentation/devicetree/bindings/interconnect/qcom,msm8998-bwmon.yaml b/Documentation/devicetree/bindings/interconnect/qcom,msm8998-bwmon.yaml index 73f809cdb783..05067e197abe 100644 --- a/Documentation/devicetree/bindings/interconnect/qcom,msm8998-bwmon.yaml +++ b/Documentation/devicetree/bindings/interconnect/qcom,msm8998-bwmon.yaml @@ -25,13 +25,16 @@ properties: - const: qcom,msm8998-bwmon # BWMON v4 - items: - enum: + - qcom,qcm2290-cpu-bwmon - qcom,sc7180-cpu-bwmon - qcom,sc7280-cpu-bwmon - qcom,sc8280xp-cpu-bwmon - qcom,sdm845-cpu-bwmon + - qcom,sm6115-cpu-bwmon - qcom,sm6350-llcc-bwmon - qcom,sm8250-cpu-bwmon - qcom,sm8550-cpu-bwmon + - qcom,sm8650-cpu-bwmon - const: qcom,sdm845-bwmon # BWMON v4, unified register space - items: - enum: @@ -40,6 +43,7 @@ properties: - qcom,sm6350-cpu-bwmon - qcom,sm8250-llcc-bwmon - qcom,sm8550-llcc-bwmon + - qcom,sm8650-llcc-bwmon - const: qcom,sc7280-llcc-bwmon - const: qcom,sc7280-llcc-bwmon # BWMON v5 - const: qcom,sdm845-llcc-bwmon # BWMON v5 diff --git a/Documentation/devicetree/bindings/nvmem/st,stm32-romem.yaml b/Documentation/devicetree/bindings/nvmem/st,stm32-romem.yaml index a69de3e92282..92bfe25f0571 100644 --- a/Documentation/devicetree/bindings/nvmem/st,stm32-romem.yaml +++ b/Documentation/devicetree/bindings/nvmem/st,stm32-romem.yaml @@ -24,6 +24,7 @@ properties: - st,stm32f4-otp - st,stm32mp13-bsec - st,stm32mp15-bsec + - st,stm32mp25-bsec reg: maxItems: 1 diff --git a/Documentation/devicetree/bindings/trivial-devices.yaml b/Documentation/devicetree/bindings/trivial-devices.yaml index 21d1f066f875..79dcd92c4a43 100644 --- a/Documentation/devicetree/bindings/trivial-devices.yaml +++ b/Documentation/devicetree/bindings/trivial-devices.yaml @@ -177,6 +177,8 @@ properties: - isil,isl29030 # Intersil ISL68137 Digital Output Configurable PWM Controller - isil,isl68137 + # Intersil ISL76682 Ambient Light Sensor + - isil,isl76682 # Linear Technology LTC2488 - lineartechnology,ltc2488 # 5 Bit Programmable, Pulse-Width Modulator diff --git a/Documentation/devicetree/bindings/vendor-prefixes.yaml b/Documentation/devicetree/bindings/vendor-prefixes.yaml index 02aaa1f538b2..1a0dc04f1db4 100644 --- a/Documentation/devicetree/bindings/vendor-prefixes.yaml +++ b/Documentation/devicetree/bindings/vendor-prefixes.yaml @@ -121,6 +121,8 @@ patternProperties: description: Andes Technology Corporation "^anvo,.*": description: Anvo-Systems Dresden GmbH + "^aosong,.*": + description: Guangzhou Aosong Electronic Co., Ltd. 
"^apm,.*": description: Applied Micro Circuits Corporation (APM) "^apple,.*": diff --git a/Documentation/devicetree/bindings/w1/amd,axi-1wire-host.yaml b/Documentation/devicetree/bindings/w1/amd,axi-1wire-host.yaml new file mode 100644 index 000000000000..ef70fa2c0c5d --- /dev/null +++ b/Documentation/devicetree/bindings/w1/amd,axi-1wire-host.yaml @@ -0,0 +1,44 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/w1/amd,axi-1wire-host.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: AMD AXI 1-wire bus host for programmable logic + +maintainers: + - Kris Chaplin <kris.chaplin@amd.com> + +properties: + compatible: + const: amd,axi-1wire-host + + reg: + maxItems: 1 + + clocks: + maxItems: 1 + + interrupts: + maxItems: 1 + +required: + - compatible + - reg + - clocks + - interrupts + +additionalProperties: false + +examples: + - | + #include <dt-bindings/interrupt-controller/arm-gic.h> + + onewire@a0000000 { + compatible = "amd,axi-1wire-host"; + reg = <0xa0000000 0x10000>; + clocks = <&zynqmp_clk 0x47>; + interrupts = <GIC_SPI 0x59 IRQ_TYPE_LEVEL_HIGH>; + }; + +... diff --git a/Documentation/trace/coresight/coresight.rst b/Documentation/trace/coresight/coresight.rst index 826e59a698da..d4f93d6a2d63 100644 --- a/Documentation/trace/coresight/coresight.rst +++ b/Documentation/trace/coresight/coresight.rst @@ -624,6 +624,10 @@ They are also listed in the folder /sys/bus/event_source/devices/cs_etm/format/ * - timestamp - Session local version of the system wide setting: :ref:`ETMv4_MODE_TIMESTAMP <coresight-timestamp>` + * - cc_threshold + - Cycle count threshold value. If nothing is provided here or the provided value is 0, then the + default value i.e 0x100 will be used. If provided value is less than minimum cycles threshold + value, as indicated via TRCIDR3.CCITMIN, then the minimum value will be used instead. 
How to use the STM module ------------------------- diff --git a/MAINTAINERS b/MAINTAINERS index 67d09557c956..2d94c72c3742 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -890,6 +890,14 @@ Q: https://patchwork.kernel.org/project/linux-rdma/list/ F: drivers/infiniband/hw/efa/ F: include/uapi/rdma/efa-abi.h +AMD AXI W1 DRIVER +M: Kris Chaplin <kris.chaplin@amd.com> +R: Thomas Delev <thomas.delev@amd.com> +R: Michal Simek <michal.simek@amd.com> +S: Maintained +F: Documentation/devicetree/bindings/w1/amd,axi-1wire-host.yaml +F: drivers/w1/masters/amd_axi_w1.c + AMD CDX BUS DRIVER M: Nipun Gupta <nipun.gupta@amd.com> M: Nikhil Agarwal <nikhil.agarwal@amd.com> @@ -1123,6 +1131,14 @@ F: Documentation/ABI/testing/sysfs-bus-iio-adc-ad4130 F: Documentation/devicetree/bindings/iio/adc/adi,ad4130.yaml F: drivers/iio/adc/ad4130.c +ANALOG DEVICES INC AD7091R DRIVER +M: Marcelo Schmitt <marcelo.schmitt@analog.com> +L: linux-iio@vger.kernel.org +S: Supported +W: http://ez.analog.com/community/linux-device-drivers +F: Documentation/devicetree/bindings/iio/adc/adi,ad7091r* +F: drivers/iio/adc/drivers/iio/adc/ad7091r* + ANALOG DEVICES INC AD7192 DRIVER M: Alexandru Tachici <alexandru.tachici@analog.com> L: linux-iio@vger.kernel.org @@ -2048,7 +2064,6 @@ ARM/CORESIGHT FRAMEWORK AND DRIVERS M: Suzuki K Poulose <suzuki.poulose@arm.com> R: Mike Leach <mike.leach@linaro.org> R: James Clark <james.clark@arm.com> -R: Leo Yan <leo.yan@linaro.org> L: coresight@lists.linaro.org (moderated for non-subscribers) L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained @@ -3044,6 +3059,13 @@ S: Supported W: http://www.akm.com/ F: drivers/iio/magnetometer/ak8974.c +AOSONG AGS02MA TVOC SENSOR DRIVER +M: Anshul Dalal <anshulusr@gmail.com> +L: linux-iio@vger.kernel.org +S: Maintained +F: Documentation/devicetree/bindings/iio/chemical/aosong,ags02ma.yaml +F: drivers/iio/chemical/ags02ma.c + ASC7621 HARDWARE MONITOR DRIVER M: George Joseph <george.joseph@fairview5.com> L: linux-hwmon@vger.kernel.org @@ -3643,6 +3665,13 @@ S: Maintained F: Documentation/devicetree/bindings/iio/accel/bosch,bma400.yaml F: drivers/iio/accel/bma400* +BOSCH SENSORTEC BMI323 IMU IIO DRIVER +M: Jagath Jog J <jagathjog1996@gmail.com> +L: linux-iio@vger.kernel.org +S: Maintained +F: Documentation/devicetree/bindings/iio/imu/bosch,bmi323.yaml +F: drivers/iio/imu/bmi323/ + BPF JIT for ARM M: Russell King <linux@armlinux.org.uk> M: Puranjay Mohan <puranjay12@gmail.com> @@ -5384,6 +5413,12 @@ F: include/linux/counter.h F: include/uapi/linux/counter.h F: tools/counter/ +COUNTER WATCH EVENTS TOOL +M: Fabrice Gasnier <fabrice.gasnier@foss.st.com> +L: linux-iio@vger.kernel.org +S: Maintained +F: tools/counter/counter_watch_events.c + CP2615 I2C DRIVER M: Bence Csókás <bence98@sch.bme.hu> S: Maintained @@ -9731,6 +9766,13 @@ F: lib/test_hmm* F: mm/hmm* F: tools/testing/selftests/mm/*hmm* +HONEYWELL HSC030PA PRESSURE SENSOR SERIES IIO DRIVER +M: Petre Rodan <petre.rodan@subdimension.ro> +L: linux-iio@vger.kernel.org +S: Maintained +F: Documentation/devicetree/bindings/iio/pressure/honeywell,hsc030pa.yaml +F: drivers/iio/pressure/hsc030pa* + HONEYWELL MPRLS0025PA PRESSURE SENSOR SERIES IIO DRIVER M: Andreas Klinger <ak@it-klinger.de> L: linux-iio@vger.kernel.org @@ -10346,8 +10388,8 @@ IIO LIGHT SENSOR GAIN-TIME-SCALE HELPERS M: Matti Vaittinen <mazziesaccount@gmail.com> L: linux-iio@vger.kernel.org S: Maintained -F: drivers/iio/light/gain-time-scale-helper.c -F: drivers/iio/light/gain-time-scale-helper.h +F: 
drivers/iio/industrialio-gts-helper.c +F: include/linux/iio/iio-gts-helper.h IIO MULTIPLEXER M: Peter Rosin <peda@axentia.se> @@ -12734,6 +12776,13 @@ S: Maintained W: http://linux-test-project.github.io/ T: git https://github.com/linux-test-project/ltp.git +LTR390 AMBIENT/UV LIGHT SENSOR DRIVER +M: Anshul Dalal <anshulusr@gmail.com> +L: linux-iio@vger.kernel.org +S: Maintained +F: Documentation/devicetree/bindings/iio/light/liteon,ltr390.yaml +F: drivers/iio/light/ltr390.c + LYNX 28G SERDES PHY DRIVER M: Ioana Ciornei <ioana.ciornei@nxp.com> L: netdev@vger.kernel.org @@ -13278,6 +13327,13 @@ F: Documentation/ABI/testing/sysfs-bus-iio-potentiometer-mcp4531 F: drivers/iio/potentiometer/mcp4018.c F: drivers/iio/potentiometer/mcp4531.c +MCP4821 DAC DRIVER +M: Anshul Dalal <anshulusr@gmail.com> +L: linux-iio@vger.kernel.org +S: Maintained +F: Documentation/devicetree/bindings/iio/dac/microchip,mcp4821.yaml +F: drivers/iio/dac/mcp4821.c + MCR20A IEEE-802.15.4 RADIO DRIVER M: Stefan Schmidt <stefan@datenfreihafen.org> L: linux-wpan@vger.kernel.org @@ -13805,6 +13861,13 @@ S: Supported W: http://www.melexis.com F: drivers/iio/temperature/mlx90632.c +MELEXIS MLX90635 DRIVER +M: Crt Mori <cmo@melexis.com> +L: linux-iio@vger.kernel.org +S: Supported +W: http://www.melexis.com +F: drivers/iio/temperature/mlx90635.c + MELFAS MIP4 TOUCHSCREEN DRIVER M: Sangwon Jee <jeesw@melfas.com> S: Supported @@ -14275,11 +14338,12 @@ F: Documentation/devicetree/bindings/regulator/mcp16502-regulator.txt F: drivers/regulator/mcp16502.c MICROCHIP MCP3564 ADC DRIVER -M: Marius Cristea <marius.cristea@microchip.com> -L: linux-iio@vger.kernel.org -S: Supported -F: Documentation/devicetree/bindings/iio/adc/microchip,mcp3564.yaml -F: drivers/iio/adc/mcp3564.c +M: Marius Cristea <marius.cristea@microchip.com> +L: linux-iio@vger.kernel.org +S: Supported +F: Documentation/ABI/testing/sysfs-bus-iio-adc-mcp3564 +F: Documentation/devicetree/bindings/iio/adc/microchip,mcp3564.yaml +F: drivers/iio/adc/mcp3564.c MICROCHIP MCP3911 ADC DRIVER M: Marcus Folkesson <marcus.folkesson@gmail.com> @@ -15374,6 +15438,15 @@ F: include/linux/nitro_enclaves.h F: include/uapi/linux/nitro_enclaves.h F: samples/nitro_enclaves/ +NITRO SECURE MODULE (NSM) +M: Alexander Graf <graf@amazon.com> +L: linux-kernel@vger.kernel.org +L: The AWS Nitro Enclaves Team <aws-nitro-enclaves-devel@amazon.com> +S: Supported +W: https://aws.amazon.com/ec2/nitro/nitro-enclaves/ +F: drivers/misc/nsm.c +F: include/uapi/linux/nsm.h + NOHZ, DYNTICKS SUPPORT M: Frederic Weisbecker <frederic@kernel.org> M: Thomas Gleixner <tglx@linutronix.de> @@ -23288,6 +23361,12 @@ S: Maintained F: drivers/input/serio/userio.c F: include/uapi/linux/userio.h +VISHAY VEML6075 UVA AND UVB LIGHT SENSOR DRIVER +M: Javier Carrasco <javier.carrasco.cruz@gmail.com> +S: Maintained +F: Documentation/devicetree/bindings/iio/light/vishay,veml6075.yaml +F: drivers/iio/light/veml6075.c + VISL VIRTUAL STATELESS DECODER DRIVER M: Daniel Almeida <daniel.almeida@collabora.com> L: linux-media@vger.kernel.org diff --git a/arch/arm/common/locomo.c b/arch/arm/common/locomo.c index 70480dd9e96d..6d0c9f7268ba 100644 --- a/arch/arm/common/locomo.c +++ b/arch/arm/common/locomo.c @@ -68,6 +68,8 @@ struct locomo { #endif }; +static const struct bus_type locomo_bus_type; + struct locomo_dev_info { unsigned long offset; unsigned long length; @@ -842,7 +844,7 @@ static void locomo_bus_remove(struct device *dev) drv->remove(ldev); } -struct bus_type locomo_bus_type = { +static const struct bus_type locomo_bus_type = { 
.name = "locomo-bus", .match = locomo_match, .probe = locomo_bus_probe, diff --git a/arch/arm/include/asm/hardware/locomo.h b/arch/arm/include/asm/hardware/locomo.h index aaaedafef7cc..9fd9ad5d9202 100644 --- a/arch/arm/include/asm/hardware/locomo.h +++ b/arch/arm/include/asm/hardware/locomo.h @@ -158,8 +158,6 @@ #define LOCOMO_LPT_TOH(TOH) ((TOH & 0x7) << 4) #define LOCOMO_LPT_TOL(TOL) ((TOL & 0x7)) -extern struct bus_type locomo_bus_type; - #define LOCOMO_DEVID_KEYBOARD 0 #define LOCOMO_DEVID_FRONTLIGHT 1 #define LOCOMO_DEVID_BACKLIGHT 2 diff --git a/drivers/android/binder.c b/drivers/android/binder.c index 7658103ba760..8dd23b19e997 100644 --- a/drivers/android/binder.c +++ b/drivers/android/binder.c @@ -2077,9 +2077,8 @@ static void binder_transaction_buffer_release(struct binder_proc *proc, * Convert the address to an offset relative to * the base of the transaction buffer. */ - fda_offset = - (parent->buffer - (uintptr_t)buffer->user_data) + - fda->parent_offset; + fda_offset = parent->buffer - buffer->user_data + + fda->parent_offset; for (fd_index = 0; fd_index < fda->num_fds; fd_index++) { u32 fd; @@ -2597,7 +2596,7 @@ static int binder_translate_fd_array(struct list_head *pf_head, * Convert the address to an offset relative to * the base of the transaction buffer. */ - fda_offset = (parent->buffer - (uintptr_t)t->buffer->user_data) + + fda_offset = parent->buffer - t->buffer->user_data + fda->parent_offset; sender_ufda_base = (void __user *)(uintptr_t)sender_uparent->buffer + fda->parent_offset; @@ -2672,8 +2671,9 @@ static int binder_fixup_parent(struct list_head *pf_head, proc->pid, thread->pid); return -EINVAL; } - buffer_offset = bp->parent_offset + - (uintptr_t)parent->buffer - (uintptr_t)b->user_data; + + buffer_offset = bp->parent_offset + parent->buffer - b->user_data; + return binder_add_fixup(pf_head, buffer_offset, bp->buffer, 0); } @@ -3225,7 +3225,7 @@ static void binder_transaction(struct binder_proc *proc, t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size, tr->offsets_size, extra_buffers_size, - !reply && (t->flags & TF_ONE_WAY), current->tgid); + !reply && (t->flags & TF_ONE_WAY)); if (IS_ERR(t->buffer)) { char *s; @@ -3250,7 +3250,7 @@ static void binder_transaction(struct binder_proc *proc, ALIGN(extra_buffers_size, sizeof(void *)) - ALIGN(secctx_sz, sizeof(u64)); - t->security_ctx = (uintptr_t)t->buffer->user_data + buf_offset; + t->security_ctx = t->buffer->user_data + buf_offset; err = binder_alloc_copy_to_buffer(&target_proc->alloc, t->buffer, buf_offset, secctx, secctx_sz); @@ -3527,8 +3527,7 @@ static void binder_transaction(struct binder_proc *proc, goto err_translate_failed; } /* Fixup buffer pointer to target proc address space */ - bp->buffer = (uintptr_t) - t->buffer->user_data + sg_buf_offset; + bp->buffer = t->buffer->user_data + sg_buf_offset; sg_buf_offset += ALIGN(bp->length, sizeof(u64)); num_valid = (buffer_offset - off_start_offset) / @@ -4698,7 +4697,7 @@ retry: } trd->data_size = t->buffer->data_size; trd->offsets_size = t->buffer->offsets_size; - trd->data.ptr.buffer = (uintptr_t)t->buffer->user_data; + trd->data.ptr.buffer = t->buffer->user_data; trd->data.ptr.offsets = trd->data.ptr.buffer + ALIGN(t->buffer->data_size, sizeof(void *)); @@ -5030,7 +5029,7 @@ static __poll_t binder_poll(struct file *filp, thread = binder_get_thread(proc); if (!thread) - return POLLERR; + return EPOLLERR; binder_inner_proc_lock(thread->proc); thread->looper |= BINDER_LOOPER_STATE_POLL; @@ -5981,9 +5980,9 @@ static void 
print_binder_transaction_ilocked(struct seq_file *m, } if (buffer->target_node) seq_printf(m, " node %d", buffer->target_node->debug_id); - seq_printf(m, " size %zd:%zd data %pK\n", + seq_printf(m, " size %zd:%zd offset %lx\n", buffer->data_size, buffer->offsets_size, - buffer->user_data); + proc->alloc.buffer - buffer->user_data); } static void print_binder_work_ilocked(struct seq_file *m, diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c index f69d30c9f50f..e0e4dc38b692 100644 --- a/drivers/android/binder_alloc.c +++ b/drivers/android/binder_alloc.c @@ -26,7 +26,7 @@ #include "binder_alloc.h" #include "binder_trace.h" -struct list_lru binder_alloc_lru; +struct list_lru binder_freelist; static DEFINE_MUTEX(binder_alloc_mmap_lock); @@ -125,23 +125,20 @@ static void binder_insert_allocated_buffer_locked( static struct binder_buffer *binder_alloc_prepare_to_free_locked( struct binder_alloc *alloc, - uintptr_t user_ptr) + unsigned long user_ptr) { struct rb_node *n = alloc->allocated_buffers.rb_node; struct binder_buffer *buffer; - void __user *uptr; - - uptr = (void __user *)user_ptr; while (n) { buffer = rb_entry(n, struct binder_buffer, rb_node); BUG_ON(buffer->free); - if (uptr < buffer->user_data) + if (user_ptr < buffer->user_data) { n = n->rb_left; - else if (uptr > buffer->user_data) + } else if (user_ptr > buffer->user_data) { n = n->rb_right; - else { + } else { /* * Guard against user threads attempting to * free the buffer when in use by kernel or @@ -168,145 +165,168 @@ static struct binder_buffer *binder_alloc_prepare_to_free_locked( * Return: Pointer to buffer or NULL */ struct binder_buffer *binder_alloc_prepare_to_free(struct binder_alloc *alloc, - uintptr_t user_ptr) + unsigned long user_ptr) { struct binder_buffer *buffer; - mutex_lock(&alloc->mutex); + spin_lock(&alloc->lock); buffer = binder_alloc_prepare_to_free_locked(alloc, user_ptr); - mutex_unlock(&alloc->mutex); + spin_unlock(&alloc->lock); return buffer; } -static int binder_update_page_range(struct binder_alloc *alloc, int allocate, - void __user *start, void __user *end) +static inline void +binder_set_installed_page(struct binder_lru_page *lru_page, + struct page *page) +{ + /* Pairs with acquire in binder_get_installed_page() */ + smp_store_release(&lru_page->page_ptr, page); +} + +static inline struct page * +binder_get_installed_page(struct binder_lru_page *lru_page) +{ + /* Pairs with release in binder_set_installed_page() */ + return smp_load_acquire(&lru_page->page_ptr); +} + +static void binder_lru_freelist_add(struct binder_alloc *alloc, + unsigned long start, unsigned long end) { - void __user *page_addr; - unsigned long user_page_addr; struct binder_lru_page *page; - struct vm_area_struct *vma = NULL; - struct mm_struct *mm = NULL; - bool need_mm = false; + unsigned long page_addr; - binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC, - "%d: %s pages %pK-%pK\n", alloc->pid, - allocate ? 
"allocate" : "free", start, end); + trace_binder_update_page_range(alloc, false, start, end); - if (end <= start) - return 0; + for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) { + size_t index; + int ret; - trace_binder_update_page_range(alloc, allocate, start, end); + index = (page_addr - alloc->buffer) / PAGE_SIZE; + page = &alloc->pages[index]; - if (allocate == 0) - goto free_range; + if (!binder_get_installed_page(page)) + continue; - for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) { - page = &alloc->pages[(page_addr - alloc->buffer) / PAGE_SIZE]; - if (!page->page_ptr) { - need_mm = true; - break; - } + trace_binder_free_lru_start(alloc, index); + + ret = list_lru_add_obj(&binder_freelist, &page->lru); + WARN_ON(!ret); + + trace_binder_free_lru_end(alloc, index); } +} + +static int binder_install_single_page(struct binder_alloc *alloc, + struct binder_lru_page *lru_page, + unsigned long addr) +{ + struct page *page; + int ret = 0; - if (need_mm && mmget_not_zero(alloc->mm)) - mm = alloc->mm; + if (!mmget_not_zero(alloc->mm)) + return -ESRCH; - if (mm) { - mmap_write_lock(mm); - vma = alloc->vma; + /* + * Protected with mmap_sem in write mode as multiple tasks + * might race to install the same page. + */ + mmap_write_lock(alloc->mm); + if (binder_get_installed_page(lru_page)) + goto out; + + if (!alloc->vma) { + pr_err("%d: %s failed, no vma\n", alloc->pid, __func__); + ret = -ESRCH; + goto out; } - if (!vma && need_mm) { - binder_alloc_debug(BINDER_DEBUG_USER_ERROR, - "%d: binder_alloc_buf failed to map pages in userspace, no vma\n", - alloc->pid); - goto err_no_vma; + page = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO); + if (!page) { + pr_err("%d: failed to allocate page\n", alloc->pid); + ret = -ENOMEM; + goto out; } - for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) { + ret = vm_insert_page(alloc->vma, addr, page); + if (ret) { + pr_err("%d: %s failed to insert page at offset %lx with %d\n", + alloc->pid, __func__, addr - alloc->buffer, ret); + __free_page(page); + ret = -ENOMEM; + goto out; + } + + /* Mark page installation complete and safe to use */ + binder_set_installed_page(lru_page, page); +out: + mmap_write_unlock(alloc->mm); + mmput_async(alloc->mm); + return ret; +} + +static int binder_install_buffer_pages(struct binder_alloc *alloc, + struct binder_buffer *buffer, + size_t size) +{ + struct binder_lru_page *page; + unsigned long start, final; + unsigned long page_addr; + + start = buffer->user_data & PAGE_MASK; + final = PAGE_ALIGN(buffer->user_data + size); + + for (page_addr = start; page_addr < final; page_addr += PAGE_SIZE) { + unsigned long index; int ret; - bool on_lru; - size_t index; index = (page_addr - alloc->buffer) / PAGE_SIZE; page = &alloc->pages[index]; - if (page->page_ptr) { - trace_binder_alloc_lru_start(alloc, index); - - on_lru = list_lru_del_obj(&binder_alloc_lru, &page->lru); - WARN_ON(!on_lru); - - trace_binder_alloc_lru_end(alloc, index); + if (binder_get_installed_page(page)) continue; - } - - if (WARN_ON(!vma)) - goto err_page_ptr_cleared; trace_binder_alloc_page_start(alloc, index); - page->page_ptr = alloc_page(GFP_KERNEL | - __GFP_HIGHMEM | - __GFP_ZERO); - if (!page->page_ptr) { - pr_err("%d: binder_alloc_buf failed for page at %pK\n", - alloc->pid, page_addr); - goto err_alloc_page_failed; - } - page->alloc = alloc; - INIT_LIST_HEAD(&page->lru); - - user_page_addr = (uintptr_t)page_addr; - ret = vm_insert_page(vma, user_page_addr, page[0].page_ptr); - if (ret) { - pr_err("%d: 
binder_alloc_buf failed to map page at %lx in userspace\n", - alloc->pid, user_page_addr); - goto err_vm_insert_page_failed; - } - if (index + 1 > alloc->pages_high) - alloc->pages_high = index + 1; + ret = binder_install_single_page(alloc, page, page_addr); + if (ret) + return ret; trace_binder_alloc_page_end(alloc, index); } - if (mm) { - mmap_write_unlock(mm); - mmput(mm); - } + return 0; +} -free_range: - for (page_addr = end - PAGE_SIZE; 1; page_addr -= PAGE_SIZE) { - bool ret; - size_t index; +/* The range of pages should exclude those shared with other buffers */ +static void binder_lru_freelist_del(struct binder_alloc *alloc, + unsigned long start, unsigned long end) +{ + struct binder_lru_page *page; + unsigned long page_addr; + + trace_binder_update_page_range(alloc, true, start, end); + + for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) { + unsigned long index; + bool on_lru; index = (page_addr - alloc->buffer) / PAGE_SIZE; page = &alloc->pages[index]; - trace_binder_free_lru_start(alloc, index); + if (page->page_ptr) { + trace_binder_alloc_lru_start(alloc, index); - ret = list_lru_add_obj(&binder_alloc_lru, &page->lru); - WARN_ON(!ret); + on_lru = list_lru_del_obj(&binder_freelist, &page->lru); + WARN_ON(!on_lru); - trace_binder_free_lru_end(alloc, index); - if (page_addr == start) - break; - continue; - -err_vm_insert_page_failed: - __free_page(page->page_ptr); - page->page_ptr = NULL; -err_alloc_page_failed: -err_page_ptr_cleared: - if (page_addr == start) - break; - } -err_no_vma: - if (mm) { - mmap_write_unlock(mm); - mmput(mm); + trace_binder_alloc_lru_end(alloc, index); + continue; + } + + if (index + 1 > alloc->pages_high) + alloc->pages_high = index + 1; } - return vma ? -ENOMEM : -ESRCH; } static inline void binder_alloc_set_vma(struct binder_alloc *alloc, @@ -323,7 +343,44 @@ static inline struct vm_area_struct *binder_alloc_get_vma( return smp_load_acquire(&alloc->vma); } -static bool debug_low_async_space_locked(struct binder_alloc *alloc, int pid) +static void debug_no_space_locked(struct binder_alloc *alloc) +{ + size_t largest_alloc_size = 0; + struct binder_buffer *buffer; + size_t allocated_buffers = 0; + size_t largest_free_size = 0; + size_t total_alloc_size = 0; + size_t total_free_size = 0; + size_t free_buffers = 0; + size_t buffer_size; + struct rb_node *n; + + for (n = rb_first(&alloc->allocated_buffers); n; n = rb_next(n)) { + buffer = rb_entry(n, struct binder_buffer, rb_node); + buffer_size = binder_alloc_buffer_size(alloc, buffer); + allocated_buffers++; + total_alloc_size += buffer_size; + if (buffer_size > largest_alloc_size) + largest_alloc_size = buffer_size; + } + + for (n = rb_first(&alloc->free_buffers); n; n = rb_next(n)) { + buffer = rb_entry(n, struct binder_buffer, rb_node); + buffer_size = binder_alloc_buffer_size(alloc, buffer); + free_buffers++; + total_free_size += buffer_size; + if (buffer_size > largest_free_size) + largest_free_size = buffer_size; + } + + binder_alloc_debug(BINDER_DEBUG_USER_ERROR, + "allocated: %zd (num: %zd largest: %zd), free: %zd (num: %zd largest: %zd)\n", + total_alloc_size, allocated_buffers, + largest_alloc_size, total_free_size, + free_buffers, largest_free_size); +} + +static bool debug_low_async_space_locked(struct binder_alloc *alloc) { /* * Find the amount and size of buffers allocated by the current caller; @@ -332,10 +389,20 @@ static bool debug_low_async_space_locked(struct binder_alloc *alloc, int pid) * and at some point we'll catch them in the act. 
This is more efficient * than keeping a map per pid. */ - struct rb_node *n; struct binder_buffer *buffer; size_t total_alloc_size = 0; + int pid = current->tgid; size_t num_buffers = 0; + struct rb_node *n; + + /* + * Only start detecting spammers once we have less than 20% of async + * space left (which is less than 10% of total buffer size). + */ + if (alloc->free_async_space >= alloc->buffer_size / 10) { + alloc->oneway_spam_detected = false; + return false; + } for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n)) { @@ -344,8 +411,7 @@ static bool debug_low_async_space_locked(struct binder_alloc *alloc, int pid) continue; if (!buffer->async_transaction) continue; - total_alloc_size += binder_alloc_buffer_size(alloc, buffer) - + sizeof(struct binder_buffer); + total_alloc_size += binder_alloc_buffer_size(alloc, buffer); num_buffers++; } @@ -366,58 +432,28 @@ static bool debug_low_async_space_locked(struct binder_alloc *alloc, int pid) return false; } +/* Callers preallocate @new_buffer, it is freed by this function if unused */ static struct binder_buffer *binder_alloc_new_buf_locked( struct binder_alloc *alloc, - size_t data_size, - size_t offsets_size, - size_t extra_buffers_size, - int is_async, - int pid) + struct binder_buffer *new_buffer, + size_t size, + int is_async) { struct rb_node *n = alloc->free_buffers.rb_node; + struct rb_node *best_fit = NULL; struct binder_buffer *buffer; + unsigned long next_used_page; + unsigned long curr_last_page; size_t buffer_size; - struct rb_node *best_fit = NULL; - void __user *has_page_addr; - void __user *end_page_addr; - size_t size, data_offsets_size; - int ret; - /* Check binder_alloc is fully initialized */ - if (!binder_alloc_get_vma(alloc)) { - binder_alloc_debug(BINDER_DEBUG_USER_ERROR, - "%d: binder_alloc_buf, no vma\n", - alloc->pid); - return ERR_PTR(-ESRCH); - } - - data_offsets_size = ALIGN(data_size, sizeof(void *)) + - ALIGN(offsets_size, sizeof(void *)); - - if (data_offsets_size < data_size || data_offsets_size < offsets_size) { - binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC, - "%d: got transaction with invalid size %zd-%zd\n", - alloc->pid, data_size, offsets_size); - return ERR_PTR(-EINVAL); - } - size = data_offsets_size + ALIGN(extra_buffers_size, sizeof(void *)); - if (size < data_offsets_size || size < extra_buffers_size) { - binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC, - "%d: got transaction with invalid extra_buffers_size %zd\n", - alloc->pid, extra_buffers_size); - return ERR_PTR(-EINVAL); - } - if (is_async && - alloc->free_async_space < size + sizeof(struct binder_buffer)) { + if (is_async && alloc->free_async_space < size) { binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC, "%d: binder_alloc_buf size %zd failed, no async space left\n", alloc->pid, size); - return ERR_PTR(-ENOSPC); + buffer = ERR_PTR(-ENOSPC); + goto out; } - /* Pad 0-size buffers so they get assigned unique addresses */ - size = max(size, sizeof(void *)); - while (n) { buffer = rb_entry(n, struct binder_buffer, rb_node); BUG_ON(!buffer->free); @@ -426,121 +462,92 @@ static struct binder_buffer *binder_alloc_new_buf_locked( if (size < buffer_size) { best_fit = n; n = n->rb_left; - } else if (size > buffer_size) + } else if (size > buffer_size) { n = n->rb_right; - else { + } else { best_fit = n; break; } } - if (best_fit == NULL) { - size_t allocated_buffers = 0; - size_t largest_alloc_size = 0; - size_t total_alloc_size = 0; - size_t free_buffers = 0; - size_t largest_free_size = 0; - size_t total_free_size = 0; - - for (n = 
rb_first(&alloc->allocated_buffers); n != NULL; - n = rb_next(n)) { - buffer = rb_entry(n, struct binder_buffer, rb_node); - buffer_size = binder_alloc_buffer_size(alloc, buffer); - allocated_buffers++; - total_alloc_size += buffer_size; - if (buffer_size > largest_alloc_size) - largest_alloc_size = buffer_size; - } - for (n = rb_first(&alloc->free_buffers); n != NULL; - n = rb_next(n)) { - buffer = rb_entry(n, struct binder_buffer, rb_node); - buffer_size = binder_alloc_buffer_size(alloc, buffer); - free_buffers++; - total_free_size += buffer_size; - if (buffer_size > largest_free_size) - largest_free_size = buffer_size; - } + + if (unlikely(!best_fit)) { binder_alloc_debug(BINDER_DEBUG_USER_ERROR, "%d: binder_alloc_buf size %zd failed, no address space\n", alloc->pid, size); - binder_alloc_debug(BINDER_DEBUG_USER_ERROR, - "allocated: %zd (num: %zd largest: %zd), free: %zd (num: %zd largest: %zd)\n", - total_alloc_size, allocated_buffers, - largest_alloc_size, total_free_size, - free_buffers, largest_free_size); - return ERR_PTR(-ENOSPC); + debug_no_space_locked(alloc); + buffer = ERR_PTR(-ENOSPC); + goto out; } - if (n == NULL) { + + if (buffer_size != size) { + /* Found an oversized buffer and needs to be split */ buffer = rb_entry(best_fit, struct binder_buffer, rb_node); buffer_size = binder_alloc_buffer_size(alloc, buffer); + + WARN_ON(n || buffer_size == size); + new_buffer->user_data = buffer->user_data + size; + list_add(&new_buffer->entry, &buffer->entry); + new_buffer->free = 1; + binder_insert_free_buffer(alloc, new_buffer); + new_buffer = NULL; } binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC, "%d: binder_alloc_buf size %zd got buffer %pK size %zd\n", alloc->pid, size, buffer, buffer_size); - has_page_addr = (void __user *) - (((uintptr_t)buffer->user_data + buffer_size) & PAGE_MASK); - WARN_ON(n && buffer_size != size); - end_page_addr = - (void __user *)PAGE_ALIGN((uintptr_t)buffer->user_data + size); - if (end_page_addr > has_page_addr) - end_page_addr = has_page_addr; - ret = binder_update_page_range(alloc, 1, (void __user *) - PAGE_ALIGN((uintptr_t)buffer->user_data), end_page_addr); - if (ret) - return ERR_PTR(ret); - - if (buffer_size != size) { - struct binder_buffer *new_buffer; - - new_buffer = kzalloc(sizeof(*buffer), GFP_KERNEL); - if (!new_buffer) { - pr_err("%s: %d failed to alloc new buffer struct\n", - __func__, alloc->pid); - goto err_alloc_buf_struct_failed; - } - new_buffer->user_data = (u8 __user *)buffer->user_data + size; - list_add(&new_buffer->entry, &buffer->entry); - new_buffer->free = 1; - binder_insert_free_buffer(alloc, new_buffer); - } + /* + * Now we remove the pages from the freelist. A clever calculation + * with buffer_size determines if the last page is shared with an + * adjacent in-use buffer. In such case, the page has been already + * removed from the freelist so we trim our range short. 
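[Editorial aside: the two statements just below compute next_used_page and curr_last_page, and the min() of the two is what "trims the range short". The following standalone sketch reproduces only that arithmetic outside the kernel, assuming 4 KiB pages; PAGE_ALIGN/PAGE_MASK are redefined locally to mirror the kernel macros, and the example addresses are invented purely for illustration.]

#include <stdio.h>

#define PAGE_SIZE      0x1000UL
#define PAGE_MASK      (~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x)  (((x) + PAGE_SIZE - 1) & PAGE_MASK)
#define MIN(a, b)      ((a) < (b) ? (a) : (b))

static void show_trim(unsigned long user_data, unsigned long buffer_size,
		      unsigned long size)
{
	/* Page holding the first byte after the original free region. */
	unsigned long next_used_page = (user_data + buffer_size) & PAGE_MASK;
	/* Page boundary just past the newly carved-out buffer. */
	unsigned long curr_last_page = PAGE_ALIGN(user_data + size);
	unsigned long start = PAGE_ALIGN(user_data);
	unsigned long end = MIN(next_used_page, curr_last_page);

	printf("freelist del [%#lx, %#lx) -> %lu page(s)\n",
	       start, end, end > start ? (end - start) / PAGE_SIZE : 0UL);
}

int main(void)
{
	show_trim(0x1000, 0x3000, 0x1800); /* 2 whole pages leave the freelist */
	show_trim(0x1800, 0x1000, 0x0c00); /* tail page shared: nothing to do  */
	return 0;
}

[The first case removes two pages from the freelist; in the second the carved-out buffer ends on a page shared with the next in-use buffer, so the trimmed range is empty, matching the behaviour the comment describes.]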
+ */ + next_used_page = (buffer->user_data + buffer_size) & PAGE_MASK; + curr_last_page = PAGE_ALIGN(buffer->user_data + size); + binder_lru_freelist_del(alloc, PAGE_ALIGN(buffer->user_data), + min(next_used_page, curr_last_page)); - rb_erase(best_fit, &alloc->free_buffers); + rb_erase(&buffer->rb_node, &alloc->free_buffers); buffer->free = 0; buffer->allow_user_free = 0; binder_insert_allocated_buffer_locked(alloc, buffer); - binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC, - "%d: binder_alloc_buf size %zd got %pK\n", - alloc->pid, size, buffer); - buffer->data_size = data_size; - buffer->offsets_size = offsets_size; buffer->async_transaction = is_async; - buffer->extra_buffers_size = extra_buffers_size; - buffer->pid = pid; buffer->oneway_spam_suspect = false; if (is_async) { - alloc->free_async_space -= size + sizeof(struct binder_buffer); + alloc->free_async_space -= size; binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC, "%d: binder_alloc_buf size %zd async free %zd\n", alloc->pid, size, alloc->free_async_space); - if (alloc->free_async_space < alloc->buffer_size / 10) { - /* - * Start detecting spammers once we have less than 20% - * of async space left (which is less than 10% of total - * buffer size). - */ - buffer->oneway_spam_suspect = debug_low_async_space_locked(alloc, pid); - } else { - alloc->oneway_spam_detected = false; - } + if (debug_low_async_space_locked(alloc)) + buffer->oneway_spam_suspect = true; } + +out: + /* Discard possibly unused new_buffer */ + kfree(new_buffer); return buffer; +} -err_alloc_buf_struct_failed: - binder_update_page_range(alloc, 0, (void __user *) - PAGE_ALIGN((uintptr_t)buffer->user_data), - end_page_addr); - return ERR_PTR(-ENOMEM); +/* Calculate the sanitized total size, returns 0 for invalid request */ +static inline size_t sanitized_size(size_t data_size, + size_t offsets_size, + size_t extra_buffers_size) +{ + size_t total, tmp; + + /* Align to pointer size and check for overflows */ + tmp = ALIGN(data_size, sizeof(void *)) + + ALIGN(offsets_size, sizeof(void *)); + if (tmp < data_size || tmp < offsets_size) + return 0; + total = tmp + ALIGN(extra_buffers_size, sizeof(void *)); + if (total < tmp || total < extra_buffers_size) + return 0; + + /* Pad 0-sized buffers so they get a unique address */ + total = max(total, sizeof(void *)); + + return total; } /** @@ -550,87 +557,101 @@ err_alloc_buf_struct_failed: * @offsets_size: user specified buffer offset * @extra_buffers_size: size of extra space for meta-data (eg, security context) * @is_async: buffer for async transaction - * @pid: pid to attribute allocation to (used for debugging) * * Allocate a new buffer given the requested sizes. Returns * the kernel version of the buffer pointer. 
The size allocated * is the sum of the three given sizes (each rounded up to * pointer-sized boundary) * - * Return: The allocated buffer or %NULL if error + * Return: The allocated buffer or %ERR_PTR(-errno) if error */ struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc, size_t data_size, size_t offsets_size, size_t extra_buffers_size, - int is_async, - int pid) + int is_async) { - struct binder_buffer *buffer; + struct binder_buffer *buffer, *next; + size_t size; + int ret; + + /* Check binder_alloc is fully initialized */ + if (!binder_alloc_get_vma(alloc)) { + binder_alloc_debug(BINDER_DEBUG_USER_ERROR, + "%d: binder_alloc_buf, no vma\n", + alloc->pid); + return ERR_PTR(-ESRCH); + } + + size = sanitized_size(data_size, offsets_size, extra_buffers_size); + if (unlikely(!size)) { + binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC, + "%d: got transaction with invalid size %zd-%zd-%zd\n", + alloc->pid, data_size, offsets_size, + extra_buffers_size); + return ERR_PTR(-EINVAL); + } - mutex_lock(&alloc->mutex); - buffer = binder_alloc_new_buf_locked(alloc, data_size, offsets_size, - extra_buffers_size, is_async, pid); - mutex_unlock(&alloc->mutex); + /* Preallocate the next buffer */ + next = kzalloc(sizeof(*next), GFP_KERNEL); + if (!next) + return ERR_PTR(-ENOMEM); + + spin_lock(&alloc->lock); + buffer = binder_alloc_new_buf_locked(alloc, next, size, is_async); + if (IS_ERR(buffer)) { + spin_unlock(&alloc->lock); + goto out; + } + + buffer->data_size = data_size; + buffer->offsets_size = offsets_size; + buffer->extra_buffers_size = extra_buffers_size; + buffer->pid = current->tgid; + spin_unlock(&alloc->lock); + + ret = binder_install_buffer_pages(alloc, buffer, size); + if (ret) { + binder_alloc_free_buf(alloc, buffer); + buffer = ERR_PTR(ret); + } +out: return buffer; } -static void __user *buffer_start_page(struct binder_buffer *buffer) +static unsigned long buffer_start_page(struct binder_buffer *buffer) { - return (void __user *)((uintptr_t)buffer->user_data & PAGE_MASK); + return buffer->user_data & PAGE_MASK; } -static void __user *prev_buffer_end_page(struct binder_buffer *buffer) +static unsigned long prev_buffer_end_page(struct binder_buffer *buffer) { - return (void __user *) - (((uintptr_t)(buffer->user_data) - 1) & PAGE_MASK); + return (buffer->user_data - 1) & PAGE_MASK; } static void binder_delete_free_buffer(struct binder_alloc *alloc, struct binder_buffer *buffer) { - struct binder_buffer *prev, *next = NULL; - bool to_free = true; + struct binder_buffer *prev, *next; + + if (PAGE_ALIGNED(buffer->user_data)) + goto skip_freelist; BUG_ON(alloc->buffers.next == &buffer->entry); prev = binder_buffer_prev(buffer); BUG_ON(!prev->free); - if (prev_buffer_end_page(prev) == buffer_start_page(buffer)) { - to_free = false; - binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC, - "%d: merge free, buffer %pK share page with %pK\n", - alloc->pid, buffer->user_data, - prev->user_data); - } + if (prev_buffer_end_page(prev) == buffer_start_page(buffer)) + goto skip_freelist; if (!list_is_last(&buffer->entry, &alloc->buffers)) { next = binder_buffer_next(buffer); - if (buffer_start_page(next) == buffer_start_page(buffer)) { - to_free = false; - binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC, - "%d: merge free, buffer %pK share page with %pK\n", - alloc->pid, - buffer->user_data, - next->user_data); - } - } - - if (PAGE_ALIGNED(buffer->user_data)) { - binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC, - "%d: merge free, buffer start %pK is page aligned\n", - alloc->pid, buffer->user_data); 
- to_free = false; + if (buffer_start_page(next) == buffer_start_page(buffer)) + goto skip_freelist; } - if (to_free) { - binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC, - "%d: merge free, buffer %pK do not share page with %pK or %pK\n", - alloc->pid, buffer->user_data, - prev->user_data, - next ? next->user_data : NULL); - binder_update_page_range(alloc, 0, buffer_start_page(buffer), - buffer_start_page(buffer) + PAGE_SIZE); - } + binder_lru_freelist_add(alloc, buffer_start_page(buffer), + buffer_start_page(buffer) + PAGE_SIZE); +skip_freelist: list_del(&buffer->entry); kfree(buffer); } @@ -657,17 +678,14 @@ static void binder_free_buf_locked(struct binder_alloc *alloc, BUG_ON(buffer->user_data > alloc->buffer + alloc->buffer_size); if (buffer->async_transaction) { - alloc->free_async_space += buffer_size + sizeof(struct binder_buffer); - + alloc->free_async_space += buffer_size; binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC, "%d: binder_free_buf size %zd async free %zd\n", alloc->pid, size, alloc->free_async_space); } - binder_update_page_range(alloc, 0, - (void __user *)PAGE_ALIGN((uintptr_t)buffer->user_data), - (void __user *)(((uintptr_t) - buffer->user_data + buffer_size) & PAGE_MASK)); + binder_lru_freelist_add(alloc, PAGE_ALIGN(buffer->user_data), + (buffer->user_data + buffer_size) & PAGE_MASK); rb_erase(&buffer->rb_node, &alloc->allocated_buffers); buffer->free = 1; @@ -691,8 +709,68 @@ static void binder_free_buf_locked(struct binder_alloc *alloc, binder_insert_free_buffer(alloc, buffer); } +/** + * binder_alloc_get_page() - get kernel pointer for given buffer offset + * @alloc: binder_alloc for this proc + * @buffer: binder buffer to be accessed + * @buffer_offset: offset into @buffer data + * @pgoffp: address to copy final page offset to + * + * Lookup the struct page corresponding to the address + * at @buffer_offset into @buffer->user_data. If @pgoffp is not + * NULL, the byte-offset into the page is written there. + * + * The caller is responsible to ensure that the offset points + * to a valid address within the @buffer and that @buffer is + * not freeable by the user. Since it can't be freed, we are + * guaranteed that the corresponding elements of @alloc->pages[] + * cannot change. 
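[Editorial aside: to make the lookup implemented below concrete, here is a standalone sketch of the same offset arithmetic. It assumes 4 KiB pages, and the addresses are invented for illustration; they do not come from the patch.]

#include <stdio.h>

#define PAGE_SIZE  0x1000UL
#define PAGE_SHIFT 12
#define PAGE_MASK  (~(PAGE_SIZE - 1))

int main(void)
{
	unsigned long alloc_buffer  = 0x7f0000000000UL; /* start of the mapping  */
	unsigned long user_data     = 0x7f0000001800UL; /* start of the buffer   */
	unsigned long buffer_offset = 0x900;            /* offset into the buffer */

	unsigned long space_offset = buffer_offset + (user_data - alloc_buffer);
	size_t index = space_offset >> PAGE_SHIFT;       /* -> alloc->pages[index] */
	unsigned long pgoff = space_offset & ~PAGE_MASK; /* byte offset in page    */

	printf("pages[%zu], pgoff %#lx\n", index, pgoff); /* pages[2], pgoff 0x100 */
	return 0;
}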
+ * + * Return: struct page + */ +static struct page *binder_alloc_get_page(struct binder_alloc *alloc, + struct binder_buffer *buffer, + binder_size_t buffer_offset, + pgoff_t *pgoffp) +{ + binder_size_t buffer_space_offset = buffer_offset + + (buffer->user_data - alloc->buffer); + pgoff_t pgoff = buffer_space_offset & ~PAGE_MASK; + size_t index = buffer_space_offset >> PAGE_SHIFT; + struct binder_lru_page *lru_page; + + lru_page = &alloc->pages[index]; + *pgoffp = pgoff; + return lru_page->page_ptr; +} + +/** + * binder_alloc_clear_buf() - zero out buffer + * @alloc: binder_alloc for this proc + * @buffer: binder buffer to be cleared + * + * memset the given buffer to 0 + */ static void binder_alloc_clear_buf(struct binder_alloc *alloc, - struct binder_buffer *buffer); + struct binder_buffer *buffer) +{ + size_t bytes = binder_alloc_buffer_size(alloc, buffer); + binder_size_t buffer_offset = 0; + + while (bytes) { + unsigned long size; + struct page *page; + pgoff_t pgoff; + + page = binder_alloc_get_page(alloc, buffer, + buffer_offset, &pgoff); + size = min_t(size_t, bytes, PAGE_SIZE - pgoff); + memset_page(page, pgoff, 0, size); + bytes -= size; + buffer_offset += size; + } +} + /** * binder_alloc_free_buf() - free a binder buffer * @alloc: binder_alloc for this proc @@ -706,18 +784,18 @@ void binder_alloc_free_buf(struct binder_alloc *alloc, /* * We could eliminate the call to binder_alloc_clear_buf() * from binder_alloc_deferred_release() by moving this to - * binder_alloc_free_buf_locked(). However, that could - * increase contention for the alloc mutex if clear_on_free - * is used frequently for large buffers. The mutex is not + * binder_free_buf_locked(). However, that could + * increase contention for the alloc->lock if clear_on_free + * is used frequently for large buffers. This lock is not * needed for correctness here. 
*/ if (buffer->clear_on_free) { binder_alloc_clear_buf(alloc, buffer); buffer->clear_on_free = false; } - mutex_lock(&alloc->mutex); + spin_lock(&alloc->lock); binder_free_buf_locked(alloc, buffer); - mutex_unlock(&alloc->mutex); + spin_unlock(&alloc->lock); } /** @@ -736,9 +814,9 @@ void binder_alloc_free_buf(struct binder_alloc *alloc, int binder_alloc_mmap_handler(struct binder_alloc *alloc, struct vm_area_struct *vma) { - int ret; - const char *failure_string; struct binder_buffer *buffer; + const char *failure_string; + int ret, i; if (unlikely(vma->vm_mm != alloc->mm)) { ret = -EINVAL; @@ -756,7 +834,7 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc, SZ_4M); mutex_unlock(&binder_alloc_mmap_lock); - alloc->buffer = (void __user *)vma->vm_start; + alloc->buffer = vma->vm_start; alloc->pages = kcalloc(alloc->buffer_size / PAGE_SIZE, sizeof(alloc->pages[0]), @@ -767,6 +845,11 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc, goto err_alloc_pages_failed; } + for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) { + alloc->pages[i].alloc = alloc; + INIT_LIST_HEAD(&alloc->pages[i].lru); + } + buffer = kzalloc(sizeof(*buffer), GFP_KERNEL); if (!buffer) { ret = -ENOMEM; @@ -789,7 +872,7 @@ err_alloc_buf_struct_failed: kfree(alloc->pages); alloc->pages = NULL; err_alloc_pages_failed: - alloc->buffer = NULL; + alloc->buffer = 0; mutex_lock(&binder_alloc_mmap_lock); alloc->buffer_size = 0; err_already_mapped: @@ -810,7 +893,7 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc) struct binder_buffer *buffer; buffers = 0; - mutex_lock(&alloc->mutex); + spin_lock(&alloc->lock); BUG_ON(alloc->vma); while ((n = rb_first(&alloc->allocated_buffers))) { @@ -842,25 +925,25 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc) int i; for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) { - void __user *page_addr; + unsigned long page_addr; bool on_lru; if (!alloc->pages[i].page_ptr) continue; - on_lru = list_lru_del_obj(&binder_alloc_lru, - &alloc->pages[i].lru); + on_lru = list_lru_del_obj(&binder_freelist, + &alloc->pages[i].lru); page_addr = alloc->buffer + i * PAGE_SIZE; binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC, - "%s: %d: page %d at %pK %s\n", - __func__, alloc->pid, i, page_addr, + "%s: %d: page %d %s\n", + __func__, alloc->pid, i, on_lru ? "on lru" : "active"); __free_page(alloc->pages[i].page_ptr); page_count++; } kfree(alloc->pages); } - mutex_unlock(&alloc->mutex); + spin_unlock(&alloc->lock); if (alloc->mm) mmdrop(alloc->mm); @@ -869,16 +952,6 @@ void binder_alloc_deferred_release(struct binder_alloc *alloc) __func__, alloc->pid, buffers, page_count); } -static void print_binder_buffer(struct seq_file *m, const char *prefix, - struct binder_buffer *buffer) -{ - seq_printf(m, "%s %d: %pK size %zd:%zd:%zd %s\n", - prefix, buffer->debug_id, buffer->user_data, - buffer->data_size, buffer->offsets_size, - buffer->extra_buffers_size, - buffer->transaction ? 
"active" : "delivered"); -} - /** * binder_alloc_print_allocated() - print buffer info * @m: seq_file for output via seq_printf() @@ -890,13 +963,20 @@ static void print_binder_buffer(struct seq_file *m, const char *prefix, void binder_alloc_print_allocated(struct seq_file *m, struct binder_alloc *alloc) { + struct binder_buffer *buffer; struct rb_node *n; - mutex_lock(&alloc->mutex); - for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n)) - print_binder_buffer(m, " buffer", - rb_entry(n, struct binder_buffer, rb_node)); - mutex_unlock(&alloc->mutex); + spin_lock(&alloc->lock); + for (n = rb_first(&alloc->allocated_buffers); n; n = rb_next(n)) { + buffer = rb_entry(n, struct binder_buffer, rb_node); + seq_printf(m, " buffer %d: %lx size %zd:%zd:%zd %s\n", + buffer->debug_id, + buffer->user_data - alloc->buffer, + buffer->data_size, buffer->offsets_size, + buffer->extra_buffers_size, + buffer->transaction ? "active" : "delivered"); + } + spin_unlock(&alloc->lock); } /** @@ -913,7 +993,7 @@ void binder_alloc_print_pages(struct seq_file *m, int lru = 0; int free = 0; - mutex_lock(&alloc->mutex); + spin_lock(&alloc->lock); /* * Make sure the binder_alloc is fully initialized, otherwise we might * read inconsistent state. @@ -929,7 +1009,7 @@ void binder_alloc_print_pages(struct seq_file *m, lru++; } } - mutex_unlock(&alloc->mutex); + spin_unlock(&alloc->lock); seq_printf(m, " pages: %d:%d:%d\n", active, lru, free); seq_printf(m, " pages high watermark: %zu\n", alloc->pages_high); } @@ -945,10 +1025,10 @@ int binder_alloc_get_allocated_count(struct binder_alloc *alloc) struct rb_node *n; int count = 0; - mutex_lock(&alloc->mutex); + spin_lock(&alloc->lock); for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n)) count++; - mutex_unlock(&alloc->mutex); + spin_unlock(&alloc->lock); return count; } @@ -981,33 +1061,39 @@ enum lru_status binder_alloc_free_page(struct list_head *item, void *cb_arg) __must_hold(lock) { - struct mm_struct *mm = NULL; - struct binder_lru_page *page = container_of(item, - struct binder_lru_page, - lru); - struct binder_alloc *alloc; - uintptr_t page_addr; - size_t index; + struct binder_lru_page *page = container_of(item, typeof(*page), lru); + struct binder_alloc *alloc = page->alloc; + struct mm_struct *mm = alloc->mm; struct vm_area_struct *vma; + struct page *page_to_free; + unsigned long page_addr; + size_t index; - alloc = page->alloc; - if (!mutex_trylock(&alloc->mutex)) - goto err_get_alloc_mutex_failed; - + if (!mmget_not_zero(mm)) + goto err_mmget; + if (!mmap_read_trylock(mm)) + goto err_mmap_read_lock_failed; + if (!spin_trylock(&alloc->lock)) + goto err_get_alloc_lock_failed; if (!page->page_ptr) goto err_page_already_freed; index = page - alloc->pages; - page_addr = (uintptr_t)alloc->buffer + index * PAGE_SIZE; + page_addr = alloc->buffer + index * PAGE_SIZE; - mm = alloc->mm; - if (!mmget_not_zero(mm)) - goto err_mmget; - if (!mmap_read_trylock(mm)) - goto err_mmap_read_lock_failed; - vma = binder_alloc_get_vma(alloc); + vma = vma_lookup(mm, page_addr); + if (vma && vma != binder_alloc_get_vma(alloc)) + goto err_invalid_vma; + + trace_binder_unmap_kernel_start(alloc, index); + + page_to_free = page->page_ptr; + page->page_ptr = NULL; + + trace_binder_unmap_kernel_end(alloc, index); list_lru_isolate(lru, item); + spin_unlock(&alloc->lock); spin_unlock(lock); if (vma) { @@ -1017,39 +1103,35 @@ enum lru_status binder_alloc_free_page(struct list_head *item, trace_binder_unmap_user_end(alloc, index); } + 
mmap_read_unlock(mm); mmput_async(mm); - - trace_binder_unmap_kernel_start(alloc, index); - - __free_page(page->page_ptr); - page->page_ptr = NULL; - - trace_binder_unmap_kernel_end(alloc, index); + __free_page(page_to_free); spin_lock(lock); - mutex_unlock(&alloc->mutex); return LRU_REMOVED_RETRY; +err_invalid_vma: +err_page_already_freed: + spin_unlock(&alloc->lock); +err_get_alloc_lock_failed: + mmap_read_unlock(mm); err_mmap_read_lock_failed: mmput_async(mm); err_mmget: -err_page_already_freed: - mutex_unlock(&alloc->mutex); -err_get_alloc_mutex_failed: return LRU_SKIP; } static unsigned long binder_shrink_count(struct shrinker *shrink, struct shrink_control *sc) { - return list_lru_count(&binder_alloc_lru); + return list_lru_count(&binder_freelist); } static unsigned long binder_shrink_scan(struct shrinker *shrink, struct shrink_control *sc) { - return list_lru_walk(&binder_alloc_lru, binder_alloc_free_page, + return list_lru_walk(&binder_freelist, binder_alloc_free_page, NULL, sc->nr_to_scan); } @@ -1067,7 +1149,7 @@ void binder_alloc_init(struct binder_alloc *alloc) alloc->pid = current->group_leader->pid; alloc->mm = current->mm; mmgrab(alloc->mm); - mutex_init(&alloc->mutex); + spin_lock_init(&alloc->lock); INIT_LIST_HEAD(&alloc->buffers); } @@ -1075,13 +1157,13 @@ int binder_alloc_shrinker_init(void) { int ret; - ret = list_lru_init(&binder_alloc_lru); + ret = list_lru_init(&binder_freelist); if (ret) return ret; binder_shrinker = shrinker_alloc(0, "android-binder"); if (!binder_shrinker) { - list_lru_destroy(&binder_alloc_lru); + list_lru_destroy(&binder_freelist); return -ENOMEM; } @@ -1096,7 +1178,7 @@ int binder_alloc_shrinker_init(void) void binder_alloc_shrinker_exit(void) { shrinker_free(binder_shrinker); - list_lru_destroy(&binder_alloc_lru); + list_lru_destroy(&binder_freelist); } /** @@ -1132,68 +1214,6 @@ static inline bool check_buffer(struct binder_alloc *alloc, } /** - * binder_alloc_get_page() - get kernel pointer for given buffer offset - * @alloc: binder_alloc for this proc - * @buffer: binder buffer to be accessed - * @buffer_offset: offset into @buffer data - * @pgoffp: address to copy final page offset to - * - * Lookup the struct page corresponding to the address - * at @buffer_offset into @buffer->user_data. If @pgoffp is not - * NULL, the byte-offset into the page is written there. - * - * The caller is responsible to ensure that the offset points - * to a valid address within the @buffer and that @buffer is - * not freeable by the user. Since it can't be freed, we are - * guaranteed that the corresponding elements of @alloc->pages[] - * cannot change. 
- * - * Return: struct page - */ -static struct page *binder_alloc_get_page(struct binder_alloc *alloc, - struct binder_buffer *buffer, - binder_size_t buffer_offset, - pgoff_t *pgoffp) -{ - binder_size_t buffer_space_offset = buffer_offset + - (buffer->user_data - alloc->buffer); - pgoff_t pgoff = buffer_space_offset & ~PAGE_MASK; - size_t index = buffer_space_offset >> PAGE_SHIFT; - struct binder_lru_page *lru_page; - - lru_page = &alloc->pages[index]; - *pgoffp = pgoff; - return lru_page->page_ptr; -} - -/** - * binder_alloc_clear_buf() - zero out buffer - * @alloc: binder_alloc for this proc - * @buffer: binder buffer to be cleared - * - * memset the given buffer to 0 - */ -static void binder_alloc_clear_buf(struct binder_alloc *alloc, - struct binder_buffer *buffer) -{ - size_t bytes = binder_alloc_buffer_size(alloc, buffer); - binder_size_t buffer_offset = 0; - - while (bytes) { - unsigned long size; - struct page *page; - pgoff_t pgoff; - - page = binder_alloc_get_page(alloc, buffer, - buffer_offset, &pgoff); - size = min_t(size_t, bytes, PAGE_SIZE - pgoff); - memset_page(page, pgoff, 0, size); - bytes -= size; - buffer_offset += size; - } -} - -/** * binder_alloc_copy_user_to_buffer() - copy src user to tgt user * @alloc: binder_alloc for this proc * @buffer: binder buffer to be accessed diff --git a/drivers/android/binder_alloc.h b/drivers/android/binder_alloc.h index dc1e2b01dd64..70387234477e 100644 --- a/drivers/android/binder_alloc.h +++ b/drivers/android/binder_alloc.h @@ -9,13 +9,13 @@ #include <linux/rbtree.h> #include <linux/list.h> #include <linux/mm.h> -#include <linux/rtmutex.h> +#include <linux/spinlock.h> #include <linux/vmalloc.h> #include <linux/slab.h> #include <linux/list_lru.h> #include <uapi/linux/android/binder.h> -extern struct list_lru binder_alloc_lru; +extern struct list_lru binder_freelist; struct binder_transaction; /** @@ -49,21 +49,19 @@ struct binder_buffer { unsigned async_transaction:1; unsigned oneway_spam_suspect:1; unsigned debug_id:27; - struct binder_transaction *transaction; - struct binder_node *target_node; size_t data_size; size_t offsets_size; size_t extra_buffers_size; - void __user *user_data; - int pid; + unsigned long user_data; + int pid; }; /** * struct binder_lru_page - page object used for binder shrinker * @page_ptr: pointer to physical page in mmap'd space - * @lru: entry in binder_alloc_lru + * @lru: entry in binder_freelist * @alloc: binder_alloc for a proc */ struct binder_lru_page { @@ -74,7 +72,7 @@ struct binder_lru_page { /** * struct binder_alloc - per-binder proc state for binder allocator - * @mutex: protects binder_alloc fields + * @lock: protects binder_alloc fields * @vma: vm_area_struct passed to mmap_handler * (invariant after mmap) * @mm: copy of task->mm (invariant after open) @@ -98,10 +96,10 @@ struct binder_lru_page { * struct binder_buffer objects used to track the user buffers */ struct binder_alloc { - struct mutex mutex; + spinlock_t lock; struct vm_area_struct *vma; struct mm_struct *mm; - void __user *buffer; + unsigned long buffer; struct list_head buffers; struct rb_root free_buffers; struct rb_root allocated_buffers; @@ -121,27 +119,26 @@ static inline void binder_selftest_alloc(struct binder_alloc *alloc) {} enum lru_status binder_alloc_free_page(struct list_head *item, struct list_lru_one *lru, spinlock_t *lock, void *cb_arg); -extern struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc, - size_t data_size, - size_t offsets_size, - size_t extra_buffers_size, - int is_async, - int 
pid); -extern void binder_alloc_init(struct binder_alloc *alloc); -extern int binder_alloc_shrinker_init(void); -extern void binder_alloc_shrinker_exit(void); -extern void binder_alloc_vma_close(struct binder_alloc *alloc); -extern struct binder_buffer * +struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc, + size_t data_size, + size_t offsets_size, + size_t extra_buffers_size, + int is_async); +void binder_alloc_init(struct binder_alloc *alloc); +int binder_alloc_shrinker_init(void); +void binder_alloc_shrinker_exit(void); +void binder_alloc_vma_close(struct binder_alloc *alloc); +struct binder_buffer * binder_alloc_prepare_to_free(struct binder_alloc *alloc, - uintptr_t user_ptr); -extern void binder_alloc_free_buf(struct binder_alloc *alloc, - struct binder_buffer *buffer); -extern int binder_alloc_mmap_handler(struct binder_alloc *alloc, - struct vm_area_struct *vma); -extern void binder_alloc_deferred_release(struct binder_alloc *alloc); -extern int binder_alloc_get_allocated_count(struct binder_alloc *alloc); -extern void binder_alloc_print_allocated(struct seq_file *m, - struct binder_alloc *alloc); + unsigned long user_ptr); +void binder_alloc_free_buf(struct binder_alloc *alloc, + struct binder_buffer *buffer); +int binder_alloc_mmap_handler(struct binder_alloc *alloc, + struct vm_area_struct *vma); +void binder_alloc_deferred_release(struct binder_alloc *alloc); +int binder_alloc_get_allocated_count(struct binder_alloc *alloc); +void binder_alloc_print_allocated(struct seq_file *m, + struct binder_alloc *alloc); void binder_alloc_print_pages(struct seq_file *m, struct binder_alloc *alloc); @@ -156,9 +153,9 @@ binder_alloc_get_free_async_space(struct binder_alloc *alloc) { size_t free_async_space; - mutex_lock(&alloc->mutex); + spin_lock(&alloc->lock); free_async_space = alloc->free_async_space; - mutex_unlock(&alloc->mutex); + spin_unlock(&alloc->lock); return free_async_space; } diff --git a/drivers/android/binder_alloc_selftest.c b/drivers/android/binder_alloc_selftest.c index c2b323bc3b3a..81442fe20a69 100644 --- a/drivers/android/binder_alloc_selftest.c +++ b/drivers/android/binder_alloc_selftest.c @@ -72,6 +72,10 @@ enum buf_end_align_type { * buf1 ]|[ buf2 | buf2 | buf2 ][ ... */ NEXT_NEXT_UNALIGNED, + /** + * @LOOP_END: The number of enum values in &buf_end_align_type. + * It is used for controlling loop termination. 
+ */ LOOP_END, }; @@ -93,11 +97,11 @@ static bool check_buffer_pages_allocated(struct binder_alloc *alloc, struct binder_buffer *buffer, size_t size) { - void __user *page_addr; - void __user *end; + unsigned long page_addr; + unsigned long end; int page_index; - end = (void __user *)PAGE_ALIGN((uintptr_t)buffer->user_data + size); + end = PAGE_ALIGN(buffer->user_data + size); page_addr = buffer->user_data; for (; page_addr < end; page_addr += PAGE_SIZE) { page_index = (page_addr - alloc->buffer) / PAGE_SIZE; @@ -119,7 +123,7 @@ static void binder_selftest_alloc_buf(struct binder_alloc *alloc, int i; for (i = 0; i < BUFFER_NUM; i++) { - buffers[i] = binder_alloc_new_buf(alloc, sizes[i], 0, 0, 0, 0); + buffers[i] = binder_alloc_new_buf(alloc, sizes[i], 0, 0, 0); if (IS_ERR(buffers[i]) || !check_buffer_pages_allocated(alloc, buffers[i], sizes[i])) { @@ -158,8 +162,8 @@ static void binder_selftest_free_page(struct binder_alloc *alloc) int i; unsigned long count; - while ((count = list_lru_count(&binder_alloc_lru))) { - list_lru_walk(&binder_alloc_lru, binder_alloc_free_page, + while ((count = list_lru_count(&binder_freelist))) { + list_lru_walk(&binder_freelist, binder_alloc_free_page, NULL, count); } @@ -183,7 +187,7 @@ static void binder_selftest_alloc_free(struct binder_alloc *alloc, /* Allocate from lru. */ binder_selftest_alloc_buf(alloc, buffers, sizes, seq); - if (list_lru_count(&binder_alloc_lru)) + if (list_lru_count(&binder_freelist)) pr_err("lru list should be empty but is not\n"); binder_selftest_free_buf(alloc, buffers, sizes, seq, end); diff --git a/drivers/android/binder_trace.h b/drivers/android/binder_trace.h index 8cc07e6a4273..fe38c6fc65d0 100644 --- a/drivers/android/binder_trace.h +++ b/drivers/android/binder_trace.h @@ -317,7 +317,7 @@ DEFINE_EVENT(binder_buffer_class, binder_transaction_update_buffer_release, TRACE_EVENT(binder_update_page_range, TP_PROTO(struct binder_alloc *alloc, bool allocate, - void __user *start, void __user *end), + unsigned long start, unsigned long end), TP_ARGS(alloc, allocate, start, end), TP_STRUCT__entry( __field(int, proc) diff --git a/drivers/android/binderfs.c b/drivers/android/binderfs.c index 1224ab7aa070..3001d754ac36 100644 --- a/drivers/android/binderfs.c +++ b/drivers/android/binderfs.c @@ -29,7 +29,6 @@ #include <linux/uaccess.h> #include <linux/user_namespace.h> #include <linux/xarray.h> -#include <uapi/asm-generic/errno-base.h> #include <uapi/linux/android/binder.h> #include <uapi/linux/android/binderfs.h> diff --git a/drivers/base/property.c b/drivers/base/property.c index b79608ee0b46..6e39db62446d 100644 --- a/drivers/base/property.c +++ b/drivers/base/property.c @@ -473,7 +473,7 @@ int fwnode_property_match_string(const struct fwnode_handle *fwnode, const char **values; int nval, ret; - nval = fwnode_property_read_string_array(fwnode, propname, NULL, 0); + nval = fwnode_property_string_array_count(fwnode, propname); if (nval < 0) return nval; @@ -499,6 +499,41 @@ out_free: EXPORT_SYMBOL_GPL(fwnode_property_match_string); /** + * fwnode_property_match_property_string - find a property string value in an array and return index + * @fwnode: Firmware node to get the property of + * @propname: Name of the property holding the string value + * @array: String array to search in + * @n: Size of the @array + * + * Find a property string value in a given @array and if it is found return + * the index back. 
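[Editorial aside, ahead of the formal return-value list: the snippet below shows a hypothetical caller of the new helper. Only fwnode_property_match_property_string() itself comes from this patch; the "vendor,mode" property, the foo_* names and the mode table are invented for the example, and a real driver would pull in <linux/device.h> and <linux/property.h>.]

static const char * const foo_modes[] = { "fast", "normal", "slow" };

static int foo_parse_mode(struct device *dev)
{
	int idx;

	/* Index into foo_modes[] on success, negative errno otherwise. */
	idx = fwnode_property_match_property_string(dev_fwnode(dev),
						    "vendor,mode",
						    foo_modes,
						    ARRAY_SIZE(foo_modes));
	if (idx < 0)
		return idx;	/* e.g. -ENOENT: value not in foo_modes */

	dev_dbg(dev, "selected %s mode\n", foo_modes[idx]);
	return idx;
}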
+ * + * Return: index, starting from %0, if the string value was found in the @array (success), + * %-ENOENT when the string value was not found in the @array, + * %-EINVAL if given arguments are not valid, + * %-ENODATA if the property does not have a value, + * %-EPROTO or %-EILSEQ if the property is not a string, + * %-ENXIO if no suitable firmware interface is present. + */ +int fwnode_property_match_property_string(const struct fwnode_handle *fwnode, + const char *propname, const char * const *array, size_t n) +{ + const char *string; + int ret; + + ret = fwnode_property_read_string(fwnode, propname, &string); + if (ret) + return ret; + + ret = match_string(array, n, string); + if (ret < 0) + ret = -ENOENT; + + return ret; +} +EXPORT_SYMBOL_GPL(fwnode_property_match_property_string); + +/** * fwnode_property_get_reference_args() - Find a reference with arguments * @fwnode: Firmware node where to look for the reference * @prop: The name of the property diff --git a/drivers/bus/mhi/ep/internal.h b/drivers/bus/mhi/ep/internal.h index a2125fa5fe2f..577965f95fda 100644 --- a/drivers/bus/mhi/ep/internal.h +++ b/drivers/bus/mhi/ep/internal.h @@ -126,6 +126,7 @@ struct mhi_ep_ring { union mhi_ep_ring_ctx *ring_ctx; struct mhi_ring_element *ring_cache; enum mhi_ep_ring_type type; + struct delayed_work intmodt_work; u64 rbase; size_t rd_offset; size_t wr_offset; @@ -135,7 +136,9 @@ struct mhi_ep_ring { u32 ch_id; u32 er_index; u32 irq_vector; + u32 intmodt; bool started; + bool irq_pending; }; struct mhi_ep_cmd { @@ -159,6 +162,7 @@ struct mhi_ep_chan { void (*xfer_cb)(struct mhi_ep_device *mhi_dev, struct mhi_result *result); enum mhi_ch_state state; enum dma_data_direction dir; + size_t rd_offset; u64 tre_loc; u32 tre_size; u32 tre_bytes_left; diff --git a/drivers/bus/mhi/ep/main.c b/drivers/bus/mhi/ep/main.c index 600881808982..65fc1d738bec 100644 --- a/drivers/bus/mhi/ep/main.c +++ b/drivers/bus/mhi/ep/main.c @@ -54,11 +54,27 @@ static int mhi_ep_send_event(struct mhi_ep_cntrl *mhi_cntrl, u32 ring_idx, mutex_unlock(&mhi_cntrl->event_lock); /* - * Raise IRQ to host only if the BEI flag is not set in TRE. Host might - * set this flag for interrupt moderation as per MHI protocol. + * As per the MHI specification, section 4.3, Interrupt moderation: + * + * 1. If BEI flag is not set, cancel any pending intmodt work if started + * for the event ring and raise IRQ immediately. + * + * 2. If both BEI and intmodt are set, and if no IRQ is pending for the + * same event ring, start the IRQ delayed work as per the value of + * intmodt. If previous IRQ is pending, then do nothing as the pending + * IRQ is enough for the host to process the current event ring element. + * + * 3. If BEI is set and intmodt is not set, no need to raise IRQ. 
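[Editorial aside: the hunk just below implements these three rules. What is not visible in this hunk is the delayed-work handler that fires once the intmodt window expires; a plausible shape for it is sketched here only for orientation. The function name is made up, and the actual handler added elsewhere in this series may differ in detail, but the ring fields and the raise_irq() callback it uses are the ones introduced above.]

static void mhi_ep_ring_intmodt_work(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct mhi_ep_ring *ring = container_of(dwork, struct mhi_ep_ring,
						intmodt_work);
	struct mhi_ep_cntrl *mhi_cntrl = ring->mhi_cntrl;

	/* Moderation window elapsed: raise the IRQ and allow a new window. */
	mhi_cntrl->raise_irq(mhi_cntrl, ring->irq_vector);
	WRITE_ONCE(ring->irq_pending, false);
}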
*/ - if (!bei) + if (!bei) { + if (READ_ONCE(ring->irq_pending)) + cancel_delayed_work(&ring->intmodt_work); + mhi_cntrl->raise_irq(mhi_cntrl, ring->irq_vector); + } else if (ring->intmodt && !READ_ONCE(ring->irq_pending)) { + WRITE_ONCE(ring->irq_pending, true); + schedule_delayed_work(&ring->intmodt_work, msecs_to_jiffies(ring->intmodt)); + } return 0; @@ -71,45 +87,77 @@ err_unlock: static int mhi_ep_send_completion_event(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_ring *ring, struct mhi_ring_element *tre, u32 len, enum mhi_ev_ccs code) { - struct mhi_ring_element event = {}; + struct mhi_ring_element *event; + int ret; + + event = kmem_cache_zalloc(mhi_cntrl->ev_ring_el_cache, GFP_KERNEL | GFP_DMA); + if (!event) + return -ENOMEM; - event.ptr = cpu_to_le64(ring->rbase + ring->rd_offset * sizeof(*tre)); - event.dword[0] = MHI_TRE_EV_DWORD0(code, len); - event.dword[1] = MHI_TRE_EV_DWORD1(ring->ch_id, MHI_PKT_TYPE_TX_EVENT); + event->ptr = cpu_to_le64(ring->rbase + ring->rd_offset * sizeof(*tre)); + event->dword[0] = MHI_TRE_EV_DWORD0(code, len); + event->dword[1] = MHI_TRE_EV_DWORD1(ring->ch_id, MHI_PKT_TYPE_TX_EVENT); - return mhi_ep_send_event(mhi_cntrl, ring->er_index, &event, MHI_TRE_DATA_GET_BEI(tre)); + ret = mhi_ep_send_event(mhi_cntrl, ring->er_index, event, MHI_TRE_DATA_GET_BEI(tre)); + kmem_cache_free(mhi_cntrl->ev_ring_el_cache, event); + + return ret; } int mhi_ep_send_state_change_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_state state) { - struct mhi_ring_element event = {}; + struct mhi_ring_element *event; + int ret; - event.dword[0] = MHI_SC_EV_DWORD0(state); - event.dword[1] = MHI_SC_EV_DWORD1(MHI_PKT_TYPE_STATE_CHANGE_EVENT); + event = kmem_cache_zalloc(mhi_cntrl->ev_ring_el_cache, GFP_KERNEL | GFP_DMA); + if (!event) + return -ENOMEM; - return mhi_ep_send_event(mhi_cntrl, 0, &event, 0); + event->dword[0] = MHI_SC_EV_DWORD0(state); + event->dword[1] = MHI_SC_EV_DWORD1(MHI_PKT_TYPE_STATE_CHANGE_EVENT); + + ret = mhi_ep_send_event(mhi_cntrl, 0, event, 0); + kmem_cache_free(mhi_cntrl->ev_ring_el_cache, event); + + return ret; } int mhi_ep_send_ee_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_ee_type exec_env) { - struct mhi_ring_element event = {}; + struct mhi_ring_element *event; + int ret; - event.dword[0] = MHI_EE_EV_DWORD0(exec_env); - event.dword[1] = MHI_SC_EV_DWORD1(MHI_PKT_TYPE_EE_EVENT); + event = kmem_cache_zalloc(mhi_cntrl->ev_ring_el_cache, GFP_KERNEL | GFP_DMA); + if (!event) + return -ENOMEM; + + event->dword[0] = MHI_EE_EV_DWORD0(exec_env); + event->dword[1] = MHI_SC_EV_DWORD1(MHI_PKT_TYPE_EE_EVENT); - return mhi_ep_send_event(mhi_cntrl, 0, &event, 0); + ret = mhi_ep_send_event(mhi_cntrl, 0, event, 0); + kmem_cache_free(mhi_cntrl->ev_ring_el_cache, event); + + return ret; } static int mhi_ep_send_cmd_comp_event(struct mhi_ep_cntrl *mhi_cntrl, enum mhi_ev_ccs code) { struct mhi_ep_ring *ring = &mhi_cntrl->mhi_cmd->ring; - struct mhi_ring_element event = {}; + struct mhi_ring_element *event; + int ret; + + event = kmem_cache_zalloc(mhi_cntrl->ev_ring_el_cache, GFP_KERNEL | GFP_DMA); + if (!event) + return -ENOMEM; + + event->ptr = cpu_to_le64(ring->rbase + ring->rd_offset * sizeof(struct mhi_ring_element)); + event->dword[0] = MHI_CC_EV_DWORD0(code); + event->dword[1] = MHI_CC_EV_DWORD1(MHI_PKT_TYPE_CMD_COMPLETION_EVENT); - event.ptr = cpu_to_le64(ring->rbase + ring->rd_offset * sizeof(struct mhi_ring_element)); - event.dword[0] = MHI_CC_EV_DWORD0(code); - event.dword[1] = MHI_CC_EV_DWORD1(MHI_PKT_TYPE_CMD_COMPLETION_EVENT); + ret = 
mhi_ep_send_event(mhi_cntrl, 0, event, 0); + kmem_cache_free(mhi_cntrl->ev_ring_el_cache, event); - return mhi_ep_send_event(mhi_cntrl, 0, &event, 0); + return ret; } static int mhi_ep_process_cmd_ring(struct mhi_ep_ring *ring, struct mhi_ring_element *el) @@ -151,6 +199,8 @@ static int mhi_ep_process_cmd_ring(struct mhi_ep_ring *ring, struct mhi_ring_ele goto err_unlock; } + + mhi_chan->rd_offset = ch_ring->rd_offset; } /* Set channel state to RUNNING */ @@ -280,22 +330,85 @@ bool mhi_ep_queue_is_empty(struct mhi_ep_device *mhi_dev, enum dma_data_directio struct mhi_ep_cntrl *mhi_cntrl = mhi_dev->mhi_cntrl; struct mhi_ep_ring *ring = &mhi_cntrl->mhi_chan[mhi_chan->chan].ring; - return !!(ring->rd_offset == ring->wr_offset); + return !!(mhi_chan->rd_offset == ring->wr_offset); } EXPORT_SYMBOL_GPL(mhi_ep_queue_is_empty); +static void mhi_ep_read_completion(struct mhi_ep_buf_info *buf_info) +{ + struct mhi_ep_device *mhi_dev = buf_info->mhi_dev; + struct mhi_ep_cntrl *mhi_cntrl = mhi_dev->mhi_cntrl; + struct mhi_ep_chan *mhi_chan = mhi_dev->ul_chan; + struct mhi_ep_ring *ring = &mhi_cntrl->mhi_chan[mhi_chan->chan].ring; + struct mhi_ring_element *el = &ring->ring_cache[ring->rd_offset]; + struct mhi_result result = {}; + int ret; + + if (mhi_chan->xfer_cb) { + result.buf_addr = buf_info->cb_buf; + result.dir = mhi_chan->dir; + result.bytes_xferd = buf_info->size; + + mhi_chan->xfer_cb(mhi_dev, &result); + } + + /* + * The host will split the data packet into multiple TREs if it can't fit + * the packet in a single TRE. In that case, CHAIN flag will be set by the + * host for all TREs except the last one. + */ + if (buf_info->code != MHI_EV_CC_OVERFLOW) { + if (MHI_TRE_DATA_GET_CHAIN(el)) { + /* + * IEOB (Interrupt on End of Block) flag will be set by the host if + * it expects the completion event for all TREs of a TD. + */ + if (MHI_TRE_DATA_GET_IEOB(el)) { + ret = mhi_ep_send_completion_event(mhi_cntrl, ring, el, + MHI_TRE_DATA_GET_LEN(el), + MHI_EV_CC_EOB); + if (ret < 0) { + dev_err(&mhi_chan->mhi_dev->dev, + "Error sending transfer compl. event\n"); + goto err_free_tre_buf; + } + } + } else { + /* + * IEOT (Interrupt on End of Transfer) flag will be set by the host + * for the last TRE of the TD and expects the completion event for + * the same. + */ + if (MHI_TRE_DATA_GET_IEOT(el)) { + ret = mhi_ep_send_completion_event(mhi_cntrl, ring, el, + MHI_TRE_DATA_GET_LEN(el), + MHI_EV_CC_EOT); + if (ret < 0) { + dev_err(&mhi_chan->mhi_dev->dev, + "Error sending transfer compl. 
event\n"); + goto err_free_tre_buf; + } + } + } + } + + mhi_ep_ring_inc_index(ring); + +err_free_tre_buf: + kmem_cache_free(mhi_cntrl->tre_buf_cache, buf_info->cb_buf); +} + static int mhi_ep_read_channel(struct mhi_ep_cntrl *mhi_cntrl, - struct mhi_ep_ring *ring, - struct mhi_result *result, - u32 len) + struct mhi_ep_ring *ring) { struct mhi_ep_chan *mhi_chan = &mhi_cntrl->mhi_chan[ring->ch_id]; struct device *dev = &mhi_cntrl->mhi_dev->dev; size_t tr_len, read_offset, write_offset; + struct mhi_ep_buf_info buf_info = {}; + u32 len = MHI_EP_DEFAULT_MTU; struct mhi_ring_element *el; bool tr_done = false; - void *write_addr; - u64 read_addr; + void *buf_addr; u32 buf_left; int ret; @@ -308,7 +421,7 @@ static int mhi_ep_read_channel(struct mhi_ep_cntrl *mhi_cntrl, return -ENODEV; } - el = &ring->ring_cache[ring->rd_offset]; + el = &ring->ring_cache[mhi_chan->rd_offset]; /* Check if there is data pending to be read from previous read operation */ if (mhi_chan->tre_bytes_left) { @@ -324,81 +437,51 @@ static int mhi_ep_read_channel(struct mhi_ep_cntrl *mhi_cntrl, read_offset = mhi_chan->tre_size - mhi_chan->tre_bytes_left; write_offset = len - buf_left; - read_addr = mhi_chan->tre_loc + read_offset; - write_addr = result->buf_addr + write_offset; + + buf_addr = kmem_cache_zalloc(mhi_cntrl->tre_buf_cache, GFP_KERNEL | GFP_DMA); + if (!buf_addr) + return -ENOMEM; + + buf_info.host_addr = mhi_chan->tre_loc + read_offset; + buf_info.dev_addr = buf_addr + write_offset; + buf_info.size = tr_len; + buf_info.cb = mhi_ep_read_completion; + buf_info.cb_buf = buf_addr; + buf_info.mhi_dev = mhi_chan->mhi_dev; + + if (mhi_chan->tre_bytes_left - tr_len) + buf_info.code = MHI_EV_CC_OVERFLOW; dev_dbg(dev, "Reading %zd bytes from channel (%u)\n", tr_len, ring->ch_id); - ret = mhi_cntrl->read_from_host(mhi_cntrl, read_addr, write_addr, tr_len); + ret = mhi_cntrl->read_async(mhi_cntrl, &buf_info); if (ret < 0) { dev_err(&mhi_chan->mhi_dev->dev, "Error reading from channel\n"); - return ret; + goto err_free_buf_addr; } buf_left -= tr_len; mhi_chan->tre_bytes_left -= tr_len; - /* - * Once the TRE (Transfer Ring Element) of a TD (Transfer Descriptor) has been - * read completely: - * - * 1. Send completion event to the host based on the flags set in TRE. - * 2. Increment the local read offset of the transfer ring. - */ if (!mhi_chan->tre_bytes_left) { - /* - * The host will split the data packet into multiple TREs if it can't fit - * the packet in a single TRE. In that case, CHAIN flag will be set by the - * host for all TREs except the last one. - */ - if (MHI_TRE_DATA_GET_CHAIN(el)) { - /* - * IEOB (Interrupt on End of Block) flag will be set by the host if - * it expects the completion event for all TREs of a TD. - */ - if (MHI_TRE_DATA_GET_IEOB(el)) { - ret = mhi_ep_send_completion_event(mhi_cntrl, ring, el, - MHI_TRE_DATA_GET_LEN(el), - MHI_EV_CC_EOB); - if (ret < 0) { - dev_err(&mhi_chan->mhi_dev->dev, - "Error sending transfer compl. event\n"); - return ret; - } - } - } else { - /* - * IEOT (Interrupt on End of Transfer) flag will be set by the host - * for the last TRE of the TD and expects the completion event for - * the same. - */ - if (MHI_TRE_DATA_GET_IEOT(el)) { - ret = mhi_ep_send_completion_event(mhi_cntrl, ring, el, - MHI_TRE_DATA_GET_LEN(el), - MHI_EV_CC_EOT); - if (ret < 0) { - dev_err(&mhi_chan->mhi_dev->dev, - "Error sending transfer compl. 
event\n"); - return ret; - } - } - + if (MHI_TRE_DATA_GET_IEOT(el)) tr_done = true; - } - mhi_ep_ring_inc_index(ring); + mhi_chan->rd_offset = (mhi_chan->rd_offset + 1) % ring->ring_size; } - - result->bytes_xferd += tr_len; } while (buf_left && !tr_done); return 0; + +err_free_buf_addr: + kmem_cache_free(mhi_cntrl->tre_buf_cache, buf_addr); + + return ret; } -static int mhi_ep_process_ch_ring(struct mhi_ep_ring *ring, struct mhi_ring_element *el) +static int mhi_ep_process_ch_ring(struct mhi_ep_ring *ring) { struct mhi_ep_cntrl *mhi_cntrl = ring->mhi_cntrl; struct mhi_result result = {}; - u32 len = MHI_EP_DEFAULT_MTU; struct mhi_ep_chan *mhi_chan; int ret; @@ -419,44 +502,59 @@ static int mhi_ep_process_ch_ring(struct mhi_ep_ring *ring, struct mhi_ring_elem mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result); } else { /* UL channel */ - result.buf_addr = kzalloc(len, GFP_KERNEL); - if (!result.buf_addr) - return -ENOMEM; - do { - ret = mhi_ep_read_channel(mhi_cntrl, ring, &result, len); + ret = mhi_ep_read_channel(mhi_cntrl, ring); if (ret < 0) { dev_err(&mhi_chan->mhi_dev->dev, "Failed to read channel\n"); - kfree(result.buf_addr); return ret; } - result.dir = mhi_chan->dir; - mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result); - result.bytes_xferd = 0; - memset(result.buf_addr, 0, len); - /* Read until the ring becomes empty */ } while (!mhi_ep_queue_is_empty(mhi_chan->mhi_dev, DMA_TO_DEVICE)); - - kfree(result.buf_addr); } return 0; } +static void mhi_ep_skb_completion(struct mhi_ep_buf_info *buf_info) +{ + struct mhi_ep_device *mhi_dev = buf_info->mhi_dev; + struct mhi_ep_cntrl *mhi_cntrl = mhi_dev->mhi_cntrl; + struct mhi_ep_chan *mhi_chan = mhi_dev->dl_chan; + struct mhi_ep_ring *ring = &mhi_cntrl->mhi_chan[mhi_chan->chan].ring; + struct mhi_ring_element *el = &ring->ring_cache[ring->rd_offset]; + struct device *dev = &mhi_dev->dev; + struct mhi_result result = {}; + int ret; + + if (mhi_chan->xfer_cb) { + result.buf_addr = buf_info->cb_buf; + result.dir = mhi_chan->dir; + result.bytes_xferd = buf_info->size; + + mhi_chan->xfer_cb(mhi_dev, &result); + } + + ret = mhi_ep_send_completion_event(mhi_cntrl, ring, el, buf_info->size, + buf_info->code); + if (ret) { + dev_err(dev, "Error sending transfer completion event\n"); + return; + } + + mhi_ep_ring_inc_index(ring); +} + /* TODO: Handle partially formed TDs */ int mhi_ep_queue_skb(struct mhi_ep_device *mhi_dev, struct sk_buff *skb) { struct mhi_ep_cntrl *mhi_cntrl = mhi_dev->mhi_cntrl; struct mhi_ep_chan *mhi_chan = mhi_dev->dl_chan; struct device *dev = &mhi_chan->mhi_dev->dev; + struct mhi_ep_buf_info buf_info = {}; struct mhi_ring_element *el; u32 buf_left, read_offset; struct mhi_ep_ring *ring; - enum mhi_ev_ccs code; - void *read_addr; - u64 write_addr; size_t tr_len; u32 tre_len; int ret; @@ -480,40 +578,44 @@ int mhi_ep_queue_skb(struct mhi_ep_device *mhi_dev, struct sk_buff *skb) goto err_exit; } - el = &ring->ring_cache[ring->rd_offset]; + el = &ring->ring_cache[mhi_chan->rd_offset]; tre_len = MHI_TRE_DATA_GET_LEN(el); tr_len = min(buf_left, tre_len); read_offset = skb->len - buf_left; - read_addr = skb->data + read_offset; - write_addr = MHI_TRE_DATA_GET_PTR(el); - dev_dbg(dev, "Writing %zd bytes to channel (%u)\n", tr_len, ring->ch_id); - ret = mhi_cntrl->write_to_host(mhi_cntrl, read_addr, write_addr, tr_len); - if (ret < 0) { - dev_err(dev, "Error writing to the channel\n"); - goto err_exit; - } + buf_info.dev_addr = skb->data + read_offset; + buf_info.host_addr = MHI_TRE_DATA_GET_PTR(el); + buf_info.size = tr_len; + buf_info.cb = 
mhi_ep_skb_completion; + buf_info.cb_buf = skb; + buf_info.mhi_dev = mhi_dev; - buf_left -= tr_len; /* * For all TREs queued by the host for DL channel, only the EOT flag will be set. * If the packet doesn't fit into a single TRE, send the OVERFLOW event to * the host so that the host can adjust the packet boundary to next TREs. Else send * the EOT event to the host indicating the packet boundary. */ - if (buf_left) - code = MHI_EV_CC_OVERFLOW; + if (buf_left - tr_len) + buf_info.code = MHI_EV_CC_OVERFLOW; else - code = MHI_EV_CC_EOT; + buf_info.code = MHI_EV_CC_EOT; - ret = mhi_ep_send_completion_event(mhi_cntrl, ring, el, tr_len, code); - if (ret) { - dev_err(dev, "Error sending transfer completion event\n"); + dev_dbg(dev, "Writing %zd bytes to channel (%u)\n", tr_len, ring->ch_id); + ret = mhi_cntrl->write_async(mhi_cntrl, &buf_info); + if (ret < 0) { + dev_err(dev, "Error writing to the channel\n"); goto err_exit; } - mhi_ep_ring_inc_index(ring); + buf_left -= tr_len; + + /* + * Update the read offset cached in mhi_chan. Actual read offset + * will be updated by the completion handler. + */ + mhi_chan->rd_offset = (mhi_chan->rd_offset + 1) % ring->ring_size; } while (buf_left); mutex_unlock(&mhi_chan->lock); @@ -714,7 +816,6 @@ static void mhi_ep_ch_ring_worker(struct work_struct *work) struct mhi_ep_cntrl *mhi_cntrl = container_of(work, struct mhi_ep_cntrl, ch_ring_work); struct device *dev = &mhi_cntrl->mhi_dev->dev; struct mhi_ep_ring_item *itr, *tmp; - struct mhi_ring_element *el; struct mhi_ep_ring *ring; struct mhi_ep_chan *chan; unsigned long flags; @@ -748,31 +849,29 @@ static void mhi_ep_ch_ring_worker(struct work_struct *work) if (ret) { dev_err(dev, "Error updating write offset for ring\n"); mutex_unlock(&chan->lock); - kfree(itr); + kmem_cache_free(mhi_cntrl->ring_item_cache, itr); continue; } /* Sanity check to make sure there are elements in the ring */ - if (ring->rd_offset == ring->wr_offset) { + if (chan->rd_offset == ring->wr_offset) { mutex_unlock(&chan->lock); - kfree(itr); + kmem_cache_free(mhi_cntrl->ring_item_cache, itr); continue; } - el = &ring->ring_cache[ring->rd_offset]; - dev_dbg(dev, "Processing the ring for channel (%u)\n", ring->ch_id); - ret = mhi_ep_process_ch_ring(ring, el); + ret = mhi_ep_process_ch_ring(ring); if (ret) { dev_err(dev, "Error processing ring for channel (%u): %d\n", ring->ch_id, ret); mutex_unlock(&chan->lock); - kfree(itr); + kmem_cache_free(mhi_cntrl->ring_item_cache, itr); continue; } mutex_unlock(&chan->lock); - kfree(itr); + kmem_cache_free(mhi_cntrl->ring_item_cache, itr); } } @@ -828,7 +927,7 @@ static void mhi_ep_queue_channel_db(struct mhi_ep_cntrl *mhi_cntrl, unsigned lon u32 ch_id = ch_idx + i; ring = &mhi_cntrl->mhi_chan[ch_id].ring; - item = kzalloc(sizeof(*item), GFP_ATOMIC); + item = kmem_cache_zalloc(mhi_cntrl->ring_item_cache, GFP_ATOMIC); if (!item) return; @@ -1365,6 +1464,10 @@ int mhi_ep_register_controller(struct mhi_ep_cntrl *mhi_cntrl, if (!mhi_cntrl || !mhi_cntrl->cntrl_dev || !mhi_cntrl->mmio || !mhi_cntrl->irq) return -EINVAL; + if (!mhi_cntrl->read_sync || !mhi_cntrl->write_sync || + !mhi_cntrl->read_async || !mhi_cntrl->write_async) + return -EINVAL; + ret = mhi_ep_chan_init(mhi_cntrl, config); if (ret) return ret; @@ -1375,6 +1478,29 @@ int mhi_ep_register_controller(struct mhi_ep_cntrl *mhi_cntrl, goto err_free_ch; } + mhi_cntrl->ev_ring_el_cache = kmem_cache_create("mhi_ep_event_ring_el", + sizeof(struct mhi_ring_element), 0, + SLAB_CACHE_DMA, NULL); + if (!mhi_cntrl->ev_ring_el_cache) { + ret = 
-ENOMEM; + goto err_free_cmd; + } + + mhi_cntrl->tre_buf_cache = kmem_cache_create("mhi_ep_tre_buf", MHI_EP_DEFAULT_MTU, 0, + SLAB_CACHE_DMA, NULL); + if (!mhi_cntrl->tre_buf_cache) { + ret = -ENOMEM; + goto err_destroy_ev_ring_el_cache; + } + + mhi_cntrl->ring_item_cache = kmem_cache_create("mhi_ep_ring_item", + sizeof(struct mhi_ep_ring_item), 0, + 0, NULL); + if (!mhi_cntrl->ev_ring_el_cache) { + ret = -ENOMEM; + goto err_destroy_tre_buf_cache; + } + INIT_WORK(&mhi_cntrl->state_work, mhi_ep_state_worker); INIT_WORK(&mhi_cntrl->reset_work, mhi_ep_reset_worker); INIT_WORK(&mhi_cntrl->cmd_ring_work, mhi_ep_cmd_ring_worker); @@ -1383,7 +1509,7 @@ int mhi_ep_register_controller(struct mhi_ep_cntrl *mhi_cntrl, mhi_cntrl->wq = alloc_workqueue("mhi_ep_wq", 0, 0); if (!mhi_cntrl->wq) { ret = -ENOMEM; - goto err_free_cmd; + goto err_destroy_ring_item_cache; } INIT_LIST_HEAD(&mhi_cntrl->st_transition_list); @@ -1442,6 +1568,12 @@ err_ida_free: ida_free(&mhi_ep_cntrl_ida, mhi_cntrl->index); err_destroy_wq: destroy_workqueue(mhi_cntrl->wq); +err_destroy_ring_item_cache: + kmem_cache_destroy(mhi_cntrl->ring_item_cache); +err_destroy_ev_ring_el_cache: + kmem_cache_destroy(mhi_cntrl->ev_ring_el_cache); +err_destroy_tre_buf_cache: + kmem_cache_destroy(mhi_cntrl->tre_buf_cache); err_free_cmd: kfree(mhi_cntrl->mhi_cmd); err_free_ch: @@ -1463,6 +1595,9 @@ void mhi_ep_unregister_controller(struct mhi_ep_cntrl *mhi_cntrl) free_irq(mhi_cntrl->irq, mhi_cntrl); + kmem_cache_destroy(mhi_cntrl->tre_buf_cache); + kmem_cache_destroy(mhi_cntrl->ev_ring_el_cache); + kmem_cache_destroy(mhi_cntrl->ring_item_cache); kfree(mhi_cntrl->mhi_cmd); kfree(mhi_cntrl->mhi_chan); diff --git a/drivers/bus/mhi/ep/ring.c b/drivers/bus/mhi/ep/ring.c index 115518ec76a4..aeb53b2c34a8 100644 --- a/drivers/bus/mhi/ep/ring.c +++ b/drivers/bus/mhi/ep/ring.c @@ -30,7 +30,8 @@ static int __mhi_ep_cache_ring(struct mhi_ep_ring *ring, size_t end) { struct mhi_ep_cntrl *mhi_cntrl = ring->mhi_cntrl; struct device *dev = &mhi_cntrl->mhi_dev->dev; - size_t start, copy_size; + struct mhi_ep_buf_info buf_info = {}; + size_t start; int ret; /* Don't proceed in the case of event ring. This happens during mhi_ep_ring_start(). 
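 *
 * A minimal sketch (illustrative names only, not code from this driver) of
 * the slab-cache pattern the hunks above introduce for the fixed-size,
 * DMA-able event and TRE objects:
 *
 *        #include <linux/slab.h>
 *        #include <linux/types.h>
 *
 *        struct demo_el { __le64 ptr; __le32 dword[2]; };
 *
 *        static struct kmem_cache *demo_el_cache;
 *
 *        static int demo_cache_init(void)
 *        {
 *                demo_el_cache = kmem_cache_create("demo_ring_el",
 *                                                  sizeof(struct demo_el), 0,
 *                                                  SLAB_CACHE_DMA, NULL);
 *                return demo_el_cache ? 0 : -ENOMEM;
 *        }
 *
 *        static int demo_send_event(void)
 *        {
 *                struct demo_el *el;
 *
 *                el = kmem_cache_zalloc(demo_el_cache, GFP_KERNEL | GFP_DMA);
 *                if (!el)
 *                        return -ENOMEM;
 *                // ... fill the element and hand it to the transport ...
 *                kmem_cache_free(demo_el_cache, el);
 *                return 0;
 *        }
 *
 *        static void demo_cache_exit(void)
 *        {
 *                kmem_cache_destroy(demo_el_cache);
 *        }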
*/ @@ -43,30 +44,34 @@ static int __mhi_ep_cache_ring(struct mhi_ep_ring *ring, size_t end) start = ring->wr_offset; if (start < end) { - copy_size = (end - start) * sizeof(struct mhi_ring_element); - ret = mhi_cntrl->read_from_host(mhi_cntrl, ring->rbase + - (start * sizeof(struct mhi_ring_element)), - &ring->ring_cache[start], copy_size); + buf_info.size = (end - start) * sizeof(struct mhi_ring_element); + buf_info.host_addr = ring->rbase + (start * sizeof(struct mhi_ring_element)); + buf_info.dev_addr = &ring->ring_cache[start]; + + ret = mhi_cntrl->read_sync(mhi_cntrl, &buf_info); if (ret < 0) return ret; } else { - copy_size = (ring->ring_size - start) * sizeof(struct mhi_ring_element); - ret = mhi_cntrl->read_from_host(mhi_cntrl, ring->rbase + - (start * sizeof(struct mhi_ring_element)), - &ring->ring_cache[start], copy_size); + buf_info.size = (ring->ring_size - start) * sizeof(struct mhi_ring_element); + buf_info.host_addr = ring->rbase + (start * sizeof(struct mhi_ring_element)); + buf_info.dev_addr = &ring->ring_cache[start]; + + ret = mhi_cntrl->read_sync(mhi_cntrl, &buf_info); if (ret < 0) return ret; if (end) { - ret = mhi_cntrl->read_from_host(mhi_cntrl, ring->rbase, - &ring->ring_cache[0], - end * sizeof(struct mhi_ring_element)); + buf_info.host_addr = ring->rbase; + buf_info.dev_addr = &ring->ring_cache[0]; + buf_info.size = end * sizeof(struct mhi_ring_element); + + ret = mhi_cntrl->read_sync(mhi_cntrl, &buf_info); if (ret < 0) return ret; } } - dev_dbg(dev, "Cached ring: start %zu end %zu size %zu\n", start, end, copy_size); + dev_dbg(dev, "Cached ring: start %zu end %zu size %zu\n", start, end, buf_info.size); return 0; } @@ -102,6 +107,7 @@ int mhi_ep_ring_add_element(struct mhi_ep_ring *ring, struct mhi_ring_element *e { struct mhi_ep_cntrl *mhi_cntrl = ring->mhi_cntrl; struct device *dev = &mhi_cntrl->mhi_dev->dev; + struct mhi_ep_buf_info buf_info = {}; size_t old_offset = 0; u32 num_free_elem; __le64 rp; @@ -133,12 +139,11 @@ int mhi_ep_ring_add_element(struct mhi_ep_ring *ring, struct mhi_ring_element *e rp = cpu_to_le64(ring->rd_offset * sizeof(*el) + ring->rbase); memcpy_toio((void __iomem *) &ring->ring_ctx->generic.rp, &rp, sizeof(u64)); - ret = mhi_cntrl->write_to_host(mhi_cntrl, el, ring->rbase + (old_offset * sizeof(*el)), - sizeof(*el)); - if (ret < 0) - return ret; + buf_info.host_addr = ring->rbase + (old_offset * sizeof(*el)); + buf_info.dev_addr = el; + buf_info.size = sizeof(*el); - return 0; + return mhi_cntrl->write_sync(mhi_cntrl, &buf_info); } void mhi_ep_ring_init(struct mhi_ep_ring *ring, enum mhi_ep_ring_type type, u32 id) @@ -157,6 +162,15 @@ void mhi_ep_ring_init(struct mhi_ep_ring *ring, enum mhi_ep_ring_type type, u32 } } +static void mhi_ep_raise_irq(struct work_struct *work) +{ + struct mhi_ep_ring *ring = container_of(work, struct mhi_ep_ring, intmodt_work.work); + struct mhi_ep_cntrl *mhi_cntrl = ring->mhi_cntrl; + + mhi_cntrl->raise_irq(mhi_cntrl, ring->irq_vector); + WRITE_ONCE(ring->irq_pending, false); +} + int mhi_ep_ring_start(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_ring *ring, union mhi_ep_ring_ctx *ctx) { @@ -173,8 +187,13 @@ int mhi_ep_ring_start(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_ring *ring, if (ring->type == RING_TYPE_CH) ring->er_index = le32_to_cpu(ring->ring_ctx->ch.erindex); - if (ring->type == RING_TYPE_ER) + if (ring->type == RING_TYPE_ER) { ring->irq_vector = le32_to_cpu(ring->ring_ctx->ev.msivec); + ring->intmodt = FIELD_GET(EV_CTX_INTMODT_MASK, + le32_to_cpu(ring->ring_ctx->ev.intmod)); + + 
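/*
 * The intmodt handling added around here coalesces event-ring interrupts:
 * when the host programs a moderation timeout, the first queued event arms a
 * delayed work item and the MSI is raised once when it fires rather than per
 * event.  A minimal sketch of the same pattern, with made-up names and an
 * assumed demo_fire_msi() helper:
 *
 *        #include <linux/workqueue.h>
 *        #include <linux/jiffies.h>
 *        #include <linux/types.h>
 *
 *        struct demo_ring {
 *                struct delayed_work        intmodt_work;
 *                bool                       irq_pending;
 *                u32                        intmodt;        // moderation window in ms
 *        };
 *
 *        static void demo_fire_msi(struct demo_ring *ring);        // assumed helper
 *
 *        static void demo_intmodt_fn(struct work_struct *work)
 *        {
 *                struct demo_ring *ring = container_of(work, struct demo_ring,
 *                                                      intmodt_work.work);
 *
 *                demo_fire_msi(ring);
 *                WRITE_ONCE(ring->irq_pending, false);
 *        }
 *
 *        static void demo_event_queued(struct demo_ring *ring)
 *        {
 *                if (!ring->intmodt) {
 *                        demo_fire_msi(ring);        // no moderation configured
 *                        return;
 *                }
 *
 *                if (READ_ONCE(ring->irq_pending))
 *                        return;                // a batched MSI is already armed
 *
 *                WRITE_ONCE(ring->irq_pending, true);
 *                schedule_delayed_work(&ring->intmodt_work,
 *                                      msecs_to_jiffies(ring->intmodt));
 *        }
 */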
INIT_DELAYED_WORK(&ring->intmodt_work, mhi_ep_raise_irq); + } /* During ring init, both rp and wp are equal */ memcpy_fromio(&val, (void __iomem *) &ring->ring_ctx->generic.rp, sizeof(u64)); @@ -201,6 +220,9 @@ int mhi_ep_ring_start(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_ring *ring, void mhi_ep_ring_reset(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_ring *ring) { + if (ring->type == RING_TYPE_ER) + cancel_delayed_work_sync(&ring->intmodt_work); + ring->started = false; kfree(ring->ring_cache); ring->ring_cache = NULL; diff --git a/drivers/bus/mhi/host/init.c b/drivers/bus/mhi/host/init.c index f78aefd2d7a3..65ceac1837f9 100644 --- a/drivers/bus/mhi/host/init.c +++ b/drivers/bus/mhi/host/init.c @@ -881,6 +881,7 @@ static int parse_config(struct mhi_controller *mhi_cntrl, if (!mhi_cntrl->timeout_ms) mhi_cntrl->timeout_ms = MHI_TIMEOUT_MS; + mhi_cntrl->ready_timeout_ms = config->ready_timeout_ms; mhi_cntrl->bounce_buf = config->use_bounce_buf; mhi_cntrl->buffer_len = config->buf_len; if (!mhi_cntrl->buffer_len) diff --git a/drivers/bus/mhi/host/internal.h b/drivers/bus/mhi/host/internal.h index 2e139e76de4c..30ac415a3000 100644 --- a/drivers/bus/mhi/host/internal.h +++ b/drivers/bus/mhi/host/internal.h @@ -321,7 +321,7 @@ int __must_check mhi_read_reg_field(struct mhi_controller *mhi_cntrl, u32 *out); int __must_check mhi_poll_reg_field(struct mhi_controller *mhi_cntrl, void __iomem *base, u32 offset, u32 mask, - u32 val, u32 delayus); + u32 val, u32 delayus, u32 timeout_ms); void mhi_write_reg(struct mhi_controller *mhi_cntrl, void __iomem *base, u32 offset, u32 val); int __must_check mhi_write_reg_field(struct mhi_controller *mhi_cntrl, diff --git a/drivers/bus/mhi/host/main.c b/drivers/bus/mhi/host/main.c index dcf627b36e82..abb561db9ae1 100644 --- a/drivers/bus/mhi/host/main.c +++ b/drivers/bus/mhi/host/main.c @@ -40,10 +40,11 @@ int __must_check mhi_read_reg_field(struct mhi_controller *mhi_cntrl, int __must_check mhi_poll_reg_field(struct mhi_controller *mhi_cntrl, void __iomem *base, u32 offset, - u32 mask, u32 val, u32 delayus) + u32 mask, u32 val, u32 delayus, + u32 timeout_ms) { int ret; - u32 out, retry = (mhi_cntrl->timeout_ms * 1000) / delayus; + u32 out, retry = (timeout_ms * 1000) / delayus; while (retry--) { ret = mhi_read_reg_field(mhi_cntrl, base, offset, mask, &out); @@ -268,7 +269,8 @@ static void mhi_del_ring_element(struct mhi_controller *mhi_cntrl, static bool is_valid_ring_ptr(struct mhi_ring *ring, dma_addr_t addr) { - return addr >= ring->iommu_base && addr < ring->iommu_base + ring->len; + return addr >= ring->iommu_base && addr < ring->iommu_base + ring->len && + !(addr & (sizeof(struct mhi_ring_element) - 1)); } int mhi_destroy_device(struct device *dev, void *data) @@ -642,6 +644,8 @@ static int parse_xfer_event(struct mhi_controller *mhi_cntrl, mhi_del_ring_element(mhi_cntrl, tre_ring); local_rp = tre_ring->rp; + read_unlock_bh(&mhi_chan->lock); + /* notify client */ mhi_chan->xfer_cb(mhi_chan->mhi_dev, &result); @@ -667,6 +671,8 @@ static int parse_xfer_event(struct mhi_controller *mhi_cntrl, kfree(buf_info->cb_buf); } } + + read_lock_bh(&mhi_chan->lock); } break; } /* CC_EOT */ @@ -1122,17 +1128,15 @@ static int mhi_queue(struct mhi_device *mhi_dev, struct mhi_buf_info *buf_info, if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))) return -EIO; - read_lock_irqsave(&mhi_cntrl->pm_lock, flags); - ret = mhi_is_ring_full(mhi_cntrl, tre_ring); - if (unlikely(ret)) { - ret = -EAGAIN; - goto exit_unlock; - } + if (unlikely(ret)) + return -EAGAIN; ret = 
mhi_gen_tre(mhi_cntrl, mhi_chan, buf_info, mflags); if (unlikely(ret)) - goto exit_unlock; + return ret; + + read_lock_irqsave(&mhi_cntrl->pm_lock, flags); /* Packet is queued, take a usage ref to exit M3 if necessary * for host->device buffer, balanced put is done on buffer completion @@ -1152,7 +1156,6 @@ static int mhi_queue(struct mhi_device *mhi_dev, struct mhi_buf_info *buf_info, if (dir == DMA_FROM_DEVICE) mhi_cntrl->runtime_put(mhi_cntrl); -exit_unlock: read_unlock_irqrestore(&mhi_cntrl->pm_lock, flags); return ret; @@ -1204,6 +1207,9 @@ int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan, int eot, eob, chain, bei; int ret; + /* Protect accesses for reading and incrementing WP */ + write_lock_bh(&mhi_chan->lock); + buf_ring = &mhi_chan->buf_ring; tre_ring = &mhi_chan->tre_ring; @@ -1221,8 +1227,10 @@ int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan, if (!info->pre_mapped) { ret = mhi_cntrl->map_single(mhi_cntrl, buf_info); - if (ret) + if (ret) { + write_unlock_bh(&mhi_chan->lock); return ret; + } } eob = !!(flags & MHI_EOB); @@ -1239,6 +1247,8 @@ int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan, mhi_add_ring_element(mhi_cntrl, tre_ring); mhi_add_ring_element(mhi_cntrl, buf_ring); + write_unlock_bh(&mhi_chan->lock); + return 0; } diff --git a/drivers/bus/mhi/host/pci_generic.c b/drivers/bus/mhi/host/pci_generic.c index 08f3f039dbdd..cd6cd14b3d29 100644 --- a/drivers/bus/mhi/host/pci_generic.c +++ b/drivers/bus/mhi/host/pci_generic.c @@ -269,6 +269,16 @@ static struct mhi_event_config modem_qcom_v1_mhi_events[] = { MHI_EVENT_CONFIG_HW_DATA(5, 2048, 101) }; +static const struct mhi_controller_config modem_qcom_v2_mhiv_config = { + .max_channels = 128, + .timeout_ms = 8000, + .ready_timeout_ms = 50000, + .num_channels = ARRAY_SIZE(modem_qcom_v1_mhi_channels), + .ch_cfg = modem_qcom_v1_mhi_channels, + .num_events = ARRAY_SIZE(modem_qcom_v1_mhi_events), + .event_cfg = modem_qcom_v1_mhi_events, +}; + static const struct mhi_controller_config modem_qcom_v1_mhiv_config = { .max_channels = 128, .timeout_ms = 8000, @@ -278,6 +288,16 @@ static const struct mhi_controller_config modem_qcom_v1_mhiv_config = { .event_cfg = modem_qcom_v1_mhi_events, }; +static const struct mhi_pci_dev_info mhi_qcom_sdx75_info = { + .name = "qcom-sdx75m", + .fw = "qcom/sdx75m/xbl.elf", + .edl = "qcom/sdx75m/edl.mbn", + .config = &modem_qcom_v2_mhiv_config, + .bar_num = MHI_PCI_DEFAULT_BAR_NUM, + .dma_data_width = 32, + .sideband_wake = false, +}; + static const struct mhi_pci_dev_info mhi_qcom_sdx65_info = { .name = "qcom-sdx65m", .fw = "qcom/sdx65m/xbl.elf", @@ -600,6 +620,8 @@ static const struct pci_device_id mhi_pci_id_table[] = { .driver_data = (kernel_ulong_t) &mhi_telit_fn990_info }, { PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0308), .driver_data = (kernel_ulong_t) &mhi_qcom_sdx65_info }, + { PCI_DEVICE(PCI_VENDOR_ID_QCOM, 0x0309), + .driver_data = (kernel_ulong_t) &mhi_qcom_sdx75_info }, { PCI_DEVICE(PCI_VENDOR_ID_QUECTEL, 0x1001), /* EM120R-GL (sdx24) */ .driver_data = (kernel_ulong_t) &mhi_quectel_em1xx_info }, { PCI_DEVICE(PCI_VENDOR_ID_QUECTEL, 0x1002), /* EM160R-GL (sdx24) */ diff --git a/drivers/bus/mhi/host/pm.c b/drivers/bus/mhi/host/pm.c index 8a4362d75fc4..a2f2feef1476 100644 --- a/drivers/bus/mhi/host/pm.c +++ b/drivers/bus/mhi/host/pm.c @@ -163,6 +163,7 @@ int mhi_ready_state_transition(struct mhi_controller *mhi_cntrl) enum mhi_pm_state cur_state; struct device *dev = &mhi_cntrl->mhi_dev->dev; u32 interval_us = 25000; /* poll 
register field every 25 milliseconds */ + u32 timeout_ms; int ret, i; /* Check if device entered error state */ @@ -173,14 +174,18 @@ int mhi_ready_state_transition(struct mhi_controller *mhi_cntrl) /* Wait for RESET to be cleared and READY bit to be set by the device */ ret = mhi_poll_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL, - MHICTRL_RESET_MASK, 0, interval_us); + MHICTRL_RESET_MASK, 0, interval_us, + mhi_cntrl->timeout_ms); if (ret) { dev_err(dev, "Device failed to clear MHI Reset\n"); return ret; } + timeout_ms = mhi_cntrl->ready_timeout_ms ? + mhi_cntrl->ready_timeout_ms : mhi_cntrl->timeout_ms; ret = mhi_poll_reg_field(mhi_cntrl, mhi_cntrl->regs, MHISTATUS, - MHISTATUS_READY_MASK, 1, interval_us); + MHISTATUS_READY_MASK, 1, interval_us, + timeout_ms); if (ret) { dev_err(dev, "Device failed to enter MHI Ready\n"); return ret; @@ -479,7 +484,7 @@ static void mhi_pm_disable_transition(struct mhi_controller *mhi_cntrl) /* Wait for the reset bit to be cleared by the device */ ret = mhi_poll_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL, - MHICTRL_RESET_MASK, 0, 25000); + MHICTRL_RESET_MASK, 0, 25000, mhi_cntrl->timeout_ms); if (ret) dev_err(dev, "Device failed to clear MHI Reset\n"); @@ -492,8 +497,8 @@ static void mhi_pm_disable_transition(struct mhi_controller *mhi_cntrl) if (!MHI_IN_PBL(mhi_get_exec_env(mhi_cntrl))) { /* wait for ready to be set */ ret = mhi_poll_reg_field(mhi_cntrl, mhi_cntrl->regs, - MHISTATUS, - MHISTATUS_READY_MASK, 1, 25000); + MHISTATUS, MHISTATUS_READY_MASK, + 1, 25000, mhi_cntrl->timeout_ms); if (ret) dev_err(dev, "Device failed to enter READY state\n"); } @@ -1111,7 +1116,8 @@ int mhi_async_power_up(struct mhi_controller *mhi_cntrl) if (state == MHI_STATE_SYS_ERR) { mhi_set_mhi_state(mhi_cntrl, MHI_STATE_RESET); ret = mhi_poll_reg_field(mhi_cntrl, mhi_cntrl->regs, MHICTRL, - MHICTRL_RESET_MASK, 0, interval_us); + MHICTRL_RESET_MASK, 0, interval_us, + mhi_cntrl->timeout_ms); if (ret) { dev_info(dev, "Failed to reset MHI due to syserr state\n"); goto error_exit; @@ -1202,14 +1208,18 @@ EXPORT_SYMBOL_GPL(mhi_power_down); int mhi_sync_power_up(struct mhi_controller *mhi_cntrl) { int ret = mhi_async_power_up(mhi_cntrl); + u32 timeout_ms; if (ret) return ret; + /* Some devices need more time to set ready during power up */ + timeout_ms = mhi_cntrl->ready_timeout_ms ? + mhi_cntrl->ready_timeout_ms : mhi_cntrl->timeout_ms; wait_event_timeout(mhi_cntrl->state_event, MHI_IN_MISSION_MODE(mhi_cntrl->ee) || MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state), - msecs_to_jiffies(mhi_cntrl->timeout_ms)); + msecs_to_jiffies(timeout_ms)); ret = (MHI_IN_MISSION_MODE(mhi_cntrl->ee)) ? 
0 : -ETIMEDOUT; if (ret) diff --git a/drivers/bus/moxtet.c b/drivers/bus/moxtet.c index e384fbc6c1d9..641c1a6adc8a 100644 --- a/drivers/bus/moxtet.c +++ b/drivers/bus/moxtet.c @@ -102,7 +102,7 @@ static int moxtet_match(struct device *dev, struct device_driver *drv) return 0; } -static struct bus_type moxtet_bus_type = { +static const struct bus_type moxtet_bus_type = { .name = "moxtet", .dev_groups = moxtet_dev_groups, .match = moxtet_match, diff --git a/drivers/cdx/cdx.c b/drivers/cdx/cdx.c index 4461c6c9313f..b74d76afccb6 100644 --- a/drivers/cdx/cdx.c +++ b/drivers/cdx/cdx.c @@ -57,13 +57,17 @@ #include <linux/init.h> #include <linux/kernel.h> +#include <linux/of.h> #include <linux/of_device.h> +#include <linux/of_platform.h> +#include <linux/platform_device.h> #include <linux/slab.h> #include <linux/mm.h> #include <linux/idr.h> #include <linux/cdx/cdx_bus.h> #include <linux/iommu.h> #include <linux/dma-map-ops.h> +#include <linux/debugfs.h> #include "cdx.h" /* Default DMA mask for devices on a CDX bus */ @@ -74,9 +78,13 @@ static DEFINE_IDA(cdx_controller_ida); /* Lock to protect controller ops */ static DEFINE_MUTEX(cdx_controller_lock); +/* Debugfs dir for cdx bus */ +static struct dentry *cdx_debugfs_dir; static char *compat_node_name = "xlnx,versal-net-cdx"; +static void cdx_destroy_res_attr(struct cdx_device *cdx_dev, int num); + /** * cdx_dev_reset - Reset a CDX device * @dev: CDX device @@ -145,6 +153,8 @@ static int cdx_unregister_device(struct device *dev, if (cdx_dev->enabled && cdx->ops->bus_disable) cdx->ops->bus_disable(cdx, cdx_dev->bus_num); } else { + cdx_destroy_res_attr(cdx_dev, MAX_CDX_DEV_RESOURCES); + debugfs_remove_recursive(cdx_dev->debugfs_dir); kfree(cdx_dev->driver_override); cdx_dev->driver_override = NULL; } @@ -548,6 +558,31 @@ static const struct attribute_group *cdx_dev_groups[] = { NULL, }; +static int cdx_debug_resource_show(struct seq_file *s, void *data) +{ + struct cdx_device *cdx_dev = s->private; + int i; + + for (i = 0; i < MAX_CDX_DEV_RESOURCES; i++) { + struct resource *res = &cdx_dev->res[i]; + + seq_printf(s, "%pr\n", res); + } + + return 0; +} +DEFINE_SHOW_ATTRIBUTE(cdx_debug_resource); + +static void cdx_device_debugfs_init(struct cdx_device *cdx_dev) +{ + cdx_dev->debugfs_dir = debugfs_create_dir(dev_name(&cdx_dev->dev), cdx_debugfs_dir); + if (IS_ERR(cdx_dev->debugfs_dir)) + return; + + debugfs_create_file("resource", 0444, cdx_dev->debugfs_dir, cdx_dev, + &cdx_debug_resource_fops); +} + static ssize_t rescan_store(const struct bus_type *bus, const char *buf, size_t count) { @@ -569,12 +604,12 @@ static ssize_t rescan_store(const struct bus_type *bus, /* Rescan all the devices */ for_each_compatible_node(np, NULL, compat_node_name) { - if (!np) - return -EINVAL; - pd = of_find_device_by_node(np); - if (!pd) - return -EINVAL; + if (!pd) { + of_node_put(np); + count = -EINVAL; + goto unlock; + } cdx = platform_get_drvdata(pd); if (cdx && cdx->controller_registered && cdx->ops->scan) @@ -583,6 +618,7 @@ static ssize_t rescan_store(const struct bus_type *bus, put_device(&pd->dev); } +unlock: mutex_unlock(&cdx_controller_lock); return count; @@ -640,11 +676,105 @@ static void cdx_device_release(struct device *dev) kfree(cdx_dev); } +static const struct vm_operations_struct cdx_phys_vm_ops = { +#ifdef CONFIG_HAVE_IOREMAP_PROT + .access = generic_access_phys, +#endif +}; + +/** + * cdx_mmap_resource - map a CDX resource into user memory space + * @fp: File pointer. 
Not used in this function, but required where + * this API is registered as a callback. + * @kobj: kobject for mapping + * @attr: struct bin_attribute for the file being mapped + * @vma: struct vm_area_struct passed into the mmap + * + * Use the regular CDX mapping routines to map a CDX resource into userspace. + * + * Return: true on success, false otherwise. + */ +static int cdx_mmap_resource(struct file *fp, struct kobject *kobj, + struct bin_attribute *attr, + struct vm_area_struct *vma) +{ + struct cdx_device *cdx_dev = to_cdx_device(kobj_to_dev(kobj)); + int num = (unsigned long)attr->private; + struct resource *res; + unsigned long size; + + res = &cdx_dev->res[num]; + if (iomem_is_exclusive(res->start)) + return -EINVAL; + + /* Make sure the caller is mapping a valid resource for this device */ + size = ((cdx_resource_len(cdx_dev, num) - 1) >> PAGE_SHIFT) + 1; + if (vma->vm_pgoff + vma_pages(vma) > size) + return -EINVAL; + + /* + * Map memory region and vm->vm_pgoff is expected to be an + * offset within that region. + */ + vma->vm_page_prot = pgprot_device(vma->vm_page_prot); + vma->vm_pgoff += (cdx_resource_start(cdx_dev, num) >> PAGE_SHIFT); + vma->vm_ops = &cdx_phys_vm_ops; + return io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, + vma->vm_end - vma->vm_start, + vma->vm_page_prot); +} + +static void cdx_destroy_res_attr(struct cdx_device *cdx_dev, int num) +{ + int i; + + /* removing the bin attributes */ + for (i = 0; i < num; i++) { + struct bin_attribute *res_attr; + + res_attr = cdx_dev->res_attr[i]; + if (res_attr) { + sysfs_remove_bin_file(&cdx_dev->dev.kobj, res_attr); + kfree(res_attr); + } + } +} + +#define CDX_RES_ATTR_NAME_LEN 10 +static int cdx_create_res_attr(struct cdx_device *cdx_dev, int num) +{ + struct bin_attribute *res_attr; + char *res_attr_name; + int ret; + + res_attr = kzalloc(sizeof(*res_attr) + CDX_RES_ATTR_NAME_LEN, GFP_ATOMIC); + if (!res_attr) + return -ENOMEM; + + res_attr_name = (char *)(res_attr + 1); + + sysfs_bin_attr_init(res_attr); + + cdx_dev->res_attr[num] = res_attr; + sprintf(res_attr_name, "resource%d", num); + + res_attr->mmap = cdx_mmap_resource; + res_attr->attr.name = res_attr_name; + res_attr->attr.mode = 0600; + res_attr->size = cdx_resource_len(cdx_dev, num); + res_attr->private = (void *)(unsigned long)num; + ret = sysfs_create_bin_file(&cdx_dev->dev.kobj, res_attr); + if (ret) + kfree(res_attr); + + return ret; +} + int cdx_device_add(struct cdx_dev_params *dev_params) { struct cdx_controller *cdx = dev_params->cdx; struct cdx_device *cdx_dev; - int ret; + int ret, i; cdx_dev = kzalloc(sizeof(*cdx_dev), GFP_KERNEL); if (!cdx_dev) @@ -687,7 +817,28 @@ int cdx_device_add(struct cdx_dev_params *dev_params) goto fail; } + /* Create resource<N> attributes */ + for (i = 0; i < MAX_CDX_DEV_RESOURCES; i++) { + if (cdx_resource_flags(cdx_dev, i) & IORESOURCE_MEM) { + /* skip empty resources */ + if (!cdx_resource_len(cdx_dev, i)) + continue; + + ret = cdx_create_res_attr(cdx_dev, i); + if (ret != 0) { + dev_err(&cdx_dev->dev, + "cdx device resource<%d> file creation failed: %d", i, ret); + goto resource_create_fail; + } + } + } + + cdx_device_debugfs_init(cdx_dev); + return 0; +resource_create_fail: + cdx_destroy_res_attr(cdx_dev, i); + device_del(&cdx_dev->dev); fail: /* * Do not free cdx_dev here as it would be freed in @@ -788,6 +939,12 @@ EXPORT_SYMBOL_NS_GPL(cdx_unregister_controller, CDX_BUS_CONTROLLER); static int __init cdx_bus_init(void) { - return bus_register(&cdx_bus_type); + int ret; + + ret = 
bus_register(&cdx_bus_type); + if (!ret) + cdx_debugfs_dir = debugfs_create_dir(cdx_bus_type.name, NULL); + + return ret; } postcore_initcall(cdx_bus_init); diff --git a/drivers/char/ppdev.c b/drivers/char/ppdev.c index 4c188e9e477c..ee951b265213 100644 --- a/drivers/char/ppdev.c +++ b/drivers/char/ppdev.c @@ -299,7 +299,7 @@ static int register_device(int minor, struct pp_struct *pp) goto err; } - index = ida_simple_get(&ida_index, 0, 0, GFP_KERNEL); + index = ida_alloc(&ida_index, GFP_KERNEL); memset(&ppdev_cb, 0, sizeof(ppdev_cb)); ppdev_cb.irq_func = pp_irq; ppdev_cb.flags = (pp->flags & PP_EXCL) ? PARPORT_FLAG_EXCL : 0; @@ -310,7 +310,7 @@ static int register_device(int minor, struct pp_struct *pp) if (!pdev) { pr_warn("%s: failed to register device!\n", name); rc = -ENXIO; - ida_simple_remove(&ida_index, index); + ida_free(&ida_index, index); goto err; } @@ -750,7 +750,7 @@ static int pp_release(struct inode *inode, struct file *file) if (pp->pdev) { parport_unregister_device(pp->pdev); - ida_simple_remove(&ida_index, pp->index); + ida_free(&ida_index, pp->index); pp->pdev = NULL; pr_debug(CHRDEV "%x: unregistered pardevice\n", minor); } diff --git a/drivers/comedi/comedi_fops.c b/drivers/comedi/comedi_fops.c index 1548dea15df1..1b481731df96 100644 --- a/drivers/comedi/comedi_fops.c +++ b/drivers/comedi/comedi_fops.c @@ -1714,8 +1714,8 @@ static int __comedi_get_user_chanlist(struct comedi_device *dev, lockdep_assert_held(&dev->mutex); cmd->chanlist = NULL; - chanlist = memdup_user(user_chanlist, - cmd->chanlist_len * sizeof(unsigned int)); + chanlist = memdup_array_user(user_chanlist, + cmd->chanlist_len, sizeof(unsigned int)); if (IS_ERR(chanlist)) return PTR_ERR(chanlist); diff --git a/drivers/edac/versal_edac.c b/drivers/edac/versal_edac.c index 8625de20fc71..62caf454b567 100644 --- a/drivers/edac/versal_edac.c +++ b/drivers/edac/versal_edac.c @@ -1005,7 +1005,7 @@ static int mc_probe(struct platform_device *pdev) goto free_edac_mc; } - rc = xlnx_register_event(PM_NOTIFY_CB, EVENT_ERROR_PMC_ERR1, + rc = xlnx_register_event(PM_NOTIFY_CB, VERSAL_EVENT_ERROR_PMC_ERR1, XPM_EVENT_ERROR_MASK_DDRMC_CR | XPM_EVENT_ERROR_MASK_DDRMC_NCR | XPM_EVENT_ERROR_MASK_NOC_CR | XPM_EVENT_ERROR_MASK_NOC_NCR, false, err_callback, mci); @@ -1042,7 +1042,7 @@ static int mc_remove(struct platform_device *pdev) debugfs_remove_recursive(priv->debugfs); #endif - xlnx_unregister_event(PM_NOTIFY_CB, EVENT_ERROR_PMC_ERR1, + xlnx_unregister_event(PM_NOTIFY_CB, VERSAL_EVENT_ERROR_PMC_ERR1, XPM_EVENT_ERROR_MASK_DDRMC_CR | XPM_EVENT_ERROR_MASK_NOC_CR | XPM_EVENT_ERROR_MASK_NOC_NCR | diff --git a/drivers/extcon/extcon-qcom-spmi-misc.c b/drivers/extcon/extcon-qcom-spmi-misc.c index f72e90ceca53..53de581a393a 100644 --- a/drivers/extcon/extcon-qcom-spmi-misc.c +++ b/drivers/extcon/extcon-qcom-spmi-misc.c @@ -1,5 +1,5 @@ // SPDX-License-Identifier: GPL-2.0-only -/** +/* * extcon-qcom-spmi-misc.c - Qualcomm USB extcon driver to support USB ID * and VBUS detection based on extcon-usb-gpio.c. 
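 *
 * A minimal sketch of the ida_alloc()/ida_free() API that the ppdev hunk
 * above migrates to (ida_simple_get()/ida_simple_remove() are the older
 * spellings of the same allocator); all names below are illustrative:
 *
 *        #include <linux/idr.h>
 *
 *        static DEFINE_IDA(demo_ida);
 *
 *        static int demo_register(void)
 *        {
 *                int id = ida_alloc(&demo_ida, GFP_KERNEL);
 *
 *                if (id < 0)
 *                        return id;        // -ENOMEM, -ENOSPC, ...
 *                // ... expose the device as "demo%d" using id ...
 *                return id;
 *        }
 *
 *        static void demo_unregister(int id)
 *        {
 *                ida_free(&demo_ida, id);
 *        }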
* diff --git a/drivers/extcon/extcon-usbc-tusb320.c b/drivers/extcon/extcon-usbc-tusb320.c index 4d08c2123e59..2eab341de6b7 100644 --- a/drivers/extcon/extcon-usbc-tusb320.c +++ b/drivers/extcon/extcon-usbc-tusb320.c @@ -17,6 +17,7 @@ #include <linux/usb/typec.h> #include <linux/usb/typec_altmode.h> #include <linux/usb/role.h> +#include <linux/irq.h> #define TUSB320_REG8 0x8 #define TUSB320_REG8_CURRENT_MODE_ADVERTISE GENMASK(7, 6) @@ -515,6 +516,8 @@ static int tusb320_probe(struct i2c_client *client) const void *match_data; unsigned int revision; int ret; + u32 irq_trigger_type = IRQF_TRIGGER_FALLING; + struct irq_data *irq_d; priv = devm_kzalloc(&client->dev, sizeof(*priv), GFP_KERNEL); if (!priv) @@ -568,9 +571,13 @@ static int tusb320_probe(struct i2c_client *client) */ tusb320_state_update_handler(priv, true); + irq_d = irq_get_irq_data(client->irq); + if (irq_d) + irq_trigger_type = irqd_get_trigger_type(irq_d); + ret = devm_request_threaded_irq(priv->dev, client->irq, NULL, tusb320_irq_handler, - IRQF_TRIGGER_FALLING | IRQF_ONESHOT, + IRQF_ONESHOT | irq_trigger_type, client->name, priv); if (ret) tusb320_typec_remove(priv); diff --git a/drivers/extcon/extcon.c b/drivers/extcon/extcon.c index 6f7a60d2ed91..e7f55c021e56 100644 --- a/drivers/extcon/extcon.c +++ b/drivers/extcon/extcon.c @@ -1280,8 +1280,6 @@ int extcon_dev_register(struct extcon_dev *edev) edev->id = ret; - dev_set_name(&edev->dev, "extcon%d", edev->id); - ret = extcon_alloc_cables(edev); if (ret < 0) goto err_alloc_cables; @@ -1310,6 +1308,7 @@ int extcon_dev_register(struct extcon_dev *edev) RAW_INIT_NOTIFIER_HEAD(&edev->nh_all); dev_set_drvdata(&edev->dev, edev); + dev_set_name(&edev->dev, "extcon%d", edev->id); edev->state = 0; ret = device_register(&edev->dev); diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c index a9f70e6e58ac..3ea64b22cf0d 100644 --- a/drivers/firmware/arm_scmi/driver.c +++ b/drivers/firmware/arm_scmi/driver.c @@ -2834,7 +2834,7 @@ clear_ida: return ret; } -static int scmi_remove(struct platform_device *pdev) +static void scmi_remove(struct platform_device *pdev) { int id; struct scmi_info *info = platform_get_drvdata(pdev); @@ -2868,8 +2868,6 @@ static int scmi_remove(struct platform_device *pdev) scmi_cleanup_txrx_channels(info); ida_free(&scmi_id, info->id); - - return 0; } static ssize_t protocol_version_show(struct device *dev, @@ -2947,7 +2945,7 @@ static struct platform_driver scmi_driver = { .dev_groups = versions_groups, }, .probe = scmi_probe, - .remove = scmi_remove, + .remove_new = scmi_remove, }; /** diff --git a/drivers/firmware/arm_scpi.c b/drivers/firmware/arm_scpi.c index 3f123f592cb4..94a6b4e667de 100644 --- a/drivers/firmware/arm_scpi.c +++ b/drivers/firmware/arm_scpi.c @@ -863,7 +863,7 @@ static void scpi_free_channels(void *data) mbox_free_channel(info->channels[i].chan); } -static int scpi_remove(struct platform_device *pdev) +static void scpi_remove(struct platform_device *pdev) { int i; struct scpi_drvinfo *info = platform_get_drvdata(pdev); @@ -874,8 +874,6 @@ static int scpi_remove(struct platform_device *pdev) kfree(info->dvfs[i]->opps); kfree(info->dvfs[i]); } - - return 0; } #define MAX_SCPI_XFERS 10 @@ -1048,7 +1046,7 @@ static struct platform_driver scpi_driver = { .dev_groups = versions_groups, }, .probe = scpi_probe, - .remove = scpi_remove, + .remove_new = scpi_remove, }; module_platform_driver(scpi_driver); diff --git a/drivers/firmware/imx/imx-dsp.c b/drivers/firmware/imx/imx-dsp.c index a48a58e0c61f..01c8ef14eaec 100644 
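/*
 * The tusb320 hunk above stops hard-coding IRQF_TRIGGER_FALLING and instead
 * reuses whatever trigger type the firmware described for the interrupt.
 * A minimal sketch of that pattern with hypothetical driver names, falling
 * back to a falling edge when no trigger is provided:
 */

#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/irq.h>

static int demo_request_irq(struct device *dev, unsigned int irq,
			    irq_handler_t thread_fn, void *data)
{
	u32 trigger = IRQF_TRIGGER_FALLING;	/* fallback */
	struct irq_data *irqd = irq_get_irq_data(irq);

	if (irqd && irqd_get_trigger_type(irqd))
		trigger = irqd_get_trigger_type(irqd);

	return devm_request_threaded_irq(dev, irq, NULL, thread_fn,
					 IRQF_ONESHOT | trigger,
					 dev_name(dev), data);
}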
--- a/drivers/firmware/imx/imx-dsp.c +++ b/drivers/firmware/imx/imx-dsp.c @@ -160,7 +160,7 @@ static int imx_dsp_probe(struct platform_device *pdev) return 0; } -static int imx_dsp_remove(struct platform_device *pdev) +static void imx_dsp_remove(struct platform_device *pdev) { struct imx_dsp_chan *dsp_chan; struct imx_dsp_ipc *dsp_ipc; @@ -173,8 +173,6 @@ static int imx_dsp_remove(struct platform_device *pdev) mbox_free_channel(dsp_chan->ch); kfree(dsp_chan->name); } - - return 0; } static struct platform_driver imx_dsp_driver = { @@ -182,7 +180,7 @@ static struct platform_driver imx_dsp_driver = { .name = "imx-dsp", }, .probe = imx_dsp_probe, - .remove = imx_dsp_remove, + .remove_new = imx_dsp_remove, }; builtin_platform_driver(imx_dsp_driver); diff --git a/drivers/firmware/mtk-adsp-ipc.c b/drivers/firmware/mtk-adsp-ipc.c index 85e94ddc7204..a762302978de 100644 --- a/drivers/firmware/mtk-adsp-ipc.c +++ b/drivers/firmware/mtk-adsp-ipc.c @@ -116,7 +116,7 @@ static int mtk_adsp_ipc_probe(struct platform_device *pdev) return 0; } -static int mtk_adsp_ipc_remove(struct platform_device *pdev) +static void mtk_adsp_ipc_remove(struct platform_device *pdev) { struct mtk_adsp_ipc *adsp_ipc = dev_get_drvdata(&pdev->dev); struct mtk_adsp_chan *adsp_chan; @@ -126,8 +126,6 @@ static int mtk_adsp_ipc_remove(struct platform_device *pdev) adsp_chan = &adsp_ipc->chans[i]; mbox_free_channel(adsp_chan->ch); } - - return 0; } static struct platform_driver mtk_adsp_ipc_driver = { @@ -135,7 +133,7 @@ static struct platform_driver mtk_adsp_ipc_driver = { .name = "mtk-adsp-ipc", }, .probe = mtk_adsp_ipc_probe, - .remove = mtk_adsp_ipc_remove, + .remove_new = mtk_adsp_ipc_remove, }; builtin_platform_driver(mtk_adsp_ipc_driver); diff --git a/drivers/firmware/qemu_fw_cfg.c b/drivers/firmware/qemu_fw_cfg.c index 1448f61173b3..03da9a4354f8 100644 --- a/drivers/firmware/qemu_fw_cfg.c +++ b/drivers/firmware/qemu_fw_cfg.c @@ -731,7 +731,7 @@ err_sel: return err; } -static int fw_cfg_sysfs_remove(struct platform_device *pdev) +static void fw_cfg_sysfs_remove(struct platform_device *pdev) { pr_debug("fw_cfg: unloading.\n"); fw_cfg_sysfs_cache_cleanup(); @@ -739,7 +739,6 @@ static int fw_cfg_sysfs_remove(struct platform_device *pdev) fw_cfg_io_cleanup(); fw_cfg_kset_unregister_recursive(fw_cfg_fname_kset); fw_cfg_kobj_cleanup(fw_cfg_sel_ko); - return 0; } static const struct of_device_id fw_cfg_sysfs_mmio_match[] = { @@ -758,7 +757,7 @@ MODULE_DEVICE_TABLE(acpi, fw_cfg_sysfs_acpi_match); static struct platform_driver fw_cfg_sysfs_driver = { .probe = fw_cfg_sysfs_probe, - .remove = fw_cfg_sysfs_remove, + .remove_new = fw_cfg_sysfs_remove, .driver = { .name = "fw_cfg", .of_match_table = fw_cfg_sysfs_mmio_match, diff --git a/drivers/firmware/raspberrypi.c b/drivers/firmware/raspberrypi.c index 4cd290a60fba..322aada20f74 100644 --- a/drivers/firmware/raspberrypi.c +++ b/drivers/firmware/raspberrypi.c @@ -317,7 +317,7 @@ static void rpi_firmware_shutdown(struct platform_device *pdev) rpi_firmware_property(fw, RPI_FIRMWARE_NOTIFY_REBOOT, NULL, 0); } -static int rpi_firmware_remove(struct platform_device *pdev) +static void rpi_firmware_remove(struct platform_device *pdev) { struct rpi_firmware *fw = platform_get_drvdata(pdev); @@ -327,8 +327,6 @@ static int rpi_firmware_remove(struct platform_device *pdev) rpi_clk = NULL; rpi_firmware_put(fw); - - return 0; } static const struct of_device_id rpi_firmware_of_match[] = { @@ -406,7 +404,7 @@ static struct platform_driver rpi_firmware_driver = { }, .probe = rpi_firmware_probe, 
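/*
 * The .remove() -> .remove_new() conversions in the surrounding hunks move
 * these platform drivers to the void-returning remove callback; a non-zero
 * return from the old callback was effectively ignored (at most warned
 * about) by the driver core.  A minimal sketch with made-up names, where
 * demo_probe(), demo_priv and demo_teardown() are stand-ins:
 *
 *        #include <linux/module.h>
 *        #include <linux/platform_device.h>
 *
 *        static void demo_remove(struct platform_device *pdev)
 *        {
 *                struct demo_priv *priv = platform_get_drvdata(pdev);
 *
 *                demo_teardown(priv);        // nothing left to return
 *        }
 *
 *        static struct platform_driver demo_driver = {
 *                .probe      = demo_probe,        // assumed probe function
 *                .remove_new = demo_remove,
 *                .driver     = {
 *                        .name = "demo",
 *                },
 *        };
 *        module_platform_driver(demo_driver);
 */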
.shutdown = rpi_firmware_shutdown, - .remove = rpi_firmware_remove, + .remove_new = rpi_firmware_remove, }; module_platform_driver(rpi_firmware_driver); diff --git a/drivers/firmware/stratix10-rsu.c b/drivers/firmware/stratix10-rsu.c index 4f7a7abada48..e20cee9c2d32 100644 --- a/drivers/firmware/stratix10-rsu.c +++ b/drivers/firmware/stratix10-rsu.c @@ -793,17 +793,16 @@ static int stratix10_rsu_probe(struct platform_device *pdev) return ret; } -static int stratix10_rsu_remove(struct platform_device *pdev) +static void stratix10_rsu_remove(struct platform_device *pdev) { struct stratix10_rsu_priv *priv = platform_get_drvdata(pdev); stratix10_svc_free_channel(priv->chan); - return 0; } static struct platform_driver stratix10_rsu_driver = { .probe = stratix10_rsu_probe, - .remove = stratix10_rsu_remove, + .remove_new = stratix10_rsu_remove, .driver = { .name = "stratix10-rsu", .dev_groups = rsu_groups, diff --git a/drivers/firmware/stratix10-svc.c b/drivers/firmware/stratix10-svc.c index c693da60e9a9..528f37417aea 100644 --- a/drivers/firmware/stratix10-svc.c +++ b/drivers/firmware/stratix10-svc.c @@ -1251,7 +1251,7 @@ err_destroy_pool: return ret; } -static int stratix10_svc_drv_remove(struct platform_device *pdev) +static void stratix10_svc_drv_remove(struct platform_device *pdev) { struct stratix10_svc *svc = dev_get_drvdata(&pdev->dev); struct stratix10_svc_controller *ctrl = platform_get_drvdata(pdev); @@ -1267,13 +1267,11 @@ static int stratix10_svc_drv_remove(struct platform_device *pdev) if (ctrl->genpool) gen_pool_destroy(ctrl->genpool); list_del(&ctrl->node); - - return 0; } static struct platform_driver stratix10_svc_driver = { .probe = stratix10_svc_drv_probe, - .remove = stratix10_svc_drv_remove, + .remove_new = stratix10_svc_drv_remove, .driver = { .name = "stratix10-svc", .of_match_table = stratix10_svc_drv_match, diff --git a/drivers/firmware/turris-mox-rwtm.c b/drivers/firmware/turris-mox-rwtm.c index 2de0fb139ce1..31d962cdd6eb 100644 --- a/drivers/firmware/turris-mox-rwtm.c +++ b/drivers/firmware/turris-mox-rwtm.c @@ -554,7 +554,7 @@ put_kobj: return ret; } -static int turris_mox_rwtm_remove(struct platform_device *pdev) +static void turris_mox_rwtm_remove(struct platform_device *pdev) { struct mox_rwtm *rwtm = platform_get_drvdata(pdev); @@ -562,8 +562,6 @@ static int turris_mox_rwtm_remove(struct platform_device *pdev) sysfs_remove_files(rwtm_to_kobj(rwtm), mox_rwtm_attrs); kobject_put(rwtm_to_kobj(rwtm)); mbox_free_channel(rwtm->mbox); - - return 0; } static const struct of_device_id turris_mox_rwtm_match[] = { @@ -576,7 +574,7 @@ MODULE_DEVICE_TABLE(of, turris_mox_rwtm_match); static struct platform_driver turris_mox_rwtm_driver = { .probe = turris_mox_rwtm_probe, - .remove = turris_mox_rwtm_remove, + .remove_new = turris_mox_rwtm_remove, .driver = { .name = DRIVER_NAME, .of_match_table = turris_mox_rwtm_match, diff --git a/drivers/firmware/xilinx/zynqmp.c b/drivers/firmware/xilinx/zynqmp.c index b0d22d4455d9..79789f0563f6 100644 --- a/drivers/firmware/xilinx/zynqmp.c +++ b/drivers/firmware/xilinx/zynqmp.c @@ -92,6 +92,8 @@ static int zynqmp_pm_ret_code(u32 ret_status) return 0; case XST_PM_NO_FEATURE: return -ENOTSUPP; + case XST_PM_INVALID_VERSION: + return -EOPNOTSUPP; case XST_PM_NO_ACCESS: return -EACCES; case XST_PM_ABORT_SUSPEND: @@ -101,13 +103,13 @@ static int zynqmp_pm_ret_code(u32 ret_status) case XST_PM_INTERNAL: case XST_PM_CONFLICT: case XST_PM_INVALID_NODE: + case XST_PM_INVALID_CRC: default: return -EINVAL; } } -static noinline int do_fw_call_fail(u64 
arg0, u64 arg1, u64 arg2, - u32 *ret_payload) +static noinline int do_fw_call_fail(u32 *ret_payload, u32 num_args, ...) { return -ENODEV; } @@ -116,25 +118,35 @@ static noinline int do_fw_call_fail(u64 arg0, u64 arg1, u64 arg2, * PM function call wrapper * Invoke do_fw_call_smc or do_fw_call_hvc, depending on the configuration */ -static int (*do_fw_call)(u64, u64, u64, u32 *ret_payload) = do_fw_call_fail; +static int (*do_fw_call)(u32 *ret_payload, u32, ...) = do_fw_call_fail; /** * do_fw_call_smc() - Call system-level platform management layer (SMC) - * @arg0: Argument 0 to SMC call - * @arg1: Argument 1 to SMC call - * @arg2: Argument 2 to SMC call + * @num_args: Number of variable arguments should be <= 8 * @ret_payload: Returned value array * * Invoke platform management function via SMC call (no hypervisor present). * * Return: Returns status, either success or error+reason */ -static noinline int do_fw_call_smc(u64 arg0, u64 arg1, u64 arg2, - u32 *ret_payload) +static noinline int do_fw_call_smc(u32 *ret_payload, u32 num_args, ...) { struct arm_smccc_res res; + u64 args[8] = {0}; + va_list arg_list; + u8 i; - arm_smccc_smc(arg0, arg1, arg2, 0, 0, 0, 0, 0, &res); + if (num_args > 8) + return -EINVAL; + + va_start(arg_list, num_args); + + for (i = 0; i < num_args; i++) + args[i] = va_arg(arg_list, u64); + + va_end(arg_list); + + arm_smccc_smc(args[0], args[1], args[2], args[3], args[4], args[5], args[6], args[7], &res); if (ret_payload) { ret_payload[0] = lower_32_bits(res.a0); @@ -148,9 +160,7 @@ static noinline int do_fw_call_smc(u64 arg0, u64 arg1, u64 arg2, /** * do_fw_call_hvc() - Call system-level platform management layer (HVC) - * @arg0: Argument 0 to HVC call - * @arg1: Argument 1 to HVC call - * @arg2: Argument 2 to HVC call + * @num_args: Number of variable arguments should be <= 8 * @ret_payload: Returned value array * * Invoke platform management function via HVC @@ -159,12 +169,24 @@ static noinline int do_fw_call_smc(u64 arg0, u64 arg1, u64 arg2, * * Return: Returns status, either success or error+reason */ -static noinline int do_fw_call_hvc(u64 arg0, u64 arg1, u64 arg2, - u32 *ret_payload) +static noinline int do_fw_call_hvc(u32 *ret_payload, u32 num_args, ...) { struct arm_smccc_res res; + u64 args[8] = {0}; + va_list arg_list; + u8 i; + + if (num_args > 8) + return -EINVAL; + + va_start(arg_list, num_args); + + for (i = 0; i < num_args; i++) + args[i] = va_arg(arg_list, u64); + + va_end(arg_list); - arm_smccc_hvc(arg0, arg1, arg2, 0, 0, 0, 0, 0, &res); + arm_smccc_hvc(args[0], args[1], args[2], args[3], args[4], args[5], args[6], args[7], &res); if (ret_payload) { ret_payload[0] = lower_32_bits(res.a0); @@ -180,11 +202,31 @@ static int __do_feature_check_call(const u32 api_id, u32 *ret_payload) { int ret; u64 smc_arg[2]; + u32 module_id; + u32 feature_check_api_id; - smc_arg[0] = PM_SIP_SVC | PM_FEATURE_CHECK; - smc_arg[1] = api_id; + module_id = FIELD_GET(MODULE_ID_MASK, api_id); - ret = do_fw_call(smc_arg[0], smc_arg[1], 0, ret_payload); + /* + * Feature check of APIs belonging to PM, XSEM, and TF-A are handled by calling + * PM_FEATURE_CHECK API. For other modules, call PM_API_FEATURES API. + */ + if (module_id == PM_MODULE_ID || module_id == XSEM_MODULE_ID || module_id == TF_A_MODULE_ID) + feature_check_api_id = PM_FEATURE_CHECK; + else + feature_check_api_id = PM_API_FEATURES; + + /* + * Feature check of TF-A APIs is done in the TF-A layer and it expects for + * MODULE_ID_MASK bits of SMC's arg[0] to be the same as PM_MODULE_ID. 
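 *
 * For reference, FIELD_GET()/FIELD_PREP() from linux/bitfield.h perform the
 * shift implied by the mask itself.  Taking, purely for illustration, a
 * hypothetical module-id field in bits 11:8 of the API id:
 *
 *        #include <linux/bitfield.h>
 *        #include <linux/bits.h>
 *
 *        #define DEMO_MODULE_ID_MASK        GENMASK(11, 8)
 *        #define DEMO_API_ID_MASK           GENMASK(7, 0)
 *
 *        u32 api_id  = 0x0206;
 *        u32 module  = FIELD_GET(DEMO_MODULE_ID_MASK, api_id);            // 0x02
 *        u32 call    = api_id & DEMO_API_ID_MASK;                         // 0x06
 *        u32 rebuilt = FIELD_PREP(DEMO_MODULE_ID_MASK, module) | call;    // 0x0206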
+ */ + if (module_id == TF_A_MODULE_ID) + module_id = PM_MODULE_ID; + + smc_arg[0] = PM_SIP_SVC | FIELD_PREP(MODULE_ID_MASK, module_id) | feature_check_api_id; + smc_arg[1] = (api_id & API_ID_MASK); + + ret = do_fw_call(ret_payload, 2, smc_arg[0], smc_arg[1]); if (ret) ret = -EOPNOTSUPP; else @@ -295,11 +337,8 @@ EXPORT_SYMBOL_GPL(zynqmp_pm_is_function_supported); * zynqmp_pm_invoke_fn() - Invoke the system-level platform management layer * caller function depending on the configuration * @pm_api_id: Requested PM-API call - * @arg0: Argument 0 to requested PM-API call - * @arg1: Argument 1 to requested PM-API call - * @arg2: Argument 2 to requested PM-API call - * @arg3: Argument 3 to requested PM-API call * @ret_payload: Returned value array + * @num_args: Number of arguments to requested PM-API call * * Invoke platform management function for SMC or HVC call, depending on * configuration. @@ -316,26 +355,38 @@ EXPORT_SYMBOL_GPL(zynqmp_pm_is_function_supported); * * Return: Returns status, either success or error+reason */ -int zynqmp_pm_invoke_fn(u32 pm_api_id, u32 arg0, u32 arg1, - u32 arg2, u32 arg3, u32 *ret_payload) +int zynqmp_pm_invoke_fn(u32 pm_api_id, u32 *ret_payload, u32 num_args, ...) { /* * Added SIP service call Function Identifier * Make sure to stay in x0 register */ - u64 smc_arg[4]; - int ret; + u64 smc_arg[8]; + int ret, i; + va_list arg_list; + u32 args[14] = {0}; + + if (num_args > 14) + return -EINVAL; + + va_start(arg_list, num_args); /* Check if feature is supported or not */ ret = zynqmp_pm_feature(pm_api_id); if (ret < 0) return ret; + for (i = 0; i < num_args; i++) + args[i] = va_arg(arg_list, u32); + + va_end(arg_list); + smc_arg[0] = PM_SIP_SVC | pm_api_id; - smc_arg[1] = ((u64)arg1 << 32) | arg0; - smc_arg[2] = ((u64)arg3 << 32) | arg2; + for (i = 0; i < 7; i++) + smc_arg[i + 1] = ((u64)args[(i * 2) + 1] << 32) | args[i * 2]; - return do_fw_call(smc_arg[0], smc_arg[1], smc_arg[2], ret_payload); + return do_fw_call(ret_payload, 8, smc_arg[0], smc_arg[1], smc_arg[2], smc_arg[3], + smc_arg[4], smc_arg[5], smc_arg[6], smc_arg[7]); } static u32 pm_api_version; @@ -347,14 +398,12 @@ int zynqmp_pm_register_sgi(u32 sgi_num, u32 reset) { int ret; - ret = zynqmp_pm_invoke_fn(TF_A_PM_REGISTER_SGI, sgi_num, reset, 0, 0, - NULL); - if (!ret) + ret = zynqmp_pm_invoke_fn(TF_A_PM_REGISTER_SGI, NULL, 2, sgi_num, reset); + if (ret != -EOPNOTSUPP && !ret) return ret; /* try old implementation as fallback strategy if above fails */ - return zynqmp_pm_invoke_fn(PM_IOCTL, 0, IOCTL_REGISTER_SGI, sgi_num, - reset, NULL); + return zynqmp_pm_invoke_fn(PM_IOCTL, NULL, 3, IOCTL_REGISTER_SGI, sgi_num, reset); } /** @@ -376,7 +425,7 @@ int zynqmp_pm_get_api_version(u32 *version) *version = pm_api_version; return 0; } - ret = zynqmp_pm_invoke_fn(PM_GET_API_VERSION, 0, 0, 0, 0, ret_payload); + ret = zynqmp_pm_invoke_fn(PM_GET_API_VERSION, ret_payload, 0); *version = ret_payload[1]; return ret; @@ -399,7 +448,7 @@ int zynqmp_pm_get_chipid(u32 *idcode, u32 *version) if (!idcode || !version) return -EINVAL; - ret = zynqmp_pm_invoke_fn(PM_GET_CHIPID, 0, 0, 0, 0, ret_payload); + ret = zynqmp_pm_invoke_fn(PM_GET_CHIPID, ret_payload, 0); *idcode = ret_payload[1]; *version = ret_payload[2]; @@ -414,7 +463,7 @@ EXPORT_SYMBOL_GPL(zynqmp_pm_get_chipid); * * Return: Returns status, either success or error+reason */ -static int zynqmp_pm_get_family_info(u32 *family, u32 *subfamily) +int zynqmp_pm_get_family_info(u32 *family, u32 *subfamily) { u32 ret_payload[PAYLOAD_ARG_CNT]; u32 idcode; @@ -427,7 
+476,7 @@ static int zynqmp_pm_get_family_info(u32 *family, u32 *subfamily) return 0; } - ret = zynqmp_pm_invoke_fn(PM_GET_CHIPID, 0, 0, 0, 0, ret_payload); + ret = zynqmp_pm_invoke_fn(PM_GET_CHIPID, ret_payload, 0); if (ret < 0) return ret; @@ -439,6 +488,7 @@ static int zynqmp_pm_get_family_info(u32 *family, u32 *subfamily) return 0; } +EXPORT_SYMBOL_GPL(zynqmp_pm_get_family_info); /** * zynqmp_pm_get_trustzone_version() - Get secure trustzone firmware version @@ -459,8 +509,7 @@ static int zynqmp_pm_get_trustzone_version(u32 *version) *version = pm_tz_version; return 0; } - ret = zynqmp_pm_invoke_fn(PM_GET_TRUSTZONE_VERSION, 0, 0, - 0, 0, ret_payload); + ret = zynqmp_pm_invoke_fn(PM_GET_TRUSTZONE_VERSION, ret_payload, 0); *version = ret_payload[1]; return ret; @@ -507,8 +556,8 @@ int zynqmp_pm_query_data(struct zynqmp_pm_query_data qdata, u32 *out) { int ret; - ret = zynqmp_pm_invoke_fn(PM_QUERY_DATA, qdata.qid, qdata.arg1, - qdata.arg2, qdata.arg3, out); + ret = zynqmp_pm_invoke_fn(PM_QUERY_DATA, out, 4, qdata.qid, qdata.arg1, qdata.arg2, + qdata.arg3); /* * For clock name query, all bytes in SMC response are clock name @@ -530,7 +579,7 @@ EXPORT_SYMBOL_GPL(zynqmp_pm_query_data); */ int zynqmp_pm_clock_enable(u32 clock_id) { - return zynqmp_pm_invoke_fn(PM_CLOCK_ENABLE, clock_id, 0, 0, 0, NULL); + return zynqmp_pm_invoke_fn(PM_CLOCK_ENABLE, NULL, 1, clock_id); } EXPORT_SYMBOL_GPL(zynqmp_pm_clock_enable); @@ -545,7 +594,7 @@ EXPORT_SYMBOL_GPL(zynqmp_pm_clock_enable); */ int zynqmp_pm_clock_disable(u32 clock_id) { - return zynqmp_pm_invoke_fn(PM_CLOCK_DISABLE, clock_id, 0, 0, 0, NULL); + return zynqmp_pm_invoke_fn(PM_CLOCK_DISABLE, NULL, 1, clock_id); } EXPORT_SYMBOL_GPL(zynqmp_pm_clock_disable); @@ -564,8 +613,7 @@ int zynqmp_pm_clock_getstate(u32 clock_id, u32 *state) u32 ret_payload[PAYLOAD_ARG_CNT]; int ret; - ret = zynqmp_pm_invoke_fn(PM_CLOCK_GETSTATE, clock_id, 0, - 0, 0, ret_payload); + ret = zynqmp_pm_invoke_fn(PM_CLOCK_GETSTATE, ret_payload, 1, clock_id); *state = ret_payload[1]; return ret; @@ -584,8 +632,7 @@ EXPORT_SYMBOL_GPL(zynqmp_pm_clock_getstate); */ int zynqmp_pm_clock_setdivider(u32 clock_id, u32 divider) { - return zynqmp_pm_invoke_fn(PM_CLOCK_SETDIVIDER, clock_id, divider, - 0, 0, NULL); + return zynqmp_pm_invoke_fn(PM_CLOCK_SETDIVIDER, NULL, 2, clock_id, divider); } EXPORT_SYMBOL_GPL(zynqmp_pm_clock_setdivider); @@ -604,8 +651,7 @@ int zynqmp_pm_clock_getdivider(u32 clock_id, u32 *divider) u32 ret_payload[PAYLOAD_ARG_CNT]; int ret; - ret = zynqmp_pm_invoke_fn(PM_CLOCK_GETDIVIDER, clock_id, 0, - 0, 0, ret_payload); + ret = zynqmp_pm_invoke_fn(PM_CLOCK_GETDIVIDER, ret_payload, 1, clock_id); *divider = ret_payload[1]; return ret; @@ -613,47 +659,6 @@ int zynqmp_pm_clock_getdivider(u32 clock_id, u32 *divider) EXPORT_SYMBOL_GPL(zynqmp_pm_clock_getdivider); /** - * zynqmp_pm_clock_setrate() - Set the clock rate for given id - * @clock_id: ID of the clock - * @rate: rate value in hz - * - * This function is used by master to set rate for any clock. - * - * Return: Returns status, either success or error+reason - */ -int zynqmp_pm_clock_setrate(u32 clock_id, u64 rate) -{ - return zynqmp_pm_invoke_fn(PM_CLOCK_SETRATE, clock_id, - lower_32_bits(rate), - upper_32_bits(rate), - 0, NULL); -} -EXPORT_SYMBOL_GPL(zynqmp_pm_clock_setrate); - -/** - * zynqmp_pm_clock_getrate() - Get the clock rate for given id - * @clock_id: ID of the clock - * @rate: rate value in hz - * - * This function is used by master to get rate - * for any clock. 
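 *
 * Worth a tiny worked example, since the same 32/64-bit packing appears on
 * both sides of these firmware calls: two u32 halves combine as
 * ((u64)high << 32) | low, e.g.
 *
 *        u32 low  = 0x00001000;                  // ret_payload[1]
 *        u32 high = 0x00000002;                  // ret_payload[2]
 *        u64 rate = ((u64)high << 32) | low;     // 0x0000000200001000
 *
 * which is how the removed zynqmp_pm_clock_getrate() below rebuilt a 64-bit
 * rate, and, in the opposite direction, how zynqmp_pm_invoke_fn() now folds
 * consecutive u32 arguments into each 64-bit SMC register.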
- * - * Return: Returns status, either success or error+reason - */ -int zynqmp_pm_clock_getrate(u32 clock_id, u64 *rate) -{ - u32 ret_payload[PAYLOAD_ARG_CNT]; - int ret; - - ret = zynqmp_pm_invoke_fn(PM_CLOCK_GETRATE, clock_id, 0, - 0, 0, ret_payload); - *rate = ((u64)ret_payload[2] << 32) | ret_payload[1]; - - return ret; -} -EXPORT_SYMBOL_GPL(zynqmp_pm_clock_getrate); - -/** * zynqmp_pm_clock_setparent() - Set the clock parent for given id * @clock_id: ID of the clock * @parent_id: parent id @@ -664,8 +669,7 @@ EXPORT_SYMBOL_GPL(zynqmp_pm_clock_getrate); */ int zynqmp_pm_clock_setparent(u32 clock_id, u32 parent_id) { - return zynqmp_pm_invoke_fn(PM_CLOCK_SETPARENT, clock_id, - parent_id, 0, 0, NULL); + return zynqmp_pm_invoke_fn(PM_CLOCK_SETPARENT, NULL, 2, clock_id, parent_id); } EXPORT_SYMBOL_GPL(zynqmp_pm_clock_setparent); @@ -684,8 +688,7 @@ int zynqmp_pm_clock_getparent(u32 clock_id, u32 *parent_id) u32 ret_payload[PAYLOAD_ARG_CNT]; int ret; - ret = zynqmp_pm_invoke_fn(PM_CLOCK_GETPARENT, clock_id, 0, - 0, 0, ret_payload); + ret = zynqmp_pm_invoke_fn(PM_CLOCK_GETPARENT, ret_payload, 1, clock_id); *parent_id = ret_payload[1]; return ret; @@ -704,8 +707,7 @@ EXPORT_SYMBOL_GPL(zynqmp_pm_clock_getparent); */ int zynqmp_pm_set_pll_frac_mode(u32 clk_id, u32 mode) { - return zynqmp_pm_invoke_fn(PM_IOCTL, 0, IOCTL_SET_PLL_FRAC_MODE, - clk_id, mode, NULL); + return zynqmp_pm_invoke_fn(PM_IOCTL, NULL, 4, 0, IOCTL_SET_PLL_FRAC_MODE, clk_id, mode); } EXPORT_SYMBOL_GPL(zynqmp_pm_set_pll_frac_mode); @@ -721,8 +723,7 @@ EXPORT_SYMBOL_GPL(zynqmp_pm_set_pll_frac_mode); */ int zynqmp_pm_get_pll_frac_mode(u32 clk_id, u32 *mode) { - return zynqmp_pm_invoke_fn(PM_IOCTL, 0, IOCTL_GET_PLL_FRAC_MODE, - clk_id, 0, mode); + return zynqmp_pm_invoke_fn(PM_IOCTL, mode, 3, 0, IOCTL_GET_PLL_FRAC_MODE, clk_id); } EXPORT_SYMBOL_GPL(zynqmp_pm_get_pll_frac_mode); @@ -739,8 +740,7 @@ EXPORT_SYMBOL_GPL(zynqmp_pm_get_pll_frac_mode); */ int zynqmp_pm_set_pll_frac_data(u32 clk_id, u32 data) { - return zynqmp_pm_invoke_fn(PM_IOCTL, 0, IOCTL_SET_PLL_FRAC_DATA, - clk_id, data, NULL); + return zynqmp_pm_invoke_fn(PM_IOCTL, NULL, 4, 0, IOCTL_SET_PLL_FRAC_DATA, clk_id, data); } EXPORT_SYMBOL_GPL(zynqmp_pm_set_pll_frac_data); @@ -756,8 +756,7 @@ EXPORT_SYMBOL_GPL(zynqmp_pm_set_pll_frac_data); */ int zynqmp_pm_get_pll_frac_data(u32 clk_id, u32 *data) { - return zynqmp_pm_invoke_fn(PM_IOCTL, 0, IOCTL_GET_PLL_FRAC_DATA, - clk_id, 0, data); + return zynqmp_pm_invoke_fn(PM_IOCTL, data, 3, 0, IOCTL_GET_PLL_FRAC_DATA, clk_id); } EXPORT_SYMBOL_GPL(zynqmp_pm_get_pll_frac_data); @@ -778,9 +777,8 @@ int zynqmp_pm_set_sd_tapdelay(u32 node_id, u32 type, u32 value) u32 mask = (node_id == NODE_SD_0) ? GENMASK(15, 0) : GENMASK(31, 16); if (value) { - return zynqmp_pm_invoke_fn(PM_IOCTL, node_id, - IOCTL_SET_SD_TAPDELAY, - type, value, NULL); + return zynqmp_pm_invoke_fn(PM_IOCTL, NULL, 4, node_id, IOCTL_SET_SD_TAPDELAY, type, + value); } /* @@ -798,7 +796,7 @@ int zynqmp_pm_set_sd_tapdelay(u32 node_id, u32 type, u32 value) * Use PM_MMIO_READ/PM_MMIO_WRITE to re-implement the missing counter * part of IOCTL_SET_SD_TAPDELAY which clears SDx_ITAPDLYENA bits. 
*/ - return zynqmp_pm_invoke_fn(PM_MMIO_WRITE, reg, mask, 0, 0, NULL); + return zynqmp_pm_invoke_fn(PM_MMIO_WRITE, NULL, 2, reg, mask); } EXPORT_SYMBOL_GPL(zynqmp_pm_set_sd_tapdelay); @@ -814,8 +812,7 @@ EXPORT_SYMBOL_GPL(zynqmp_pm_set_sd_tapdelay); */ int zynqmp_pm_sd_dll_reset(u32 node_id, u32 type) { - return zynqmp_pm_invoke_fn(PM_IOCTL, node_id, IOCTL_SD_DLL_RESET, - type, 0, NULL); + return zynqmp_pm_invoke_fn(PM_IOCTL, NULL, 3, node_id, IOCTL_SD_DLL_RESET, type); } EXPORT_SYMBOL_GPL(zynqmp_pm_sd_dll_reset); @@ -831,8 +828,7 @@ EXPORT_SYMBOL_GPL(zynqmp_pm_sd_dll_reset); */ int zynqmp_pm_ospi_mux_select(u32 dev_id, u32 select) { - return zynqmp_pm_invoke_fn(PM_IOCTL, dev_id, IOCTL_OSPI_MUX_SELECT, - select, 0, NULL); + return zynqmp_pm_invoke_fn(PM_IOCTL, NULL, 3, dev_id, IOCTL_OSPI_MUX_SELECT, select); } EXPORT_SYMBOL_GPL(zynqmp_pm_ospi_mux_select); @@ -847,8 +843,7 @@ EXPORT_SYMBOL_GPL(zynqmp_pm_ospi_mux_select); */ int zynqmp_pm_write_ggs(u32 index, u32 value) { - return zynqmp_pm_invoke_fn(PM_IOCTL, 0, IOCTL_WRITE_GGS, - index, value, NULL); + return zynqmp_pm_invoke_fn(PM_IOCTL, NULL, 4, 0, IOCTL_WRITE_GGS, index, value); } EXPORT_SYMBOL_GPL(zynqmp_pm_write_ggs); @@ -863,8 +858,7 @@ EXPORT_SYMBOL_GPL(zynqmp_pm_write_ggs); */ int zynqmp_pm_read_ggs(u32 index, u32 *value) { - return zynqmp_pm_invoke_fn(PM_IOCTL, 0, IOCTL_READ_GGS, - index, 0, value); + return zynqmp_pm_invoke_fn(PM_IOCTL, value, 3, 0, IOCTL_READ_GGS, index); } EXPORT_SYMBOL_GPL(zynqmp_pm_read_ggs); @@ -880,8 +874,7 @@ EXPORT_SYMBOL_GPL(zynqmp_pm_read_ggs); */ int zynqmp_pm_write_pggs(u32 index, u32 value) { - return zynqmp_pm_invoke_fn(PM_IOCTL, 0, IOCTL_WRITE_PGGS, index, value, - NULL); + return zynqmp_pm_invoke_fn(PM_IOCTL, NULL, 4, 0, IOCTL_WRITE_PGGS, index, value); } EXPORT_SYMBOL_GPL(zynqmp_pm_write_pggs); @@ -897,15 +890,13 @@ EXPORT_SYMBOL_GPL(zynqmp_pm_write_pggs); */ int zynqmp_pm_read_pggs(u32 index, u32 *value) { - return zynqmp_pm_invoke_fn(PM_IOCTL, 0, IOCTL_READ_PGGS, index, 0, - value); + return zynqmp_pm_invoke_fn(PM_IOCTL, value, 3, 0, IOCTL_READ_PGGS, index); } EXPORT_SYMBOL_GPL(zynqmp_pm_read_pggs); int zynqmp_pm_set_tapdelay_bypass(u32 index, u32 value) { - return zynqmp_pm_invoke_fn(PM_IOCTL, 0, IOCTL_SET_TAPDELAY_BYPASS, - index, value, NULL); + return zynqmp_pm_invoke_fn(PM_IOCTL, NULL, 4, 0, IOCTL_SET_TAPDELAY_BYPASS, index, value); } EXPORT_SYMBOL_GPL(zynqmp_pm_set_tapdelay_bypass); @@ -920,8 +911,7 @@ EXPORT_SYMBOL_GPL(zynqmp_pm_set_tapdelay_bypass); */ int zynqmp_pm_set_boot_health_status(u32 value) { - return zynqmp_pm_invoke_fn(PM_IOCTL, 0, IOCTL_SET_BOOT_HEALTH_STATUS, - value, 0, NULL); + return zynqmp_pm_invoke_fn(PM_IOCTL, NULL, 3, 0, IOCTL_SET_BOOT_HEALTH_STATUS, value); } /** @@ -935,8 +925,7 @@ int zynqmp_pm_set_boot_health_status(u32 value) int zynqmp_pm_reset_assert(const enum zynqmp_pm_reset reset, const enum zynqmp_pm_reset_action assert_flag) { - return zynqmp_pm_invoke_fn(PM_RESET_ASSERT, reset, assert_flag, - 0, 0, NULL); + return zynqmp_pm_invoke_fn(PM_RESET_ASSERT, NULL, 2, reset, assert_flag); } EXPORT_SYMBOL_GPL(zynqmp_pm_reset_assert); @@ -955,8 +944,7 @@ int zynqmp_pm_reset_get_status(const enum zynqmp_pm_reset reset, u32 *status) if (!status) return -EINVAL; - ret = zynqmp_pm_invoke_fn(PM_RESET_GET_STATUS, reset, 0, - 0, 0, ret_payload); + ret = zynqmp_pm_invoke_fn(PM_RESET_GET_STATUS, ret_payload, 1, reset); *status = ret_payload[1]; return ret; @@ -981,9 +969,8 @@ int zynqmp_pm_fpga_load(const u64 address, const u32 size, const u32 flags) u32 
ret_payload[PAYLOAD_ARG_CNT]; int ret; - ret = zynqmp_pm_invoke_fn(PM_FPGA_LOAD, lower_32_bits(address), - upper_32_bits(address), size, flags, - ret_payload); + ret = zynqmp_pm_invoke_fn(PM_FPGA_LOAD, ret_payload, 4, lower_32_bits(address), + upper_32_bits(address), size, flags); if (ret_payload[0]) return -ret_payload[0]; @@ -1008,7 +995,7 @@ int zynqmp_pm_fpga_get_status(u32 *value) if (!value) return -EINVAL; - ret = zynqmp_pm_invoke_fn(PM_FPGA_GET_STATUS, 0, 0, 0, 0, ret_payload); + ret = zynqmp_pm_invoke_fn(PM_FPGA_GET_STATUS, ret_payload, 0); *value = ret_payload[1]; return ret; @@ -1036,11 +1023,9 @@ int zynqmp_pm_fpga_get_config_status(u32 *value) lower_addr = lower_32_bits((u64)&buf); upper_addr = upper_32_bits((u64)&buf); - ret = zynqmp_pm_invoke_fn(PM_FPGA_READ, - XILINX_ZYNQMP_PM_FPGA_CONFIG_STAT_OFFSET, - lower_addr, upper_addr, - XILINX_ZYNQMP_PM_FPGA_READ_CONFIG_REG, - ret_payload); + ret = zynqmp_pm_invoke_fn(PM_FPGA_READ, ret_payload, 4, + XILINX_ZYNQMP_PM_FPGA_CONFIG_STAT_OFFSET, lower_addr, upper_addr, + XILINX_ZYNQMP_PM_FPGA_READ_CONFIG_REG); *value = ret_payload[1]; @@ -1058,7 +1043,7 @@ EXPORT_SYMBOL_GPL(zynqmp_pm_fpga_get_config_status); */ int zynqmp_pm_pinctrl_request(const u32 pin) { - return zynqmp_pm_invoke_fn(PM_PINCTRL_REQUEST, pin, 0, 0, 0, NULL); + return zynqmp_pm_invoke_fn(PM_PINCTRL_REQUEST, NULL, 1, pin); } EXPORT_SYMBOL_GPL(zynqmp_pm_pinctrl_request); @@ -1072,36 +1057,11 @@ EXPORT_SYMBOL_GPL(zynqmp_pm_pinctrl_request); */ int zynqmp_pm_pinctrl_release(const u32 pin) { - return zynqmp_pm_invoke_fn(PM_PINCTRL_RELEASE, pin, 0, 0, 0, NULL); + return zynqmp_pm_invoke_fn(PM_PINCTRL_RELEASE, NULL, 1, pin); } EXPORT_SYMBOL_GPL(zynqmp_pm_pinctrl_release); /** - * zynqmp_pm_pinctrl_get_function - Read function id set for the given pin - * @pin: Pin number - * @id: Buffer to store function ID - * - * This function provides the function currently set for the given pin. 
- * - * Return: Returns status, either success or error+reason - */ -int zynqmp_pm_pinctrl_get_function(const u32 pin, u32 *id) -{ - u32 ret_payload[PAYLOAD_ARG_CNT]; - int ret; - - if (!id) - return -EINVAL; - - ret = zynqmp_pm_invoke_fn(PM_PINCTRL_GET_FUNCTION, pin, 0, - 0, 0, ret_payload); - *id = ret_payload[1]; - - return ret; -} -EXPORT_SYMBOL_GPL(zynqmp_pm_pinctrl_get_function); - -/** * zynqmp_pm_pinctrl_set_function - Set requested function for the pin * @pin: Pin number * @id: Function ID to set @@ -1112,8 +1072,7 @@ EXPORT_SYMBOL_GPL(zynqmp_pm_pinctrl_get_function); */ int zynqmp_pm_pinctrl_set_function(const u32 pin, const u32 id) { - return zynqmp_pm_invoke_fn(PM_PINCTRL_SET_FUNCTION, pin, id, - 0, 0, NULL); + return zynqmp_pm_invoke_fn(PM_PINCTRL_SET_FUNCTION, NULL, 2, pin, id); } EXPORT_SYMBOL_GPL(zynqmp_pm_pinctrl_set_function); @@ -1136,8 +1095,7 @@ int zynqmp_pm_pinctrl_get_config(const u32 pin, const u32 param, if (!value) return -EINVAL; - ret = zynqmp_pm_invoke_fn(PM_PINCTRL_CONFIG_PARAM_GET, pin, param, - 0, 0, ret_payload); + ret = zynqmp_pm_invoke_fn(PM_PINCTRL_CONFIG_PARAM_GET, ret_payload, 2, pin, param); *value = ret_payload[1]; return ret; @@ -1166,8 +1124,7 @@ int zynqmp_pm_pinctrl_set_config(const u32 pin, const u32 param, return -EOPNOTSUPP; } - return zynqmp_pm_invoke_fn(PM_PINCTRL_CONFIG_PARAM_SET, pin, - param, value, 0, NULL); + return zynqmp_pm_invoke_fn(PM_PINCTRL_CONFIG_PARAM_SET, NULL, 3, pin, param, value); } EXPORT_SYMBOL_GPL(zynqmp_pm_pinctrl_set_config); @@ -1185,8 +1142,7 @@ unsigned int zynqmp_pm_bootmode_read(u32 *ps_mode) unsigned int ret; u32 ret_payload[PAYLOAD_ARG_CNT]; - ret = zynqmp_pm_invoke_fn(PM_MMIO_READ, CRL_APB_BOOT_PIN_CTRL, 0, - 0, 0, ret_payload); + ret = zynqmp_pm_invoke_fn(PM_MMIO_READ, ret_payload, 1, CRL_APB_BOOT_PIN_CTRL); *ps_mode = ret_payload[1]; @@ -1205,8 +1161,8 @@ EXPORT_SYMBOL_GPL(zynqmp_pm_bootmode_read); */ int zynqmp_pm_bootmode_write(u32 ps_mode) { - return zynqmp_pm_invoke_fn(PM_MMIO_WRITE, CRL_APB_BOOT_PIN_CTRL, - CRL_APB_BOOTPIN_CTRL_MASK, ps_mode, 0, NULL); + return zynqmp_pm_invoke_fn(PM_MMIO_WRITE, NULL, 3, CRL_APB_BOOT_PIN_CTRL, + CRL_APB_BOOTPIN_CTRL_MASK, ps_mode); } EXPORT_SYMBOL_GPL(zynqmp_pm_bootmode_write); @@ -1221,7 +1177,7 @@ EXPORT_SYMBOL_GPL(zynqmp_pm_bootmode_write); */ int zynqmp_pm_init_finalize(void) { - return zynqmp_pm_invoke_fn(PM_PM_INIT_FINALIZE, 0, 0, 0, 0, NULL); + return zynqmp_pm_invoke_fn(PM_PM_INIT_FINALIZE, NULL, 0); } EXPORT_SYMBOL_GPL(zynqmp_pm_init_finalize); @@ -1235,7 +1191,7 @@ EXPORT_SYMBOL_GPL(zynqmp_pm_init_finalize); */ int zynqmp_pm_set_suspend_mode(u32 mode) { - return zynqmp_pm_invoke_fn(PM_SET_SUSPEND_MODE, mode, 0, 0, 0, NULL); + return zynqmp_pm_invoke_fn(PM_SET_SUSPEND_MODE, NULL, 1, mode); } EXPORT_SYMBOL_GPL(zynqmp_pm_set_suspend_mode); @@ -1254,8 +1210,7 @@ EXPORT_SYMBOL_GPL(zynqmp_pm_set_suspend_mode); int zynqmp_pm_request_node(const u32 node, const u32 capabilities, const u32 qos, const enum zynqmp_pm_request_ack ack) { - return zynqmp_pm_invoke_fn(PM_REQUEST_NODE, node, capabilities, - qos, ack, NULL); + return zynqmp_pm_invoke_fn(PM_REQUEST_NODE, NULL, 4, node, capabilities, qos, ack); } EXPORT_SYMBOL_GPL(zynqmp_pm_request_node); @@ -1271,7 +1226,7 @@ EXPORT_SYMBOL_GPL(zynqmp_pm_request_node); */ int zynqmp_pm_release_node(const u32 node) { - return zynqmp_pm_invoke_fn(PM_RELEASE_NODE, node, 0, 0, 0, NULL); + return zynqmp_pm_invoke_fn(PM_RELEASE_NODE, NULL, 1, node); } EXPORT_SYMBOL_GPL(zynqmp_pm_release_node); @@ -1290,8 +1245,7 @@ int 
zynqmp_pm_get_rpu_mode(u32 node_id, enum rpu_oper_mode *rpu_mode) u32 ret_payload[PAYLOAD_ARG_CNT]; int ret; - ret = zynqmp_pm_invoke_fn(PM_IOCTL, node_id, - IOCTL_GET_RPU_OPER_MODE, 0, 0, ret_payload); + ret = zynqmp_pm_invoke_fn(PM_IOCTL, ret_payload, 2, node_id, IOCTL_GET_RPU_OPER_MODE); /* only set rpu_mode if no error */ if (ret == XST_PM_SUCCESS) @@ -1313,9 +1267,8 @@ EXPORT_SYMBOL_GPL(zynqmp_pm_get_rpu_mode); */ int zynqmp_pm_set_rpu_mode(u32 node_id, enum rpu_oper_mode rpu_mode) { - return zynqmp_pm_invoke_fn(PM_IOCTL, node_id, - IOCTL_SET_RPU_OPER_MODE, (u32)rpu_mode, - 0, NULL); + return zynqmp_pm_invoke_fn(PM_IOCTL, NULL, 3, node_id, IOCTL_SET_RPU_OPER_MODE, + (u32)rpu_mode); } EXPORT_SYMBOL_GPL(zynqmp_pm_set_rpu_mode); @@ -1331,9 +1284,8 @@ EXPORT_SYMBOL_GPL(zynqmp_pm_set_rpu_mode); */ int zynqmp_pm_set_tcm_config(u32 node_id, enum rpu_tcm_comb tcm_mode) { - return zynqmp_pm_invoke_fn(PM_IOCTL, node_id, - IOCTL_TCM_COMB_CONFIG, (u32)tcm_mode, 0, - NULL); + return zynqmp_pm_invoke_fn(PM_IOCTL, NULL, 3, node_id, IOCTL_TCM_COMB_CONFIG, + (u32)tcm_mode); } EXPORT_SYMBOL_GPL(zynqmp_pm_set_tcm_config); @@ -1348,7 +1300,7 @@ EXPORT_SYMBOL_GPL(zynqmp_pm_set_tcm_config); int zynqmp_pm_force_pwrdwn(const u32 node, const enum zynqmp_pm_request_ack ack) { - return zynqmp_pm_invoke_fn(PM_FORCE_POWERDOWN, node, ack, 0, 0, NULL); + return zynqmp_pm_invoke_fn(PM_FORCE_POWERDOWN, NULL, 2, node, ack); } EXPORT_SYMBOL_GPL(zynqmp_pm_force_pwrdwn); @@ -1367,8 +1319,8 @@ int zynqmp_pm_request_wake(const u32 node, const enum zynqmp_pm_request_ack ack) { /* set_addr flag is encoded into 1st bit of address */ - return zynqmp_pm_invoke_fn(PM_REQUEST_WAKEUP, node, address | set_addr, - address >> 32, ack, NULL); + return zynqmp_pm_invoke_fn(PM_REQUEST_WAKEUP, NULL, 4, node, address | set_addr, + address >> 32, ack); } EXPORT_SYMBOL_GPL(zynqmp_pm_request_wake); @@ -1388,15 +1340,14 @@ int zynqmp_pm_set_requirement(const u32 node, const u32 capabilities, const u32 qos, const enum zynqmp_pm_request_ack ack) { - return zynqmp_pm_invoke_fn(PM_SET_REQUIREMENT, node, capabilities, - qos, ack, NULL); + return zynqmp_pm_invoke_fn(PM_SET_REQUIREMENT, NULL, 4, node, capabilities, qos, ack); } EXPORT_SYMBOL_GPL(zynqmp_pm_set_requirement); /** * zynqmp_pm_load_pdi - Load and process PDI - * @src: Source device where PDI is located - * @address: PDI src address + * @src: Source device where PDI is located + * @address: PDI src address * * This function provides support to load PDI from linux * @@ -1404,9 +1355,8 @@ EXPORT_SYMBOL_GPL(zynqmp_pm_set_requirement); */ int zynqmp_pm_load_pdi(const u32 src, const u64 address) { - return zynqmp_pm_invoke_fn(PM_LOAD_PDI, src, - lower_32_bits(address), - upper_32_bits(address), 0, NULL); + return zynqmp_pm_invoke_fn(PM_LOAD_PDI, NULL, 3, src, lower_32_bits(address), + upper_32_bits(address)); } EXPORT_SYMBOL_GPL(zynqmp_pm_load_pdi); @@ -1426,9 +1376,8 @@ int zynqmp_pm_aes_engine(const u64 address, u32 *out) if (!out) return -EINVAL; - ret = zynqmp_pm_invoke_fn(PM_SECURE_AES, upper_32_bits(address), - lower_32_bits(address), - 0, 0, ret_payload); + ret = zynqmp_pm_invoke_fn(PM_SECURE_AES, ret_payload, 2, upper_32_bits(address), + lower_32_bits(address)); *out = ret_payload[1]; return ret; @@ -1456,8 +1405,7 @@ int zynqmp_pm_sha_hash(const u64 address, const u32 size, const u32 flags) u32 lower_addr = lower_32_bits(address); u32 upper_addr = upper_32_bits(address); - return zynqmp_pm_invoke_fn(PM_SECURE_SHA, upper_addr, lower_addr, - size, flags, NULL); + return 
zynqmp_pm_invoke_fn(PM_SECURE_SHA, NULL, 4, upper_addr, lower_addr, size, flags); } EXPORT_SYMBOL_GPL(zynqmp_pm_sha_hash); @@ -1479,8 +1427,7 @@ EXPORT_SYMBOL_GPL(zynqmp_pm_sha_hash); int zynqmp_pm_register_notifier(const u32 node, const u32 event, const u32 wake, const u32 enable) { - return zynqmp_pm_invoke_fn(PM_REGISTER_NOTIFIER, node, event, - wake, enable, NULL); + return zynqmp_pm_invoke_fn(PM_REGISTER_NOTIFIER, NULL, 4, node, event, wake, enable); } EXPORT_SYMBOL_GPL(zynqmp_pm_register_notifier); @@ -1493,8 +1440,7 @@ EXPORT_SYMBOL_GPL(zynqmp_pm_register_notifier); */ int zynqmp_pm_system_shutdown(const u32 type, const u32 subtype) { - return zynqmp_pm_invoke_fn(PM_SYSTEM_SHUTDOWN, type, subtype, - 0, 0, NULL); + return zynqmp_pm_invoke_fn(PM_SYSTEM_SHUTDOWN, NULL, 2, type, subtype); } /** @@ -1506,8 +1452,7 @@ int zynqmp_pm_system_shutdown(const u32 type, const u32 subtype) */ int zynqmp_pm_set_feature_config(enum pm_feature_config_id id, u32 value) { - return zynqmp_pm_invoke_fn(PM_IOCTL, 0, IOCTL_SET_FEATURE_CONFIG, - id, value, NULL); + return zynqmp_pm_invoke_fn(PM_IOCTL, NULL, 4, 0, IOCTL_SET_FEATURE_CONFIG, id, value); } /** @@ -1520,8 +1465,7 @@ int zynqmp_pm_set_feature_config(enum pm_feature_config_id id, u32 value) int zynqmp_pm_get_feature_config(enum pm_feature_config_id id, u32 *payload) { - return zynqmp_pm_invoke_fn(PM_IOCTL, 0, IOCTL_GET_FEATURE_CONFIG, - id, 0, payload); + return zynqmp_pm_invoke_fn(PM_IOCTL, payload, 3, 0, IOCTL_GET_FEATURE_CONFIG, id); } /** @@ -1534,8 +1478,7 @@ int zynqmp_pm_get_feature_config(enum pm_feature_config_id id, */ int zynqmp_pm_set_sd_config(u32 node, enum pm_sd_config_type config, u32 value) { - return zynqmp_pm_invoke_fn(PM_IOCTL, node, IOCTL_SET_SD_CONFIG, - config, value, NULL); + return zynqmp_pm_invoke_fn(PM_IOCTL, NULL, 4, node, IOCTL_SET_SD_CONFIG, config, value); } EXPORT_SYMBOL_GPL(zynqmp_pm_set_sd_config); @@ -1550,8 +1493,7 @@ EXPORT_SYMBOL_GPL(zynqmp_pm_set_sd_config); int zynqmp_pm_set_gem_config(u32 node, enum pm_gem_config_type config, u32 value) { - return zynqmp_pm_invoke_fn(PM_IOCTL, node, IOCTL_SET_GEM_CONFIG, - config, value, NULL); + return zynqmp_pm_invoke_fn(PM_IOCTL, NULL, 4, node, IOCTL_SET_GEM_CONFIG, config, value); } EXPORT_SYMBOL_GPL(zynqmp_pm_set_gem_config); @@ -1916,7 +1858,6 @@ ATTRIBUTE_GROUPS(zynqmp_firmware); static int zynqmp_firmware_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; - struct device_node *np; struct zynqmp_devinfo *devinfo; int ret; @@ -1924,22 +1865,9 @@ static int zynqmp_firmware_probe(struct platform_device *pdev) if (ret) return ret; - np = of_find_compatible_node(NULL, NULL, "xlnx,zynqmp"); - if (!np) { - np = of_find_compatible_node(NULL, NULL, "xlnx,versal"); - if (!np) - return 0; - + ret = do_feature_check_call(PM_FEATURE_CHECK); + if (ret >= 0 && ((ret & FIRMWARE_VERSION_MASK) >= PM_API_VERSION_1)) feature_check_enabled = true; - } - - if (!feature_check_enabled) { - ret = do_feature_check_call(PM_FEATURE_CHECK); - if (ret >= 0) - feature_check_enabled = true; - } - - of_node_put(np); devinfo = devm_kzalloc(dev, sizeof(*devinfo), GFP_KERNEL); if (!devinfo) @@ -1992,19 +1920,17 @@ static int zynqmp_firmware_probe(struct platform_device *pdev) zynqmp_pm_api_debugfs_init(); - np = of_find_compatible_node(NULL, NULL, "xlnx,versal"); - if (np) { + if (pm_family_code == VERSAL_FAMILY_CODE) { em_dev = platform_device_register_data(&pdev->dev, "xlnx_event_manager", -1, NULL, 0); if (IS_ERR(em_dev)) dev_err_probe(&pdev->dev, PTR_ERR(em_dev), "EM register 
fail with error\n"); } - of_node_put(np); return of_platform_populate(dev->of_node, NULL, NULL, dev); } -static int zynqmp_firmware_remove(struct platform_device *pdev) +static void zynqmp_firmware_remove(struct platform_device *pdev) { struct pm_api_feature_data *feature_data; struct hlist_node *tmp; @@ -2019,8 +1945,6 @@ static int zynqmp_firmware_remove(struct platform_device *pdev) } platform_device_unregister(em_dev); - - return 0; } static const struct of_device_id zynqmp_firmware_of_match[] = { @@ -2037,6 +1961,6 @@ static struct platform_driver zynqmp_firmware_driver = { .dev_groups = zynqmp_firmware_groups, }, .probe = zynqmp_firmware_probe, - .remove = zynqmp_firmware_remove, + .remove_new = zynqmp_firmware_remove, }; module_platform_driver(zynqmp_firmware_driver); diff --git a/drivers/fpga/altera-fpga2sdram.c b/drivers/fpga/altera-fpga2sdram.c index 1fa2ccc321ab..6b60ca004345 100644 --- a/drivers/fpga/altera-fpga2sdram.c +++ b/drivers/fpga/altera-fpga2sdram.c @@ -147,20 +147,18 @@ static int alt_fpga_bridge_probe(struct platform_device *pdev) return ret; } -static int alt_fpga_bridge_remove(struct platform_device *pdev) +static void alt_fpga_bridge_remove(struct platform_device *pdev) { struct fpga_bridge *br = platform_get_drvdata(pdev); fpga_bridge_unregister(br); - - return 0; } MODULE_DEVICE_TABLE(of, altera_fpga_of_match); static struct platform_driver altera_fpga_driver = { .probe = alt_fpga_bridge_probe, - .remove = alt_fpga_bridge_remove, + .remove_new = alt_fpga_bridge_remove, .driver = { .name = "altera_fpga2sdram_bridge", .of_match_table = of_match_ptr(altera_fpga_of_match), diff --git a/drivers/fpga/altera-freeze-bridge.c b/drivers/fpga/altera-freeze-bridge.c index 0c3fb8226908..44061cb16f87 100644 --- a/drivers/fpga/altera-freeze-bridge.c +++ b/drivers/fpga/altera-freeze-bridge.c @@ -253,18 +253,16 @@ static int altera_freeze_br_probe(struct platform_device *pdev) return 0; } -static int altera_freeze_br_remove(struct platform_device *pdev) +static void altera_freeze_br_remove(struct platform_device *pdev) { struct fpga_bridge *br = platform_get_drvdata(pdev); fpga_bridge_unregister(br); - - return 0; } static struct platform_driver altera_freeze_br_driver = { .probe = altera_freeze_br_probe, - .remove = altera_freeze_br_remove, + .remove_new = altera_freeze_br_remove, .driver = { .name = "altera_freeze_br", .of_match_table = altera_freeze_br_of_match, diff --git a/drivers/fpga/altera-hps2fpga.c b/drivers/fpga/altera-hps2fpga.c index 578663503297..6f8e24be19c6 100644 --- a/drivers/fpga/altera-hps2fpga.c +++ b/drivers/fpga/altera-hps2fpga.c @@ -191,7 +191,7 @@ err: return ret; } -static int alt_fpga_bridge_remove(struct platform_device *pdev) +static void alt_fpga_bridge_remove(struct platform_device *pdev) { struct fpga_bridge *bridge = platform_get_drvdata(pdev); struct altera_hps2fpga_data *priv = bridge->priv; @@ -199,15 +199,13 @@ static int alt_fpga_bridge_remove(struct platform_device *pdev) fpga_bridge_unregister(bridge); clk_disable_unprepare(priv->clk); - - return 0; } MODULE_DEVICE_TABLE(of, altera_fpga_of_match); static struct platform_driver alt_fpga_bridge_driver = { .probe = alt_fpga_bridge_probe, - .remove = alt_fpga_bridge_remove, + .remove_new = alt_fpga_bridge_remove, .driver = { .name = "altera_hps2fpga_bridge", .of_match_table = of_match_ptr(altera_fpga_of_match), diff --git a/drivers/fpga/dfl-afu-main.c b/drivers/fpga/dfl-afu-main.c index 7f621e96d3b8..c0a75ca360d6 100644 --- a/drivers/fpga/dfl-afu-main.c +++ b/drivers/fpga/dfl-afu-main.c @@ 
-932,15 +932,13 @@ exit: return ret; } -static int afu_remove(struct platform_device *pdev) +static void afu_remove(struct platform_device *pdev) { dev_dbg(&pdev->dev, "%s\n", __func__); dfl_fpga_dev_ops_unregister(pdev); dfl_fpga_dev_feature_uinit(pdev); afu_dev_destroy(pdev); - - return 0; } static const struct attribute_group *afu_dev_groups[] = { @@ -956,7 +954,7 @@ static struct platform_driver afu_driver = { .dev_groups = afu_dev_groups, }, .probe = afu_probe, - .remove = afu_remove, + .remove_new = afu_remove, }; static int __init afu_init(void) diff --git a/drivers/fpga/dfl-fme-br.c b/drivers/fpga/dfl-fme-br.c index 808d1f4d76df..0b01b3895277 100644 --- a/drivers/fpga/dfl-fme-br.c +++ b/drivers/fpga/dfl-fme-br.c @@ -78,7 +78,7 @@ static int fme_br_probe(struct platform_device *pdev) return 0; } -static int fme_br_remove(struct platform_device *pdev) +static void fme_br_remove(struct platform_device *pdev) { struct fpga_bridge *br = platform_get_drvdata(pdev); struct fme_br_priv *priv = br->priv; @@ -89,8 +89,6 @@ static int fme_br_remove(struct platform_device *pdev) put_device(&priv->port_pdev->dev); if (priv->port_ops) dfl_fpga_port_ops_put(priv->port_ops); - - return 0; } static struct platform_driver fme_br_driver = { @@ -98,7 +96,7 @@ static struct platform_driver fme_br_driver = { .name = DFL_FPGA_FME_BRIDGE, }, .probe = fme_br_probe, - .remove = fme_br_remove, + .remove_new = fme_br_remove, }; module_platform_driver(fme_br_driver); diff --git a/drivers/fpga/dfl-fme-main.c b/drivers/fpga/dfl-fme-main.c index 3dcf990bd261..a2b5da0093da 100644 --- a/drivers/fpga/dfl-fme-main.c +++ b/drivers/fpga/dfl-fme-main.c @@ -730,13 +730,11 @@ exit: return ret; } -static int fme_remove(struct platform_device *pdev) +static void fme_remove(struct platform_device *pdev) { dfl_fpga_dev_ops_unregister(pdev); dfl_fpga_dev_feature_uinit(pdev); fme_dev_destroy(pdev); - - return 0; } static const struct attribute_group *fme_dev_groups[] = { @@ -751,7 +749,7 @@ static struct platform_driver fme_driver = { .dev_groups = fme_dev_groups, }, .probe = fme_probe, - .remove = fme_remove, + .remove_new = fme_remove, }; module_platform_driver(fme_driver); diff --git a/drivers/fpga/dfl-fme-region.c b/drivers/fpga/dfl-fme-region.c index 4aebde0a7f1c..71616f8b4982 100644 --- a/drivers/fpga/dfl-fme-region.c +++ b/drivers/fpga/dfl-fme-region.c @@ -61,15 +61,13 @@ eprobe_mgr_put: return ret; } -static int fme_region_remove(struct platform_device *pdev) +static void fme_region_remove(struct platform_device *pdev) { struct fpga_region *region = platform_get_drvdata(pdev); struct fpga_manager *mgr = region->mgr; fpga_region_unregister(region); fpga_mgr_put(mgr); - - return 0; } static struct platform_driver fme_region_driver = { @@ -77,7 +75,7 @@ static struct platform_driver fme_region_driver = { .name = DFL_FPGA_FME_REGION, }, .probe = fme_region_probe, - .remove = fme_region_remove, + .remove_new = fme_region_remove, }; module_platform_driver(fme_region_driver); diff --git a/drivers/fpga/dfl.c b/drivers/fpga/dfl.c index e73f88050f08..e6d12fbab653 100644 --- a/drivers/fpga/dfl.c +++ b/drivers/fpga/dfl.c @@ -2008,8 +2008,8 @@ long dfl_feature_ioctl_set_irq(struct platform_device *pdev, (hdr.start + hdr.count < hdr.start)) return -EINVAL; - fds = memdup_user((void __user *)(arg + sizeof(hdr)), - array_size(hdr.count, sizeof(s32))); + fds = memdup_array_user((void __user *)(arg + sizeof(hdr)), + hdr.count, sizeof(s32)); if (IS_ERR(fds)) return PTR_ERR(fds); diff --git a/drivers/fpga/intel-m10-bmc-sec-update.c 
b/drivers/fpga/intel-m10-bmc-sec-update.c index 31af2e08c825..89851b133709 100644 --- a/drivers/fpga/intel-m10-bmc-sec-update.c +++ b/drivers/fpga/intel-m10-bmc-sec-update.c @@ -730,15 +730,13 @@ fw_name_fail: return ret; } -static int m10bmc_sec_remove(struct platform_device *pdev) +static void m10bmc_sec_remove(struct platform_device *pdev) { struct m10bmc_sec *sec = dev_get_drvdata(&pdev->dev); firmware_upload_unregister(sec->fwl); kfree(sec->fw_name); xa_erase(&fw_upload_xa, sec->fw_name_id); - - return 0; } static const struct platform_device_id intel_m10bmc_sec_ids[] = { @@ -760,7 +758,7 @@ MODULE_DEVICE_TABLE(platform, intel_m10bmc_sec_ids); static struct platform_driver intel_m10bmc_sec_driver = { .probe = m10bmc_sec_probe, - .remove = m10bmc_sec_remove, + .remove_new = m10bmc_sec_remove, .driver = { .name = "intel-m10bmc-sec-update", .dev_groups = m10bmc_sec_attr_groups, diff --git a/drivers/fpga/of-fpga-region.c b/drivers/fpga/of-fpga-region.c index a6affd83f275..8526a5a86f0c 100644 --- a/drivers/fpga/of-fpga-region.c +++ b/drivers/fpga/of-fpga-region.c @@ -425,20 +425,18 @@ eprobe_mgr_put: return ret; } -static int of_fpga_region_remove(struct platform_device *pdev) +static void of_fpga_region_remove(struct platform_device *pdev) { struct fpga_region *region = platform_get_drvdata(pdev); struct fpga_manager *mgr = region->mgr; fpga_region_unregister(region); fpga_mgr_put(mgr); - - return 0; } static struct platform_driver of_fpga_region_driver = { .probe = of_fpga_region_probe, - .remove = of_fpga_region_remove, + .remove_new = of_fpga_region_remove, .driver = { .name = "of-fpga-region", .of_match_table = of_match_ptr(fpga_region_of_match), diff --git a/drivers/fpga/socfpga-a10.c b/drivers/fpga/socfpga-a10.c index cc4861e345c9..4c03513b8f03 100644 --- a/drivers/fpga/socfpga-a10.c +++ b/drivers/fpga/socfpga-a10.c @@ -517,15 +517,13 @@ static int socfpga_a10_fpga_probe(struct platform_device *pdev) return 0; } -static int socfpga_a10_fpga_remove(struct platform_device *pdev) +static void socfpga_a10_fpga_remove(struct platform_device *pdev) { struct fpga_manager *mgr = platform_get_drvdata(pdev); struct a10_fpga_priv *priv = mgr->priv; fpga_mgr_unregister(mgr); clk_disable_unprepare(priv->clk); - - return 0; } static const struct of_device_id socfpga_a10_fpga_of_match[] = { @@ -537,7 +535,7 @@ MODULE_DEVICE_TABLE(of, socfpga_a10_fpga_of_match); static struct platform_driver socfpga_a10_fpga_driver = { .probe = socfpga_a10_fpga_probe, - .remove = socfpga_a10_fpga_remove, + .remove_new = socfpga_a10_fpga_remove, .driver = { .name = "socfpga_a10_fpga_manager", .of_match_table = socfpga_a10_fpga_of_match, diff --git a/drivers/fpga/stratix10-soc.c b/drivers/fpga/stratix10-soc.c index cacb9cc5757e..2c0def7d7cbb 100644 --- a/drivers/fpga/stratix10-soc.c +++ b/drivers/fpga/stratix10-soc.c @@ -436,15 +436,13 @@ probe_err: return ret; } -static int s10_remove(struct platform_device *pdev) +static void s10_remove(struct platform_device *pdev) { struct fpga_manager *mgr = platform_get_drvdata(pdev); struct s10_priv *priv = mgr->priv; fpga_mgr_unregister(mgr); stratix10_svc_free_channel(priv->chan); - - return 0; } static const struct of_device_id s10_of_match[] = { @@ -457,7 +455,7 @@ MODULE_DEVICE_TABLE(of, s10_of_match); static struct platform_driver s10_driver = { .probe = s10_probe, - .remove = s10_remove, + .remove_new = s10_remove, .driver = { .name = "Stratix10 SoC FPGA manager", .of_match_table = of_match_ptr(s10_of_match), diff --git a/drivers/fpga/xilinx-pr-decoupler.c 
b/drivers/fpga/xilinx-pr-decoupler.c index 68835896f180..788dd2f63a65 100644 --- a/drivers/fpga/xilinx-pr-decoupler.c +++ b/drivers/fpga/xilinx-pr-decoupler.c @@ -150,7 +150,7 @@ err_clk: return err; } -static int xlnx_pr_decoupler_remove(struct platform_device *pdev) +static void xlnx_pr_decoupler_remove(struct platform_device *pdev) { struct fpga_bridge *bridge = platform_get_drvdata(pdev); struct xlnx_pr_decoupler_data *p = bridge->priv; @@ -158,13 +158,11 @@ static int xlnx_pr_decoupler_remove(struct platform_device *pdev) fpga_bridge_unregister(bridge); clk_unprepare(p->clk); - - return 0; } static struct platform_driver xlnx_pr_decoupler_driver = { .probe = xlnx_pr_decoupler_probe, - .remove = xlnx_pr_decoupler_remove, + .remove_new = xlnx_pr_decoupler_remove, .driver = { .name = "xlnx_pr_decoupler", .of_match_table = xlnx_pr_decoupler_of_match, diff --git a/drivers/fpga/zynq-fpga.c b/drivers/fpga/zynq-fpga.c index 96611d424a10..0ac93183d201 100644 --- a/drivers/fpga/zynq-fpga.c +++ b/drivers/fpga/zynq-fpga.c @@ -618,7 +618,7 @@ static int zynq_fpga_probe(struct platform_device *pdev) return 0; } -static int zynq_fpga_remove(struct platform_device *pdev) +static void zynq_fpga_remove(struct platform_device *pdev) { struct zynq_fpga_priv *priv; struct fpga_manager *mgr; @@ -629,8 +629,6 @@ static int zynq_fpga_remove(struct platform_device *pdev) fpga_mgr_unregister(mgr); clk_unprepare(priv->clk); - - return 0; } #ifdef CONFIG_OF @@ -644,7 +642,7 @@ MODULE_DEVICE_TABLE(of, zynq_fpga_of_match); static struct platform_driver zynq_fpga_driver = { .probe = zynq_fpga_probe, - .remove = zynq_fpga_remove, + .remove_new = zynq_fpga_remove, .driver = { .name = "zynq_fpga_manager", .of_match_table = of_match_ptr(zynq_fpga_of_match), diff --git a/drivers/greybus/gb-beagleplay.c b/drivers/greybus/gb-beagleplay.c index 43318c1993ba..7d98ae1a8263 100644 --- a/drivers/greybus/gb-beagleplay.c +++ b/drivers/greybus/gb-beagleplay.c @@ -85,17 +85,31 @@ struct hdlc_payload { void *buf; }; +/** + * struct hdlc_greybus_frame - Structure to represent greybus HDLC frame payload + * + * @cport: cport id + * @hdr: greybus operation header + * @payload: greybus message payload + * + * The HDLC payload sent over UART for greybus address has cport preappended to greybus message + */ +struct hdlc_greybus_frame { + __le16 cport; + struct gb_operation_msg_hdr hdr; + u8 payload[]; +} __packed; + static void hdlc_rx_greybus_frame(struct gb_beagleplay *bg, u8 *buf, u16 len) { - u16 cport_id; - struct gb_operation_msg_hdr *hdr = (struct gb_operation_msg_hdr *)buf; - - memcpy(&cport_id, hdr->pad, sizeof(cport_id)); + struct hdlc_greybus_frame *gb_frame = (struct hdlc_greybus_frame *)buf; + u16 cport_id = le16_to_cpu(gb_frame->cport); + u16 gb_msg_len = le16_to_cpu(gb_frame->hdr.size); dev_dbg(&bg->sd->dev, "Greybus Operation %u type %X cport %u status %u received", - hdr->operation_id, hdr->type, cport_id, hdr->result); + gb_frame->hdr.operation_id, gb_frame->hdr.type, cport_id, gb_frame->hdr.result); - greybus_data_rcvd(bg->gb_hd, cport_id, buf, len); + greybus_data_rcvd(bg->gb_hd, cport_id, (u8 *)&gb_frame->hdr, gb_msg_len); } static void hdlc_rx_dbg_frame(const struct gb_beagleplay *bg, const char *buf, u16 len) @@ -336,25 +350,39 @@ static struct serdev_device_ops gb_beagleplay_ops = { .write_wakeup = gb_tty_wakeup, }; +/** + * gb_message_send() - Send greybus message using HDLC over UART + * + * @hd: pointer to greybus host device + * @cport: AP cport where message originates + * @msg: greybus message to send + * 
@mask: gfp mask + * + * Greybus HDLC frame has the following payload: + * 1. le16 cport + * 2. gb_operation_msg_hdr msg_header + * 3. u8 *msg_payload + */ static int gb_message_send(struct gb_host_device *hd, u16 cport, struct gb_message *msg, gfp_t mask) { struct gb_beagleplay *bg = dev_get_drvdata(&hd->dev); - struct hdlc_payload payloads[2]; + struct hdlc_payload payloads[3]; + __le16 cport_id = cpu_to_le16(cport); dev_dbg(&hd->dev, "Sending greybus message with Operation %u, Type: %X on Cport %u", msg->header->operation_id, msg->header->type, cport); - if (msg->header->size > RX_HDLC_PAYLOAD) + if (le16_to_cpu(msg->header->size) > RX_HDLC_PAYLOAD) return dev_err_probe(&hd->dev, -E2BIG, "Greybus message too big"); - memcpy(msg->header->pad, &cport, sizeof(cport)); - - payloads[0].buf = msg->header; - payloads[0].len = sizeof(*msg->header); - payloads[1].buf = msg->payload; - payloads[1].len = msg->payload_size; + payloads[0].buf = &cport_id; + payloads[0].len = sizeof(cport_id); + payloads[1].buf = msg->header; + payloads[1].len = sizeof(*msg->header); + payloads[2].buf = msg->payload; + payloads[2].len = msg->payload_size; - hdlc_tx_frames(bg, ADDRESS_GREYBUS, 0x03, payloads, 2); + hdlc_tx_frames(bg, ADDRESS_GREYBUS, 0x03, payloads, 3); greybus_message_sent(bg->gb_hd, msg, 0); return 0; diff --git a/drivers/hwtracing/coresight/coresight-core.c b/drivers/hwtracing/coresight/coresight-core.c index 9fabe00a40d6..d7f0e231feb9 100644 --- a/drivers/hwtracing/coresight/coresight-core.c +++ b/drivers/hwtracing/coresight/coresight-core.c @@ -1093,6 +1093,7 @@ static int coresight_validate_source(struct coresight_device *csdev, if (subtype != CORESIGHT_DEV_SUBTYPE_SOURCE_PROC && subtype != CORESIGHT_DEV_SUBTYPE_SOURCE_SOFTWARE && + subtype != CORESIGHT_DEV_SUBTYPE_SOURCE_TPDM && subtype != CORESIGHT_DEV_SUBTYPE_SOURCE_OTHERS) { dev_err(&csdev->dev, "wrong device subtype in %s\n", function); return -EINVAL; @@ -1162,6 +1163,7 @@ int coresight_enable(struct coresight_device *csdev) per_cpu(tracer_path, cpu) = path; break; case CORESIGHT_DEV_SUBTYPE_SOURCE_SOFTWARE: + case CORESIGHT_DEV_SUBTYPE_SOURCE_TPDM: case CORESIGHT_DEV_SUBTYPE_SOURCE_OTHERS: /* * Use the hash of source's device name as ID @@ -1212,6 +1214,7 @@ void coresight_disable(struct coresight_device *csdev) per_cpu(tracer_path, cpu) = NULL; break; case CORESIGHT_DEV_SUBTYPE_SOURCE_SOFTWARE: + case CORESIGHT_DEV_SUBTYPE_SOURCE_TPDM: case CORESIGHT_DEV_SUBTYPE_SOURCE_OTHERS: hash = hashlen_hash(hashlen_string(NULL, dev_name(&csdev->dev))); /* Find the path by the hash. 
*/ diff --git a/drivers/hwtracing/coresight/coresight-dummy.c b/drivers/hwtracing/coresight/coresight-dummy.c index e4deafae7bc2..ac70c0b491be 100644 --- a/drivers/hwtracing/coresight/coresight-dummy.c +++ b/drivers/hwtracing/coresight/coresight-dummy.c @@ -122,14 +122,13 @@ static int dummy_probe(struct platform_device *pdev) return 0; } -static int dummy_remove(struct platform_device *pdev) +static void dummy_remove(struct platform_device *pdev) { struct dummy_drvdata *drvdata = platform_get_drvdata(pdev); struct device *dev = &pdev->dev; pm_runtime_disable(dev); coresight_unregister(drvdata->csdev); - return 0; } static const struct of_device_id dummy_match[] = { @@ -140,7 +139,7 @@ static const struct of_device_id dummy_match[] = { static struct platform_driver dummy_driver = { .probe = dummy_probe, - .remove = dummy_remove, + .remove_new = dummy_remove, .driver = { .name = "coresight-dummy", .of_match_table = dummy_match, diff --git a/drivers/hwtracing/coresight/coresight-etm-perf.c b/drivers/hwtracing/coresight/coresight-etm-perf.c index 89e8ed214ea4..a52cfcce25d6 100644 --- a/drivers/hwtracing/coresight/coresight-etm-perf.c +++ b/drivers/hwtracing/coresight/coresight-etm-perf.c @@ -68,6 +68,7 @@ PMU_FORMAT_ATTR(preset, "config:0-3"); PMU_FORMAT_ATTR(sinkid, "config2:0-31"); /* config ID - set if a system configuration is selected */ PMU_FORMAT_ATTR(configid, "config2:32-63"); +PMU_FORMAT_ATTR(cc_threshold, "config3:0-11"); /* @@ -101,6 +102,7 @@ static struct attribute *etm_config_formats_attr[] = { &format_attr_preset.attr, &format_attr_configid.attr, &format_attr_branch_broadcast.attr, + &format_attr_cc_threshold.attr, NULL, }; diff --git a/drivers/hwtracing/coresight/coresight-etm4x-core.c b/drivers/hwtracing/coresight/coresight-etm4x-core.c index 34aee59dd147..ce1995a2827f 100644 --- a/drivers/hwtracing/coresight/coresight-etm4x-core.c +++ b/drivers/hwtracing/coresight/coresight-etm4x-core.c @@ -644,7 +644,7 @@ static int etm4_parse_event_config(struct coresight_device *csdev, struct etmv4_config *config = &drvdata->config; struct perf_event_attr *attr = &event->attr; unsigned long cfg_hash; - int preset; + int preset, cc_threshold; /* Clear configuration from previous run */ memset(config, 0, sizeof(struct etmv4_config)); @@ -667,7 +667,12 @@ static int etm4_parse_event_config(struct coresight_device *csdev, if (attr->config & BIT(ETM_OPT_CYCACC)) { config->cfg |= TRCCONFIGR_CCI; /* TRM: Must program this for cycacc to work */ - config->ccctlr = ETM_CYC_THRESHOLD_DEFAULT; + cc_threshold = attr->config3 & ETM_CYC_THRESHOLD_MASK; + if (!cc_threshold) + cc_threshold = ETM_CYC_THRESHOLD_DEFAULT; + if (cc_threshold < drvdata->ccitmin) + cc_threshold = drvdata->ccitmin; + config->ccctlr = cc_threshold; } if (attr->config & BIT(ETM_OPT_TS)) { /* @@ -1150,6 +1155,41 @@ static void cpu_detect_trace_filtering(struct etmv4_drvdata *drvdata) drvdata->trfcr = trfcr; } +/* + * The following errata on applicable cpu ranges affect the CCITMIN field + * in the TRCIDR3 register. A software read of the field returns 0x100, limiting + * the cycle threshold granularity, whereas the right value should have + * been 0x4, which is well supported in the hardware. 
+ */ +static struct midr_range etm_wrong_ccitmin_cpus[] = { + /* Erratum #1490853 - Cortex-A76 */ + MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 4, 0), + /* Erratum #1490853 - Neoverse-N1 */ + MIDR_RANGE(MIDR_NEOVERSE_N1, 0, 0, 4, 0), + /* Erratum #1491015 - Cortex-A77 */ + MIDR_RANGE(MIDR_CORTEX_A77, 0, 0, 1, 0), + /* Erratum #1502854 - Cortex-X1 */ + MIDR_REV(MIDR_CORTEX_X1, 0, 0), + /* Erratum #1619801 - Neoverse-V1 */ + MIDR_REV(MIDR_NEOVERSE_V1, 0, 0), + {}, +}; + +static void etm4_fixup_wrong_ccitmin(struct etmv4_drvdata *drvdata) +{ + /* + * Erratum affected cpus will read 256 as the minimum + * instruction trace cycle counting threshold whereas + * the correct value should be 4 instead. Override the + * recorded value for 'drvdata->ccitmin' to workaround + * this problem. + */ + if (is_midr_in_range_list(read_cpuid_id(), etm_wrong_ccitmin_cpus)) { + if (drvdata->ccitmin == 256) + drvdata->ccitmin = 4; + } +} + static void etm4_init_arch_data(void *info) { u32 etmidr0; @@ -1214,6 +1254,8 @@ static void etm4_init_arch_data(void *info) etmidr3 = etm4x_relaxed_read32(csa, TRCIDR3); /* CCITMIN, bits[11:0] minimum threshold value that can be programmed */ drvdata->ccitmin = FIELD_GET(TRCIDR3_CCITMIN_MASK, etmidr3); + etm4_fixup_wrong_ccitmin(drvdata); + /* EXLEVEL_S, bits[19:16] Secure state instruction tracing */ drvdata->s_ex_level = FIELD_GET(TRCIDR3_EXLEVEL_S_MASK, etmidr3); drvdata->config.s_ex_level = drvdata->s_ex_level; @@ -2261,7 +2303,7 @@ static void etm4_remove_amba(struct amba_device *adev) etm4_remove_dev(drvdata); } -static int etm4_remove_platform_dev(struct platform_device *pdev) +static void etm4_remove_platform_dev(struct platform_device *pdev) { struct etmv4_drvdata *drvdata = dev_get_drvdata(&pdev->dev); @@ -2271,8 +2313,6 @@ static int etm4_remove_platform_dev(struct platform_device *pdev) if (drvdata && !IS_ERR_OR_NULL(drvdata->pclk)) clk_put(drvdata->pclk); - - return 0; } static const struct amba_id etm4_ids[] = { @@ -2358,7 +2398,7 @@ MODULE_DEVICE_TABLE(acpi, etm4x_acpi_ids); static struct platform_driver etm4_platform_driver = { .probe = etm4_probe_platform_dev, - .remove = etm4_remove_platform_dev, + .remove_new = etm4_remove_platform_dev, .driver = { .name = "coresight-etm4x", .of_match_table = etm4_sysreg_match, diff --git a/drivers/hwtracing/coresight/coresight-etm4x.h b/drivers/hwtracing/coresight/coresight-etm4x.h index 20e2e4cb7614..da17b6c49b0f 100644 --- a/drivers/hwtracing/coresight/coresight-etm4x.h +++ b/drivers/hwtracing/coresight/coresight-etm4x.h @@ -1036,7 +1036,7 @@ struct etmv4_drvdata { u8 ctxid_size; u8 vmid_size; u8 ccsize; - u8 ccitmin; + u16 ccitmin; u8 s_ex_level; u8 ns_ex_level; u8 q_support; diff --git a/drivers/hwtracing/coresight/coresight-funnel.c b/drivers/hwtracing/coresight/coresight-funnel.c index b8e150e45b27..a5b1fc787766 100644 --- a/drivers/hwtracing/coresight/coresight-funnel.c +++ b/drivers/hwtracing/coresight/coresight-funnel.c @@ -335,11 +335,10 @@ static int static_funnel_probe(struct platform_device *pdev) return ret; } -static int static_funnel_remove(struct platform_device *pdev) +static void static_funnel_remove(struct platform_device *pdev) { funnel_remove(&pdev->dev); pm_runtime_disable(&pdev->dev); - return 0; } static const struct of_device_id static_funnel_match[] = { @@ -360,7 +359,7 @@ MODULE_DEVICE_TABLE(acpi, static_funnel_ids); static struct platform_driver static_funnel_driver = { .probe = static_funnel_probe, - .remove = static_funnel_remove, + .remove_new = static_funnel_remove, .driver = { .name = 
"coresight-static-funnel", /* THIS_MODULE is taken care of by platform_driver_register() */ diff --git a/drivers/hwtracing/coresight/coresight-replicator.c b/drivers/hwtracing/coresight/coresight-replicator.c index b6be73034996..91d93060dda5 100644 --- a/drivers/hwtracing/coresight/coresight-replicator.c +++ b/drivers/hwtracing/coresight/coresight-replicator.c @@ -320,11 +320,10 @@ static int static_replicator_probe(struct platform_device *pdev) return ret; } -static int static_replicator_remove(struct platform_device *pdev) +static void static_replicator_remove(struct platform_device *pdev) { replicator_remove(&pdev->dev); pm_runtime_disable(&pdev->dev); - return 0; } #ifdef CONFIG_PM @@ -373,7 +372,7 @@ MODULE_DEVICE_TABLE(acpi, static_replicator_acpi_ids); static struct platform_driver static_replicator_driver = { .probe = static_replicator_probe, - .remove = static_replicator_remove, + .remove_new = static_replicator_remove, .driver = { .name = "coresight-static-replicator", /* THIS_MODULE is taken care of by platform_driver_register() */ diff --git a/drivers/hwtracing/coresight/coresight-tmc-core.c b/drivers/hwtracing/coresight/coresight-tmc-core.c index c106d142e632..7ec5365e2b64 100644 --- a/drivers/hwtracing/coresight/coresight-tmc-core.c +++ b/drivers/hwtracing/coresight/coresight-tmc-core.c @@ -10,6 +10,7 @@ #include <linux/device.h> #include <linux/idr.h> #include <linux/io.h> +#include <linux/iommu.h> #include <linux/err.h> #include <linux/fs.h> #include <linux/miscdevice.h> @@ -344,7 +345,14 @@ static const struct attribute_group coresight_tmc_mgmt_group = { .name = "mgmt", }; -static const struct attribute_group *coresight_tmc_groups[] = { +static const struct attribute_group *coresight_etf_groups[] = { + &coresight_tmc_group, + &coresight_tmc_mgmt_group, + NULL, +}; + +static const struct attribute_group *coresight_etr_groups[] = { + &coresight_etr_group, &coresight_tmc_group, &coresight_tmc_mgmt_group, NULL, @@ -465,6 +473,7 @@ static int tmc_probe(struct amba_device *adev, const struct amba_id *id) drvdata->memwidth = tmc_get_memwidth(devid); /* This device is not associated with a session */ drvdata->pid = -1; + drvdata->etr_mode = ETR_MODE_AUTO; if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) { drvdata->size = tmc_etr_get_default_buffer_size(dev); @@ -474,16 +483,17 @@ static int tmc_probe(struct amba_device *adev, const struct amba_id *id) } desc.dev = dev; - desc.groups = coresight_tmc_groups; switch (drvdata->config_type) { case TMC_CONFIG_TYPE_ETB: + desc.groups = coresight_etf_groups; desc.type = CORESIGHT_DEV_TYPE_SINK; desc.subtype.sink_subtype = CORESIGHT_DEV_SUBTYPE_SINK_BUFFER; desc.ops = &tmc_etb_cs_ops; dev_list = &etb_devs; break; case TMC_CONFIG_TYPE_ETR: + desc.groups = coresight_etr_groups; desc.type = CORESIGHT_DEV_TYPE_SINK; desc.subtype.sink_subtype = CORESIGHT_DEV_SUBTYPE_SINK_SYSMEM; desc.ops = &tmc_etr_cs_ops; @@ -496,6 +506,7 @@ static int tmc_probe(struct amba_device *adev, const struct amba_id *id) dev_list = &etr_devs; break; case TMC_CONFIG_TYPE_ETF: + desc.groups = coresight_etf_groups; desc.type = CORESIGHT_DEV_TYPE_LINKSINK; desc.subtype.sink_subtype = CORESIGHT_DEV_SUBTYPE_SINK_BUFFER; desc.subtype.link_subtype = CORESIGHT_DEV_SUBTYPE_LINK_FIFO; diff --git a/drivers/hwtracing/coresight/coresight-tmc-etr.c b/drivers/hwtracing/coresight/coresight-tmc-etr.c index 8311e1028ddb..af02ba5d5f15 100644 --- a/drivers/hwtracing/coresight/coresight-tmc-etr.c +++ b/drivers/hwtracing/coresight/coresight-tmc-etr.c @@ -26,6 +26,12 @@ struct etr_flat_buf { 
size_t size; }; +struct etr_buf_hw { + bool has_iommu; + bool has_etr_sg; + bool has_catu; +}; + /* * etr_perf_buffer - Perf buffer used for ETR * @drvdata - The ETR drvdaga this buffer has been allocated for. @@ -830,6 +836,22 @@ static inline int tmc_etr_mode_alloc_buf(int mode, } } +static void get_etr_buf_hw(struct device *dev, struct etr_buf_hw *buf_hw) +{ + struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent); + + buf_hw->has_iommu = iommu_get_domain_for_dev(dev->parent); + buf_hw->has_etr_sg = tmc_etr_has_cap(drvdata, TMC_ETR_SG); + buf_hw->has_catu = !!tmc_etr_get_catu_device(drvdata); +} + +static bool etr_can_use_flat_mode(struct etr_buf_hw *buf_hw, ssize_t etr_buf_size) +{ + bool has_sg = buf_hw->has_catu || buf_hw->has_etr_sg; + + return !has_sg || buf_hw->has_iommu || etr_buf_size < SZ_1M; +} + /* * tmc_alloc_etr_buf: Allocate a buffer use by ETR. * @drvdata : ETR device details. @@ -843,23 +865,22 @@ static struct etr_buf *tmc_alloc_etr_buf(struct tmc_drvdata *drvdata, int node, void **pages) { int rc = -ENOMEM; - bool has_etr_sg, has_iommu; - bool has_sg, has_catu; struct etr_buf *etr_buf; + struct etr_buf_hw buf_hw; struct device *dev = &drvdata->csdev->dev; - has_etr_sg = tmc_etr_has_cap(drvdata, TMC_ETR_SG); - has_iommu = iommu_get_domain_for_dev(dev->parent); - has_catu = !!tmc_etr_get_catu_device(drvdata); - - has_sg = has_catu || has_etr_sg; - + get_etr_buf_hw(dev, &buf_hw); etr_buf = kzalloc(sizeof(*etr_buf), GFP_KERNEL); if (!etr_buf) return ERR_PTR(-ENOMEM); etr_buf->size = size; + /* If there is user directive for buffer mode, try that first */ + if (drvdata->etr_mode != ETR_MODE_AUTO) + rc = tmc_etr_mode_alloc_buf(drvdata->etr_mode, drvdata, + etr_buf, node, pages); + /* * If we have to use an existing list of pages, we cannot reliably * use a contiguous DMA memory (even if we have an IOMMU). Otherwise, @@ -872,14 +893,13 @@ static struct etr_buf *tmc_alloc_etr_buf(struct tmc_drvdata *drvdata, * Fallback to available mechanisms. 
* */ - if (!pages && - (!has_sg || has_iommu || size < SZ_1M)) + if (rc && !pages && etr_can_use_flat_mode(&buf_hw, size)) rc = tmc_etr_mode_alloc_buf(ETR_MODE_FLAT, drvdata, etr_buf, node, pages); - if (rc && has_etr_sg) + if (rc && buf_hw.has_etr_sg) rc = tmc_etr_mode_alloc_buf(ETR_MODE_ETR_SG, drvdata, etr_buf, node, pages); - if (rc && has_catu) + if (rc && buf_hw.has_catu) rc = tmc_etr_mode_alloc_buf(ETR_MODE_CATU, drvdata, etr_buf, node, pages); if (rc) { @@ -1804,3 +1824,70 @@ int tmc_read_unprepare_etr(struct tmc_drvdata *drvdata) return 0; } + +static const char *const buf_modes_str[] = { + [ETR_MODE_FLAT] = "flat", + [ETR_MODE_ETR_SG] = "tmc-sg", + [ETR_MODE_CATU] = "catu", + [ETR_MODE_AUTO] = "auto", +}; + +static ssize_t buf_modes_available_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct etr_buf_hw buf_hw; + ssize_t size = 0; + + get_etr_buf_hw(dev, &buf_hw); + size += sysfs_emit(buf, "%s ", buf_modes_str[ETR_MODE_AUTO]); + size += sysfs_emit_at(buf, size, "%s ", buf_modes_str[ETR_MODE_FLAT]); + if (buf_hw.has_etr_sg) + size += sysfs_emit_at(buf, size, "%s ", buf_modes_str[ETR_MODE_ETR_SG]); + + if (buf_hw.has_catu) + size += sysfs_emit_at(buf, size, "%s ", buf_modes_str[ETR_MODE_CATU]); + + size += sysfs_emit_at(buf, size, "\n"); + return size; +} +static DEVICE_ATTR_RO(buf_modes_available); + +static ssize_t buf_mode_preferred_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent); + + return sysfs_emit(buf, "%s\n", buf_modes_str[drvdata->etr_mode]); +} + +static ssize_t buf_mode_preferred_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct etr_buf_hw buf_hw; + + get_etr_buf_hw(dev, &buf_hw); + if (sysfs_streq(buf, buf_modes_str[ETR_MODE_FLAT])) + drvdata->etr_mode = ETR_MODE_FLAT; + else if (sysfs_streq(buf, buf_modes_str[ETR_MODE_ETR_SG]) && buf_hw.has_etr_sg) + drvdata->etr_mode = ETR_MODE_ETR_SG; + else if (sysfs_streq(buf, buf_modes_str[ETR_MODE_CATU]) && buf_hw.has_catu) + drvdata->etr_mode = ETR_MODE_CATU; + else if (sysfs_streq(buf, buf_modes_str[ETR_MODE_AUTO])) + drvdata->etr_mode = ETR_MODE_AUTO; + else + return -EINVAL; + return size; +} +static DEVICE_ATTR_RW(buf_mode_preferred); + +static struct attribute *coresight_etr_attrs[] = { + &dev_attr_buf_modes_available.attr, + &dev_attr_buf_mode_preferred.attr, + NULL, +}; + +const struct attribute_group coresight_etr_group = { + .attrs = coresight_etr_attrs, +}; diff --git a/drivers/hwtracing/coresight/coresight-tmc.h b/drivers/hwtracing/coresight/coresight-tmc.h index 0ee48c5ba764..8dcb426ac3e7 100644 --- a/drivers/hwtracing/coresight/coresight-tmc.h +++ b/drivers/hwtracing/coresight/coresight-tmc.h @@ -135,6 +135,7 @@ enum etr_mode { ETR_MODE_FLAT, /* Uses contiguous flat buffer */ ETR_MODE_ETR_SG, /* Uses in-built TMC ETR SG mechanism */ ETR_MODE_CATU, /* Use SG mechanism in CATU */ + ETR_MODE_AUTO, /* Use the default mechanism */ }; struct etr_buf_operations; @@ -207,6 +208,7 @@ struct tmc_drvdata { enum tmc_mem_intf_width memwidth; u32 trigger_cntr; u32 etr_caps; + enum etr_mode etr_mode; struct idr idr; struct mutex idr_mutex; struct etr_buf *sysfs_buf; @@ -334,5 +336,6 @@ void tmc_etr_set_catu_ops(const struct etr_buf_operations *catu); void tmc_etr_remove_catu_ops(void); struct etr_buf *tmc_etr_get_buffer(struct coresight_device *csdev, enum cs_mode mode, void *data); +extern const struct 
attribute_group coresight_etr_group; #endif diff --git a/drivers/hwtracing/coresight/coresight-tpda.c b/drivers/hwtracing/coresight/coresight-tpda.c index 8d2b9d29237d..5f82737c37bb 100644 --- a/drivers/hwtracing/coresight/coresight-tpda.c +++ b/drivers/hwtracing/coresight/coresight-tpda.c @@ -21,6 +21,80 @@ DEFINE_CORESIGHT_DEVLIST(tpda_devs, "tpda"); +static bool coresight_device_is_tpdm(struct coresight_device *csdev) +{ + return (csdev->type == CORESIGHT_DEV_TYPE_SOURCE) && + (csdev->subtype.source_subtype == + CORESIGHT_DEV_SUBTYPE_SOURCE_TPDM); +} + +/* + * Read the DSB element size from the TPDM device. + * Returns: + * The DSB element size read from the devicetree, if available. + * 0 - Otherwise, with a warning once. + */ +static int tpdm_read_dsb_element_size(struct coresight_device *csdev) +{ + int rc = 0; + u8 size = 0; + + rc = fwnode_property_read_u8(dev_fwnode(csdev->dev.parent), + "qcom,dsb-element-size", &size); + if (rc) + dev_warn_once(&csdev->dev, + "Failed to read TPDM DSB Element size: %d\n", rc); + + return size; +} + +/* + * Search for and read the element data size from the TPDM node in + * the devicetree. Each input port of the TPDA is connected to + * a TPDM. Different TPDMs support different types of datasets, + * and some may support more than one type of dataset. + * Parameter "inport" is used to pass in the input port number + * of the TPDA, and it is set to -1 in the recursive call. + */ +static int tpda_get_element_size(struct coresight_device *csdev, + int inport) +{ + int dsb_size = -ENOENT; + int i, size; + struct coresight_device *in; + + for (i = 0; i < csdev->pdata->nr_inconns; i++) { + in = csdev->pdata->in_conns[i]->src_dev; + if (!in) + continue; + + /* Ignore the paths that do not match port */ + if (inport > 0 && + csdev->pdata->in_conns[i]->dest_port != inport) + continue; + + if (coresight_device_is_tpdm(in)) { + size = tpdm_read_dsb_element_size(in); + } else { + /* Recurse down the path */ + size = tpda_get_element_size(in, -1); + } + + if (size < 0) + return size; + + if (dsb_size < 0) { + /* Found a size, save it. 
*/ + dsb_size = size; + } else { + /* Found duplicate TPDMs */ + return -EEXIST; + } + } + + return dsb_size; +} + /* Settings pre enabling port control register */ static void tpda_enable_pre_port(struct tpda_drvdata *drvdata) { @@ -32,26 +106,55 @@ static void tpda_enable_pre_port(struct tpda_drvdata *drvdata) writel_relaxed(val, drvdata->base + TPDA_CR); } -static void tpda_enable_port(struct tpda_drvdata *drvdata, int port) +static int tpda_enable_port(struct tpda_drvdata *drvdata, int port) { u32 val; + int size; val = readl_relaxed(drvdata->base + TPDA_Pn_CR(port)); + /* + * Configure aggregator port n DSB data set element size + * Set the bit to 0 if the size is 32 + * Set the bit to 1 if the size is 64 + */ + size = tpda_get_element_size(drvdata->csdev, port); + switch (size) { + case 32: + val &= ~TPDA_Pn_CR_DSBSIZE; + break; + case 64: + val |= TPDA_Pn_CR_DSBSIZE; + break; + case 0: + return -EEXIST; + case -EEXIST: + dev_warn_once(&drvdata->csdev->dev, + "Detected multiple TPDMs on port %d", -EEXIST); + return -EEXIST; + default: + return -EINVAL; + } + /* Enable the port */ val |= TPDA_Pn_CR_ENA; writel_relaxed(val, drvdata->base + TPDA_Pn_CR(port)); + + return 0; } -static void __tpda_enable(struct tpda_drvdata *drvdata, int port) +static int __tpda_enable(struct tpda_drvdata *drvdata, int port) { + int ret; + CS_UNLOCK(drvdata->base); if (!drvdata->csdev->enable) tpda_enable_pre_port(drvdata); - tpda_enable_port(drvdata, port); - + ret = tpda_enable_port(drvdata, port); CS_LOCK(drvdata->base); + + return ret; } static int tpda_enable(struct coresight_device *csdev, @@ -59,16 +162,19 @@ static int tpda_enable(struct coresight_device *csdev, struct coresight_connection *out) { struct tpda_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); + int ret = 0; spin_lock(&drvdata->spinlock); - if (atomic_read(&in->dest_refcnt) == 0) - __tpda_enable(drvdata, in->dest_port); + if (atomic_read(&in->dest_refcnt) == 0) { + ret = __tpda_enable(drvdata, in->dest_port); + if (!ret) { + atomic_inc(&in->dest_refcnt); + dev_dbg(drvdata->dev, "TPDA inport %d enabled.\n", in->dest_port); + } + } - atomic_inc(&in->dest_refcnt); spin_unlock(&drvdata->spinlock); - - dev_dbg(drvdata->dev, "TPDA inport %d enabled.\n", in->dest_port); - return 0; + return ret; } static void __tpda_disable(struct tpda_drvdata *drvdata, int port) diff --git a/drivers/hwtracing/coresight/coresight-tpda.h b/drivers/hwtracing/coresight/coresight-tpda.h index 0399678df312..b3b38fd41b64 100644 --- a/drivers/hwtracing/coresight/coresight-tpda.h +++ b/drivers/hwtracing/coresight/coresight-tpda.h @@ -10,6 +10,8 @@ #define TPDA_Pn_CR(n) (0x004 + (n * 4)) /* Aggregator port enable bit */ #define TPDA_Pn_CR_ENA BIT(0) +/* Aggregator port DSB data set element size bit */ +#define TPDA_Pn_CR_DSBSIZE BIT(8) #define TPDA_MAX_INPORTS 32 diff --git a/drivers/hwtracing/coresight/coresight-tpdm.c b/drivers/hwtracing/coresight/coresight-tpdm.c index f4854af0431e..97654aa4b772 100644 --- a/drivers/hwtracing/coresight/coresight-tpdm.c +++ b/drivers/hwtracing/coresight/coresight-tpdm.c @@ -4,6 +4,7 @@ */ #include <linux/amba/bus.h> +#include <linux/bitfield.h> #include <linux/bitmap.h> #include <linux/coresight.h> #include <linux/coresight-pmu.h> @@ -20,23 +21,265 @@ DEFINE_CORESIGHT_DEVLIST(tpdm_devs, "tpdm"); -static void tpdm_enable_dsb(struct tpdm_drvdata *drvdata) +/* Read dataset array member with the index number */ +static ssize_t tpdm_simple_dataset_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct 
tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct tpdm_dataset_attribute *tpdm_attr = + container_of(attr, struct tpdm_dataset_attribute, attr); + + switch (tpdm_attr->mem) { + case DSB_EDGE_CTRL: + if (tpdm_attr->idx >= TPDM_DSB_MAX_EDCR) + return -EINVAL; + return sysfs_emit(buf, "0x%x\n", + drvdata->dsb->edge_ctrl[tpdm_attr->idx]); + case DSB_EDGE_CTRL_MASK: + if (tpdm_attr->idx >= TPDM_DSB_MAX_EDCMR) + return -EINVAL; + return sysfs_emit(buf, "0x%x\n", + drvdata->dsb->edge_ctrl_mask[tpdm_attr->idx]); + case DSB_TRIG_PATT: + if (tpdm_attr->idx >= TPDM_DSB_MAX_PATT) + return -EINVAL; + return sysfs_emit(buf, "0x%x\n", + drvdata->dsb->trig_patt[tpdm_attr->idx]); + case DSB_TRIG_PATT_MASK: + if (tpdm_attr->idx >= TPDM_DSB_MAX_PATT) + return -EINVAL; + return sysfs_emit(buf, "0x%x\n", + drvdata->dsb->trig_patt_mask[tpdm_attr->idx]); + case DSB_PATT: + if (tpdm_attr->idx >= TPDM_DSB_MAX_PATT) + return -EINVAL; + return sysfs_emit(buf, "0x%x\n", + drvdata->dsb->patt_val[tpdm_attr->idx]); + case DSB_PATT_MASK: + if (tpdm_attr->idx >= TPDM_DSB_MAX_PATT) + return -EINVAL; + return sysfs_emit(buf, "0x%x\n", + drvdata->dsb->patt_mask[tpdm_attr->idx]); + case DSB_MSR: + if (tpdm_attr->idx >= drvdata->dsb_msr_num) + return -EINVAL; + return sysfs_emit(buf, "0x%x\n", + drvdata->dsb->msr[tpdm_attr->idx]); + } + return -EINVAL; +} + +/* Write dataset array member with the index number */ +static ssize_t tpdm_simple_dataset_store(struct device *dev, + struct device_attribute *attr, + const char *buf, + size_t size) +{ + unsigned long val; + ssize_t ret = size; + + struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct tpdm_dataset_attribute *tpdm_attr = + container_of(attr, struct tpdm_dataset_attribute, attr); + + if (kstrtoul(buf, 0, &val)) + return -EINVAL; + + spin_lock(&drvdata->spinlock); + switch (tpdm_attr->mem) { + case DSB_TRIG_PATT: + if (tpdm_attr->idx < TPDM_DSB_MAX_PATT) + drvdata->dsb->trig_patt[tpdm_attr->idx] = val; + else + ret = -EINVAL; + break; + case DSB_TRIG_PATT_MASK: + if (tpdm_attr->idx < TPDM_DSB_MAX_PATT) + drvdata->dsb->trig_patt_mask[tpdm_attr->idx] = val; + else + ret = -EINVAL; + break; + case DSB_PATT: + if (tpdm_attr->idx < TPDM_DSB_MAX_PATT) + drvdata->dsb->patt_val[tpdm_attr->idx] = val; + else + ret = -EINVAL; + break; + case DSB_PATT_MASK: + if (tpdm_attr->idx < TPDM_DSB_MAX_PATT) + drvdata->dsb->patt_mask[tpdm_attr->idx] = val; + else + ret = -EINVAL; + break; + case DSB_MSR: + if (tpdm_attr->idx < drvdata->dsb_msr_num) + drvdata->dsb->msr[tpdm_attr->idx] = val; + else + ret = -EINVAL; + break; + default: + ret = -EINVAL; + } + spin_unlock(&drvdata->spinlock); + + return ret; +} + +static bool tpdm_has_dsb_dataset(struct tpdm_drvdata *drvdata) +{ + return (drvdata->datasets & TPDM_PIDR0_DS_DSB); +} + +static umode_t tpdm_dsb_is_visible(struct kobject *kobj, + struct attribute *attr, int n) +{ + struct device *dev = kobj_to_dev(kobj); + struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent); + + if (drvdata && tpdm_has_dsb_dataset(drvdata)) + return attr->mode; + + return 0; +} + +static umode_t tpdm_dsb_msr_is_visible(struct kobject *kobj, + struct attribute *attr, int n) +{ + struct device *dev = kobj_to_dev(kobj); + struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent); + struct device_attribute *dev_attr = + container_of(attr, struct device_attribute, attr); + struct tpdm_dataset_attribute *tpdm_attr = + container_of(dev_attr, struct tpdm_dataset_attribute, attr); + + if (tpdm_attr->idx < drvdata->dsb_msr_num) + return 
attr->mode; + + return 0; +} + +static void tpdm_reset_datasets(struct tpdm_drvdata *drvdata) +{ + if (tpdm_has_dsb_dataset(drvdata)) { + memset(drvdata->dsb, 0, sizeof(struct dsb_dataset)); + + drvdata->dsb->trig_ts = true; + drvdata->dsb->trig_type = false; + } +} + +static void set_dsb_mode(struct tpdm_drvdata *drvdata, u32 *val) +{ + u32 mode; + + /* Set the test accurate mode */ + mode = TPDM_DSB_MODE_TEST(drvdata->dsb->mode); + *val &= ~TPDM_DSB_CR_TEST_MODE; + *val |= FIELD_PREP(TPDM_DSB_CR_TEST_MODE, mode); + + /* Set the byte lane for high-performance mode */ + mode = TPDM_DSB_MODE_HPBYTESEL(drvdata->dsb->mode); + *val &= ~TPDM_DSB_CR_HPSEL; + *val |= FIELD_PREP(TPDM_DSB_CR_HPSEL, mode); + + /* Set the performance mode */ + if (drvdata->dsb->mode & TPDM_DSB_MODE_PERF) + *val |= TPDM_DSB_CR_MODE; + else + *val &= ~TPDM_DSB_CR_MODE; +} + +static void set_dsb_tier(struct tpdm_drvdata *drvdata) { u32 val; - /* Set the enable bit of DSB control register to 1 */ + val = readl_relaxed(drvdata->base + TPDM_DSB_TIER); + + /* Clear all relevant fields */ + val &= ~(TPDM_DSB_TIER_PATT_TSENAB | TPDM_DSB_TIER_PATT_TYPE | + TPDM_DSB_TIER_XTRIG_TSENAB); + + /* Set pattern timestamp type and enablement */ + if (drvdata->dsb->patt_ts) { + val |= TPDM_DSB_TIER_PATT_TSENAB; + if (drvdata->dsb->patt_type) + val |= TPDM_DSB_TIER_PATT_TYPE; + else + val &= ~TPDM_DSB_TIER_PATT_TYPE; + } else { + val &= ~TPDM_DSB_TIER_PATT_TSENAB; + } + + /* Set trigger timestamp */ + if (drvdata->dsb->trig_ts) + val |= TPDM_DSB_TIER_XTRIG_TSENAB; + else + val &= ~TPDM_DSB_TIER_XTRIG_TSENAB; + + writel_relaxed(val, drvdata->base + TPDM_DSB_TIER); +} + +static void set_dsb_msr(struct tpdm_drvdata *drvdata) +{ + int i; + + for (i = 0; i < drvdata->dsb_msr_num; i++) + writel_relaxed(drvdata->dsb->msr[i], + drvdata->base + TPDM_DSB_MSR(i)); +} + +static void tpdm_enable_dsb(struct tpdm_drvdata *drvdata) +{ + u32 val, i; + + for (i = 0; i < TPDM_DSB_MAX_EDCR; i++) + writel_relaxed(drvdata->dsb->edge_ctrl[i], + drvdata->base + TPDM_DSB_EDCR(i)); + for (i = 0; i < TPDM_DSB_MAX_EDCMR; i++) + writel_relaxed(drvdata->dsb->edge_ctrl_mask[i], + drvdata->base + TPDM_DSB_EDCMR(i)); + for (i = 0; i < TPDM_DSB_MAX_PATT; i++) { + writel_relaxed(drvdata->dsb->patt_val[i], + drvdata->base + TPDM_DSB_TPR(i)); + writel_relaxed(drvdata->dsb->patt_mask[i], + drvdata->base + TPDM_DSB_TPMR(i)); + writel_relaxed(drvdata->dsb->trig_patt[i], + drvdata->base + TPDM_DSB_XPR(i)); + writel_relaxed(drvdata->dsb->trig_patt_mask[i], + drvdata->base + TPDM_DSB_XPMR(i)); + } + + set_dsb_tier(drvdata); + + set_dsb_msr(drvdata); + val = readl_relaxed(drvdata->base + TPDM_DSB_CR); + /* Set the mode of DSB dataset */ + set_dsb_mode(drvdata, &val); + /* Set trigger type */ + if (drvdata->dsb->trig_type) + val |= TPDM_DSB_CR_TRIG_TYPE; + else + val &= ~TPDM_DSB_CR_TRIG_TYPE; + /* Set the enable bit of DSB control register to 1 */ val |= TPDM_DSB_CR_ENA; writel_relaxed(val, drvdata->base + TPDM_DSB_CR); } -/* TPDM enable operations */ +/* + * TPDM enable operations + * The TPDM or Monitor serves as data collection component for various + * dataset types. It covers Basic Counts(BC), Tenure Counts(TC), + * Continuous Multi-Bit(CMB), Multi-lane CMB(MCMB) and Discrete Single + * Bit(DSB). This function will initialize the configuration according + * to the dataset type supported by the TPDM. + */ static void __tpdm_enable(struct tpdm_drvdata *drvdata) { CS_UNLOCK(drvdata->base); - /* Check if DSB datasets is present for TPDM. 
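set_dsb_mode() above splits the single dsb_mode value that sysfs exposes into three register fields: bits [1:0] select the test mode, bit 3 turns on performance mode, and bits [8:4] pick the byte lane used by high-performance mode (the TPDM_DSB_MODE_* helpers land in the coresight-tpdm.h hunk further down). A worked decomposition, illustration only:

#include <stdio.h>

int main(void)
{
	/* byte lane 2, performance mode on, test mode 1 */
	unsigned int mode = (2u << 4) | (1u << 3) | 1u;

	printf("test mode : %u\n", mode & 0x3);		/* 1 */
	printf("perf mode : %u\n", (mode >> 3) & 0x1);	/* 1 */
	printf("byte lane : %u\n", (mode >> 4) & 0x1f);	/* 2 */
	return 0;
}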
*/ - if (drvdata->datasets & TPDM_PIDR0_DS_DSB) + if (tpdm_has_dsb_dataset(drvdata)) tpdm_enable_dsb(drvdata); CS_LOCK(drvdata->base); @@ -76,8 +319,7 @@ static void __tpdm_disable(struct tpdm_drvdata *drvdata) { CS_UNLOCK(drvdata->base); - /* Check if DSB datasets is present for TPDM. */ - if (drvdata->datasets & TPDM_PIDR0_DS_DSB) + if (tpdm_has_dsb_dataset(drvdata)) tpdm_disable_dsb(drvdata); CS_LOCK(drvdata->base); @@ -110,16 +352,45 @@ static const struct coresight_ops tpdm_cs_ops = { .source_ops = &tpdm_source_ops, }; -static void tpdm_init_default_data(struct tpdm_drvdata *drvdata) +static int tpdm_datasets_setup(struct tpdm_drvdata *drvdata) { u32 pidr; - CS_UNLOCK(drvdata->base); /* Get the datasets present on the TPDM. */ pidr = readl_relaxed(drvdata->base + CORESIGHT_PERIPHIDR0); drvdata->datasets |= pidr & GENMASK(TPDM_DATASETS - 1, 0); - CS_LOCK(drvdata->base); + + if (tpdm_has_dsb_dataset(drvdata) && (!drvdata->dsb)) { + drvdata->dsb = devm_kzalloc(drvdata->dev, + sizeof(*drvdata->dsb), GFP_KERNEL); + if (!drvdata->dsb) + return -ENOMEM; + } + tpdm_reset_datasets(drvdata); + + return 0; +} + +static ssize_t reset_dataset_store(struct device *dev, + struct device_attribute *attr, + const char *buf, + size_t size) +{ + int ret = 0; + unsigned long val; + struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent); + + ret = kstrtoul(buf, 0, &val); + if (ret || val != 1) + return -EINVAL; + + spin_lock(&drvdata->spinlock); + tpdm_reset_datasets(drvdata); + spin_unlock(&drvdata->spinlock); + + return size; } +static DEVICE_ATTR_WO(reset_dataset); /* * value 1: 64 bits test data @@ -161,6 +432,7 @@ static ssize_t integration_test_store(struct device *dev, static DEVICE_ATTR_WO(integration_test); static struct attribute *tpdm_attrs[] = { + &dev_attr_reset_dataset.attr, &dev_attr_integration_test.attr, NULL, }; @@ -169,8 +441,421 @@ static struct attribute_group tpdm_attr_grp = { .attrs = tpdm_attrs, }; +static ssize_t dsb_mode_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent); + + return sysfs_emit(buf, "%x\n", drvdata->dsb->mode); +} + +static ssize_t dsb_mode_store(struct device *dev, + struct device_attribute *attr, + const char *buf, + size_t size) +{ + struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent); + unsigned long val; + + if ((kstrtoul(buf, 0, &val)) || (val < 0) || + (val & ~TPDM_DSB_MODE_MASK)) + return -EINVAL; + + spin_lock(&drvdata->spinlock); + drvdata->dsb->mode = val & TPDM_DSB_MODE_MASK; + spin_unlock(&drvdata->spinlock); + return size; +} +static DEVICE_ATTR_RW(dsb_mode); + +static ssize_t ctrl_idx_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent); + + return sysfs_emit(buf, "%u\n", + (unsigned int)drvdata->dsb->edge_ctrl_idx); +} + +/* + * The EDCR registers can include up to 16 32-bit registers, and each + * one can be configured to control up to 16 edge detections(2 bits + * control one edge detection). So a total 256 edge detections can be + * configured. This function provides a way to set the index number of + * the edge detection which needs to be configured. 
+ */ +static ssize_t ctrl_idx_store(struct device *dev, + struct device_attribute *attr, + const char *buf, + size_t size) +{ + struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent); + unsigned long val; + + if ((kstrtoul(buf, 0, &val)) || (val >= TPDM_DSB_MAX_LINES)) + return -EINVAL; + + spin_lock(&drvdata->spinlock); + drvdata->dsb->edge_ctrl_idx = val; + spin_unlock(&drvdata->spinlock); + + return size; +} +static DEVICE_ATTR_RW(ctrl_idx); + +/* + * This function is used to control the edge detection according + * to the index number that has been set. + * "edge_ctrl" should be one of the following values. + * 0 - Rising edge detection + * 1 - Falling edge detection + * 2 - Rising and falling edge detection (toggle detection) + */ +static ssize_t ctrl_val_store(struct device *dev, + struct device_attribute *attr, + const char *buf, + size_t size) +{ + struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent); + unsigned long val, edge_ctrl; + int reg; + + if ((kstrtoul(buf, 0, &edge_ctrl)) || (edge_ctrl > 0x2)) + return -EINVAL; + + spin_lock(&drvdata->spinlock); + /* + * There are 2 bit per DSB Edge Control line. + * Thus we have 16 lines in a 32bit word. + */ + reg = EDCR_TO_WORD_IDX(drvdata->dsb->edge_ctrl_idx); + val = drvdata->dsb->edge_ctrl[reg]; + val &= ~EDCR_TO_WORD_MASK(drvdata->dsb->edge_ctrl_idx); + val |= EDCR_TO_WORD_VAL(edge_ctrl, drvdata->dsb->edge_ctrl_idx); + drvdata->dsb->edge_ctrl[reg] = val; + spin_unlock(&drvdata->spinlock); + + return size; +} +static DEVICE_ATTR_WO(ctrl_val); + +static ssize_t ctrl_mask_store(struct device *dev, + struct device_attribute *attr, + const char *buf, + size_t size) +{ + struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent); + unsigned long val; + u32 set; + int reg; + + if ((kstrtoul(buf, 0, &val)) || (val & ~1UL)) + return -EINVAL; + + spin_lock(&drvdata->spinlock); + /* + * There is 1 bit per DSB Edge Control Mark line. + * Thus we have 32 lines in a 32bit word. 
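The two store handlers above rely on a fixed packing: sixteen edge-detection lines per 32-bit EDCR word at two control bits each, and thirty-two lines per EDCMR word at one mask bit each; the EDCR_TO_WORD_*/EDCMR_TO_WORD_* macros that encode this arrive in the header hunk below. A worked example of where one line index lands, illustration only:

#include <stdio.h>

int main(void)
{
	unsigned int line = 37;	/* any index below TPDM_DSB_MAX_LINES (256) */

	/* EDCR: 16 lines per 32-bit word, 2 control bits per line */
	printf("EDCR  word %u, shift %u, mask %#x\n",
	       line / 16, (line % 16) * 2, 0x3u << ((line % 16) * 2));
	/* EDCMR: 32 lines per 32-bit word, 1 mask bit per line */
	printf("EDCMR word %u, bit %u\n", line / 32, line % 32);
	/* prints: EDCR word 2, shift 10, mask 0xc00 and EDCMR word 1, bit 5 */
	return 0;
}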
+ */ + reg = EDCMR_TO_WORD_IDX(drvdata->dsb->edge_ctrl_idx); + set = drvdata->dsb->edge_ctrl_mask[reg]; + if (val) + set |= BIT(EDCMR_TO_WORD_SHIFT(drvdata->dsb->edge_ctrl_idx)); + else + set &= ~BIT(EDCMR_TO_WORD_SHIFT(drvdata->dsb->edge_ctrl_idx)); + drvdata->dsb->edge_ctrl_mask[reg] = set; + spin_unlock(&drvdata->spinlock); + + return size; +} +static DEVICE_ATTR_WO(ctrl_mask); + +static ssize_t enable_ts_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent); + + return sysfs_emit(buf, "%u\n", + (unsigned int)drvdata->dsb->patt_ts); +} + +/* + * value 1: Enable/Disable DSB pattern timestamp + */ +static ssize_t enable_ts_store(struct device *dev, + struct device_attribute *attr, + const char *buf, + size_t size) +{ + struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent); + unsigned long val; + + if ((kstrtoul(buf, 0, &val)) || (val & ~1UL)) + return -EINVAL; + + spin_lock(&drvdata->spinlock); + drvdata->dsb->patt_ts = !!val; + spin_unlock(&drvdata->spinlock); + return size; +} +static DEVICE_ATTR_RW(enable_ts); + +static ssize_t set_type_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent); + + return sysfs_emit(buf, "%u\n", + (unsigned int)drvdata->dsb->patt_type); +} + +/* + * value 1: Set DSB pattern type + */ +static ssize_t set_type_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t size) +{ + struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent); + unsigned long val; + + if ((kstrtoul(buf, 0, &val)) || (val & ~1UL)) + return -EINVAL; + + spin_lock(&drvdata->spinlock); + drvdata->dsb->patt_type = val; + spin_unlock(&drvdata->spinlock); + return size; +} +static DEVICE_ATTR_RW(set_type); + +static ssize_t dsb_trig_type_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent); + + return sysfs_emit(buf, "%u\n", + (unsigned int)drvdata->dsb->trig_type); +} + +/* + * Trigger type (boolean): + * false - Disable trigger type. + * true - Enable trigger type. + */ +static ssize_t dsb_trig_type_store(struct device *dev, + struct device_attribute *attr, + const char *buf, + size_t size) +{ + struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent); + unsigned long val; + + if ((kstrtoul(buf, 0, &val)) || (val & ~1UL)) + return -EINVAL; + + spin_lock(&drvdata->spinlock); + if (val) + drvdata->dsb->trig_type = true; + else + drvdata->dsb->trig_type = false; + spin_unlock(&drvdata->spinlock); + return size; +} +static DEVICE_ATTR_RW(dsb_trig_type); + +static ssize_t dsb_trig_ts_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent); + + return sysfs_emit(buf, "%u\n", + (unsigned int)drvdata->dsb->trig_ts); +} + +/* + * Trigger timestamp (boolean): + * false - Disable trigger timestamp. + * true - Enable trigger timestamp. 
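Taken together, the DSB attributes in this hunk are meant to be programmed from user space before a trace session is enabled. A hedged sketch follows: the device name tpdm0 is an assumption (real nodes are named <address>.tpdm under /sys/bus/coresight/devices), while dsb_trig_patt/xpr0, dsb_trig_patt/xpmr0 and dsb_trig_ts match the attribute groups declared below:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

/* Write a value string to a sysfs attribute; returns 0 on success. */
static int sysfs_write(const char *path, const char *val)
{
	int fd = open(path, O_WRONLY);

	if (fd < 0)
		return -1;
	if (write(fd, val, strlen(val)) < 0) {
		close(fd);
		return -1;
	}
	return close(fd);
}

int main(void)
{
	const char *base = "/sys/bus/coresight/devices/tpdm0";	/* assumed name */
	char path[128];

	/* Program trigger pattern 0 plus its mask, then timestamp the trigger. */
	snprintf(path, sizeof(path), "%s/dsb_trig_patt/xpr0", base);
	sysfs_write(path, "0xffff");
	snprintf(path, sizeof(path), "%s/dsb_trig_patt/xpmr0", base);
	sysfs_write(path, "0xffff");
	snprintf(path, sizeof(path), "%s/dsb_trig_ts", base);
	sysfs_write(path, "1");
	return 0;
}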
+ */ +static ssize_t dsb_trig_ts_store(struct device *dev, + struct device_attribute *attr, + const char *buf, + size_t size) +{ + struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent); + unsigned long val; + + if ((kstrtoul(buf, 0, &val)) || (val & ~1UL)) + return -EINVAL; + + spin_lock(&drvdata->spinlock); + if (val) + drvdata->dsb->trig_ts = true; + else + drvdata->dsb->trig_ts = false; + spin_unlock(&drvdata->spinlock); + return size; +} +static DEVICE_ATTR_RW(dsb_trig_ts); + +static struct attribute *tpdm_dsb_edge_attrs[] = { + &dev_attr_ctrl_idx.attr, + &dev_attr_ctrl_val.attr, + &dev_attr_ctrl_mask.attr, + DSB_EDGE_CTRL_ATTR(0), + DSB_EDGE_CTRL_ATTR(1), + DSB_EDGE_CTRL_ATTR(2), + DSB_EDGE_CTRL_ATTR(3), + DSB_EDGE_CTRL_ATTR(4), + DSB_EDGE_CTRL_ATTR(5), + DSB_EDGE_CTRL_ATTR(6), + DSB_EDGE_CTRL_ATTR(7), + DSB_EDGE_CTRL_ATTR(8), + DSB_EDGE_CTRL_ATTR(9), + DSB_EDGE_CTRL_ATTR(10), + DSB_EDGE_CTRL_ATTR(11), + DSB_EDGE_CTRL_ATTR(12), + DSB_EDGE_CTRL_ATTR(13), + DSB_EDGE_CTRL_ATTR(14), + DSB_EDGE_CTRL_ATTR(15), + DSB_EDGE_CTRL_MASK_ATTR(0), + DSB_EDGE_CTRL_MASK_ATTR(1), + DSB_EDGE_CTRL_MASK_ATTR(2), + DSB_EDGE_CTRL_MASK_ATTR(3), + DSB_EDGE_CTRL_MASK_ATTR(4), + DSB_EDGE_CTRL_MASK_ATTR(5), + DSB_EDGE_CTRL_MASK_ATTR(6), + DSB_EDGE_CTRL_MASK_ATTR(7), + NULL, +}; + +static struct attribute *tpdm_dsb_trig_patt_attrs[] = { + DSB_TRIG_PATT_ATTR(0), + DSB_TRIG_PATT_ATTR(1), + DSB_TRIG_PATT_ATTR(2), + DSB_TRIG_PATT_ATTR(3), + DSB_TRIG_PATT_ATTR(4), + DSB_TRIG_PATT_ATTR(5), + DSB_TRIG_PATT_ATTR(6), + DSB_TRIG_PATT_ATTR(7), + DSB_TRIG_PATT_MASK_ATTR(0), + DSB_TRIG_PATT_MASK_ATTR(1), + DSB_TRIG_PATT_MASK_ATTR(2), + DSB_TRIG_PATT_MASK_ATTR(3), + DSB_TRIG_PATT_MASK_ATTR(4), + DSB_TRIG_PATT_MASK_ATTR(5), + DSB_TRIG_PATT_MASK_ATTR(6), + DSB_TRIG_PATT_MASK_ATTR(7), + NULL, +}; + +static struct attribute *tpdm_dsb_patt_attrs[] = { + DSB_PATT_ATTR(0), + DSB_PATT_ATTR(1), + DSB_PATT_ATTR(2), + DSB_PATT_ATTR(3), + DSB_PATT_ATTR(4), + DSB_PATT_ATTR(5), + DSB_PATT_ATTR(6), + DSB_PATT_ATTR(7), + DSB_PATT_MASK_ATTR(0), + DSB_PATT_MASK_ATTR(1), + DSB_PATT_MASK_ATTR(2), + DSB_PATT_MASK_ATTR(3), + DSB_PATT_MASK_ATTR(4), + DSB_PATT_MASK_ATTR(5), + DSB_PATT_MASK_ATTR(6), + DSB_PATT_MASK_ATTR(7), + &dev_attr_enable_ts.attr, + &dev_attr_set_type.attr, + NULL, +}; + +static struct attribute *tpdm_dsb_msr_attrs[] = { + DSB_MSR_ATTR(0), + DSB_MSR_ATTR(1), + DSB_MSR_ATTR(2), + DSB_MSR_ATTR(3), + DSB_MSR_ATTR(4), + DSB_MSR_ATTR(5), + DSB_MSR_ATTR(6), + DSB_MSR_ATTR(7), + DSB_MSR_ATTR(8), + DSB_MSR_ATTR(9), + DSB_MSR_ATTR(10), + DSB_MSR_ATTR(11), + DSB_MSR_ATTR(12), + DSB_MSR_ATTR(13), + DSB_MSR_ATTR(14), + DSB_MSR_ATTR(15), + DSB_MSR_ATTR(16), + DSB_MSR_ATTR(17), + DSB_MSR_ATTR(18), + DSB_MSR_ATTR(19), + DSB_MSR_ATTR(20), + DSB_MSR_ATTR(21), + DSB_MSR_ATTR(22), + DSB_MSR_ATTR(23), + DSB_MSR_ATTR(24), + DSB_MSR_ATTR(25), + DSB_MSR_ATTR(26), + DSB_MSR_ATTR(27), + DSB_MSR_ATTR(28), + DSB_MSR_ATTR(29), + DSB_MSR_ATTR(30), + DSB_MSR_ATTR(31), + NULL, +}; + +static struct attribute *tpdm_dsb_attrs[] = { + &dev_attr_dsb_mode.attr, + &dev_attr_dsb_trig_ts.attr, + &dev_attr_dsb_trig_type.attr, + NULL, +}; + +static struct attribute_group tpdm_dsb_attr_grp = { + .attrs = tpdm_dsb_attrs, + .is_visible = tpdm_dsb_is_visible, +}; + +static struct attribute_group tpdm_dsb_edge_grp = { + .attrs = tpdm_dsb_edge_attrs, + .is_visible = tpdm_dsb_is_visible, + .name = "dsb_edge", +}; + +static struct attribute_group tpdm_dsb_trig_patt_grp = { + .attrs = tpdm_dsb_trig_patt_attrs, + .is_visible = tpdm_dsb_is_visible, + .name = "dsb_trig_patt", 
+}; + +static struct attribute_group tpdm_dsb_patt_grp = { + .attrs = tpdm_dsb_patt_attrs, + .is_visible = tpdm_dsb_is_visible, + .name = "dsb_patt", +}; + +static struct attribute_group tpdm_dsb_msr_grp = { + .attrs = tpdm_dsb_msr_attrs, + .is_visible = tpdm_dsb_msr_is_visible, + .name = "dsb_msr", +}; + static const struct attribute_group *tpdm_attr_grps[] = { &tpdm_attr_grp, + &tpdm_dsb_attr_grp, + &tpdm_dsb_edge_grp, + &tpdm_dsb_trig_patt_grp, + &tpdm_dsb_patt_grp, + &tpdm_dsb_msr_grp, NULL, }; @@ -181,6 +866,7 @@ static int tpdm_probe(struct amba_device *adev, const struct amba_id *id) struct coresight_platform_data *pdata; struct tpdm_drvdata *drvdata; struct coresight_desc desc = { 0 }; + int ret; pdata = coresight_get_platform_data(dev); if (IS_ERR(pdata)) @@ -200,12 +886,20 @@ static int tpdm_probe(struct amba_device *adev, const struct amba_id *id) drvdata->base = base; + ret = tpdm_datasets_setup(drvdata); + if (ret) + return ret; + + if (drvdata && tpdm_has_dsb_dataset(drvdata)) + of_property_read_u32(drvdata->dev->of_node, + "qcom,dsb-msrs-num", &drvdata->dsb_msr_num); + /* Set up coresight component description */ desc.name = coresight_alloc_device_name(&tpdm_devs, dev); if (!desc.name) return -ENOMEM; desc.type = CORESIGHT_DEV_TYPE_SOURCE; - desc.subtype.source_subtype = CORESIGHT_DEV_SUBTYPE_SOURCE_OTHERS; + desc.subtype.source_subtype = CORESIGHT_DEV_SUBTYPE_SOURCE_TPDM; desc.ops = &tpdm_cs_ops; desc.pdata = adev->dev.platform_data; desc.dev = &adev->dev; @@ -216,7 +910,7 @@ static int tpdm_probe(struct amba_device *adev, const struct amba_id *id) return PTR_ERR(drvdata->csdev); spin_lock_init(&drvdata->spinlock); - tpdm_init_default_data(drvdata); + /* Decrease pm refcount when probe is done.*/ pm_runtime_put(&adev->dev); diff --git a/drivers/hwtracing/coresight/coresight-tpdm.h b/drivers/hwtracing/coresight/coresight-tpdm.h index 543854043a2d..4115b2a17b8d 100644 --- a/drivers/hwtracing/coresight/coresight-tpdm.h +++ b/drivers/hwtracing/coresight/coresight-tpdm.h @@ -11,8 +11,52 @@ /* DSB Subunit Registers */ #define TPDM_DSB_CR (0x780) +#define TPDM_DSB_TIER (0x784) +#define TPDM_DSB_TPR(n) (0x788 + (n * 4)) +#define TPDM_DSB_TPMR(n) (0x7A8 + (n * 4)) +#define TPDM_DSB_XPR(n) (0x7C8 + (n * 4)) +#define TPDM_DSB_XPMR(n) (0x7E8 + (n * 4)) +#define TPDM_DSB_EDCR(n) (0x808 + (n * 4)) +#define TPDM_DSB_EDCMR(n) (0x848 + (n * 4)) +#define TPDM_DSB_MSR(n) (0x980 + (n * 4)) + /* Enable bit for DSB subunit */ #define TPDM_DSB_CR_ENA BIT(0) +/* Enable bit for DSB subunit perfmance mode */ +#define TPDM_DSB_CR_MODE BIT(1) +/* Enable bit for DSB subunit trigger type */ +#define TPDM_DSB_CR_TRIG_TYPE BIT(12) +/* Data bits for DSB high performace mode */ +#define TPDM_DSB_CR_HPSEL GENMASK(6, 2) +/* Data bits for DSB test mode */ +#define TPDM_DSB_CR_TEST_MODE GENMASK(10, 9) + +/* Enable bit for DSB subunit pattern timestamp */ +#define TPDM_DSB_TIER_PATT_TSENAB BIT(0) +/* Enable bit for DSB subunit trigger timestamp */ +#define TPDM_DSB_TIER_XTRIG_TSENAB BIT(1) +/* Bit for DSB subunit pattern type */ +#define TPDM_DSB_TIER_PATT_TYPE BIT(2) + +/* DSB programming modes */ +/* DSB mode bits mask */ +#define TPDM_DSB_MODE_MASK GENMASK(8, 0) +/* Test mode control bit*/ +#define TPDM_DSB_MODE_TEST(val) (val & GENMASK(1, 0)) +/* Performance mode */ +#define TPDM_DSB_MODE_PERF BIT(3) +/* High performance mode */ +#define TPDM_DSB_MODE_HPBYTESEL(val) (val & GENMASK(8, 4)) + +#define EDCRS_PER_WORD 16 +#define EDCR_TO_WORD_IDX(r) ((r) / EDCRS_PER_WORD) +#define EDCR_TO_WORD_SHIFT(r) ((r % 
EDCRS_PER_WORD) * 2) +#define EDCR_TO_WORD_VAL(val, r) (val << EDCR_TO_WORD_SHIFT(r)) +#define EDCR_TO_WORD_MASK(r) EDCR_TO_WORD_VAL(0x3, r) + +#define EDCMRS_PER_WORD 32 +#define EDCMR_TO_WORD_IDX(r) ((r) / EDCMRS_PER_WORD) +#define EDCMR_TO_WORD_SHIFT(r) ((r) % EDCMRS_PER_WORD) /* TPDM integration test registers */ #define TPDM_ITATBCNTRL (0xEF0) @@ -40,6 +84,95 @@ #define TPDM_PIDR0_DS_IMPDEF BIT(0) #define TPDM_PIDR0_DS_DSB BIT(1) +#define TPDM_DSB_MAX_LINES 256 +/* MAX number of EDCR registers */ +#define TPDM_DSB_MAX_EDCR 16 +/* MAX number of EDCMR registers */ +#define TPDM_DSB_MAX_EDCMR 8 +/* MAX number of DSB pattern */ +#define TPDM_DSB_MAX_PATT 8 +/* MAX number of DSB MSR */ +#define TPDM_DSB_MAX_MSR 32 + +#define tpdm_simple_dataset_ro(name, mem, idx) \ + (&((struct tpdm_dataset_attribute[]) { \ + { \ + __ATTR(name, 0444, tpdm_simple_dataset_show, NULL), \ + mem, \ + idx, \ + } \ + })[0].attr.attr) + +#define tpdm_simple_dataset_rw(name, mem, idx) \ + (&((struct tpdm_dataset_attribute[]) { \ + { \ + __ATTR(name, 0644, tpdm_simple_dataset_show, \ + tpdm_simple_dataset_store), \ + mem, \ + idx, \ + } \ + })[0].attr.attr) + +#define DSB_EDGE_CTRL_ATTR(nr) \ + tpdm_simple_dataset_ro(edcr##nr, \ + DSB_EDGE_CTRL, nr) + +#define DSB_EDGE_CTRL_MASK_ATTR(nr) \ + tpdm_simple_dataset_ro(edcmr##nr, \ + DSB_EDGE_CTRL_MASK, nr) + +#define DSB_TRIG_PATT_ATTR(nr) \ + tpdm_simple_dataset_rw(xpr##nr, \ + DSB_TRIG_PATT, nr) + +#define DSB_TRIG_PATT_MASK_ATTR(nr) \ + tpdm_simple_dataset_rw(xpmr##nr, \ + DSB_TRIG_PATT_MASK, nr) + +#define DSB_PATT_ATTR(nr) \ + tpdm_simple_dataset_rw(tpr##nr, \ + DSB_PATT, nr) + +#define DSB_PATT_MASK_ATTR(nr) \ + tpdm_simple_dataset_rw(tpmr##nr, \ + DSB_PATT_MASK, nr) + +#define DSB_MSR_ATTR(nr) \ + tpdm_simple_dataset_rw(msr##nr, \ + DSB_MSR, nr) + +/** + * struct dsb_dataset - specifics associated to dsb dataset + * @mode: DSB programming mode + * @edge_ctrl_idx Index number of the edge control + * @edge_ctrl: Save value for edge control + * @edge_ctrl_mask: Save value for edge control mask + * @patt_val: Save value for pattern + * @patt_mask: Save value for pattern mask + * @trig_patt: Save value for trigger pattern + * @trig_patt_mask: Save value for trigger pattern mask + * @msr Save value for MSR + * @patt_ts: Enable/Disable pattern timestamp + * @patt_type: Set pattern type + * @trig_ts: Enable/Disable trigger timestamp. + * @trig_type: Enable/Disable trigger type. + */ +struct dsb_dataset { + u32 mode; + u32 edge_ctrl_idx; + u32 edge_ctrl[TPDM_DSB_MAX_EDCR]; + u32 edge_ctrl_mask[TPDM_DSB_MAX_EDCMR]; + u32 patt_val[TPDM_DSB_MAX_PATT]; + u32 patt_mask[TPDM_DSB_MAX_PATT]; + u32 trig_patt[TPDM_DSB_MAX_PATT]; + u32 trig_patt_mask[TPDM_DSB_MAX_PATT]; + u32 msr[TPDM_DSB_MAX_MSR]; + bool patt_ts; + bool patt_type; + bool trig_ts; + bool trig_type; +}; + /** * struct tpdm_drvdata - specifics associated to an TPDM component * @base: memory mapped base address for this component. @@ -48,6 +181,8 @@ * @spinlock: lock for the drvdata value. * @enable: enable status of the component. * @datasets: The datasets types present of the TPDM. + * @dsb Specifics associated to TPDM DSB. 
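The tpdm_simple_dataset_ro/rw macros above let a single show/store pair serve every register index: each macro instantiates an anonymous tpdm_dataset_attribute that records the dataset member and index, and the handlers recover that context with container_of(). A stripped-down user-space sketch of the same pattern (all names here are illustrative, not from the driver):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct base_attr {
	const char *name;
};

struct indexed_attr {
	struct base_attr attr;	/* embedded, like device_attribute */
	unsigned int idx;	/* extra context, like tpdm_dataset_attribute */
};

/* One handler for all attributes: recover the index from the embedded member. */
static void show(struct base_attr *attr)
{
	struct indexed_attr *ia = container_of(attr, struct indexed_attr, attr);

	printf("%s -> index %u\n", ia->attr.name, ia->idx);
}

int main(void)
{
	struct indexed_attr xpr3 = { .attr = { .name = "xpr3" }, .idx = 3 };

	show(&xpr3.attr);
	return 0;
}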
+ * @dsb_msr_num Number of MSR supported by DSB TPDM */ struct tpdm_drvdata { @@ -57,6 +192,32 @@ struct tpdm_drvdata { spinlock_t spinlock; bool enable; unsigned long datasets; + struct dsb_dataset *dsb; + u32 dsb_msr_num; +}; + +/* Enumerate members of various datasets */ +enum dataset_mem { + DSB_EDGE_CTRL, + DSB_EDGE_CTRL_MASK, + DSB_TRIG_PATT, + DSB_TRIG_PATT_MASK, + DSB_PATT, + DSB_PATT_MASK, + DSB_MSR, +}; + +/** + * struct tpdm_dataset_attribute - Record the member variables and + * index number of datasets that need to be operated by sysfs file + * @attr: The device attribute + * @mem: The member in the dataset data structure + * @idx: The index number of the array data + */ +struct tpdm_dataset_attribute { + struct device_attribute attr; + enum dataset_mem mem; + u32 idx; }; #endif /* _CORESIGHT_CORESIGHT_TPDM_H */ diff --git a/drivers/hwtracing/coresight/coresight-trbe.c b/drivers/hwtracing/coresight/coresight-trbe.c index e20c1c6acc73..6136776482e6 100644 --- a/drivers/hwtracing/coresight/coresight-trbe.c +++ b/drivers/hwtracing/coresight/coresight-trbe.c @@ -1253,8 +1253,18 @@ static void arm_trbe_register_coresight_cpu(struct trbe_drvdata *drvdata, int cp desc.name = devm_kasprintf(dev, GFP_KERNEL, "trbe%d", cpu); if (!desc.name) goto cpu_clear; - - desc.pdata = coresight_get_platform_data(dev); + /* + * TRBE coresight devices do not need regular connections + * information, as the paths get built between all percpu + * source and their respective percpu sink devices. Though + * coresight_register() expect device connections via the + * platform_data, which TRBE devices do not have. As they + * are not real ACPI devices, coresight_get_platform_data() + * ends up failing. Instead let's allocate a dummy zeroed + * coresight_platform_data structure and assign that back + * into the device for that purpose. 
+ */ + desc.pdata = devm_kzalloc(dev, sizeof(*desc.pdata), GFP_KERNEL); if (IS_ERR(desc.pdata)) goto cpu_clear; @@ -1520,14 +1530,13 @@ probe_failed: return ret; } -static int arm_trbe_device_remove(struct platform_device *pdev) +static void arm_trbe_device_remove(struct platform_device *pdev) { struct trbe_drvdata *drvdata = platform_get_drvdata(pdev); arm_trbe_remove_cpuhp(drvdata); arm_trbe_remove_coresight(drvdata); arm_trbe_remove_irq(drvdata); - return 0; } static const struct of_device_id arm_trbe_of_match[] = { @@ -1536,14 +1545,23 @@ static const struct of_device_id arm_trbe_of_match[] = { }; MODULE_DEVICE_TABLE(of, arm_trbe_of_match); +#ifdef CONFIG_ACPI +static const struct platform_device_id arm_trbe_acpi_match[] = { + { ARMV8_TRBE_PDEV_NAME, 0 }, + { } +}; +MODULE_DEVICE_TABLE(platform, arm_trbe_acpi_match); +#endif + static struct platform_driver arm_trbe_driver = { + .id_table = ACPI_PTR(arm_trbe_acpi_match), .driver = { .name = DRVNAME, .of_match_table = of_match_ptr(arm_trbe_of_match), .suppress_bind_attrs = true, }, .probe = arm_trbe_device_probe, - .remove = arm_trbe_device_remove, + .remove_new = arm_trbe_device_remove, }; static int __init arm_trbe_init(void) diff --git a/drivers/hwtracing/coresight/coresight-trbe.h b/drivers/hwtracing/coresight/coresight-trbe.h index e915e749be55..45202c48acce 100644 --- a/drivers/hwtracing/coresight/coresight-trbe.h +++ b/drivers/hwtracing/coresight/coresight-trbe.h @@ -7,11 +7,13 @@ * * Author: Anshuman Khandual <anshuman.khandual@arm.com> */ +#include <linux/acpi.h> #include <linux/coresight.h> #include <linux/device.h> #include <linux/irq.h> #include <linux/kernel.h> #include <linux/of.h> +#include <linux/perf/arm_pmu.h> #include <linux/platform_device.h> #include <linux/smp.h> diff --git a/drivers/hwtracing/coresight/ultrasoc-smb.c b/drivers/hwtracing/coresight/ultrasoc-smb.c index 6e32d31a95fe..10e886455b8b 100644 --- a/drivers/hwtracing/coresight/ultrasoc-smb.c +++ b/drivers/hwtracing/coresight/ultrasoc-smb.c @@ -97,27 +97,19 @@ static int smb_open(struct inode *inode, struct file *file) { struct smb_drv_data *drvdata = container_of(file->private_data, struct smb_drv_data, miscdev); - int ret = 0; - spin_lock(&drvdata->spinlock); + guard(spinlock)(&drvdata->spinlock); - if (drvdata->reading) { - ret = -EBUSY; - goto out; - } + if (drvdata->reading) + return -EBUSY; - if (atomic_read(&drvdata->csdev->refcnt)) { - ret = -EBUSY; - goto out; - } + if (atomic_read(&drvdata->csdev->refcnt)) + return -EBUSY; smb_update_data_size(drvdata); - drvdata->reading = true; -out: - spin_unlock(&drvdata->spinlock); - return ret; + return 0; } static ssize_t smb_read(struct file *file, char __user *data, size_t len, @@ -160,9 +152,8 @@ static int smb_release(struct inode *inode, struct file *file) struct smb_drv_data *drvdata = container_of(file->private_data, struct smb_drv_data, miscdev); - spin_lock(&drvdata->spinlock); + guard(spinlock)(&drvdata->spinlock); drvdata->reading = false; - spin_unlock(&drvdata->spinlock); return 0; } @@ -255,19 +246,15 @@ static int smb_enable(struct coresight_device *csdev, enum cs_mode mode, struct smb_drv_data *drvdata = dev_get_drvdata(csdev->dev.parent); int ret = 0; - spin_lock(&drvdata->spinlock); + guard(spinlock)(&drvdata->spinlock); /* Do nothing, the trace data is reading by other interface now */ - if (drvdata->reading) { - ret = -EBUSY; - goto out; - } + if (drvdata->reading) + return -EBUSY; /* Do nothing, the SMB is already enabled as other mode */ - if (drvdata->mode != CS_MODE_DISABLED && 
drvdata->mode != mode) { - ret = -EBUSY; - goto out; - } + if (drvdata->mode != CS_MODE_DISABLED && drvdata->mode != mode) + return -EBUSY; switch (mode) { case CS_MODE_SYSFS: @@ -281,13 +268,10 @@ static int smb_enable(struct coresight_device *csdev, enum cs_mode mode, } if (ret) - goto out; + return ret; atomic_inc(&csdev->refcnt); - dev_dbg(&csdev->dev, "Ultrasoc SMB enabled\n"); -out: - spin_unlock(&drvdata->spinlock); return ret; } @@ -295,19 +279,14 @@ out: static int smb_disable(struct coresight_device *csdev) { struct smb_drv_data *drvdata = dev_get_drvdata(csdev->dev.parent); - int ret = 0; - spin_lock(&drvdata->spinlock); + guard(spinlock)(&drvdata->spinlock); - if (drvdata->reading) { - ret = -EBUSY; - goto out; - } + if (drvdata->reading) + return -EBUSY; - if (atomic_dec_return(&csdev->refcnt)) { - ret = -EBUSY; - goto out; - } + if (atomic_dec_return(&csdev->refcnt)) + return -EBUSY; /* Complain if we (somehow) got out of sync */ WARN_ON_ONCE(drvdata->mode == CS_MODE_DISABLED); @@ -317,12 +296,9 @@ static int smb_disable(struct coresight_device *csdev) /* Dissociate from the target process. */ drvdata->pid = -1; drvdata->mode = CS_MODE_DISABLED; - dev_dbg(&csdev->dev, "Ultrasoc SMB disabled\n"); -out: - spin_unlock(&drvdata->spinlock); - return ret; + return 0; } static void *smb_alloc_buffer(struct coresight_device *csdev, @@ -395,17 +371,17 @@ static unsigned long smb_update_buffer(struct coresight_device *csdev, struct smb_drv_data *drvdata = dev_get_drvdata(csdev->dev.parent); struct smb_data_buffer *sdb = &drvdata->sdb; struct cs_buffers *buf = sink_config; - unsigned long data_size = 0; + unsigned long data_size; bool lost = false; if (!buf) return 0; - spin_lock(&drvdata->spinlock); + guard(spinlock)(&drvdata->spinlock); /* Don't do anything if another tracer is using this sink. */ if (atomic_read(&csdev->refcnt) != 1) - goto out; + return 0; smb_disable_hw(drvdata); smb_update_data_size(drvdata); @@ -424,8 +400,6 @@ static unsigned long smb_update_buffer(struct coresight_device *csdev, smb_sync_perf_buffer(drvdata, buf, handle->head); if (!buf->snapshot && lost) perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED); -out: - spin_unlock(&drvdata->spinlock); return data_size; } @@ -601,15 +575,13 @@ static int smb_probe(struct platform_device *pdev) return 0; } -static int smb_remove(struct platform_device *pdev) +static void smb_remove(struct platform_device *pdev) { struct smb_drv_data *drvdata = platform_get_drvdata(pdev); smb_unregister_sink(drvdata); smb_config_inport(&pdev->dev, false); - - return 0; } #ifdef CONFIG_ACPI @@ -627,7 +599,7 @@ static struct platform_driver smb_driver = { .suppress_bind_attrs = true, }, .probe = smb_probe, - .remove = smb_remove, + .remove_new = smb_remove, }; module_platform_driver(smb_driver); diff --git a/drivers/hwtracing/ptt/hisi_ptt.c b/drivers/hwtracing/ptt/hisi_ptt.c index a991ecb7515a..c1b5fd2b8974 100644 --- a/drivers/hwtracing/ptt/hisi_ptt.c +++ b/drivers/hwtracing/ptt/hisi_ptt.c @@ -183,6 +183,10 @@ static void hisi_ptt_wait_dma_reset_done(struct hisi_ptt *hisi_ptt) static void hisi_ptt_trace_end(struct hisi_ptt *hisi_ptt) { writel(0, hisi_ptt->iobase + HISI_PTT_TRACE_CTRL); + + /* Mask the interrupt on the end */ + writel(HISI_PTT_TRACE_INT_MASK_ALL, hisi_ptt->iobase + HISI_PTT_TRACE_INT_MASK); + hisi_ptt->trace_ctrl.started = false; } @@ -270,15 +274,14 @@ static int hisi_ptt_update_aux(struct hisi_ptt *hisi_ptt, int index, bool stop) buf->pos += size; /* - * Just commit the traced data if we're going to stop. 
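The ultrasoc-smb hunks above replace spin_lock()/spin_unlock() pairs and their goto-based unlock paths with the scope-based guard() helper from linux/cleanup.h, which releases the lock automatically when the enclosing scope is left. A minimal sketch of the pattern, assuming only what linux/cleanup.h and linux/spinlock.h already provide (the demo_* names are placeholders):

#include <linux/cleanup.h>
#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/types.h>

static DEFINE_SPINLOCK(demo_lock);
static bool demo_busy;

/* Early returns are safe: the guard drops demo_lock on every exit path. */
static int demo_try_claim(void)
{
	guard(spinlock)(&demo_lock);

	if (demo_busy)
		return -EBUSY;

	demo_busy = true;
	return 0;
}

The same series also moves the TRBE and SMB platform drivers to the void-returning .remove_new callback, so the remove path no longer returns an error code that the driver core would ignore anyway.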
Otherwise if the - * resident AUX buffer cannot contain the data of next trace buffer, - * apply a new one. + * Always commit the data to the AUX buffer in time to make sure + * userspace got enough time to consume the data. + * + * If we're not going to stop, apply a new one and check whether + * there's enough room for the next trace. */ - if (stop) { - perf_aux_output_end(handle, buf->pos); - } else if (buf->length - buf->pos < HISI_PTT_TRACE_BUF_SIZE) { - perf_aux_output_end(handle, buf->pos); - + perf_aux_output_end(handle, size); + if (!stop) { buf = perf_aux_output_begin(handle, event); if (!buf) return -EINVAL; diff --git a/drivers/hwtracing/ptt/hisi_ptt.h b/drivers/hwtracing/ptt/hisi_ptt.h index e17f045d7e72..46030aa88081 100644 --- a/drivers/hwtracing/ptt/hisi_ptt.h +++ b/drivers/hwtracing/ptt/hisi_ptt.h @@ -47,6 +47,7 @@ #define HISI_PTT_TRACE_INT_STAT 0x0890 #define HISI_PTT_TRACE_INT_STAT_MASK GENMASK(3, 0) #define HISI_PTT_TRACE_INT_MASK 0x0894 +#define HISI_PTT_TRACE_INT_MASK_ALL GENMASK(3, 0) #define HISI_PTT_TUNING_INT_STAT 0x0898 #define HISI_PTT_TUNING_INT_STAT_MASK BIT(0) #define HISI_PTT_TRACE_WR_STS 0x08a0 diff --git a/drivers/iio/accel/Kconfig b/drivers/iio/accel/Kconfig index f113dae59048..91adcac875a4 100644 --- a/drivers/iio/accel/Kconfig +++ b/drivers/iio/accel/Kconfig @@ -260,10 +260,11 @@ config BMI088_ACCEL select REGMAP select BMI088_ACCEL_SPI help - Say yes here to build support for the Bosch BMI088 accelerometer. + Say yes here to build support for the following Bosch accelerometers: + BMI088, BMI085, BMI090L. Note that all of these are combo module that + include both accelerometer and gyroscope. - This is a combo module with both accelerometer and gyroscope. This - driver only implements the accelerometer part, which has its own + This driver only implements the accelerometer part, which has its own address and register map. BMG160 provides the gyroscope driver. config BMI088_ACCEL_SPI diff --git a/drivers/iio/accel/bmi088-accel-core.c b/drivers/iio/accel/bmi088-accel-core.c index 84edcc78d796..4d989708e6c3 100644 --- a/drivers/iio/accel/bmi088-accel-core.c +++ b/drivers/iio/accel/bmi088-accel-core.c @@ -2,6 +2,8 @@ /* * 3-axis accelerometer driver supporting following Bosch-Sensortec chips: * - BMI088 + * - BMI085 + * - BMI090L * * Copyright (c) 2018-2021, Topic Embedded Products */ diff --git a/drivers/iio/accel/bmi088-accel-spi.c b/drivers/iio/accel/bmi088-accel-spi.c index ee540edd8412..7b419a7b2478 100644 --- a/drivers/iio/accel/bmi088-accel-spi.c +++ b/drivers/iio/accel/bmi088-accel-spi.c @@ -2,6 +2,8 @@ /* * 3-axis accelerometer driver supporting following Bosch-Sensortec chips: * - BMI088 + * - BMI085 + * - BMI090L * * Copyright (c) 2018-2020, Topic Embedded Products */ diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig index 35f9867da12c..3b73c509bd68 100644 --- a/drivers/iio/adc/Kconfig +++ b/drivers/iio/adc/Kconfig @@ -36,13 +36,29 @@ config AD4130 To compile this driver as a module, choose M here: the module will be called ad4130. +config AD7091R + tristate + config AD7091R5 tristate "Analog Devices AD7091R5 ADC Driver" depends on I2C + select AD7091R select REGMAP_I2C help Say yes here to build support for Analog Devices AD7091R-5 ADC. +config AD7091R8 + tristate "Analog Devices AD7091R8 ADC Driver" + depends on SPI + select AD7091R + select REGMAP_SPI + help + Say yes here to build support for Analog Devices AD7091R-2, AD7091R-4, + and AD7091R-8 ADC. 
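Returning to the hisi_ptt hunk a few lines above: the interrupt path now commits each chunk with perf_aux_output_end() as soon as it is copied, and re-arms with perf_aux_output_begin() only when tracing continues. A compressed sketch of that cycle, assuming nothing beyond the two perf AUX helpers already used in the hunk (the demo_* names are placeholders):

#include <linux/perf_event.h>

/* Sketch only: everything except the perf_aux_* helpers is illustrative. */
static int demo_ptt_irq_update(struct perf_output_handle *handle,
			       struct perf_event *event,
			       unsigned long size, bool stop)
{
	void *buf;

	/* Commit what this interrupt produced so userspace can consume it now. */
	perf_aux_output_end(handle, size);

	if (stop)
		return 0;

	/* Keep tracing: immediately open the next AUX record. */
	buf = perf_aux_output_begin(handle, event);
	if (!buf)
		return -EINVAL;

	return 0;
}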
+ + To compile this driver as a module, choose M here: the module will be + called ad7091r8. + config AD7124 tristate "Analog Devices AD7124 and similar sigma-delta ADCs driver" depends on SPI_MASTER @@ -292,7 +308,7 @@ config ADI_AXI_ADC select IIO_BUFFER select IIO_BUFFER_HW_CONSUMER select IIO_BUFFER_DMAENGINE - depends on HAS_IOMEM + select REGMAP_MMIO depends on OF help Say yes here to build support for Analog Devices Generic @@ -745,6 +761,17 @@ config MAX1363 To compile this driver as a module, choose M here: the module will be called max1363. +config MAX34408 + tristate "Maxim max34408/max344089 ADC driver" + depends on I2C + help + Say yes here to build support for Maxim max34408/max34409 current sense + monitor with 8-bits ADC interface with overcurrent delay/threshold and + shutdown delay. + + To compile this driver as a module, choose M here: the module will be + called max34408. + config MAX77541_ADC tristate "Analog Devices MAX77541 ADC driver" depends on MFD_MAX77541 diff --git a/drivers/iio/adc/Makefile b/drivers/iio/adc/Makefile index bee11d442af4..d2fda54a3259 100644 --- a/drivers/iio/adc/Makefile +++ b/drivers/iio/adc/Makefile @@ -7,7 +7,9 @@ obj-$(CONFIG_AB8500_GPADC) += ab8500-gpadc.o obj-$(CONFIG_AD_SIGMA_DELTA) += ad_sigma_delta.o obj-$(CONFIG_AD4130) += ad4130.o -obj-$(CONFIG_AD7091R5) += ad7091r5.o ad7091r-base.o +obj-$(CONFIG_AD7091R) += ad7091r-base.o +obj-$(CONFIG_AD7091R5) += ad7091r5.o +obj-$(CONFIG_AD7091R8) += ad7091r8.o obj-$(CONFIG_AD7124) += ad7124.o obj-$(CONFIG_AD7192) += ad7192.o obj-$(CONFIG_AD7266) += ad7266.o @@ -68,6 +70,7 @@ obj-$(CONFIG_MAX11205) += max11205.o obj-$(CONFIG_MAX11410) += max11410.o obj-$(CONFIG_MAX1241) += max1241.o obj-$(CONFIG_MAX1363) += max1363.o +obj-$(CONFIG_MAX34408) += max34408.o obj-$(CONFIG_MAX77541_ADC) += max77541-adc.o obj-$(CONFIG_MAX9611) += max9611.o obj-$(CONFIG_MCP320X) += mcp320x.o diff --git a/drivers/iio/adc/ad7091r-base.c b/drivers/iio/adc/ad7091r-base.c index 8e252cde735b..f4255b91acfc 100644 --- a/drivers/iio/adc/ad7091r-base.c +++ b/drivers/iio/adc/ad7091r-base.c @@ -6,6 +6,7 @@ */ #include <linux/bitops.h> +#include <linux/bitfield.h> #include <linux/iio/events.h> #include <linux/iio/iio.h> #include <linux/interrupt.h> @@ -15,67 +16,26 @@ #include "ad7091r-base.h" -#define AD7091R_REG_RESULT 0 -#define AD7091R_REG_CHANNEL 1 -#define AD7091R_REG_CONF 2 -#define AD7091R_REG_ALERT 3 -#define AD7091R_REG_CH_LOW_LIMIT(ch) ((ch) * 3 + 4) -#define AD7091R_REG_CH_HIGH_LIMIT(ch) ((ch) * 3 + 5) -#define AD7091R_REG_CH_HYSTERESIS(ch) ((ch) * 3 + 6) - -/* AD7091R_REG_RESULT */ -#define AD7091R_REG_RESULT_CH_ID(x) (((x) >> 13) & 0x3) -#define AD7091R_REG_RESULT_CONV_RESULT(x) ((x) & 0xfff) - -/* AD7091R_REG_CONF */ -#define AD7091R_REG_CONF_AUTO BIT(8) -#define AD7091R_REG_CONF_CMD BIT(10) - -#define AD7091R_REG_CONF_MODE_MASK \ - (AD7091R_REG_CONF_AUTO | AD7091R_REG_CONF_CMD) - -enum ad7091r_mode { - AD7091R_MODE_SAMPLE, - AD7091R_MODE_COMMAND, - AD7091R_MODE_AUTOCYCLE, +const struct iio_event_spec ad7091r_events[] = { + { + .type = IIO_EV_TYPE_THRESH, + .dir = IIO_EV_DIR_RISING, + .mask_separate = BIT(IIO_EV_INFO_VALUE) | + BIT(IIO_EV_INFO_ENABLE), + }, + { + .type = IIO_EV_TYPE_THRESH, + .dir = IIO_EV_DIR_FALLING, + .mask_separate = BIT(IIO_EV_INFO_VALUE) | + BIT(IIO_EV_INFO_ENABLE), + }, + { + .type = IIO_EV_TYPE_THRESH, + .dir = IIO_EV_DIR_EITHER, + .mask_separate = BIT(IIO_EV_INFO_HYSTERESIS), + }, }; - -struct ad7091r_state { - struct device *dev; - struct regmap *map; - struct regulator *vref; - const struct 
ad7091r_chip_info *chip_info; - enum ad7091r_mode mode; - struct mutex lock; /*lock to prevent concurent reads */ -}; - -static int ad7091r_set_mode(struct ad7091r_state *st, enum ad7091r_mode mode) -{ - int ret, conf; - - switch (mode) { - case AD7091R_MODE_SAMPLE: - conf = 0; - break; - case AD7091R_MODE_COMMAND: - conf = AD7091R_REG_CONF_CMD; - break; - case AD7091R_MODE_AUTOCYCLE: - conf = AD7091R_REG_CONF_AUTO; - break; - default: - return -EINVAL; - } - - ret = regmap_update_bits(st->map, AD7091R_REG_CONF, - AD7091R_REG_CONF_MODE_MASK, conf); - if (ret) - return ret; - - st->mode = mode; - - return 0; -} +EXPORT_SYMBOL_NS_GPL(ad7091r_events, IIO_AD7091R); static int ad7091r_set_channel(struct ad7091r_state *st, unsigned int channel) { @@ -110,7 +70,7 @@ static int ad7091r_read_one(struct iio_dev *iio_dev, if (ret) return ret; - if (AD7091R_REG_RESULT_CH_ID(val) != channel) + if (st->chip_info->reg_result_chan_id(val) != channel) return -EIO; *read_val = AD7091R_REG_RESULT_CONV_RESULT(val); @@ -168,14 +128,148 @@ unlock: return ret; } +static int ad7091r_read_event_config(struct iio_dev *indio_dev, + const struct iio_chan_spec *chan, + enum iio_event_type type, + enum iio_event_direction dir) +{ + struct ad7091r_state *st = iio_priv(indio_dev); + int val, ret; + + switch (dir) { + case IIO_EV_DIR_RISING: + ret = regmap_read(st->map, + AD7091R_REG_CH_HIGH_LIMIT(chan->channel), + &val); + if (ret) + return ret; + return val != AD7091R_HIGH_LIMIT; + case IIO_EV_DIR_FALLING: + ret = regmap_read(st->map, + AD7091R_REG_CH_LOW_LIMIT(chan->channel), + &val); + if (ret) + return ret; + return val != AD7091R_LOW_LIMIT; + default: + return -EINVAL; + } +} + +static int ad7091r_write_event_config(struct iio_dev *indio_dev, + const struct iio_chan_spec *chan, + enum iio_event_type type, + enum iio_event_direction dir, int state) +{ + struct ad7091r_state *st = iio_priv(indio_dev); + + if (state) { + return regmap_set_bits(st->map, AD7091R_REG_CONF, + AD7091R_REG_CONF_ALERT_EN); + } else { + /* + * Set thresholds either to 0 or to 2^12 - 1 as appropriate to + * prevent alerts and thus disable event generation. 
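As the comment above spells out, the CONF register only carries a global alert enable, so disabling one direction means parking its limit where a 12-bit result can never cross it (0xFFF for rising, 0x0 for falling), and read_event_config() infers enablement by comparing against those defaults. From user space the thresholds are driven through the standard IIO events attributes; a hedged sketch (the iio:device0 index and the in_voltage0_thresh_rising_* names follow the usual IIO convention and are assumptions here):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static int sysfs_write(const char *path, const char *val)
{
	int fd = open(path, O_WRONLY);

	if (fd < 0)
		return -1;
	if (write(fd, val, strlen(val)) < 0) {
		close(fd);
		return -1;
	}
	return close(fd);
}

int main(void)
{
	/* Device index and attribute names assumed; adjust for the real system. */
	const char *ev = "/sys/bus/iio/devices/iio:device0/events";
	char path[128];

	snprintf(path, sizeof(path), "%s/in_voltage0_thresh_rising_value", ev);
	sysfs_write(path, "3000");	/* raw 12-bit threshold */
	snprintf(path, sizeof(path), "%s/in_voltage0_thresh_rising_en", ev);
	sysfs_write(path, "1");		/* sets AD7091R_REG_CONF_ALERT_EN */
	return 0;
}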
+ */ + switch (dir) { + case IIO_EV_DIR_RISING: + return regmap_write(st->map, + AD7091R_REG_CH_HIGH_LIMIT(chan->channel), + AD7091R_HIGH_LIMIT); + case IIO_EV_DIR_FALLING: + return regmap_write(st->map, + AD7091R_REG_CH_LOW_LIMIT(chan->channel), + AD7091R_LOW_LIMIT); + default: + return -EINVAL; + } + } +} + +static int ad7091r_read_event_value(struct iio_dev *indio_dev, + const struct iio_chan_spec *chan, + enum iio_event_type type, + enum iio_event_direction dir, + enum iio_event_info info, int *val, int *val2) +{ + struct ad7091r_state *st = iio_priv(indio_dev); + int ret; + + switch (info) { + case IIO_EV_INFO_VALUE: + switch (dir) { + case IIO_EV_DIR_RISING: + ret = regmap_read(st->map, + AD7091R_REG_CH_HIGH_LIMIT(chan->channel), + val); + if (ret) + return ret; + return IIO_VAL_INT; + case IIO_EV_DIR_FALLING: + ret = regmap_read(st->map, + AD7091R_REG_CH_LOW_LIMIT(chan->channel), + val); + if (ret) + return ret; + return IIO_VAL_INT; + default: + return -EINVAL; + } + case IIO_EV_INFO_HYSTERESIS: + ret = regmap_read(st->map, + AD7091R_REG_CH_HYSTERESIS(chan->channel), + val); + if (ret) + return ret; + return IIO_VAL_INT; + default: + return -EINVAL; + } +} + +static int ad7091r_write_event_value(struct iio_dev *indio_dev, + const struct iio_chan_spec *chan, + enum iio_event_type type, + enum iio_event_direction dir, + enum iio_event_info info, int val, int val2) +{ + struct ad7091r_state *st = iio_priv(indio_dev); + + switch (info) { + case IIO_EV_INFO_VALUE: + switch (dir) { + case IIO_EV_DIR_RISING: + return regmap_write(st->map, + AD7091R_REG_CH_HIGH_LIMIT(chan->channel), + val); + case IIO_EV_DIR_FALLING: + return regmap_write(st->map, + AD7091R_REG_CH_LOW_LIMIT(chan->channel), + val); + default: + return -EINVAL; + } + case IIO_EV_INFO_HYSTERESIS: + return regmap_write(st->map, + AD7091R_REG_CH_HYSTERESIS(chan->channel), + val); + default: + return -EINVAL; + } +} + static const struct iio_info ad7091r_info = { .read_raw = ad7091r_read_raw, + .read_event_config = &ad7091r_read_event_config, + .write_event_config = &ad7091r_write_event_config, + .read_event_value = &ad7091r_read_event_value, + .write_event_value = &ad7091r_write_event_value, }; static irqreturn_t ad7091r_event_handler(int irq, void *private) { - struct ad7091r_state *st = (struct ad7091r_state *) private; - struct iio_dev *iio_dev = dev_get_drvdata(st->dev); + struct iio_dev *iio_dev = private; + struct ad7091r_state *st = iio_priv(iio_dev); unsigned int i, read_val; int ret; s64 timestamp = iio_get_time_ns(iio_dev); @@ -207,9 +301,8 @@ static void ad7091r_remove(void *data) regulator_disable(st->vref); } -int ad7091r_probe(struct device *dev, const char *name, - const struct ad7091r_chip_info *chip_info, - struct regmap *map, int irq) +int ad7091r_probe(struct device *dev, const struct ad7091r_init_info *init_info, + int irq) { struct iio_dev *iio_dev; struct ad7091r_state *st; @@ -221,29 +314,54 @@ int ad7091r_probe(struct device *dev, const char *name, st = iio_priv(iio_dev); st->dev = dev; - st->chip_info = chip_info; - st->map = map; + init_info->init_adc_regmap(st, init_info->regmap_config); + if (IS_ERR(st->map)) + return dev_err_probe(st->dev, PTR_ERR(st->map), + "Error initializing regmap\n"); - iio_dev->name = name; iio_dev->info = &ad7091r_info; iio_dev->modes = INDIO_DIRECT_MODE; - iio_dev->num_channels = chip_info->num_channels; - iio_dev->channels = chip_info->channels; + if (init_info->setup) { + ret = init_info->setup(st); + if (ret < 0) + return ret; + } if (irq) { + st->chip_info = 
init_info->info_irq; + ret = regmap_update_bits(st->map, AD7091R_REG_CONF, + AD7091R_REG_CONF_ALERT_EN, BIT(4)); + if (ret) + return ret; + ret = devm_request_threaded_irq(dev, irq, NULL, - ad7091r_event_handler, - IRQF_TRIGGER_FALLING | IRQF_ONESHOT, name, st); + ad7091r_event_handler, + IRQF_TRIGGER_FALLING | + IRQF_ONESHOT, + st->chip_info->name, iio_dev); if (ret) return ret; + } else { + st->chip_info = init_info->info_no_irq; } + iio_dev->name = st->chip_info->name; + iio_dev->num_channels = st->chip_info->num_channels; + iio_dev->channels = st->chip_info->channels; + st->vref = devm_regulator_get_optional(dev, "vref"); if (IS_ERR(st->vref)) { if (PTR_ERR(st->vref) == -EPROBE_DEFER) return -EPROBE_DEFER; + st->vref = NULL; + /* Enable internal vref */ + ret = regmap_set_bits(st->map, AD7091R_REG_CONF, + AD7091R_REG_CONF_INT_VREF); + if (ret) + return dev_err_probe(st->dev, ret, + "Error on enable internal reference\n"); } else { ret = regulator_enable(st->vref); if (ret) @@ -254,7 +372,7 @@ int ad7091r_probe(struct device *dev, const char *name, } /* Use command mode by default to convert only desired channels*/ - ret = ad7091r_set_mode(st, AD7091R_MODE_COMMAND); + ret = st->chip_info->set_mode(st, AD7091R_MODE_COMMAND); if (ret) return ret; @@ -262,7 +380,7 @@ int ad7091r_probe(struct device *dev, const char *name, } EXPORT_SYMBOL_NS_GPL(ad7091r_probe, IIO_AD7091R); -static bool ad7091r_writeable_reg(struct device *dev, unsigned int reg) +bool ad7091r_writeable_reg(struct device *dev, unsigned int reg) { switch (reg) { case AD7091R_REG_RESULT: @@ -272,8 +390,9 @@ static bool ad7091r_writeable_reg(struct device *dev, unsigned int reg) return true; } } +EXPORT_SYMBOL_NS_GPL(ad7091r_writeable_reg, IIO_AD7091R); -static bool ad7091r_volatile_reg(struct device *dev, unsigned int reg) +bool ad7091r_volatile_reg(struct device *dev, unsigned int reg) { switch (reg) { case AD7091R_REG_RESULT: @@ -283,14 +402,7 @@ static bool ad7091r_volatile_reg(struct device *dev, unsigned int reg) return false; } } - -const struct regmap_config ad7091r_regmap_config = { - .reg_bits = 8, - .val_bits = 16, - .writeable_reg = ad7091r_writeable_reg, - .volatile_reg = ad7091r_volatile_reg, -}; -EXPORT_SYMBOL_NS_GPL(ad7091r_regmap_config, IIO_AD7091R); +EXPORT_SYMBOL_NS_GPL(ad7091r_volatile_reg, IIO_AD7091R); MODULE_AUTHOR("Beniamin Bia <beniamin.bia@analog.com>"); MODULE_DESCRIPTION("Analog Devices AD7091Rx multi-channel converters"); diff --git a/drivers/iio/adc/ad7091r-base.h b/drivers/iio/adc/ad7091r-base.h index 509748aef9b1..696bf7a897bb 100644 --- a/drivers/iio/adc/ad7091r-base.h +++ b/drivers/iio/adc/ad7091r-base.h @@ -8,19 +8,92 @@ #ifndef __DRIVERS_IIO_ADC_AD7091R_BASE_H__ #define __DRIVERS_IIO_ADC_AD7091R_BASE_H__ +#include <linux/regmap.h> + +#define AD7091R_REG_RESULT 0 +#define AD7091R_REG_CHANNEL 1 +#define AD7091R_REG_CONF 2 +#define AD7091R_REG_ALERT 3 +#define AD7091R_REG_CH_LOW_LIMIT(ch) ((ch) * 3 + 4) +#define AD7091R_REG_CH_HIGH_LIMIT(ch) ((ch) * 3 + 5) +#define AD7091R_REG_CH_HYSTERESIS(ch) ((ch) * 3 + 6) + +/* AD7091R_REG_RESULT */ +#define AD7091R5_REG_RESULT_CH_ID(x) (((x) >> 13) & 0x3) +#define AD7091R8_REG_RESULT_CH_ID(x) (((x) >> 13) & 0x7) +#define AD7091R_REG_RESULT_CONV_RESULT(x) ((x) & 0xfff) + +/* AD7091R_REG_CONF */ +#define AD7091R_REG_CONF_INT_VREF BIT(0) +#define AD7091R_REG_CONF_ALERT_EN BIT(4) +#define AD7091R_REG_CONF_AUTO BIT(8) +#define AD7091R_REG_CONF_CMD BIT(10) + +#define AD7091R_REG_CONF_MODE_MASK \ + (AD7091R_REG_CONF_AUTO | AD7091R_REG_CONF_CMD) + +/* 
AD7091R_REG_CH_LIMIT */ +#define AD7091R_HIGH_LIMIT 0xFFF +#define AD7091R_LOW_LIMIT 0x0 + +#define AD7091R_CHANNEL(idx, bits, ev, num_ev) { \ + .type = IIO_VOLTAGE, \ + .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \ + .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \ + .indexed = 1, \ + .channel = idx, \ + .event_spec = ev, \ + .num_event_specs = num_ev, \ + .scan_type.storagebits = 16, \ + .scan_type.realbits = bits, \ +} + struct device; -struct ad7091r_state; +struct gpio_desc; + +enum ad7091r_mode { + AD7091R_MODE_SAMPLE, + AD7091R_MODE_COMMAND, + AD7091R_MODE_AUTOCYCLE, +}; + +struct ad7091r_state { + struct device *dev; + struct regmap *map; + struct gpio_desc *convst_gpio; + struct gpio_desc *reset_gpio; + struct regulator *vref; + const struct ad7091r_chip_info *chip_info; + enum ad7091r_mode mode; + struct mutex lock; /*lock to prevent concurent reads */ + __be16 tx_buf __aligned(IIO_DMA_MINALIGN); + __be16 rx_buf; +}; struct ad7091r_chip_info { + const char *name; unsigned int num_channels; const struct iio_chan_spec *channels; unsigned int vref_mV; + unsigned int (*reg_result_chan_id)(unsigned int val); + int (*set_mode)(struct ad7091r_state *st, enum ad7091r_mode mode); }; -extern const struct regmap_config ad7091r_regmap_config; +struct ad7091r_init_info { + const struct ad7091r_chip_info *info_irq; + const struct ad7091r_chip_info *info_no_irq; + const struct regmap_config *regmap_config; + void (*init_adc_regmap)(struct ad7091r_state *st, + const struct regmap_config *regmap_conf); + int (*setup)(struct ad7091r_state *st); +}; + +extern const struct iio_event_spec ad7091r_events[3]; + +int ad7091r_probe(struct device *dev, const struct ad7091r_init_info *init_info, + int irq); -int ad7091r_probe(struct device *dev, const char *name, - const struct ad7091r_chip_info *chip_info, - struct regmap *map, int irq); +bool ad7091r_volatile_reg(struct device *dev, unsigned int reg); +bool ad7091r_writeable_reg(struct device *dev, unsigned int reg); #endif /* __DRIVERS_IIO_ADC_AD7091R_BASE_H__ */ diff --git a/drivers/iio/adc/ad7091r5.c b/drivers/iio/adc/ad7091r5.c index 2f048527b7b7..a75837334157 100644 --- a/drivers/iio/adc/ad7091r5.c +++ b/drivers/iio/adc/ad7091r5.c @@ -12,42 +12,11 @@ #include "ad7091r-base.h" -static const struct iio_event_spec ad7091r5_events[] = { - { - .type = IIO_EV_TYPE_THRESH, - .dir = IIO_EV_DIR_RISING, - .mask_separate = BIT(IIO_EV_INFO_VALUE) | - BIT(IIO_EV_INFO_ENABLE), - }, - { - .type = IIO_EV_TYPE_THRESH, - .dir = IIO_EV_DIR_FALLING, - .mask_separate = BIT(IIO_EV_INFO_VALUE) | - BIT(IIO_EV_INFO_ENABLE), - }, - { - .type = IIO_EV_TYPE_THRESH, - .dir = IIO_EV_DIR_EITHER, - .mask_separate = BIT(IIO_EV_INFO_HYSTERESIS), - }, -}; - -#define AD7091R_CHANNEL(idx, bits, ev, num_ev) { \ - .type = IIO_VOLTAGE, \ - .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \ - .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \ - .indexed = 1, \ - .channel = idx, \ - .event_spec = ev, \ - .num_event_specs = num_ev, \ - .scan_type.storagebits = 16, \ - .scan_type.realbits = bits, \ -} static const struct iio_chan_spec ad7091r5_channels_irq[] = { - AD7091R_CHANNEL(0, 12, ad7091r5_events, ARRAY_SIZE(ad7091r5_events)), - AD7091R_CHANNEL(1, 12, ad7091r5_events, ARRAY_SIZE(ad7091r5_events)), - AD7091R_CHANNEL(2, 12, ad7091r5_events, ARRAY_SIZE(ad7091r5_events)), - AD7091R_CHANNEL(3, 12, ad7091r5_events, ARRAY_SIZE(ad7091r5_events)), + AD7091R_CHANNEL(0, 12, ad7091r_events, ARRAY_SIZE(ad7091r_events)), + AD7091R_CHANNEL(1, 12, ad7091r_events, ARRAY_SIZE(ad7091r_events)), 
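For scale, these converters pair 12-bit results with a 2.5 V reference (the chip_info entries nearby set vref_mV = 2500), so one code step is roughly 0.61 mV. A quick check of that arithmetic, illustration only:

#include <stdio.h>

int main(void)
{
	unsigned int vref_mv = 2500;	/* vref_mV in the chip_info entries */
	unsigned int bits = 12;		/* realbits in AD7091R_CHANNEL() */
	double lsb_mv = (double)vref_mv / (1u << bits);

	printf("1 LSB = %.4f mV\n", lsb_mv);		/* 0.6104 mV */
	printf("0xFFF = %.1f mV\n", lsb_mv * 0xFFF);	/* ~2499.4 mV */
	return 0;
}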
+ AD7091R_CHANNEL(2, 12, ad7091r_events, ARRAY_SIZE(ad7091r_events)), + AD7091R_CHANNEL(3, 12, ad7091r_events, ARRAY_SIZE(ad7091r_events)), }; static const struct iio_chan_spec ad7091r5_channels_noirq[] = { @@ -57,43 +26,98 @@ static const struct iio_chan_spec ad7091r5_channels_noirq[] = { AD7091R_CHANNEL(3, 12, NULL, 0), }; +static int ad7091r5_set_mode(struct ad7091r_state *st, enum ad7091r_mode mode) +{ + int ret, conf; + + switch (mode) { + case AD7091R_MODE_SAMPLE: + conf = 0; + break; + case AD7091R_MODE_COMMAND: + conf = AD7091R_REG_CONF_CMD; + break; + case AD7091R_MODE_AUTOCYCLE: + conf = AD7091R_REG_CONF_AUTO; + break; + default: + return -EINVAL; + } + + ret = regmap_update_bits(st->map, AD7091R_REG_CONF, + AD7091R_REG_CONF_MODE_MASK, conf); + if (ret) + return ret; + + st->mode = mode; + + return 0; +} + +static unsigned int ad7091r5_reg_result_chan_id(unsigned int val) +{ + return AD7091R5_REG_RESULT_CH_ID(val); +} + static const struct ad7091r_chip_info ad7091r5_chip_info_irq = { + .name = "ad7091r-5", .channels = ad7091r5_channels_irq, .num_channels = ARRAY_SIZE(ad7091r5_channels_irq), .vref_mV = 2500, + .reg_result_chan_id = &ad7091r5_reg_result_chan_id, + .set_mode = &ad7091r5_set_mode, }; static const struct ad7091r_chip_info ad7091r5_chip_info_noirq = { + .name = "ad7091r-5", .channels = ad7091r5_channels_noirq, .num_channels = ARRAY_SIZE(ad7091r5_channels_noirq), .vref_mV = 2500, + .reg_result_chan_id = &ad7091r5_reg_result_chan_id, + .set_mode = &ad7091r5_set_mode, }; -static int ad7091r5_i2c_probe(struct i2c_client *i2c) +static const struct regmap_config ad7091r_regmap_config = { + .reg_bits = 8, + .val_bits = 16, + .writeable_reg = ad7091r_writeable_reg, + .volatile_reg = ad7091r_volatile_reg, +}; + +static void ad7091r5_regmap_init(struct ad7091r_state *st, + const struct regmap_config *regmap_conf) { - const struct i2c_device_id *id = i2c_client_get_device_id(i2c); - const struct ad7091r_chip_info *chip_info; - struct regmap *map = devm_regmap_init_i2c(i2c, &ad7091r_regmap_config); + struct i2c_client *i2c = container_of(st->dev, struct i2c_client, dev); - if (IS_ERR(map)) - return PTR_ERR(map); + st->map = devm_regmap_init_i2c(i2c, regmap_conf); +} + +static struct ad7091r_init_info ad7091r5_init_info = { + .info_irq = &ad7091r5_chip_info_irq, + .info_no_irq = &ad7091r5_chip_info_noirq, + .regmap_config = &ad7091r_regmap_config, + .init_adc_regmap = &ad7091r5_regmap_init +}; + +static int ad7091r5_i2c_probe(struct i2c_client *i2c) +{ + const struct ad7091r_init_info *init_info; - if (i2c->irq) - chip_info = &ad7091r5_chip_info_irq; - else - chip_info = &ad7091r5_chip_info_noirq; + init_info = i2c_get_match_data(i2c); + if (!init_info) + return -EINVAL; - return ad7091r_probe(&i2c->dev, id->name, chip_info, map, i2c->irq); + return ad7091r_probe(&i2c->dev, init_info, i2c->irq); } static const struct of_device_id ad7091r5_dt_ids[] = { - { .compatible = "adi,ad7091r5" }, + { .compatible = "adi,ad7091r5", .data = &ad7091r5_init_info }, {}, }; MODULE_DEVICE_TABLE(of, ad7091r5_dt_ids); static const struct i2c_device_id ad7091r5_i2c_ids[] = { - {"ad7091r5", 0}, + {"ad7091r5", (kernel_ulong_t)&ad7091r5_init_info }, {} }; MODULE_DEVICE_TABLE(i2c, ad7091r5_i2c_ids); diff --git a/drivers/iio/adc/ad7091r8.c b/drivers/iio/adc/ad7091r8.c new file mode 100644 index 000000000000..57700f124803 --- /dev/null +++ b/drivers/iio/adc/ad7091r8.c @@ -0,0 +1,272 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Analog Devices AD7091R8 12-bit SAR ADC driver + * + * Copyright 2023 Analog 
Devices Inc. + */ + +#include <linux/bitfield.h> +#include <linux/iio/iio.h> +#include <linux/module.h> +#include <linux/regmap.h> +#include <linux/gpio/consumer.h> +#include <linux/spi/spi.h> + +#include "ad7091r-base.h" + +#define AD7091R8_REG_ADDR_MSK GENMASK(15, 11) +#define AD7091R8_RD_WR_FLAG_MSK BIT(10) +#define AD7091R8_REG_DATA_MSK GENMASK(9, 0) + +#define AD7091R_SPI_REGMAP_CONFIG(n) { \ + .reg_bits = 8, \ + .val_bits = 16, \ + .volatile_reg = ad7091r_volatile_reg, \ + .writeable_reg = ad7091r_writeable_reg, \ + .max_register = AD7091R_REG_CH_HYSTERESIS(n), \ +} + +static int ad7091r8_set_mode(struct ad7091r_state *st, enum ad7091r_mode mode) +{ + /* AD7091R-2/-4/-8 don't set sample/command/autocycle mode in conf reg */ + st->mode = mode; + return 0; +} + +static unsigned int ad7091r8_reg_result_chan_id(unsigned int val) +{ + return AD7091R8_REG_RESULT_CH_ID(val); +} + +#define AD7091R_SPI_CHIP_INFO(_n, _name) { \ + .name = _name, \ + .channels = ad7091r##_n##_channels, \ + .num_channels = ARRAY_SIZE(ad7091r##_n##_channels), \ + .vref_mV = 2500, \ + .reg_result_chan_id = &ad7091r8_reg_result_chan_id, \ + .set_mode = &ad7091r8_set_mode, \ +} + +#define AD7091R_SPI_CHIP_INFO_IRQ(_n, _name) { \ + .name = _name, \ + .channels = ad7091r##_n##_channels_irq, \ + .num_channels = ARRAY_SIZE(ad7091r##_n##_channels_irq), \ + .vref_mV = 2500, \ + .reg_result_chan_id = &ad7091r8_reg_result_chan_id, \ + .set_mode = &ad7091r8_set_mode, \ +} + +enum ad7091r8_info_ids { + AD7091R2_INFO, + AD7091R4_INFO, + AD7091R4_INFO_IRQ, + AD7091R8_INFO, + AD7091R8_INFO_IRQ, +}; + +static const struct iio_chan_spec ad7091r2_channels[] = { + AD7091R_CHANNEL(0, 12, NULL, 0), + AD7091R_CHANNEL(1, 12, NULL, 0), +}; + +static const struct iio_chan_spec ad7091r4_channels[] = { + AD7091R_CHANNEL(0, 12, NULL, 0), + AD7091R_CHANNEL(1, 12, NULL, 0), + AD7091R_CHANNEL(2, 12, NULL, 0), + AD7091R_CHANNEL(3, 12, NULL, 0), +}; + +static const struct iio_chan_spec ad7091r4_channels_irq[] = { + AD7091R_CHANNEL(0, 12, ad7091r_events, ARRAY_SIZE(ad7091r_events)), + AD7091R_CHANNEL(1, 12, ad7091r_events, ARRAY_SIZE(ad7091r_events)), + AD7091R_CHANNEL(2, 12, ad7091r_events, ARRAY_SIZE(ad7091r_events)), + AD7091R_CHANNEL(3, 12, ad7091r_events, ARRAY_SIZE(ad7091r_events)), +}; + +static const struct iio_chan_spec ad7091r8_channels[] = { + AD7091R_CHANNEL(0, 12, NULL, 0), + AD7091R_CHANNEL(1, 12, NULL, 0), + AD7091R_CHANNEL(2, 12, NULL, 0), + AD7091R_CHANNEL(3, 12, NULL, 0), + AD7091R_CHANNEL(4, 12, NULL, 0), + AD7091R_CHANNEL(5, 12, NULL, 0), + AD7091R_CHANNEL(6, 12, NULL, 0), + AD7091R_CHANNEL(7, 12, NULL, 0), +}; + +static const struct iio_chan_spec ad7091r8_channels_irq[] = { + AD7091R_CHANNEL(0, 12, ad7091r_events, ARRAY_SIZE(ad7091r_events)), + AD7091R_CHANNEL(1, 12, ad7091r_events, ARRAY_SIZE(ad7091r_events)), + AD7091R_CHANNEL(2, 12, ad7091r_events, ARRAY_SIZE(ad7091r_events)), + AD7091R_CHANNEL(3, 12, ad7091r_events, ARRAY_SIZE(ad7091r_events)), + AD7091R_CHANNEL(4, 12, ad7091r_events, ARRAY_SIZE(ad7091r_events)), + AD7091R_CHANNEL(5, 12, ad7091r_events, ARRAY_SIZE(ad7091r_events)), + AD7091R_CHANNEL(6, 12, ad7091r_events, ARRAY_SIZE(ad7091r_events)), + AD7091R_CHANNEL(7, 12, ad7091r_events, ARRAY_SIZE(ad7091r_events)), +}; + +static void ad7091r_pulse_convst(struct ad7091r_state *st) +{ + gpiod_set_value_cansleep(st->convst_gpio, 1); + gpiod_set_value_cansleep(st->convst_gpio, 0); +} + +static int ad7091r_regmap_bus_reg_read(void *context, unsigned int reg, + unsigned int *val) +{ + struct ad7091r_state *st = context; + 
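/*
 * Aside (illustrative, not in the patch): the helpers below re-pack the frame
 * layout defined by the three masks above, as a quick check of the bit
 * positions. For example, a write of 0x155 to register 0x4 yields
 * (0x4 << 11) | BIT(10) | 0x155 = 0x2555, and a read of register 0x4 yields
 * 0x2000, matching the 'reg << 11' used in the regmap bus read path below.
 */
static inline u16 ad7091r8_pack_write_sketch(unsigned int reg, unsigned int val)
{
        return FIELD_PREP(AD7091R8_REG_ADDR_MSK, reg) |
               FIELD_PREP(AD7091R8_RD_WR_FLAG_MSK, 1) |
               FIELD_PREP(AD7091R8_REG_DATA_MSK, val);
}

static inline u16 ad7091r8_pack_read_sketch(unsigned int reg)
{
        return FIELD_PREP(AD7091R8_REG_ADDR_MSK, reg);
}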
struct spi_device *spi = container_of(st->dev, struct spi_device, dev); + int ret; + + struct spi_transfer t[] = { + { + .tx_buf = &st->tx_buf, + .len = 2, + .cs_change = 1, + }, { + .rx_buf = &st->rx_buf, + .len = 2, + } + }; + + if (reg == AD7091R_REG_RESULT) + ad7091r_pulse_convst(st); + + st->tx_buf = cpu_to_be16(reg << 11); + + ret = spi_sync_transfer(spi, t, ARRAY_SIZE(t)); + if (ret < 0) + return ret; + + *val = be16_to_cpu(st->rx_buf); + return 0; +} + +static int ad7091r_regmap_bus_reg_write(void *context, unsigned int reg, + unsigned int val) +{ + struct ad7091r_state *st = context; + struct spi_device *spi = container_of(st->dev, struct spi_device, dev); + + /* + * AD7091R-2/-4/-8 protocol (datasheet page 31) is to do a single SPI + * transfer with reg address set in bits B15:B11 and value set in B9:B0. + */ + st->tx_buf = cpu_to_be16(FIELD_PREP(AD7091R8_REG_DATA_MSK, val) | + FIELD_PREP(AD7091R8_RD_WR_FLAG_MSK, 1) | + FIELD_PREP(AD7091R8_REG_ADDR_MSK, reg)); + + return spi_write(spi, &st->tx_buf, 2); +} + +static struct regmap_bus ad7091r8_regmap_bus = { + .reg_read = ad7091r_regmap_bus_reg_read, + .reg_write = ad7091r_regmap_bus_reg_write, + .reg_format_endian_default = REGMAP_ENDIAN_BIG, + .val_format_endian_default = REGMAP_ENDIAN_BIG, +}; + +static const struct ad7091r_chip_info ad7091r8_infos[] = { + [AD7091R2_INFO] = AD7091R_SPI_CHIP_INFO(2, "ad7091r-2"), + [AD7091R4_INFO] = AD7091R_SPI_CHIP_INFO(4, "ad7091r-4"), + [AD7091R4_INFO_IRQ] = AD7091R_SPI_CHIP_INFO_IRQ(4, "ad7091r-4"), + [AD7091R8_INFO] = AD7091R_SPI_CHIP_INFO(8, "ad7091r-8"), + [AD7091R8_INFO_IRQ] = AD7091R_SPI_CHIP_INFO_IRQ(8, "ad7091r-8") +}; + +static const struct regmap_config ad7091r2_reg_conf = AD7091R_SPI_REGMAP_CONFIG(2); +static const struct regmap_config ad7091r4_reg_conf = AD7091R_SPI_REGMAP_CONFIG(4); +static const struct regmap_config ad7091r8_reg_conf = AD7091R_SPI_REGMAP_CONFIG(8); + +static void ad7091r8_regmap_init(struct ad7091r_state *st, + const struct regmap_config *regmap_conf) +{ + st->map = devm_regmap_init(st->dev, &ad7091r8_regmap_bus, st, + regmap_conf); +} + +static int ad7091r8_gpio_setup(struct ad7091r_state *st) +{ + st->convst_gpio = devm_gpiod_get(st->dev, "convst", GPIOD_OUT_LOW); + if (IS_ERR(st->convst_gpio)) + return dev_err_probe(st->dev, PTR_ERR(st->convst_gpio), + "Error getting convst GPIO\n"); + + st->reset_gpio = devm_gpiod_get_optional(st->dev, "reset", + GPIOD_OUT_HIGH); + if (IS_ERR(st->reset_gpio)) + return dev_err_probe(st->dev, PTR_ERR(st->convst_gpio), + "Error on requesting reset GPIO\n"); + + if (st->reset_gpio) { + fsleep(20); + gpiod_set_value_cansleep(st->reset_gpio, 0); + } + + return 0; +} + +static struct ad7091r_init_info ad7091r2_init_info = { + .info_no_irq = &ad7091r8_infos[AD7091R2_INFO], + .regmap_config = &ad7091r2_reg_conf, + .init_adc_regmap = &ad7091r8_regmap_init, + .setup = &ad7091r8_gpio_setup +}; + +static struct ad7091r_init_info ad7091r4_init_info = { + .info_no_irq = &ad7091r8_infos[AD7091R4_INFO], + .info_irq = &ad7091r8_infos[AD7091R4_INFO_IRQ], + .regmap_config = &ad7091r4_reg_conf, + .init_adc_regmap = &ad7091r8_regmap_init, + .setup = &ad7091r8_gpio_setup +}; + +static struct ad7091r_init_info ad7091r8_init_info = { + .info_no_irq = &ad7091r8_infos[AD7091R8_INFO], + .info_irq = &ad7091r8_infos[AD7091R8_INFO_IRQ], + .regmap_config = &ad7091r8_reg_conf, + .init_adc_regmap = &ad7091r8_regmap_init, + .setup = &ad7091r8_gpio_setup +}; + +static int ad7091r8_spi_probe(struct spi_device *spi) +{ + const struct ad7091r_init_info *init_info; 
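/*
 * Aside: once devm_regmap_init() is handed the .reg_read/.reg_write bus
 * callbacks defined above, the shared base driver keeps using plain regmap
 * accessors and never sees the SPI framing. Hypothetical caller sketch
 * (the register macro comes from ad7091r-base.h):
 */
static int ad7091r8_set_hysteresis_sketch(struct ad7091r_state *st,
                                          unsigned int chan, u16 hyst)
{
        /* ends up in ad7091r_regmap_bus_reg_write() with the packed frame */
        return regmap_write(st->map, AD7091R_REG_CH_HYSTERESIS(chan), hyst);
}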
+ + init_info = spi_get_device_match_data(spi); + if (!init_info) + return -EINVAL; + + return ad7091r_probe(&spi->dev, init_info, spi->irq); +} + +static const struct of_device_id ad7091r8_of_match[] = { + { .compatible = "adi,ad7091r2", .data = &ad7091r2_init_info }, + { .compatible = "adi,ad7091r4", .data = &ad7091r4_init_info }, + { .compatible = "adi,ad7091r8", .data = &ad7091r8_init_info }, + { } +}; +MODULE_DEVICE_TABLE(of, ad7091r8_of_match); + +static const struct spi_device_id ad7091r8_spi_id[] = { + { "ad7091r2", (kernel_ulong_t)&ad7091r2_init_info }, + { "ad7091r4", (kernel_ulong_t)&ad7091r4_init_info }, + { "ad7091r8", (kernel_ulong_t)&ad7091r8_init_info }, + { } +}; +MODULE_DEVICE_TABLE(spi, ad7091r8_spi_id); + +static struct spi_driver ad7091r8_driver = { + .driver = { + .name = "ad7091r8", + .of_match_table = ad7091r8_of_match, + }, + .probe = ad7091r8_spi_probe, + .id_table = ad7091r8_spi_id, +}; +module_spi_driver(ad7091r8_driver); + +MODULE_AUTHOR("Marcelo Schmitt <marcelo.schmitt@analog.com>"); +MODULE_DESCRIPTION("Analog Devices AD7091R8 ADC driver"); +MODULE_LICENSE("GPL"); +MODULE_IMPORT_NS(IIO_AD7091R); diff --git a/drivers/iio/adc/ad9467.c b/drivers/iio/adc/ad9467.c index 39eccc28debe..6581fce4ba95 100644 --- a/drivers/iio/adc/ad9467.c +++ b/drivers/iio/adc/ad9467.c @@ -4,8 +4,9 @@ * * Copyright 2012-2020 Analog Devices Inc. */ - +#include <linux/cleanup.h> #include <linux/module.h> +#include <linux/mutex.h> #include <linux/device.h> #include <linux/kernel.h> #include <linux/slab.h> @@ -100,12 +101,6 @@ #define AD9467_DEF_OUTPUT_MODE 0x08 #define AD9467_REG_VREF_MASK 0x0F -enum { - ID_AD9265, - ID_AD9434, - ID_AD9467, -}; - struct ad9467_chip_info { struct adi_axi_adc_chip_info axi_adc_info; unsigned int default_output_mode; @@ -119,9 +114,11 @@ struct ad9467_state { struct spi_device *spi; struct clk *clk; unsigned int output_mode; + unsigned int (*scales)[2]; struct gpio_desc *pwrdown_gpio; - struct gpio_desc *reset_gpio; + /* ensure consistent state obtained on multiple related accesses */ + struct mutex lock; }; static int ad9467_spi_read(struct spi_device *spi, unsigned int reg) @@ -161,11 +158,13 @@ static int ad9467_reg_access(struct adi_axi_adc_conv *conv, unsigned int reg, struct spi_device *spi = st->spi; int ret; - if (readval == NULL) { + if (!readval) { + guard(mutex)(&st->lock); ret = ad9467_spi_write(spi, reg, writeval); - ad9467_spi_write(spi, AN877_ADC_REG_TRANSFER, - AN877_ADC_TRANSFER_SYNC); - return ret; + if (ret) + return ret; + return ad9467_spi_write(spi, AN877_ADC_REG_TRANSFER, + AN877_ADC_TRANSFER_SYNC); } ret = ad9467_spi_read(spi, reg); @@ -212,6 +211,7 @@ static void __ad9467_get_scale(struct adi_axi_adc_conv *conv, int index, .channel = _chan, \ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) | \ BIT(IIO_CHAN_INFO_SAMP_FREQ), \ + .info_mask_shared_by_type_available = BIT(IIO_CHAN_INFO_SCALE), \ .scan_index = _si, \ .scan_type = { \ .sign = _sign, \ @@ -228,43 +228,46 @@ static const struct iio_chan_spec ad9467_channels[] = { AD9467_CHAN(0, 0, 16, 'S'), }; -static const struct ad9467_chip_info ad9467_chip_tbl[] = { - [ID_AD9265] = { - .axi_adc_info = { - .id = CHIPID_AD9265, - .max_rate = 125000000UL, - .scale_table = ad9265_scale_table, - .num_scales = ARRAY_SIZE(ad9265_scale_table), - .channels = ad9467_channels, - .num_channels = ARRAY_SIZE(ad9467_channels), - }, - .default_output_mode = AD9265_DEF_OUTPUT_MODE, - .vref_mask = AD9265_REG_VREF_MASK, +static const struct ad9467_chip_info ad9467_chip_tbl = { + .axi_adc_info = { + 
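/*
 * Aside on the guard(mutex)(...) used in ad9467_reg_access() above: it is the
 * scope-based lock helper from <linux/cleanup.h>, so the mutex is released on
 * every return path automatically. Minimal stand-alone sketch with
 * hypothetical names:
 */
#include <linux/cleanup.h>
#include <linux/mutex.h>

static int foo_two_step_update_sketch(struct mutex *lock,
                                      int (*step1)(void), int (*step2)(void))
{
        int ret;

        guard(mutex)(lock);     /* dropped whenever the function returns */

        ret = step1();
        if (ret)
                return ret;     /* no unlock/goto bookkeeping needed */

        return step2();
}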
.name = "ad9467", + .id = CHIPID_AD9467, + .max_rate = 250000000UL, + .scale_table = ad9467_scale_table, + .num_scales = ARRAY_SIZE(ad9467_scale_table), + .channels = ad9467_channels, + .num_channels = ARRAY_SIZE(ad9467_channels), }, - [ID_AD9434] = { - .axi_adc_info = { - .id = CHIPID_AD9434, - .max_rate = 500000000UL, - .scale_table = ad9434_scale_table, - .num_scales = ARRAY_SIZE(ad9434_scale_table), - .channels = ad9434_channels, - .num_channels = ARRAY_SIZE(ad9434_channels), - }, - .default_output_mode = AD9434_DEF_OUTPUT_MODE, - .vref_mask = AD9434_REG_VREF_MASK, + .default_output_mode = AD9467_DEF_OUTPUT_MODE, + .vref_mask = AD9467_REG_VREF_MASK, +}; + +static const struct ad9467_chip_info ad9434_chip_tbl = { + .axi_adc_info = { + .name = "ad9434", + .id = CHIPID_AD9434, + .max_rate = 500000000UL, + .scale_table = ad9434_scale_table, + .num_scales = ARRAY_SIZE(ad9434_scale_table), + .channels = ad9434_channels, + .num_channels = ARRAY_SIZE(ad9434_channels), }, - [ID_AD9467] = { - .axi_adc_info = { - .id = CHIPID_AD9467, - .max_rate = 250000000UL, - .scale_table = ad9467_scale_table, - .num_scales = ARRAY_SIZE(ad9467_scale_table), - .channels = ad9467_channels, - .num_channels = ARRAY_SIZE(ad9467_channels), - }, - .default_output_mode = AD9467_DEF_OUTPUT_MODE, - .vref_mask = AD9467_REG_VREF_MASK, + .default_output_mode = AD9434_DEF_OUTPUT_MODE, + .vref_mask = AD9434_REG_VREF_MASK, +}; + +static const struct ad9467_chip_info ad9265_chip_tbl = { + .axi_adc_info = { + .name = "ad9265", + .id = CHIPID_AD9265, + .max_rate = 125000000UL, + .scale_table = ad9265_scale_table, + .num_scales = ARRAY_SIZE(ad9265_scale_table), + .channels = ad9467_channels, + .num_channels = ARRAY_SIZE(ad9467_channels), }, + .default_output_mode = AD9265_DEF_OUTPUT_MODE, + .vref_mask = AD9265_REG_VREF_MASK, }; static int ad9467_get_scale(struct adi_axi_adc_conv *conv, int *val, int *val2) @@ -273,10 +276,13 @@ static int ad9467_get_scale(struct adi_axi_adc_conv *conv, int *val, int *val2) const struct ad9467_chip_info *info1 = to_ad9467_chip_info(info); struct ad9467_state *st = adi_axi_adc_conv_priv(conv); unsigned int i, vref_val; + int ret; - vref_val = ad9467_spi_read(st->spi, AN877_ADC_REG_VREF); + ret = ad9467_spi_read(st->spi, AN877_ADC_REG_VREF); + if (ret < 0) + return ret; - vref_val &= info1->vref_mask; + vref_val = ret & info1->vref_mask; for (i = 0; i < info->num_scales; i++) { if (vref_val == info->scale_table[i][1]) @@ -297,6 +303,7 @@ static int ad9467_set_scale(struct adi_axi_adc_conv *conv, int val, int val2) struct ad9467_state *st = adi_axi_adc_conv_priv(conv); unsigned int scale_val[2]; unsigned int i; + int ret; if (val != 0) return -EINVAL; @@ -306,11 +313,14 @@ static int ad9467_set_scale(struct adi_axi_adc_conv *conv, int val, int val2) if (scale_val[0] != val || scale_val[1] != val2) continue; - ad9467_spi_write(st->spi, AN877_ADC_REG_VREF, - info->scale_table[i][1]); - ad9467_spi_write(st->spi, AN877_ADC_REG_TRANSFER, - AN877_ADC_TRANSFER_SYNC); - return 0; + guard(mutex)(&st->lock); + ret = ad9467_spi_write(st->spi, AN877_ADC_REG_VREF, + info->scale_table[i][1]); + if (ret < 0) + return ret; + + return ad9467_spi_write(st->spi, AN877_ADC_REG_TRANSFER, + AN877_ADC_TRANSFER_SYNC); } return -EINVAL; @@ -359,6 +369,26 @@ static int ad9467_write_raw(struct adi_axi_adc_conv *conv, } } +static int ad9467_read_avail(struct adi_axi_adc_conv *conv, + struct iio_chan_spec const *chan, + const int **vals, int *type, int *length, + long mask) +{ + const struct adi_axi_adc_chip_info *info = 
conv->chip_info; + struct ad9467_state *st = adi_axi_adc_conv_priv(conv); + + switch (mask) { + case IIO_CHAN_INFO_SCALE: + *vals = (const int *)st->scales; + *type = IIO_VAL_INT_PLUS_MICRO; + /* Values are stored in a 2D matrix */ + *length = info->num_scales * 2; + return IIO_AVAIL_LIST; + default: + return -EINVAL; + } +} + static int ad9467_outputmode_set(struct spi_device *spi, unsigned int mode) { int ret; @@ -371,6 +401,26 @@ static int ad9467_outputmode_set(struct spi_device *spi, unsigned int mode) AN877_ADC_TRANSFER_SYNC); } +static int ad9467_scale_fill(struct adi_axi_adc_conv *conv) +{ + const struct adi_axi_adc_chip_info *info = conv->chip_info; + struct ad9467_state *st = adi_axi_adc_conv_priv(conv); + unsigned int i, val1, val2; + + st->scales = devm_kmalloc_array(&st->spi->dev, info->num_scales, + sizeof(*st->scales), GFP_KERNEL); + if (!st->scales) + return -ENOMEM; + + for (i = 0; i < info->num_scales; i++) { + __ad9467_get_scale(conv, i, &val1, &val2); + st->scales[i][0] = val1; + st->scales[i][1] = val2; + } + + return 0; +} + static int ad9467_preenable_setup(struct adi_axi_adc_conv *conv) { struct ad9467_state *st = adi_axi_adc_conv_priv(conv); @@ -378,6 +428,21 @@ static int ad9467_preenable_setup(struct adi_axi_adc_conv *conv) return ad9467_outputmode_set(st->spi, st->output_mode); } +static int ad9467_reset(struct device *dev) +{ + struct gpio_desc *gpio; + + gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH); + if (IS_ERR_OR_NULL(gpio)) + return PTR_ERR_OR_ZERO(gpio); + + fsleep(1); + gpiod_set_value_cansleep(gpio, 0); + fsleep(10 * USEC_PER_MSEC); + + return 0; +} + static int ad9467_probe(struct spi_device *spi) { const struct ad9467_chip_info *info; @@ -386,9 +451,7 @@ static int ad9467_probe(struct spi_device *spi) unsigned int id; int ret; - info = of_device_get_match_data(&spi->dev); - if (!info) - info = (void *)spi_get_device_id(spi)->driver_data; + info = spi_get_device_match_data(spi); if (!info) return -ENODEV; @@ -408,21 +471,16 @@ static int ad9467_probe(struct spi_device *spi) if (IS_ERR(st->pwrdown_gpio)) return PTR_ERR(st->pwrdown_gpio); - st->reset_gpio = devm_gpiod_get_optional(&spi->dev, "reset", - GPIOD_OUT_LOW); - if (IS_ERR(st->reset_gpio)) - return PTR_ERR(st->reset_gpio); - - if (st->reset_gpio) { - udelay(1); - ret = gpiod_direction_output(st->reset_gpio, 1); - if (ret) - return ret; - mdelay(10); - } + ret = ad9467_reset(&spi->dev); + if (ret) + return ret; conv->chip_info = &info->axi_adc_info; + ret = ad9467_scale_fill(conv); + if (ret) + return ret; + id = ad9467_spi_read(spi, AN877_ADC_REG_CHIP_ID); if (id != conv->chip_info->id) { dev_err(&spi->dev, "Mismatch CHIP_ID, got 0x%X, expected 0x%X\n", @@ -433,6 +491,7 @@ static int ad9467_probe(struct spi_device *spi) conv->reg_access = ad9467_reg_access; conv->write_raw = ad9467_write_raw; conv->read_raw = ad9467_read_raw; + conv->read_avail = ad9467_read_avail; conv->preenable_setup = ad9467_preenable_setup; st->output_mode = info->default_output_mode | @@ -442,17 +501,17 @@ static int ad9467_probe(struct spi_device *spi) } static const struct of_device_id ad9467_of_match[] = { - { .compatible = "adi,ad9265", .data = &ad9467_chip_tbl[ID_AD9265], }, - { .compatible = "adi,ad9434", .data = &ad9467_chip_tbl[ID_AD9434], }, - { .compatible = "adi,ad9467", .data = &ad9467_chip_tbl[ID_AD9467], }, + { .compatible = "adi,ad9265", .data = &ad9265_chip_tbl, }, + { .compatible = "adi,ad9434", .data = &ad9434_chip_tbl, }, + { .compatible = "adi,ad9467", .data = &ad9467_chip_tbl, }, {} }; 
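/*
 * Aside: ad9467_read_avail() above is the standard IIO_AVAIL_LIST idiom --
 * hand the core a flattened array of {integer, micro} pairs and it renders
 * in_voltage_scale_available itself (this is what replaces the hand-rolled
 * sysfs attribute removed from adi-axi-adc.c further down). Minimal sketch
 * with a static table of placeholder values:
 */
static const int foo_scale_avail[][2] = {
        { 0, 38147 },           /* 0.038147 */
        { 0, 76294 },           /* 0.076294 */
};

static int foo_read_avail_sketch(struct iio_dev *indio_dev,
                                 struct iio_chan_spec const *chan,
                                 const int **vals, int *type, int *length,
                                 long mask)
{
        switch (mask) {
        case IIO_CHAN_INFO_SCALE:
                *vals = (const int *)foo_scale_avail;
                *type = IIO_VAL_INT_PLUS_MICRO;
                *length = ARRAY_SIZE(foo_scale_avail) * 2; /* flattened pairs */
                return IIO_AVAIL_LIST;
        default:
                return -EINVAL;
        }
}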
MODULE_DEVICE_TABLE(of, ad9467_of_match); static const struct spi_device_id ad9467_ids[] = { - { "ad9265", (kernel_ulong_t)&ad9467_chip_tbl[ID_AD9265] }, - { "ad9434", (kernel_ulong_t)&ad9467_chip_tbl[ID_AD9434] }, - { "ad9467", (kernel_ulong_t)&ad9467_chip_tbl[ID_AD9467] }, + { "ad9265", (kernel_ulong_t)&ad9265_chip_tbl }, + { "ad9434", (kernel_ulong_t)&ad9434_chip_tbl }, + { "ad9467", (kernel_ulong_t)&ad9467_chip_tbl }, {} }; MODULE_DEVICE_TABLE(spi, ad9467_ids); diff --git a/drivers/iio/adc/adi-axi-adc.c b/drivers/iio/adc/adi-axi-adc.c index aff0532a974a..c247ff1541d2 100644 --- a/drivers/iio/adc/adi-axi-adc.c +++ b/drivers/iio/adc/adi-axi-adc.c @@ -14,6 +14,7 @@ #include <linux/of.h> #include <linux/platform_device.h> #include <linux/property.h> +#include <linux/regmap.h> #include <linux/slab.h> #include <linux/iio/iio.h> @@ -62,7 +63,7 @@ struct adi_axi_adc_state { struct mutex lock; struct adi_axi_adc_client *client; - void __iomem *regs; + struct regmap *regmap; }; struct adi_axi_adc_client { @@ -90,19 +91,6 @@ void *adi_axi_adc_conv_priv(struct adi_axi_adc_conv *conv) } EXPORT_SYMBOL_NS_GPL(adi_axi_adc_conv_priv, IIO_ADI_AXI); -static void adi_axi_adc_write(struct adi_axi_adc_state *st, - unsigned int reg, - unsigned int val) -{ - iowrite32(val, st->regs + reg); -} - -static unsigned int adi_axi_adc_read(struct adi_axi_adc_state *st, - unsigned int reg) -{ - return ioread32(st->regs + reg); -} - static int adi_axi_adc_config_dma_buffer(struct device *dev, struct iio_dev *indio_dev) { @@ -144,22 +132,39 @@ static int adi_axi_adc_write_raw(struct iio_dev *indio_dev, return conv->write_raw(conv, chan, val, val2, mask); } +static int adi_axi_adc_read_avail(struct iio_dev *indio_dev, + struct iio_chan_spec const *chan, + const int **vals, int *type, int *length, + long mask) +{ + struct adi_axi_adc_state *st = iio_priv(indio_dev); + struct adi_axi_adc_conv *conv = &st->client->conv; + + if (!conv->read_avail) + return -EOPNOTSUPP; + + return conv->read_avail(conv, chan, vals, type, length, mask); +} + static int adi_axi_adc_update_scan_mode(struct iio_dev *indio_dev, const unsigned long *scan_mask) { struct adi_axi_adc_state *st = iio_priv(indio_dev); struct adi_axi_adc_conv *conv = &st->client->conv; - unsigned int i, ctrl; + unsigned int i; + int ret; for (i = 0; i < conv->chip_info->num_channels; i++) { - ctrl = adi_axi_adc_read(st, ADI_AXI_REG_CHAN_CTRL(i)); - if (test_bit(i, scan_mask)) - ctrl |= ADI_AXI_REG_CHAN_CTRL_ENABLE; + ret = regmap_set_bits(st->regmap, + ADI_AXI_REG_CHAN_CTRL(i), + ADI_AXI_REG_CHAN_CTRL_ENABLE); else - ctrl &= ~ADI_AXI_REG_CHAN_CTRL_ENABLE; - - adi_axi_adc_write(st, ADI_AXI_REG_CHAN_CTRL(i), ctrl); + ret = regmap_clear_bits(st->regmap, + ADI_AXI_REG_CHAN_CTRL(i), + ADI_AXI_REG_CHAN_CTRL_ENABLE); + if (ret) + return ret; } return 0; @@ -228,69 +233,11 @@ struct adi_axi_adc_conv *devm_adi_axi_adc_conv_register(struct device *dev, } EXPORT_SYMBOL_NS_GPL(devm_adi_axi_adc_conv_register, IIO_ADI_AXI); -static ssize_t in_voltage_scale_available_show(struct device *dev, - struct device_attribute *attr, - char *buf) -{ - struct iio_dev *indio_dev = dev_to_iio_dev(dev); - struct adi_axi_adc_state *st = iio_priv(indio_dev); - struct adi_axi_adc_conv *conv = &st->client->conv; - size_t len = 0; - int i; - - for (i = 0; i < conv->chip_info->num_scales; i++) { - const unsigned int *s = conv->chip_info->scale_table[i]; - - len += scnprintf(buf + len, PAGE_SIZE - len, - "%u.%06u ", s[0], s[1]); - } - buf[len - 1] = '\n'; - - return len; -} - -static 
IIO_DEVICE_ATTR_RO(in_voltage_scale_available, 0); - -enum { - ADI_AXI_ATTR_SCALE_AVAIL, -}; - -#define ADI_AXI_ATTR(_en_, _file_) \ - [ADI_AXI_ATTR_##_en_] = &iio_dev_attr_##_file_.dev_attr.attr - -static struct attribute *adi_axi_adc_attributes[] = { - ADI_AXI_ATTR(SCALE_AVAIL, in_voltage_scale_available), - NULL -}; - -static umode_t axi_adc_attr_is_visible(struct kobject *kobj, - struct attribute *attr, int n) -{ - struct device *dev = kobj_to_dev(kobj); - struct iio_dev *indio_dev = dev_to_iio_dev(dev); - struct adi_axi_adc_state *st = iio_priv(indio_dev); - struct adi_axi_adc_conv *conv = &st->client->conv; - - switch (n) { - case ADI_AXI_ATTR_SCALE_AVAIL: - if (!conv->chip_info->num_scales) - return 0; - return attr->mode; - default: - return attr->mode; - } -} - -static const struct attribute_group adi_axi_adc_attribute_group = { - .attrs = adi_axi_adc_attributes, - .is_visible = axi_adc_attr_is_visible, -}; - static const struct iio_info adi_axi_adc_info = { .read_raw = &adi_axi_adc_read_raw, .write_raw = &adi_axi_adc_write_raw, - .attrs = &adi_axi_adc_attribute_group, .update_scan_mode = &adi_axi_adc_update_scan_mode, + .read_avail = &adi_axi_adc_read_avail, }; static const struct adi_axi_adc_core_info adi_axi_adc_10_0_a_info = { @@ -354,21 +301,32 @@ static int adi_axi_adc_setup_channels(struct device *dev, } for (i = 0; i < conv->chip_info->num_channels; i++) { - adi_axi_adc_write(st, ADI_AXI_REG_CHAN_CTRL(i), - ADI_AXI_REG_CHAN_CTRL_DEFAULTS); + ret = regmap_write(st->regmap, ADI_AXI_REG_CHAN_CTRL(i), + ADI_AXI_REG_CHAN_CTRL_DEFAULTS); + if (ret) + return ret; } return 0; } -static void axi_adc_reset(struct adi_axi_adc_state *st) +static int axi_adc_reset(struct adi_axi_adc_state *st) { - adi_axi_adc_write(st, ADI_AXI_REG_RSTN, 0); + int ret; + + ret = regmap_write(st->regmap, ADI_AXI_REG_RSTN, 0); + if (ret) + return ret; + mdelay(10); - adi_axi_adc_write(st, ADI_AXI_REG_RSTN, ADI_AXI_REG_RSTN_MMCM_RSTN); + ret = regmap_write(st->regmap, ADI_AXI_REG_RSTN, + ADI_AXI_REG_RSTN_MMCM_RSTN); + if (ret) + return ret; + mdelay(10); - adi_axi_adc_write(st, ADI_AXI_REG_RSTN, - ADI_AXI_REG_RSTN_RSTN | ADI_AXI_REG_RSTN_MMCM_RSTN); + return regmap_write(st->regmap, ADI_AXI_REG_RSTN, + ADI_AXI_REG_RSTN_RSTN | ADI_AXI_REG_RSTN_MMCM_RSTN); } static void adi_axi_adc_cleanup(void *data) @@ -379,12 +337,20 @@ static void adi_axi_adc_cleanup(void *data) module_put(cl->dev->driver->owner); } +static const struct regmap_config axi_adc_regmap_config = { + .val_bits = 32, + .reg_bits = 32, + .reg_stride = 4, + .max_register = 0x0800, +}; + static int adi_axi_adc_probe(struct platform_device *pdev) { struct adi_axi_adc_conv *conv; struct iio_dev *indio_dev; struct adi_axi_adc_client *cl; struct adi_axi_adc_state *st; + void __iomem *base; unsigned int ver; int ret; @@ -405,15 +371,24 @@ static int adi_axi_adc_probe(struct platform_device *pdev) cl->state = st; mutex_init(&st->lock); - st->regs = devm_platform_ioremap_resource(pdev, 0); - if (IS_ERR(st->regs)) - return PTR_ERR(st->regs); + base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(base)) + return PTR_ERR(base); + + st->regmap = devm_regmap_init_mmio(&pdev->dev, base, + &axi_adc_regmap_config); + if (IS_ERR(st->regmap)) + return PTR_ERR(st->regmap); conv = &st->client->conv; - axi_adc_reset(st); + ret = axi_adc_reset(st); + if (ret) + return ret; - ver = adi_axi_adc_read(st, ADI_AXI_REG_VERSION); + ret = regmap_read(st->regmap, ADI_AXI_REG_VERSION, &ver); + if (ret) + return ret; if (cl->info->version > ver) { dev_err(&pdev->dev, 
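/*
 * Aside on the adi-axi-adc.c conversion above: the removed ioread32()/
 * iowrite32() helpers become regmap_read()/regmap_write() on an MMIO regmap
 * built over the same ioremapped region, which is what lets update_scan_mode()
 * use regmap_set_bits()/regmap_clear_bits(). Condensed sketch of the same
 * setup with hypothetical foo_* names:
 */
static const struct regmap_config foo_mmio_regmap_config = {
        .reg_bits = 32,
        .val_bits = 32,
        .reg_stride = 4,        /* 32-bit registers, 4-byte aligned */
};

static int foo_mmio_regmap_init_sketch(struct platform_device *pdev,
                                       struct regmap **map)
{
        void __iomem *base = devm_platform_ioremap_resource(pdev, 0);

        if (IS_ERR(base))
                return PTR_ERR(base);

        *map = devm_regmap_init_mmio(&pdev->dev, base, &foo_mmio_regmap_config);
        return PTR_ERR_OR_ZERO(*map);
}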
diff --git a/drivers/iio/adc/max34408.c b/drivers/iio/adc/max34408.c new file mode 100644 index 000000000000..6c2ea2bc52c6 --- /dev/null +++ b/drivers/iio/adc/max34408.c @@ -0,0 +1,276 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * IIO driver for Maxim MAX34409/34408 ADC, 4-Channels/2-Channels, 8bits, I2C + * + * Datasheet: https://www.analog.com/media/en/technical-documentation/data-sheets/MAX34408-MAX34409.pdf + * + * TODO: ALERT interrupt, Overcurrent delay, Shutdown delay + */ + +#include <linux/bitfield.h> +#include <linux/init.h> +#include <linux/i2c.h> +#include <linux/module.h> +#include <linux/mod_devicetable.h> +#include <linux/property.h> +#include <linux/regmap.h> + +#include <linux/iio/iio.h> +#include <linux/iio/types.h> + +#define MAX34408_STATUS_REG 0x0 +#define MAX34408_CONTROL_REG 0x1 +#define MAX34408_OCDELAY_REG 0x2 +#define MAX34408_SDDELAY_REG 0x3 + +#define MAX34408_ADC1_REG 0x4 +#define MAX34408_ADC2_REG 0x5 +/* ADC3 & ADC4 always returns 0x0 on 34408 */ +#define MAX34409_ADC3_REG 0x6 +#define MAX34409_ADC4_REG 0x7 + +#define MAX34408_OCT1_REG 0x8 +#define MAX34408_OCT2_REG 0x9 +#define MAX34409_OCT3_REG 0xA +#define MAX34409_OCT4_REG 0xB + +#define MAX34408_DID_REG 0xC +#define MAX34408_DCYY_REG 0xD +#define MAX34408_DCWW_REG 0xE + +/* Bit masks for status register */ +#define MAX34408_STATUS_OC_MSK GENMASK(1, 0) +#define MAX34409_STATUS_OC_MSK GENMASK(3, 0) +#define MAX34408_STATUS_SHTDN BIT(4) +#define MAX34408_STATUS_ENA BIT(5) + +/* Bit masks for control register */ +#define MAX34408_CONTROL_AVG0 BIT(0) +#define MAX34408_CONTROL_AVG1 BIT(1) +#define MAX34408_CONTROL_AVG2 BIT(2) +#define MAX34408_CONTROL_ALERT BIT(3) + +#define MAX34408_DEFAULT_AVG 0x4 + +/* Bit masks for over current delay */ +#define MAX34408_OCDELAY_OCD_MSK GENMASK(6, 0) +#define MAX34408_OCDELAY_RESET BIT(7) + +/* Bit masks for shutdown delay */ +#define MAX34408_SDDELAY_SHD_MSK GENMASK(6, 0) +#define MAX34408_SDDELAY_RESET BIT(7) + +#define MAX34408_DEFAULT_RSENSE 1000 + +/** + * struct max34408_data - max34408/max34409 specific data. + * @regmap: device register map. + * @dev: max34408 device. + * @lock: lock for protecting access to device hardware registers, mostly + * for read modify write cycles for control registers. + * @input_rsense: Rsense values in uOhm, will be overwritten by + * values from channel nodes. 
+ */ +struct max34408_data { + struct regmap *regmap; + struct device *dev; + struct mutex lock; + u32 input_rsense[4]; +}; + +static const struct regmap_config max34408_regmap_config = { + .reg_bits = 8, + .val_bits = 8, + .max_register = MAX34408_DCWW_REG, +}; + +struct max34408_adc_model_data { + const char *model_name; + const struct iio_chan_spec *channels; + const int num_channels; +}; + +#define MAX34008_CHANNEL(_index, _address) \ + { \ + .type = IIO_CURRENT, \ + .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \ + BIT(IIO_CHAN_INFO_SCALE) | \ + BIT(IIO_CHAN_INFO_OFFSET), \ + .channel = (_index), \ + .address = (_address), \ + .indexed = 1, \ + } + +static const struct iio_chan_spec max34408_channels[] = { + MAX34008_CHANNEL(0, MAX34408_ADC1_REG), + MAX34008_CHANNEL(1, MAX34408_ADC2_REG), +}; + +static const struct iio_chan_spec max34409_channels[] = { + MAX34008_CHANNEL(0, MAX34408_ADC1_REG), + MAX34008_CHANNEL(1, MAX34408_ADC2_REG), + MAX34008_CHANNEL(2, MAX34409_ADC3_REG), + MAX34008_CHANNEL(3, MAX34409_ADC4_REG), +}; + +static int max34408_read_adc_avg(struct max34408_data *max34408, + const struct iio_chan_spec *chan, int *val) +{ + unsigned int ctrl; + int rc; + + guard(mutex)(&max34408->lock); + rc = regmap_read(max34408->regmap, MAX34408_CONTROL_REG, (u32 *)&ctrl); + if (rc) + return rc; + + /* set averaging (0b100) default values*/ + rc = regmap_write(max34408->regmap, MAX34408_CONTROL_REG, + MAX34408_DEFAULT_AVG); + if (rc) { + dev_err(max34408->dev, + "Error (%d) writing control register\n", rc); + return rc; + } + + rc = regmap_read(max34408->regmap, chan->address, val); + if (rc) + return rc; + + /* back to old values */ + rc = regmap_write(max34408->regmap, MAX34408_CONTROL_REG, ctrl); + if (rc) + dev_err(max34408->dev, + "Error (%d) writing control register\n", rc); + + return rc; +} + +static int max34408_read_raw(struct iio_dev *indio_dev, + struct iio_chan_spec const *chan, + int *val, int *val2, long mask) +{ + struct max34408_data *max34408 = iio_priv(indio_dev); + int rc; + + switch (mask) { + case IIO_CHAN_INFO_RAW: + rc = max34408_read_adc_avg(max34408, chan, val); + if (rc) + return rc; + return IIO_VAL_INT; + case IIO_CHAN_INFO_SCALE: + /* + * calcluate current for 8bit ADC with Rsense + * value. 
+ * 10 mV * 1000 / Rsense uOhm = max current + * (max current * adc val * 1000) / (2^8 - 1) mA + */ + *val = 10000 / max34408->input_rsense[chan->channel]; + *val2 = 8; + return IIO_VAL_FRACTIONAL_LOG2; + default: + return -EINVAL; + } +} + +static const struct iio_info max34408_info = { + .read_raw = max34408_read_raw, +}; + +static const struct max34408_adc_model_data max34408_model_data = { + .model_name = "max34408", + .channels = max34408_channels, + .num_channels = 2, +}; + +static const struct max34408_adc_model_data max34409_model_data = { + .model_name = "max34409", + .channels = max34409_channels, + .num_channels = 4, +}; + +static int max34408_probe(struct i2c_client *client) +{ + const struct max34408_adc_model_data *model_data; + struct device *dev = &client->dev; + struct max34408_data *max34408; + struct fwnode_handle *node; + struct iio_dev *indio_dev; + struct regmap *regmap; + int rc, i = 0; + + model_data = i2c_get_match_data(client); + if (!model_data) + return -EINVAL; + + regmap = devm_regmap_init_i2c(client, &max34408_regmap_config); + if (IS_ERR(regmap)) { + dev_err_probe(dev, PTR_ERR(regmap), + "regmap_init failed\n"); + return PTR_ERR(regmap); + } + + indio_dev = devm_iio_device_alloc(dev, sizeof(*max34408)); + if (!indio_dev) + return -ENOMEM; + + max34408 = iio_priv(indio_dev); + max34408->regmap = regmap; + max34408->dev = dev; + mutex_init(&max34408->lock); + + device_for_each_child_node(dev, node) { + fwnode_property_read_u32(node, "maxim,rsense-val-micro-ohms", + &max34408->input_rsense[i]); + i++; + } + + /* disable ALERT and averaging */ + rc = regmap_write(max34408->regmap, MAX34408_CONTROL_REG, 0x0); + if (rc) + return rc; + + indio_dev->channels = model_data->channels; + indio_dev->num_channels = model_data->num_channels; + indio_dev->name = model_data->model_name; + + indio_dev->info = &max34408_info; + indio_dev->modes = INDIO_DIRECT_MODE; + + return devm_iio_device_register(dev, indio_dev); +} + +static const struct of_device_id max34408_of_match[] = { + { + .compatible = "maxim,max34408", + .data = &max34408_model_data, + }, + { + .compatible = "maxim,max34409", + .data = &max34409_model_data, + }, + {} +}; +MODULE_DEVICE_TABLE(of, max34408_of_match); + +static const struct i2c_device_id max34408_id[] = { + { "max34408", (kernel_ulong_t)&max34408_model_data }, + { "max34409", (kernel_ulong_t)&max34409_model_data }, + {} +}; +MODULE_DEVICE_TABLE(i2c, max34408_id); + +static struct i2c_driver max34408_driver = { + .driver = { + .name = "max34408", + .of_match_table = max34408_of_match, + }, + .probe = max34408_probe, + .id_table = max34408_id, +}; +module_i2c_driver(max34408_driver); + +MODULE_AUTHOR("Ivan Mikhaylov <fr0st61te@gmail.com>"); +MODULE_DESCRIPTION("Maxim MAX34408/34409 ADC driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/iio/adc/mcp3911.c b/drivers/iio/adc/mcp3911.c index d864558bc087..7a32e7a1be9d 100644 --- a/drivers/iio/adc/mcp3911.c +++ b/drivers/iio/adc/mcp3911.c @@ -7,6 +7,7 @@ */ #include <linux/bitfield.h> #include <linux/bits.h> +#include <linux/cleanup.h> #include <linux/clk.h> #include <linux/delay.h> #include <linux/err.h> @@ -316,47 +317,37 @@ static int mcp3911_read_raw(struct iio_dev *indio_dev, int *val2, long mask) { struct mcp3911 *adc = iio_priv(indio_dev); - int ret = -EINVAL; + int ret; - mutex_lock(&adc->lock); + guard(mutex)(&adc->lock); switch (mask) { case IIO_CHAN_INFO_RAW: ret = mcp3911_read(adc, MCP3911_CHANNEL(channel->channel), val, 3); if (ret) - goto out; + return ret; *val = sign_extend32(*val, 23); 
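/*
 * Aside, referring back to max34408_read_raw() above (before this mcp3911
 * hunk): with the default 1000 uOhm sense resistor the reported scale works
 * out as
 *   val  = 10000 / 1000 = 10,   val2 = 8  (IIO_VAL_FRACTIONAL_LOG2)
 *   => scale = 10 / 2^8 ~= 0.0390625 per ADC code,
 * so a full-scale code of 255 maps to roughly 9.96 after scaling. The same
 * arithmetic restated in integer micro-units (sketch only):
 */
static u32 max34408_scale_micro_sketch(u32 rsense_uohm)
{
        /* e.g. 1000 uOhm -> (10000 / 1000) * 1000000 / 256 = 39062 */
        return (10000 / rsense_uohm) * 1000000 / 256;
}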
- - ret = IIO_VAL_INT; - break; - + return IIO_VAL_INT; case IIO_CHAN_INFO_OFFSET: - ret = adc->chip->get_offset(adc, channel->channel, val); if (ret) - goto out; + return ret; - ret = IIO_VAL_INT; - break; + return IIO_VAL_INT; case IIO_CHAN_INFO_OVERSAMPLING_RATIO: ret = adc->chip->get_osr(adc, val); if (ret) - goto out; - - ret = IIO_VAL_INT; - break; + return ret; + return IIO_VAL_INT; case IIO_CHAN_INFO_SCALE: *val = mcp3911_scale_table[ilog2(adc->gain[channel->channel])][0]; *val2 = mcp3911_scale_table[ilog2(adc->gain[channel->channel])][1]; - ret = IIO_VAL_INT_PLUS_NANO; - break; + return IIO_VAL_INT_PLUS_NANO; + default: + return -EINVAL; } - -out: - mutex_unlock(&adc->lock); - return ret; } static int mcp3911_write_raw(struct iio_dev *indio_dev, @@ -364,9 +355,8 @@ static int mcp3911_write_raw(struct iio_dev *indio_dev, int val2, long mask) { struct mcp3911 *adc = iio_priv(indio_dev); - int ret = -EINVAL; - mutex_lock(&adc->lock); + guard(mutex)(&adc->lock); switch (mask) { case IIO_CHAN_INFO_SCALE: for (int i = 0; i < MCP3911_NUM_SCALES; i++) { @@ -374,32 +364,25 @@ static int mcp3911_write_raw(struct iio_dev *indio_dev, val2 == mcp3911_scale_table[i][1]) { adc->gain[channel->channel] = BIT(i); - ret = adc->chip->set_scale(adc, channel->channel, i); + return adc->chip->set_scale(adc, channel->channel, i); } } - break; + return -EINVAL; case IIO_CHAN_INFO_OFFSET: - if (val2 != 0) { - ret = -EINVAL; - goto out; - } - - ret = adc->chip->set_offset(adc, channel->channel, val); - break; + if (val2 != 0) + return -EINVAL; + return adc->chip->set_offset(adc, channel->channel, val); case IIO_CHAN_INFO_OVERSAMPLING_RATIO: for (int i = 0; i < ARRAY_SIZE(mcp3911_osr_table); i++) { if (val == mcp3911_osr_table[i]) { - ret = adc->chip->set_osr(adc, i); - break; + return adc->chip->set_osr(adc, i); } } - break; + return -EINVAL; + default: + return -EINVAL; } - -out: - mutex_unlock(&adc->lock); - return ret; } static int mcp3911_calc_scale_table(struct mcp3911 *adc) @@ -532,7 +515,7 @@ static irqreturn_t mcp3911_trigger_handler(int irq, void *p) int i = 0; int ret; - mutex_lock(&adc->lock); + guard(mutex)(&adc->lock); adc->tx_buf = MCP3911_REG_READ(MCP3911_CHANNEL(0), adc->dev_addr); ret = spi_sync_transfer(adc->spi, xfer, ARRAY_SIZE(xfer)); if (ret < 0) { @@ -549,7 +532,6 @@ static irqreturn_t mcp3911_trigger_handler(int irq, void *p) iio_push_to_buffers_with_timestamp(indio_dev, &adc->scan, iio_get_time_ns(indio_dev)); out: - mutex_unlock(&adc->lock); iio_trigger_notify_done(indio_dev->trig); return IRQ_HANDLED; diff --git a/drivers/iio/amplifiers/hmc425a.c b/drivers/iio/amplifiers/hmc425a.c index e87d35d50a95..ed4d72922696 100644 --- a/drivers/iio/amplifiers/hmc425a.c +++ b/drivers/iio/amplifiers/hmc425a.c @@ -5,6 +5,7 @@ * Copyright 2020 Analog Devices Inc. */ +#include <linux/bitops.h> #include <linux/device.h> #include <linux/err.h> #include <linux/gpio/consumer.h> @@ -22,6 +23,7 @@ enum hmc425a_type { ID_HMC425A, ID_HMC540S, + ID_ADRF5740 }; struct hmc425a_chip_info { @@ -74,6 +76,10 @@ static int hmc425a_read_raw(struct iio_dev *indio_dev, case ID_HMC540S: gain = ~code * -1000; break; + case ID_ADRF5740: + code = code & BIT(3) ? code & ~BIT(2) : code; + gain = code * -2000; + break; } *val = gain / 1000; @@ -113,6 +119,10 @@ static int hmc425a_write_raw(struct iio_dev *indio_dev, case ID_HMC540S: code = ~((abs(gain) / 1000) & 0xF); break; + case ID_ADRF5740: + code = (abs(gain) / 2000) & 0xF; + code = code & BIT(3) ? 
code | BIT(2) : code; + break; } mutex_lock(&st->lock); @@ -165,6 +175,7 @@ static const struct iio_chan_spec hmc425a_channels[] = { static const struct of_device_id hmc425a_of_match[] = { { .compatible = "adi,hmc425a", .data = (void *)ID_HMC425A }, { .compatible = "adi,hmc540s", .data = (void *)ID_HMC540S }, + { .compatible = "adi,adrf5740", .data = (void *)ID_ADRF5740 }, {}, }; MODULE_DEVICE_TABLE(of, hmc425a_of_match); @@ -188,6 +199,15 @@ static struct hmc425a_chip_info hmc425a_chip_info_tbl[] = { .gain_max = 0, .default_gain = -0x10, /* set default gain -15.0db*/ }, + [ID_ADRF5740] = { + .name = "adrf5740", + .channels = hmc425a_channels, + .num_channels = ARRAY_SIZE(hmc425a_channels), + .num_gpios = 4, + .gain_min = -22000, + .gain_max = 0, + .default_gain = 0xF, /* set default gain -22.0db*/ + }, }; static int hmc425a_probe(struct platform_device *pdev) @@ -229,6 +249,9 @@ static int hmc425a_probe(struct platform_device *pdev) indio_dev->info = &hmc425a_info; indio_dev->modes = INDIO_DIRECT_MODE; + /* Set default gain */ + hmc425a_write(indio_dev, st->gain); + return devm_iio_device_register(&pdev->dev, indio_dev); } diff --git a/drivers/iio/buffer/industrialio-buffer-dma.c b/drivers/iio/buffer/industrialio-buffer-dma.c index d348af8b9705..5610ba67925e 100644 --- a/drivers/iio/buffer/industrialio-buffer-dma.c +++ b/drivers/iio/buffer/industrialio-buffer-dma.c @@ -179,7 +179,7 @@ static struct iio_dma_buffer_block *iio_dma_buffer_alloc_block( } block->size = size; - block->state = IIO_BLOCK_STATE_DEQUEUED; + block->state = IIO_BLOCK_STATE_DONE; block->queue = queue; INIT_LIST_HEAD(&block->head); kref_init(&block->kref); @@ -191,16 +191,8 @@ static struct iio_dma_buffer_block *iio_dma_buffer_alloc_block( static void _iio_dma_buffer_block_done(struct iio_dma_buffer_block *block) { - struct iio_dma_buffer_queue *queue = block->queue; - - /* - * The buffer has already been freed by the application, just drop the - * reference. - */ - if (block->state != IIO_BLOCK_STATE_DEAD) { + if (block->state != IIO_BLOCK_STATE_DEAD) block->state = IIO_BLOCK_STATE_DONE; - list_add_tail(&block->head, &queue->outgoing); - } } /** @@ -261,7 +253,6 @@ static bool iio_dma_block_reusable(struct iio_dma_buffer_block *block) * not support abort and has not given back the block yet. */ switch (block->state) { - case IIO_BLOCK_STATE_DEQUEUED: case IIO_BLOCK_STATE_QUEUED: case IIO_BLOCK_STATE_DONE: return true; @@ -317,7 +308,6 @@ int iio_dma_buffer_request_update(struct iio_buffer *buffer) * dead. This means we can reset the lists without having to fear * corrution. 
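/*
 * Aside, referring back to the ADRF5740 handling in hmc425a_read_raw()/
 * hmc425a_write_raw() above: whenever bit 3 of the attenuation code is set,
 * bit 2 is forced to follow it on the GPIO lines and masked back out when
 * converting to a gain, so the round trip works out as (gains in milli-dB,
 * as in the driver):
 *    -6 dB ->  6000 / 2000 = 0x3                  -> 0x3 * -2000 = -6000
 *   -22 dB -> 22000 / 2000 = 0xB, |= BIT(2) = 0xF -> (0xF & ~BIT(2)) * -2000 = -22000
 * Hypothetical helpers restating that mapping:
 */
static unsigned int adrf5740_gain_to_code_sketch(int gain_mdb)
{
        unsigned int code = (abs(gain_mdb) / 2000) & 0xF;

        return code & BIT(3) ? code | BIT(2) : code;
}

static int adrf5740_code_to_gain_sketch(unsigned int code)
{
        code = code & BIT(3) ? code & ~BIT(2) : code;
        return (int)code * -2000;
}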
*/ - INIT_LIST_HEAD(&queue->outgoing); spin_unlock_irq(&queue->list_lock); INIT_LIST_HEAD(&queue->incoming); @@ -356,6 +346,29 @@ out_unlock: } EXPORT_SYMBOL_GPL(iio_dma_buffer_request_update); +static void iio_dma_buffer_fileio_free(struct iio_dma_buffer_queue *queue) +{ + unsigned int i; + + spin_lock_irq(&queue->list_lock); + for (i = 0; i < ARRAY_SIZE(queue->fileio.blocks); i++) { + if (!queue->fileio.blocks[i]) + continue; + queue->fileio.blocks[i]->state = IIO_BLOCK_STATE_DEAD; + } + spin_unlock_irq(&queue->list_lock); + + INIT_LIST_HEAD(&queue->incoming); + + for (i = 0; i < ARRAY_SIZE(queue->fileio.blocks); i++) { + if (!queue->fileio.blocks[i]) + continue; + iio_buffer_block_put(queue->fileio.blocks[i]); + queue->fileio.blocks[i] = NULL; + } + queue->fileio.active_block = NULL; +} + static void iio_dma_buffer_submit_block(struct iio_dma_buffer_queue *queue, struct iio_dma_buffer_block *block) { @@ -456,14 +469,20 @@ static struct iio_dma_buffer_block *iio_dma_buffer_dequeue( struct iio_dma_buffer_queue *queue) { struct iio_dma_buffer_block *block; + unsigned int idx; spin_lock_irq(&queue->list_lock); - block = list_first_entry_or_null(&queue->outgoing, struct - iio_dma_buffer_block, head); - if (block != NULL) { - list_del(&block->head); - block->state = IIO_BLOCK_STATE_DEQUEUED; + + idx = queue->fileio.next_dequeue; + block = queue->fileio.blocks[idx]; + + if (block->state == IIO_BLOCK_STATE_DONE) { + idx = (idx + 1) % ARRAY_SIZE(queue->fileio.blocks); + queue->fileio.next_dequeue = idx; + } else { + block = NULL; } + spin_unlock_irq(&queue->list_lock); return block; @@ -539,6 +558,7 @@ size_t iio_dma_buffer_data_available(struct iio_buffer *buf) struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buf); struct iio_dma_buffer_block *block; size_t data_available = 0; + unsigned int i; /* * For counting the available bytes we'll use the size of the block not @@ -552,8 +572,15 @@ size_t iio_dma_buffer_data_available(struct iio_buffer *buf) data_available += queue->fileio.active_block->size; spin_lock_irq(&queue->list_lock); - list_for_each_entry(block, &queue->outgoing, head) - data_available += block->size; + + for (i = 0; i < ARRAY_SIZE(queue->fileio.blocks); i++) { + block = queue->fileio.blocks[i]; + + if (block != queue->fileio.active_block + && block->state == IIO_BLOCK_STATE_DONE) + data_available += block->size; + } + spin_unlock_irq(&queue->list_lock); mutex_unlock(&queue->lock); @@ -617,7 +644,6 @@ int iio_dma_buffer_init(struct iio_dma_buffer_queue *queue, queue->ops = ops; INIT_LIST_HEAD(&queue->incoming); - INIT_LIST_HEAD(&queue->outgoing); mutex_init(&queue->lock); spin_lock_init(&queue->list_lock); @@ -635,28 +661,9 @@ EXPORT_SYMBOL_GPL(iio_dma_buffer_init); */ void iio_dma_buffer_exit(struct iio_dma_buffer_queue *queue) { - unsigned int i; - mutex_lock(&queue->lock); - spin_lock_irq(&queue->list_lock); - for (i = 0; i < ARRAY_SIZE(queue->fileio.blocks); i++) { - if (!queue->fileio.blocks[i]) - continue; - queue->fileio.blocks[i]->state = IIO_BLOCK_STATE_DEAD; - } - INIT_LIST_HEAD(&queue->outgoing); - spin_unlock_irq(&queue->list_lock); - - INIT_LIST_HEAD(&queue->incoming); - - for (i = 0; i < ARRAY_SIZE(queue->fileio.blocks); i++) { - if (!queue->fileio.blocks[i]) - continue; - iio_buffer_block_put(queue->fileio.blocks[i]); - queue->fileio.blocks[i] = NULL; - } - queue->fileio.active_block = NULL; + iio_dma_buffer_fileio_free(queue); queue->ops = NULL; mutex_unlock(&queue->lock); diff --git a/drivers/iio/chemical/Kconfig b/drivers/iio/chemical/Kconfig index 
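/*
 * Aside on the industrialio-buffer-dma changes above: with the 'outgoing'
 * list gone, completed blocks are consumed in submission order by walking the
 * fixed fileio.blocks[] array with a wrapping next_dequeue index. Simplified
 * restatement of iio_dma_buffer_dequeue() (locking and the DEAD state
 * omitted):
 */
static struct iio_dma_buffer_block *
iio_dma_dequeue_sketch(struct iio_dma_buffer_block *blocks[], size_t nblocks,
                       unsigned int *next_dequeue)
{
        struct iio_dma_buffer_block *block = blocks[*next_dequeue];

        if (block->state != IIO_BLOCK_STATE_DONE)
                return NULL;    /* oldest block is still in flight */

        *next_dequeue = (*next_dequeue + 1) % nblocks;
        return block;
}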
c30657e10ee1..02649ab81b3c 100644 --- a/drivers/iio/chemical/Kconfig +++ b/drivers/iio/chemical/Kconfig @@ -5,6 +5,17 @@ menu "Chemical Sensors" +config AOSONG_AGS02MA + tristate "Aosong AGS02MA TVOC sensor driver" + depends on I2C + select CRC8 + help + Say Y here to build support for Aosong AGS02MA TVOC (Total Volatile + Organic Compounds) sensor. + + To compile this driver as module, choose M here: the module will be + called ags02ma. + config ATLAS_PH_SENSOR tristate "Atlas Scientific OEM SM sensors" depends on I2C diff --git a/drivers/iio/chemical/Makefile b/drivers/iio/chemical/Makefile index a11e777a7a00..2f3dee8bb779 100644 --- a/drivers/iio/chemical/Makefile +++ b/drivers/iio/chemical/Makefile @@ -4,6 +4,7 @@ # # When adding new entries keep the list in alphabetical order +obj-$(CONFIG_AOSONG_AGS02MA) += ags02ma.o obj-$(CONFIG_ATLAS_PH_SENSOR) += atlas-sensor.o obj-$(CONFIG_ATLAS_EZO_SENSOR) += atlas-ezo-sensor.o obj-$(CONFIG_BME680) += bme680_core.o diff --git a/drivers/iio/chemical/ags02ma.c b/drivers/iio/chemical/ags02ma.c new file mode 100644 index 000000000000..8fcd80946543 --- /dev/null +++ b/drivers/iio/chemical/ags02ma.c @@ -0,0 +1,165 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Copyright (C) 2023 Anshul Dalal <anshulusr@gmail.com> + * + * Driver for Aosong AGS02MA + * + * Datasheet: + * https://asairsensors.com/wp-content/uploads/2021/09/AGS02MA.pdf + * Product Page: + * http://www.aosong.com/m/en/products-33.html + */ + +#include <linux/crc8.h> +#include <linux/delay.h> +#include <linux/i2c.h> +#include <linux/module.h> + +#include <linux/iio/iio.h> + +#define AGS02MA_TVOC_READ_REG 0x00 +#define AGS02MA_VERSION_REG 0x11 + +#define AGS02MA_VERSION_PROCESSING_DELAY 30 +#define AGS02MA_TVOC_READ_PROCESSING_DELAY 1500 + +#define AGS02MA_CRC8_INIT 0xff +#define AGS02MA_CRC8_POLYNOMIAL 0x31 + +DECLARE_CRC8_TABLE(ags02ma_crc8_table); + +struct ags02ma_data { + struct i2c_client *client; +}; + +struct ags02ma_reading { + __be32 data; + u8 crc; +} __packed; + +static int ags02ma_register_read(struct i2c_client *client, u8 reg, u16 delay, + u32 *val) +{ + int ret; + u8 crc; + struct ags02ma_reading read_buffer; + + ret = i2c_master_send(client, ®, sizeof(reg)); + if (ret < 0) { + dev_err(&client->dev, + "Failed to send data to register 0x%x: %d", reg, ret); + return ret; + } + + /* Processing Delay, Check Table 7.7 in the datasheet */ + msleep_interruptible(delay); + + ret = i2c_master_recv(client, (u8 *)&read_buffer, sizeof(read_buffer)); + if (ret < 0) { + dev_err(&client->dev, + "Failed to receive from register 0x%x: %d", reg, ret); + return ret; + } + + crc = crc8(ags02ma_crc8_table, (u8 *)&read_buffer.data, + sizeof(read_buffer.data), AGS02MA_CRC8_INIT); + if (crc != read_buffer.crc) { + dev_err(&client->dev, "CRC error\n"); + return -EIO; + } + + *val = be32_to_cpu(read_buffer.data); + return 0; +} + +static int ags02ma_read_raw(struct iio_dev *iio_device, + struct iio_chan_spec const *chan, int *val, + int *val2, long mask) +{ + int ret; + struct ags02ma_data *data = iio_priv(iio_device); + + switch (mask) { + case IIO_CHAN_INFO_RAW: + ret = ags02ma_register_read(data->client, AGS02MA_TVOC_READ_REG, + AGS02MA_TVOC_READ_PROCESSING_DELAY, + val); + if (ret < 0) + return ret; + return IIO_VAL_INT; + case IIO_CHAN_INFO_SCALE: + /* The sensor reads data as ppb */ + *val = 0; + *val2 = 100; + return IIO_VAL_INT_PLUS_NANO; + default: + return -EINVAL; + } +} + +static const struct iio_info ags02ma_info = { + .read_raw = ags02ma_read_raw, +}; + +static const struct 
iio_chan_spec ags02ma_channel = { + .type = IIO_CONCENTRATION, + .channel2 = IIO_MOD_VOC, + .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | + BIT(IIO_CHAN_INFO_SCALE), +}; + +static int ags02ma_probe(struct i2c_client *client) +{ + int ret; + struct ags02ma_data *data; + struct iio_dev *indio_dev; + u32 version; + + indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data)); + if (!indio_dev) + return -ENOMEM; + + crc8_populate_msb(ags02ma_crc8_table, AGS02MA_CRC8_POLYNOMIAL); + + ret = ags02ma_register_read(client, AGS02MA_VERSION_REG, + AGS02MA_VERSION_PROCESSING_DELAY, &version); + if (ret < 0) + return dev_err_probe(&client->dev, ret, + "Failed to read device version\n"); + dev_dbg(&client->dev, "Aosong AGS02MA, Version: 0x%x", version); + + data = iio_priv(indio_dev); + data->client = client; + indio_dev->info = &ags02ma_info; + indio_dev->channels = &ags02ma_channel; + indio_dev->num_channels = 1; + indio_dev->name = "ags02ma"; + + return devm_iio_device_register(&client->dev, indio_dev); +} + +static const struct i2c_device_id ags02ma_id_table[] = { + { "ags02ma" }, + { /* Sentinel */ } +}; +MODULE_DEVICE_TABLE(i2c, ags02ma_id_table); + +static const struct of_device_id ags02ma_of_table[] = { + { .compatible = "aosong,ags02ma" }, + { /* Sentinel */ } +}; +MODULE_DEVICE_TABLE(of, ags02ma_of_table); + +static struct i2c_driver ags02ma_driver = { + .driver = { + .name = "ags02ma", + .of_match_table = ags02ma_of_table, + }, + .id_table = ags02ma_id_table, + .probe = ags02ma_probe, +}; +module_i2c_driver(ags02ma_driver); + +MODULE_AUTHOR("Anshul Dalal <anshulusr@gmail.com>"); +MODULE_DESCRIPTION("Aosong AGS02MA TVOC Driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/iio/dac/Kconfig b/drivers/iio/dac/Kconfig index 93b8be183de6..34eb40bb9529 100644 --- a/drivers/iio/dac/Kconfig +++ b/drivers/iio/dac/Kconfig @@ -400,6 +400,16 @@ config MCP4728 To compile this driver as a module, choose M here: the module will be called mcp4728. +config MCP4821 + tristate "MCP4801/02/11/12/21/22 DAC driver" + depends on SPI + help + Say yes here to build the driver for the Microchip MCP4801 + MCP4802, MCP4811, MCP4812, MCP4821 and MCP4822 DAC devices. + + To compile this driver as a module, choose M here: the module + will be called mcp4821. 
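/*
 * Aside, referring back to the ags02ma driver above: its CRC handling is the
 * usual <linux/crc8.h> pattern -- populate a table once with the sensor's
 * polynomial, then compare the computed CRC of the payload with the byte the
 * device appends. Stand-alone sketch (hypothetical names, AGS02MA values):
 */
#include <linux/crc8.h>

DECLARE_CRC8_TABLE(example_crc8_table);

static int example_check_frame_sketch(u8 *payload, size_t len, u8 received_crc)
{
        /* done once, e.g. in probe(): crc8_populate_msb(example_crc8_table, 0x31); */
        u8 crc = crc8(example_crc8_table, payload, len, 0xff);

        return crc == received_crc ? 0 : -EIO;
}
/*
 * On the scale reported by ags02ma_read_raw() above: 0.000000100 with
 * IIO_VAL_INT_PLUS_NANO converts the raw ppb reading into IIO's concentration
 * unit (percent; 1 ppb = 1e-7 %).
 */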
+ config MCP4922 tristate "MCP4902, MCP4912, MCP4922 DAC driver" depends on SPI diff --git a/drivers/iio/dac/Makefile b/drivers/iio/dac/Makefile index 5b2bac900d5a..55bf89739d14 100644 --- a/drivers/iio/dac/Makefile +++ b/drivers/iio/dac/Makefile @@ -42,6 +42,7 @@ obj-$(CONFIG_MAX5522) += max5522.o obj-$(CONFIG_MAX5821) += max5821.o obj-$(CONFIG_MCP4725) += mcp4725.o obj-$(CONFIG_MCP4728) += mcp4728.o +obj-$(CONFIG_MCP4821) += mcp4821.o obj-$(CONFIG_MCP4922) += mcp4922.o obj-$(CONFIG_STM32_DAC_CORE) += stm32-dac-core.o obj-$(CONFIG_STM32_DAC) += stm32-dac.o diff --git a/drivers/iio/dac/ad5791.c b/drivers/iio/dac/ad5791.c index a4167454da81..75b549827e15 100644 --- a/drivers/iio/dac/ad5791.c +++ b/drivers/iio/dac/ad5791.c @@ -345,6 +345,7 @@ static int ad5791_probe(struct spi_device *spi) struct iio_dev *indio_dev; struct ad5791_state *st; int ret, pos_voltage_uv = 0, neg_voltage_uv = 0; + bool use_rbuf_gain2; indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st)); if (!indio_dev) @@ -379,6 +380,12 @@ static int ad5791_probe(struct spi_device *spi) st->pwr_down = true; st->spi = spi; + if (pdata) + use_rbuf_gain2 = pdata->use_rbuf_gain2; + else + use_rbuf_gain2 = device_property_read_bool(&spi->dev, + "adi,rbuf-gain2-en"); + if (!IS_ERR(st->reg_vss) && !IS_ERR(st->reg_vdd)) { st->vref_mv = (pos_voltage_uv + neg_voltage_uv) / 1000; st->vref_neg_mv = neg_voltage_uv / 1000; @@ -398,7 +405,7 @@ static int ad5791_probe(struct spi_device *spi) st->ctrl = AD5761_CTRL_LINCOMP(st->chip_info->get_lin_comp(st->vref_mv)) - | ((pdata && pdata->use_rbuf_gain2) ? 0 : AD5791_CTRL_RBUF) | + | (use_rbuf_gain2 ? 0 : AD5791_CTRL_RBUF) | AD5791_CTRL_BIN2SC; ret = ad5791_spi_write(st, AD5791_ADDR_CTRL, st->ctrl | diff --git a/drivers/iio/dac/mcp4821.c b/drivers/iio/dac/mcp4821.c new file mode 100644 index 000000000000..8a0480d33845 --- /dev/null +++ b/drivers/iio/dac/mcp4821.c @@ -0,0 +1,236 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Copyright (C) 2023 Anshul Dalal <anshulusr@gmail.com> + * + * Driver for Microchip MCP4801, MCP4802, MCP4811, MCP4812, MCP4821 and MCP4822 + * + * Based on the work of: + * Michael Welling (MCP4922 Driver) + * + * Datasheet: + * MCP48x1: https://ww1.microchip.com/downloads/en/DeviceDoc/22244B.pdf + * MCP48x2: https://ww1.microchip.com/downloads/en/DeviceDoc/20002249B.pdf + * + * TODO: + * - Configurable gain + * - Regulator control + */ + +#include <linux/module.h> +#include <linux/of.h> +#include <linux/spi/spi.h> + +#include <linux/iio/iio.h> +#include <linux/iio/types.h> + +#include <asm/unaligned.h> + +#define MCP4821_ACTIVE_MODE BIT(12) +#define MCP4802_SECOND_CHAN BIT(15) + +/* DAC uses an internal Voltage reference of 4.096V at a gain of 2x */ +#define MCP4821_2X_GAIN_VREF_MV 4096 + +enum mcp4821_supported_drvice_ids { + ID_MCP4801, + ID_MCP4802, + ID_MCP4811, + ID_MCP4812, + ID_MCP4821, + ID_MCP4822, +}; + +struct mcp4821_state { + struct spi_device *spi; + u16 dac_value[2]; +}; + +struct mcp4821_chip_info { + const char *name; + int num_channels; + const struct iio_chan_spec channels[2]; +}; + +#define MCP4821_CHAN(channel_id, resolution) \ + { \ + .type = IIO_VOLTAGE, .output = 1, .indexed = 1, \ + .channel = (channel_id), \ + .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \ + .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \ + .scan_type = { \ + .realbits = (resolution), \ + .shift = 12 - (resolution), \ + }, \ + } + +static const struct mcp4821_chip_info mcp4821_chip_info_table[6] = { + [ID_MCP4801] = { + .name = "mcp4801", + .num_channels = 1, + 
.channels = { + MCP4821_CHAN(0, 8), + }, + }, + [ID_MCP4802] = { + .name = "mcp4802", + .num_channels = 2, + .channels = { + MCP4821_CHAN(0, 8), + MCP4821_CHAN(1, 8), + }, + }, + [ID_MCP4811] = { + .name = "mcp4811", + .num_channels = 1, + .channels = { + MCP4821_CHAN(0, 10), + }, + }, + [ID_MCP4812] = { + .name = "mcp4812", + .num_channels = 2, + .channels = { + MCP4821_CHAN(0, 10), + MCP4821_CHAN(1, 10), + }, + }, + [ID_MCP4821] = { + .name = "mcp4821", + .num_channels = 1, + .channels = { + MCP4821_CHAN(0, 12), + }, + }, + [ID_MCP4822] = { + .name = "mcp4822", + .num_channels = 2, + .channels = { + MCP4821_CHAN(0, 12), + MCP4821_CHAN(1, 12), + }, + }, +}; + +static int mcp4821_read_raw(struct iio_dev *indio_dev, + struct iio_chan_spec const *chan, int *val, + int *val2, long mask) +{ + struct mcp4821_state *state; + + switch (mask) { + case IIO_CHAN_INFO_RAW: + state = iio_priv(indio_dev); + *val = state->dac_value[chan->channel]; + return IIO_VAL_INT; + case IIO_CHAN_INFO_SCALE: + *val = MCP4821_2X_GAIN_VREF_MV; + *val2 = chan->scan_type.realbits; + return IIO_VAL_FRACTIONAL_LOG2; + default: + return -EINVAL; + } +} + +static int mcp4821_write_raw(struct iio_dev *indio_dev, + struct iio_chan_spec const *chan, int val, + int val2, long mask) +{ + struct mcp4821_state *state = iio_priv(indio_dev); + u16 write_val; + __be16 write_buffer; + int ret; + + if (val2 != 0) + return -EINVAL; + + if (val < 0 || val >= BIT(chan->scan_type.realbits)) + return -EINVAL; + + if (mask != IIO_CHAN_INFO_RAW) + return -EINVAL; + + write_val = MCP4821_ACTIVE_MODE | val << chan->scan_type.shift; + if (chan->channel) + write_val |= MCP4802_SECOND_CHAN; + + write_buffer = cpu_to_be16(write_val); + ret = spi_write(state->spi, &write_buffer, sizeof(write_buffer)); + if (ret) { + dev_err(&state->spi->dev, "Failed to write to device: %d", ret); + return ret; + } + + state->dac_value[chan->channel] = val; + + return 0; +} + +static const struct iio_info mcp4821_info = { + .read_raw = &mcp4821_read_raw, + .write_raw = &mcp4821_write_raw, +}; + +static int mcp4821_probe(struct spi_device *spi) +{ + struct iio_dev *indio_dev; + struct mcp4821_state *state; + const struct mcp4821_chip_info *info; + + indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*state)); + if (indio_dev == NULL) + return -ENOMEM; + + state = iio_priv(indio_dev); + state->spi = spi; + + info = spi_get_device_match_data(spi); + indio_dev->name = info->name; + indio_dev->info = &mcp4821_info; + indio_dev->modes = INDIO_DIRECT_MODE; + indio_dev->channels = info->channels; + indio_dev->num_channels = info->num_channels; + + return devm_iio_device_register(&spi->dev, indio_dev); +} + +#define MCP4821_COMPATIBLE(of_compatible, id) \ + { \ + .compatible = of_compatible, \ + .data = &mcp4821_chip_info_table[id] \ + } + +static const struct of_device_id mcp4821_of_table[] = { + MCP4821_COMPATIBLE("microchip,mcp4801", ID_MCP4801), + MCP4821_COMPATIBLE("microchip,mcp4802", ID_MCP4802), + MCP4821_COMPATIBLE("microchip,mcp4811", ID_MCP4811), + MCP4821_COMPATIBLE("microchip,mcp4812", ID_MCP4812), + MCP4821_COMPATIBLE("microchip,mcp4821", ID_MCP4821), + MCP4821_COMPATIBLE("microchip,mcp4822", ID_MCP4822), + { /* Sentinel */ } +}; +MODULE_DEVICE_TABLE(of, mcp4821_of_table); + +static const struct spi_device_id mcp4821_id_table[] = { + { "mcp4801", (kernel_ulong_t)&mcp4821_chip_info_table[ID_MCP4801]}, + { "mcp4802", (kernel_ulong_t)&mcp4821_chip_info_table[ID_MCP4802]}, + { "mcp4811", (kernel_ulong_t)&mcp4821_chip_info_table[ID_MCP4811]}, + { "mcp4812", 
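/*
 * Aside on mcp4821_write_raw() above -- worked example of the 16-bit frame
 * for an MCP4802 (8-bit parts, so shift = 12 - 8 = 4) writing code 0x80 to
 * its second channel:
 *   0x1000 (MCP4821_ACTIVE_MODE) | (0x80 << 4) | 0x8000 (MCP4802_SECOND_CHAN)
 *   = 0x9800, sent big-endian over SPI.
 * The reported scale is 4096 mV / 2^realbits, i.e. 16 mV per code here.
 * Hypothetical helper restating the packing:
 */
static u16 mcp4821_frame_sketch(unsigned int val, unsigned int realbits,
                                bool second_chan)
{
        u16 frame = MCP4821_ACTIVE_MODE | (val << (12 - realbits));

        if (second_chan)
                frame |= MCP4802_SECOND_CHAN;

        return frame;
}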
(kernel_ulong_t)&mcp4821_chip_info_table[ID_MCP4812]}, + { "mcp4821", (kernel_ulong_t)&mcp4821_chip_info_table[ID_MCP4821]}, + { "mcp4822", (kernel_ulong_t)&mcp4821_chip_info_table[ID_MCP4822]}, + { /* Sentinel */ } +}; +MODULE_DEVICE_TABLE(spi, mcp4821_id_table); + +static struct spi_driver mcp4821_driver = { + .driver = { + .name = "mcp4821", + .of_match_table = mcp4821_of_table, + }, + .probe = mcp4821_probe, + .id_table = mcp4821_id_table, +}; +module_spi_driver(mcp4821_driver); + +MODULE_AUTHOR("Anshul Dalal <anshulusr@gmail.com>"); +MODULE_DESCRIPTION("Microchip MCP4821 DAC Driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/iio/frequency/adf4377.c b/drivers/iio/frequency/adf4377.c index 26abecbd51e0..9284c13f1abb 100644 --- a/drivers/iio/frequency/adf4377.c +++ b/drivers/iio/frequency/adf4377.c @@ -870,7 +870,6 @@ static const struct iio_chan_spec adf4377_channels[] = { static int adf4377_properties_parse(struct adf4377_state *st) { struct spi_device *spi = st->spi; - const char *str; int ret; st->clkin = devm_clk_get_enabled(&spi->dev, "ref_in"); @@ -896,16 +895,13 @@ static int adf4377_properties_parse(struct adf4377_state *st) return dev_err_probe(&spi->dev, PTR_ERR(st->gpio_enclk2), "failed to get the CE GPIO\n"); - ret = device_property_read_string(&spi->dev, "adi,muxout-select", &str); - if (ret) { - st->muxout_select = ADF4377_MUXOUT_HIGH_Z; - } else { - ret = match_string(adf4377_muxout_modes, ARRAY_SIZE(adf4377_muxout_modes), str); - if (ret < 0) - return ret; - + ret = device_property_match_property_string(&spi->dev, "adi,muxout-select", + adf4377_muxout_modes, + ARRAY_SIZE(adf4377_muxout_modes)); + if (ret >= 0) st->muxout_select = ret; - } + else + st->muxout_select = ADF4377_MUXOUT_HIGH_Z; return 0; } diff --git a/drivers/iio/frequency/admv1014.c b/drivers/iio/frequency/admv1014.c index bb5e1feef42b..b46b73b89eb7 100644 --- a/drivers/iio/frequency/admv1014.c +++ b/drivers/iio/frequency/admv1014.c @@ -710,7 +710,6 @@ static int admv1014_init(struct admv1014_state *st) static int admv1014_properties_parse(struct admv1014_state *st) { - const char *str; unsigned int i; struct spi_device *spi = st->spi; int ret; @@ -719,27 +718,21 @@ static int admv1014_properties_parse(struct admv1014_state *st) st->p1db_comp = device_property_read_bool(&spi->dev, "adi,p1db-compensation-enable"); - ret = device_property_read_string(&spi->dev, "adi,input-mode", &str); - if (ret) { - st->input_mode = ADMV1014_IQ_MODE; - } else { - ret = match_string(input_mode_names, ARRAY_SIZE(input_mode_names), str); - if (ret < 0) - return ret; - + ret = device_property_match_property_string(&spi->dev, "adi,input-mode", + input_mode_names, + ARRAY_SIZE(input_mode_names)); + if (ret >= 0) st->input_mode = ret; - } - - ret = device_property_read_string(&spi->dev, "adi,quad-se-mode", &str); - if (ret) { - st->quad_se_mode = ADMV1014_SE_MODE_POS; - } else { - ret = match_string(quad_se_mode_names, ARRAY_SIZE(quad_se_mode_names), str); - if (ret < 0) - return ret; + else + st->input_mode = ADMV1014_IQ_MODE; + ret = device_property_match_property_string(&spi->dev, "adi,quad-se-mode", + quad_se_mode_names, + ARRAY_SIZE(quad_se_mode_names)); + if (ret >= 0) st->quad_se_mode = ADMV1014_SE_MODE_POS + (ret * 3); - } + else + st->quad_se_mode = ADMV1014_SE_MODE_POS; for (i = 0; i < ADMV1014_NUM_REGULATORS; ++i) st->regulators[i].supply = admv1014_reg_name[i]; diff --git a/drivers/iio/humidity/hdc3020.c b/drivers/iio/humidity/hdc3020.c new file mode 100644 index 000000000000..4e3311170725 --- /dev/null +++ 
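/*
 * Aside on the adf4377/admv1014 conversions above:
 * device_property_match_property_string() reads the string property and
 * returns its index within the given array (or a negative errno), collapsing
 * the previous device_property_read_string() + match_string() pair.
 * Hypothetical example with a made-up property and mode table:
 */
static const char * const foo_mode_names[] = { "iq", "if" };

static unsigned int foo_parse_mode_sketch(struct device *dev, unsigned int def)
{
        int ret;

        ret = device_property_match_property_string(dev, "vendor,mode",
                                                     foo_mode_names,
                                                     ARRAY_SIZE(foo_mode_names));

        /* fall back to the default when the property is absent or unknown */
        return ret >= 0 ? ret : def;
}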
b/drivers/iio/humidity/hdc3020.c @@ -0,0 +1,473 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * hdc3020.c - Support for the TI HDC3020,HDC3021 and HDC3022 + * temperature + relative humidity sensors + * + * Copyright (C) 2023 + * + * Datasheet: https://www.ti.com/lit/ds/symlink/hdc3020.pdf + */ + +#include <linux/bitops.h> +#include <linux/cleanup.h> +#include <linux/crc8.h> +#include <linux/delay.h> +#include <linux/i2c.h> +#include <linux/init.h> +#include <linux/module.h> +#include <linux/mutex.h> + +#include <asm/unaligned.h> + +#include <linux/iio/iio.h> + +#define HDC3020_HEATER_CMD_MSB 0x30 /* shared by all heater commands */ +#define HDC3020_HEATER_ENABLE 0x6D +#define HDC3020_HEATER_DISABLE 0x66 +#define HDC3020_HEATER_CONFIG 0x6E + +#define HDC3020_READ_RETRY_TIMES 10 +#define HDC3020_BUSY_DELAY_MS 10 + +#define HDC3020_CRC8_POLYNOMIAL 0x31 + +static const u8 HDC3020_S_AUTO_10HZ_MOD0[2] = { 0x27, 0x37 }; + +static const u8 HDC3020_EXIT_AUTO[2] = { 0x30, 0x93 }; + +static const u8 HDC3020_R_T_RH_AUTO[2] = { 0xE0, 0x00 }; +static const u8 HDC3020_R_T_LOW_AUTO[2] = { 0xE0, 0x02 }; +static const u8 HDC3020_R_T_HIGH_AUTO[2] = { 0xE0, 0x03 }; +static const u8 HDC3020_R_RH_LOW_AUTO[2] = { 0xE0, 0x04 }; +static const u8 HDC3020_R_RH_HIGH_AUTO[2] = { 0xE0, 0x05 }; + +struct hdc3020_data { + struct i2c_client *client; + /* + * Ensure that the sensor configuration (currently only heater is + * supported) will not be changed during the process of reading + * sensor data (this driver will try HDC3020_READ_RETRY_TIMES times + * if the device does not respond). + */ + struct mutex lock; +}; + +static const int hdc3020_heater_vals[] = {0, 1, 0x3FFF}; + +static const struct iio_chan_spec hdc3020_channels[] = { + { + .type = IIO_TEMP, + .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | + BIT(IIO_CHAN_INFO_SCALE) | BIT(IIO_CHAN_INFO_PEAK) | + BIT(IIO_CHAN_INFO_TROUGH) | BIT(IIO_CHAN_INFO_OFFSET), + }, + { + .type = IIO_HUMIDITYRELATIVE, + .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | + BIT(IIO_CHAN_INFO_SCALE) | BIT(IIO_CHAN_INFO_PEAK) | + BIT(IIO_CHAN_INFO_TROUGH), + }, + { + /* + * For setting the internal heater, which can be switched on to + * prevent or remove any condensation that may develop when the + * ambient environment approaches its dew point temperature. + */ + .type = IIO_CURRENT, + .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), + .info_mask_separate_available = BIT(IIO_CHAN_INFO_RAW), + .output = 1, + }, +}; + +DECLARE_CRC8_TABLE(hdc3020_crc8_table); + +static int hdc3020_write_bytes(struct hdc3020_data *data, const u8 *buf, u8 len) +{ + struct i2c_client *client = data->client; + struct i2c_msg msg; + int ret, cnt; + + msg.addr = client->addr; + msg.flags = 0; + msg.buf = (char *)buf; + msg.len = len; + + /* + * During the measurement process, HDC3020 will not return data. 
+ * So wait for a while and try again + */ + for (cnt = 0; cnt < HDC3020_READ_RETRY_TIMES; cnt++) { + ret = i2c_transfer(client->adapter, &msg, 1); + if (ret == 1) + return 0; + + mdelay(HDC3020_BUSY_DELAY_MS); + } + dev_err(&client->dev, "Could not write sensor command\n"); + + return -ETIMEDOUT; +} + +static int hdc3020_read_bytes(struct hdc3020_data *data, const u8 *buf, + void *val, int len) +{ + int ret, cnt; + struct i2c_client *client = data->client; + struct i2c_msg msg[2] = { + [0] = { + .addr = client->addr, + .flags = 0, + .buf = (char *)buf, + .len = 2, + }, + [1] = { + .addr = client->addr, + .flags = I2C_M_RD, + .buf = val, + .len = len, + }, + }; + + /* + * During the measurement process, HDC3020 will not return data. + * So wait for a while and try again + */ + for (cnt = 0; cnt < HDC3020_READ_RETRY_TIMES; cnt++) { + ret = i2c_transfer(client->adapter, msg, 2); + if (ret == 2) + return 0; + + mdelay(HDC3020_BUSY_DELAY_MS); + } + dev_err(&client->dev, "Could not read sensor data\n"); + + return -ETIMEDOUT; +} + +static int hdc3020_read_measurement(struct hdc3020_data *data, + enum iio_chan_type type, int *val) +{ + u8 crc, buf[6]; + int ret; + + ret = hdc3020_read_bytes(data, HDC3020_R_T_RH_AUTO, buf, 6); + if (ret < 0) + return ret; + + /* CRC check of the temperature measurement */ + crc = crc8(hdc3020_crc8_table, buf, 2, CRC8_INIT_VALUE); + if (crc != buf[2]) + return -EINVAL; + + /* CRC check of the relative humidity measurement */ + crc = crc8(hdc3020_crc8_table, buf + 3, 2, CRC8_INIT_VALUE); + if (crc != buf[5]) + return -EINVAL; + + if (type == IIO_TEMP) + *val = get_unaligned_be16(buf); + else if (type == IIO_HUMIDITYRELATIVE) + *val = get_unaligned_be16(&buf[3]); + else + return -EINVAL; + + return 0; +} + +/* + * After exiting the automatic measurement mode or resetting, the peak + * value will be reset to the default value + * This method is used to get the highest temp measured during automatic + * measurement + */ +static int hdc3020_read_high_peak_t(struct hdc3020_data *data, int *val) +{ + u8 crc, buf[3]; + int ret; + + ret = hdc3020_read_bytes(data, HDC3020_R_T_HIGH_AUTO, buf, 3); + if (ret < 0) + return ret; + + crc = crc8(hdc3020_crc8_table, buf, 2, CRC8_INIT_VALUE); + if (crc != buf[2]) + return -EINVAL; + + *val = get_unaligned_be16(buf); + + return 0; +} + +/* + * This method is used to get the lowest temp measured during automatic + * measurement + */ +static int hdc3020_read_low_peak_t(struct hdc3020_data *data, int *val) +{ + u8 crc, buf[3]; + int ret; + + ret = hdc3020_read_bytes(data, HDC3020_R_T_LOW_AUTO, buf, 3); + if (ret < 0) + return ret; + + crc = crc8(hdc3020_crc8_table, buf, 2, CRC8_INIT_VALUE); + if (crc != buf[2]) + return -EINVAL; + + *val = get_unaligned_be16(buf); + + return 0; +} + +/* + * This method is used to get the highest humidity measured during automatic + * measurement + */ +static int hdc3020_read_high_peak_rh(struct hdc3020_data *data, int *val) +{ + u8 crc, buf[3]; + int ret; + + ret = hdc3020_read_bytes(data, HDC3020_R_RH_HIGH_AUTO, buf, 3); + if (ret < 0) + return ret; + + crc = crc8(hdc3020_crc8_table, buf, 2, CRC8_INIT_VALUE); + if (crc != buf[2]) + return -EINVAL; + + *val = get_unaligned_be16(buf); + + return 0; +} + +/* + * This method is used to get the lowest humidity measured during automatic + * measurement + */ +static int hdc3020_read_low_peak_rh(struct hdc3020_data *data, int *val) +{ + u8 crc, buf[3]; + int ret; + + ret = hdc3020_read_bytes(data, HDC3020_R_RH_LOW_AUTO, buf, 3); + if (ret < 0) + return ret; + 
+ crc = crc8(hdc3020_crc8_table, buf, 2, CRC8_INIT_VALUE); + if (crc != buf[2]) + return -EINVAL; + + *val = get_unaligned_be16(buf); + + return 0; +} + +static int hdc3020_read_raw(struct iio_dev *indio_dev, + struct iio_chan_spec const *chan, int *val, + int *val2, long mask) +{ + struct hdc3020_data *data = iio_priv(indio_dev); + int ret; + + if (chan->type != IIO_TEMP && chan->type != IIO_HUMIDITYRELATIVE) + return -EINVAL; + + switch (mask) { + case IIO_CHAN_INFO_RAW: { + guard(mutex)(&data->lock); + ret = hdc3020_read_measurement(data, chan->type, val); + if (ret < 0) + return ret; + + return IIO_VAL_INT; + } + case IIO_CHAN_INFO_PEAK: { + guard(mutex)(&data->lock); + if (chan->type == IIO_TEMP) { + ret = hdc3020_read_high_peak_t(data, val); + if (ret < 0) + return ret; + } else { + ret = hdc3020_read_high_peak_rh(data, val); + if (ret < 0) + return ret; + } + return IIO_VAL_INT; + } + case IIO_CHAN_INFO_TROUGH: { + guard(mutex)(&data->lock); + if (chan->type == IIO_TEMP) { + ret = hdc3020_read_low_peak_t(data, val); + if (ret < 0) + return ret; + } else { + ret = hdc3020_read_low_peak_rh(data, val); + if (ret < 0) + return ret; + } + return IIO_VAL_INT; + } + case IIO_CHAN_INFO_SCALE: + *val2 = 65536; + if (chan->type == IIO_TEMP) + *val = 175; + else + *val = 100; + return IIO_VAL_FRACTIONAL; + + case IIO_CHAN_INFO_OFFSET: + if (chan->type != IIO_TEMP) + return -EINVAL; + + *val = 16852; + return IIO_VAL_INT; + + default: + return -EINVAL; + } +} + +static int hdc3020_read_available(struct iio_dev *indio_dev, + struct iio_chan_spec const *chan, + const int **vals, + int *type, int *length, long mask) +{ + if (mask != IIO_CHAN_INFO_RAW || chan->type != IIO_CURRENT) + return -EINVAL; + + *vals = hdc3020_heater_vals; + *type = IIO_VAL_INT; + + return IIO_AVAIL_RANGE; +} + +static int hdc3020_update_heater(struct hdc3020_data *data, int val) +{ + u8 buf[5]; + int ret; + + if (val < hdc3020_heater_vals[0] || val > hdc3020_heater_vals[2]) + return -EINVAL; + + buf[0] = HDC3020_HEATER_CMD_MSB; + + if (!val) { + buf[1] = HDC3020_HEATER_DISABLE; + return hdc3020_write_bytes(data, buf, 2); + } + + buf[1] = HDC3020_HEATER_CONFIG; + put_unaligned_be16(val & GENMASK(13, 0), &buf[2]); + buf[4] = crc8(hdc3020_crc8_table, buf + 2, 2, CRC8_INIT_VALUE); + ret = hdc3020_write_bytes(data, buf, 5); + if (ret < 0) + return ret; + + buf[1] = HDC3020_HEATER_ENABLE; + + return hdc3020_write_bytes(data, buf, 2); +} + +static int hdc3020_write_raw(struct iio_dev *indio_dev, + struct iio_chan_spec const *chan, + int val, int val2, long mask) +{ + struct hdc3020_data *data = iio_priv(indio_dev); + + switch (mask) { + case IIO_CHAN_INFO_RAW: + if (chan->type != IIO_CURRENT) + return -EINVAL; + + guard(mutex)(&data->lock); + return hdc3020_update_heater(data, val); + } + + return -EINVAL; +} + +static const struct iio_info hdc3020_info = { + .read_raw = hdc3020_read_raw, + .write_raw = hdc3020_write_raw, + .read_avail = hdc3020_read_available, +}; + +static void hdc3020_stop(void *data) +{ + hdc3020_write_bytes((struct hdc3020_data *)data, HDC3020_EXIT_AUTO, 2); +} + +static int hdc3020_probe(struct i2c_client *client) +{ + struct iio_dev *indio_dev; + struct hdc3020_data *data; + int ret; + + if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) + return -EOPNOTSUPP; + + indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data)); + if (!indio_dev) + return -ENOMEM; + + data = iio_priv(indio_dev); + data->client = client; + mutex_init(&data->lock); + + crc8_populate_msb(hdc3020_crc8_table, 
HDC3020_CRC8_POLYNOMIAL); + + indio_dev->name = "hdc3020"; + indio_dev->modes = INDIO_DIRECT_MODE; + indio_dev->info = &hdc3020_info; + indio_dev->channels = hdc3020_channels; + indio_dev->num_channels = ARRAY_SIZE(hdc3020_channels); + + ret = hdc3020_write_bytes(data, HDC3020_S_AUTO_10HZ_MOD0, 2); + if (ret) + return dev_err_probe(&client->dev, ret, + "Unable to set up measurement\n"); + + ret = devm_add_action_or_reset(&data->client->dev, hdc3020_stop, data); + if (ret) + return ret; + + ret = devm_iio_device_register(&data->client->dev, indio_dev); + if (ret) + return dev_err_probe(&client->dev, ret, "Failed to add device"); + + return 0; +} + +static const struct i2c_device_id hdc3020_id[] = { + { "hdc3020" }, + { "hdc3021" }, + { "hdc3022" }, + { } +}; +MODULE_DEVICE_TABLE(i2c, hdc3020_id); + +static const struct of_device_id hdc3020_dt_ids[] = { + { .compatible = "ti,hdc3020" }, + { .compatible = "ti,hdc3021" }, + { .compatible = "ti,hdc3022" }, + { } +}; +MODULE_DEVICE_TABLE(of, hdc3020_dt_ids); + +static struct i2c_driver hdc3020_driver = { + .driver = { + .name = "hdc3020", + .of_match_table = hdc3020_dt_ids, + }, + .probe = hdc3020_probe, + .id_table = hdc3020_id, +}; +module_i2c_driver(hdc3020_driver); + +MODULE_AUTHOR("Javier Carrasco <javier.carrasco.cruz@gmail.com>"); +MODULE_AUTHOR("Li peiyu <579lpy@gmail.com>"); +MODULE_DESCRIPTION("TI HDC3020 humidity and temperature sensor driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/iio/imu/Kconfig b/drivers/iio/imu/Kconfig index c2f97629e9cd..52a155ff3250 100644 --- a/drivers/iio/imu/Kconfig +++ b/drivers/iio/imu/Kconfig @@ -53,6 +53,7 @@ config ADIS16480 ADIS16485, ADIS16488 inertial sensors. source "drivers/iio/imu/bmi160/Kconfig" +source "drivers/iio/imu/bmi323/Kconfig" source "drivers/iio/imu/bno055/Kconfig" config FXOS8700 diff --git a/drivers/iio/imu/Makefile b/drivers/iio/imu/Makefile index 6eb612034722..7e2d7d5c3b7b 100644 --- a/drivers/iio/imu/Makefile +++ b/drivers/iio/imu/Makefile @@ -15,6 +15,7 @@ adis_lib-$(CONFIG_IIO_ADIS_LIB_BUFFER) += adis_buffer.o obj-$(CONFIG_IIO_ADIS_LIB) += adis_lib.o obj-y += bmi160/ +obj-y += bmi323/ obj-y += bno055/ obj-$(CONFIG_FXOS8700) += fxos8700_core.o diff --git a/drivers/iio/imu/adis.c b/drivers/iio/imu/adis.c index bc40240b29e2..495caf4ce87a 100644 --- a/drivers/iio/imu/adis.c +++ b/drivers/iio/imu/adis.c @@ -44,8 +44,6 @@ int __adis_write_reg(struct adis *adis, unsigned int reg, unsigned int value, .cs_change = 1, .delay.value = adis->data->write_delay, .delay.unit = SPI_DELAY_UNIT_USECS, - .cs_change_delay.value = adis->data->cs_change_delay, - .cs_change_delay.unit = SPI_DELAY_UNIT_USECS, }, { .tx_buf = adis->tx + 2, .bits_per_word = 8, @@ -53,8 +51,6 @@ int __adis_write_reg(struct adis *adis, unsigned int reg, unsigned int value, .cs_change = 1, .delay.value = adis->data->write_delay, .delay.unit = SPI_DELAY_UNIT_USECS, - .cs_change_delay.value = adis->data->cs_change_delay, - .cs_change_delay.unit = SPI_DELAY_UNIT_USECS, }, { .tx_buf = adis->tx + 4, .bits_per_word = 8, @@ -62,8 +58,6 @@ int __adis_write_reg(struct adis *adis, unsigned int reg, unsigned int value, .cs_change = 1, .delay.value = adis->data->write_delay, .delay.unit = SPI_DELAY_UNIT_USECS, - .cs_change_delay.value = adis->data->cs_change_delay, - .cs_change_delay.unit = SPI_DELAY_UNIT_USECS, }, { .tx_buf = adis->tx + 6, .bits_per_word = 8, @@ -144,8 +138,6 @@ int __adis_read_reg(struct adis *adis, unsigned int reg, unsigned int *val, .cs_change = 1, .delay.value = adis->data->write_delay, .delay.unit = 
SPI_DELAY_UNIT_USECS, - .cs_change_delay.value = adis->data->cs_change_delay, - .cs_change_delay.unit = SPI_DELAY_UNIT_USECS, }, { .tx_buf = adis->tx + 2, .bits_per_word = 8, @@ -153,8 +145,6 @@ int __adis_read_reg(struct adis *adis, unsigned int reg, unsigned int *val, .cs_change = 1, .delay.value = adis->data->read_delay, .delay.unit = SPI_DELAY_UNIT_USECS, - .cs_change_delay.value = adis->data->cs_change_delay, - .cs_change_delay.unit = SPI_DELAY_UNIT_USECS, }, { .tx_buf = adis->tx + 4, .rx_buf = adis->rx, @@ -163,8 +153,6 @@ int __adis_read_reg(struct adis *adis, unsigned int reg, unsigned int *val, .cs_change = 1, .delay.value = adis->data->read_delay, .delay.unit = SPI_DELAY_UNIT_USECS, - .cs_change_delay.value = adis->data->cs_change_delay, - .cs_change_delay.unit = SPI_DELAY_UNIT_USECS, }, { .rx_buf = adis->rx + 2, .bits_per_word = 8, @@ -524,6 +512,12 @@ int adis_init(struct adis *adis, struct iio_dev *indio_dev, } mutex_init(&adis->state_lock); + + if (!spi->cs_inactive.value) { + spi->cs_inactive.value = data->cs_change_delay; + spi->cs_inactive.unit = SPI_DELAY_UNIT_USECS; + } + adis->spi = spi; adis->data = data; iio_device_set_drvdata(indio_dev, adis); diff --git a/drivers/iio/imu/bmi323/Kconfig b/drivers/iio/imu/bmi323/Kconfig new file mode 100644 index 000000000000..ab37b285393c --- /dev/null +++ b/drivers/iio/imu/bmi323/Kconfig @@ -0,0 +1,33 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# BMI323 IMU driver +# + +config BMI323 + tristate + select IIO_BUFFER + select IIO_TRIGGERED_BUFFER + +config BMI323_I2C + tristate "Bosch BMI323 I2C driver" + depends on I2C + select BMI323 + select REGMAP_I2C + help + Enable support for the Bosch BMI323 6-Axis IMU connected to I2C + interface. + + This driver can also be built as a module. If so, the module will be + called bmi323_i2c. + +config BMI323_SPI + tristate "Bosch BMI323 SPI driver" + depends on SPI + select BMI323 + select REGMAP_SPI + help + Enable support for the Bosch BMI323 6-Axis IMU connected to SPI + interface. + + This driver can also be built as a module. If so, the module will be + called bmi323_spi. 
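
For orientation, the BMI323 Kconfig entries above follow the usual IIO split of a transport-agnostic core module plus thin bus glue: the hidden BMI323 symbol selects the buffer infrastructure, while BMI323_I2C and BMI323_SPI only pull in the matching regmap backend. The sketch below is a minimal, illustrative I2C glue written under that assumption; it reuses the bmi323_core_probe() and bmi323_regmap_config symbols that bmi323.h declares later in this patch, but the device-id and compatible strings are placeholders, and it skips the dummy-byte handling on I2C reads that the BMI323_I2C_DUMMY define hints at, so it is not the patch's actual bmi323_i2c.c.

// SPDX-License-Identifier: GPL-2.0
/*
 * Illustrative only: minimal I2C glue for a core/bus-split IIO driver.
 * Assumes bmi323_core_probe() picks up the regmap registered on the
 * device (e.g. via dev_get_regmap()) and ignores BMI323_I2C_DUMMY.
 */
#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/regmap.h>

#include "bmi323.h"

static int bmi323_i2c_probe(struct i2c_client *i2c)
{
	struct regmap *regmap;

	/* Plain I2C regmap; the real glue must also strip the dummy bytes */
	regmap = devm_regmap_init_i2c(i2c, &bmi323_regmap_config);
	if (IS_ERR(regmap))
		return dev_err_probe(&i2c->dev, PTR_ERR(regmap),
				     "Failed to initialize I2C regmap\n");

	/* All chip handling lives in bmi323_core.c */
	return bmi323_core_probe(&i2c->dev);
}

static const struct i2c_device_id bmi323_i2c_ids[] = {
	{ "bmi323" },				/* placeholder id */
	{ }
};
MODULE_DEVICE_TABLE(i2c, bmi323_i2c_ids);

static const struct of_device_id bmi323_of_i2c_match[] = {
	{ .compatible = "bosch,bmi323" },	/* assumed binding name */
	{ }
};
MODULE_DEVICE_TABLE(of, bmi323_of_i2c_match);

static struct i2c_driver bmi323_i2c_driver = {
	.driver = {
		.name = "bmi323",
		.of_match_table = bmi323_of_i2c_match,
	},
	.probe = bmi323_i2c_probe,
	.id_table = bmi323_i2c_ids,
};
module_i2c_driver(bmi323_i2c_driver);

MODULE_DESCRIPTION("Illustrative Bosch BMI323 I2C glue (sketch)");
MODULE_LICENSE("GPL");

The point of the split is that the bus modules stay this small: all register logic, IRQ handling and IIO plumbing sit in bmi323_core.c, which is why bmi323.h only needs to export the probe helper and the shared regmap configuration.
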
diff --git a/drivers/iio/imu/bmi323/Makefile b/drivers/iio/imu/bmi323/Makefile new file mode 100644 index 000000000000..a6a6dc0207c9 --- /dev/null +++ b/drivers/iio/imu/bmi323/Makefile @@ -0,0 +1,7 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Makefile for Bosch BMI323 IMU +# +obj-$(CONFIG_BMI323) += bmi323_core.o +obj-$(CONFIG_BMI323_I2C) += bmi323_i2c.o +obj-$(CONFIG_BMI323_SPI) += bmi323_spi.o diff --git a/drivers/iio/imu/bmi323/bmi323.h b/drivers/iio/imu/bmi323/bmi323.h new file mode 100644 index 000000000000..dff126d41658 --- /dev/null +++ b/drivers/iio/imu/bmi323/bmi323.h @@ -0,0 +1,209 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * IIO driver for Bosch BMI323 6-Axis IMU + * + * Copyright (C) 2023, Jagath Jog J <jagathjog1996@gmail.com> + */ + +#ifndef _BMI323_H_ +#define _BMI323_H_ + +#include <linux/bits.h> +#include <linux/regmap.h> +#include <linux/units.h> + +#define BMI323_I2C_DUMMY 2 +#define BMI323_SPI_DUMMY 1 + +/* Register map */ + +#define BMI323_CHIP_ID_REG 0x00 +#define BMI323_CHIP_ID_VAL 0x0043 +#define BMI323_CHIP_ID_MSK GENMASK(7, 0) +#define BMI323_ERR_REG 0x01 +#define BMI323_STATUS_REG 0x02 +#define BMI323_STATUS_POR_MSK BIT(0) + +/* Accelero/Gyro/Temp data registers */ +#define BMI323_ACCEL_X_REG 0x03 +#define BMI323_GYRO_X_REG 0x06 +#define BMI323_TEMP_REG 0x09 +#define BMI323_ALL_CHAN_MSK GENMASK(5, 0) + +/* Status registers */ +#define BMI323_STATUS_INT1_REG 0x0D +#define BMI323_STATUS_INT2_REG 0x0E +#define BMI323_STATUS_NOMOTION_MSK BIT(0) +#define BMI323_STATUS_MOTION_MSK BIT(1) +#define BMI323_STATUS_STP_WTR_MSK BIT(5) +#define BMI323_STATUS_TAP_MSK BIT(8) +#define BMI323_STATUS_ERROR_MSK BIT(10) +#define BMI323_STATUS_TMP_DRDY_MSK BIT(11) +#define BMI323_STATUS_GYR_DRDY_MSK BIT(12) +#define BMI323_STATUS_ACC_DRDY_MSK BIT(13) +#define BMI323_STATUS_ACC_GYR_DRDY_MSK GENMASK(13, 12) +#define BMI323_STATUS_FIFO_WTRMRK_MSK BIT(14) +#define BMI323_STATUS_FIFO_FULL_MSK BIT(15) + +/* Feature registers */ +#define BMI323_FEAT_IO0_REG 0x10 +#define BMI323_FEAT_IO0_XYZ_NOMOTION_MSK GENMASK(2, 0) +#define BMI323_FEAT_IO0_XYZ_MOTION_MSK GENMASK(5, 3) +#define BMI323_FEAT_XYZ_MSK GENMASK(2, 0) +#define BMI323_FEAT_IO0_STP_CNT_MSK BIT(9) +#define BMI323_FEAT_IO0_S_TAP_MSK BIT(12) +#define BMI323_FEAT_IO0_D_TAP_MSK BIT(13) +#define BMI323_FEAT_IO1_REG 0x11 +#define BMI323_FEAT_IO1_ERR_MSK GENMASK(3, 0) +#define BMI323_FEAT_IO2_REG 0x12 +#define BMI323_FEAT_IO_STATUS_REG 0x14 +#define BMI323_FEAT_IO_STATUS_MSK BIT(0) +#define BMI323_FEAT_ENG_POLL 2000 +#define BMI323_FEAT_ENG_TIMEOUT 10000 + +/* FIFO registers */ +#define BMI323_FIFO_FILL_LEVEL_REG 0x15 +#define BMI323_FIFO_DATA_REG 0x16 + +/* Accelero/Gyro config registers */ +#define BMI323_ACC_CONF_REG 0x20 +#define BMI323_GYRO_CONF_REG 0x21 +#define BMI323_ACC_GYRO_CONF_MODE_MSK GENMASK(14, 12) +#define BMI323_ACC_GYRO_CONF_ODR_MSK GENMASK(3, 0) +#define BMI323_ACC_GYRO_CONF_SCL_MSK GENMASK(6, 4) +#define BMI323_ACC_GYRO_CONF_BW_MSK BIT(7) +#define BMI323_ACC_GYRO_CONF_AVG_MSK GENMASK(10, 8) + +/* FIFO registers */ +#define BMI323_FIFO_WTRMRK_REG 0x35 +#define BMI323_FIFO_CONF_REG 0x36 +#define BMI323_FIFO_CONF_STP_FUL_MSK BIT(0) +#define BMI323_FIFO_CONF_ACC_GYR_EN_MSK GENMASK(10, 9) +#define BMI323_FIFO_ACC_GYR_MSK GENMASK(1, 0) +#define BMI323_FIFO_CTRL_REG 0x37 +#define BMI323_FIFO_FLUSH_MSK BIT(0) + +/* Interrupt pin config registers */ +#define BMI323_IO_INT_CTR_REG 0x38 +#define BMI323_IO_INT1_LVL_MSK BIT(0) +#define BMI323_IO_INT1_OD_MSK BIT(1) +#define BMI323_IO_INT1_OP_EN_MSK BIT(2) +#define 
BMI323_IO_INT1_LVL_OD_OP_MSK GENMASK(2, 0) +#define BMI323_IO_INT2_LVL_MSK BIT(8) +#define BMI323_IO_INT2_OD_MSK BIT(9) +#define BMI323_IO_INT2_OP_EN_MSK BIT(10) +#define BMI323_IO_INT2_LVL_OD_OP_MSK GENMASK(10, 8) +#define BMI323_IO_INT_CONF_REG 0x39 +#define BMI323_IO_INT_LTCH_MSK BIT(0) +#define BMI323_INT_MAP1_REG 0x3A +#define BMI323_INT_MAP2_REG 0x3B +#define BMI323_NOMOTION_MSK GENMASK(1, 0) +#define BMI323_MOTION_MSK GENMASK(3, 2) +#define BMI323_STEP_CNT_MSK GENMASK(11, 10) +#define BMI323_TAP_MSK GENMASK(1, 0) +#define BMI323_TMP_DRDY_MSK GENMASK(7, 6) +#define BMI323_GYR_DRDY_MSK GENMASK(9, 8) +#define BMI323_ACC_DRDY_MSK GENMASK(11, 10) +#define BMI323_FIFO_WTRMRK_MSK GENMASK(13, 12) +#define BMI323_FIFO_FULL_MSK GENMASK(15, 14) + +/* Feature registers */ +#define BMI323_FEAT_CTRL_REG 0x40 +#define BMI323_FEAT_ENG_EN_MSK BIT(0) +#define BMI323_FEAT_DATA_ADDR 0x41 +#define BMI323_FEAT_DATA_TX 0x42 +#define BMI323_FEAT_DATA_STATUS 0x43 +#define BMI323_FEAT_DATA_TX_RDY_MSK BIT(1) +#define BMI323_FEAT_EVNT_EXT_REG 0x47 +#define BMI323_FEAT_EVNT_EXT_S_MSK BIT(3) +#define BMI323_FEAT_EVNT_EXT_D_MSK BIT(4) + +#define BMI323_CMD_REG 0x7E +#define BMI323_RST_VAL 0xDEAF +#define BMI323_CFG_RES_REG 0x7F + +/* Extended registers */ +#define BMI323_GEN_SET1_REG 0x02 +#define BMI323_GEN_SET1_MODE_MSK BIT(0) +#define BMI323_GEN_HOLD_DUR_MSK GENMASK(4, 1) + +/* Any Motion/No Motion config registers */ +#define BMI323_ANYMO1_REG 0x05 +#define BMI323_NOMO1_REG 0x08 +#define BMI323_MO2_OFFSET 0x01 +#define BMI323_MO3_OFFSET 0x02 +#define BMI323_MO1_REF_UP_MSK BIT(12) +#define BMI323_MO1_SLOPE_TH_MSK GENMASK(11, 0) +#define BMI323_MO2_HYSTR_MSK GENMASK(9, 0) +#define BMI323_MO3_DURA_MSK GENMASK(12, 0) + +/* Step counter config registers */ +#define BMI323_STEP_SC1_REG 0x10 +#define BMI323_STEP_SC1_WTRMRK_MSK GENMASK(9, 0) +#define BMI323_STEP_SC1_RST_CNT_MSK BIT(10) +#define BMI323_STEP_SC1_REG 0x10 +#define BMI323_STEP_LEN 2 + +/* Tap gesture config registers */ +#define BMI323_TAP1_REG 0x1E +#define BMI323_TAP1_AXIS_SEL_MSK GENMASK(1, 0) +#define BMI323_AXIS_XYZ_MSK GENMASK(1, 0) +#define BMI323_TAP1_TIMOUT_MSK BIT(2) +#define BMI323_TAP1_MAX_PEAKS_MSK GENMASK(5, 3) +#define BMI323_TAP1_MODE_MSK GENMASK(7, 6) +#define BMI323_TAP2_REG 0x1F +#define BMI323_TAP2_THRES_MSK GENMASK(9, 0) +#define BMI323_TAP2_MAX_DUR_MSK GENMASK(15, 10) +#define BMI323_TAP3_REG 0x20 +#define BMI323_TAP3_QUIET_TIM_MSK GENMASK(15, 12) +#define BMI323_TAP3_QT_BW_TAP_MSK GENMASK(11, 8) +#define BMI323_TAP3_QT_AFT_GES_MSK GENMASK(15, 12) + +#define BMI323_MOTION_THRES_SCALE 512 +#define BMI323_MOTION_HYSTR_SCALE 512 +#define BMI323_MOTION_DURAT_SCALE 50 +#define BMI323_TAP_THRES_SCALE 512 +#define BMI323_DUR_BW_TAP_SCALE 200 +#define BMI323_QUITE_TIM_GES_SCALE 25 +#define BMI323_MAX_GES_DUR_SCALE 25 + +/* + * The formula to calculate temperature in C. + * See datasheet section 6.1.1, Register Map Overview + * + * T_C = (temp_raw / 512) + 23 + */ +#define BMI323_TEMP_OFFSET 11776 +#define BMI323_TEMP_SCALE 1953125 + +/* + * The BMI323 features a FIFO with a capacity of 2048 bytes. Each frame + * consists of accelerometer (X, Y, Z) data and gyroscope (X, Y, Z) data, + * totaling 6 words or 12 bytes. The FIFO buffer can hold a total of + * 170 frames. + * + * If a watermark interrupt is configured for 170 frames, the interrupt will + * trigger when the FIFO reaches 169 frames, so limit the maximum watermark + * level to 169 frames. 
In terms of data, 169 frames would equal 1014 bytes, + * which is approximately 2 frames before the FIFO reaches its full capacity. + * See datasheet section 5.7.3 FIFO Buffer Interrupts + */ +#define BMI323_BYTES_PER_SAMPLE 2 +#define BMI323_FIFO_LENGTH_IN_BYTES 2048 +#define BMI323_FIFO_FRAME_LENGTH 6 +#define BMI323_FIFO_FULL_IN_FRAMES \ + ((BMI323_FIFO_LENGTH_IN_BYTES / \ + (BMI323_BYTES_PER_SAMPLE * BMI323_FIFO_FRAME_LENGTH)) - 1) +#define BMI323_FIFO_FULL_IN_WORDS \ + (BMI323_FIFO_FULL_IN_FRAMES * BMI323_FIFO_FRAME_LENGTH) + +#define BMI323_INT_MICRO_TO_RAW(val, val2, scale) ((val) * (scale) + \ + ((val2) * (scale)) / MEGA) + +#define BMI323_RAW_TO_MICRO(raw, scale) ((((raw) % (scale)) * MEGA) / scale) + +struct device; +int bmi323_core_probe(struct device *dev); +extern const struct regmap_config bmi323_regmap_config; + +#endif diff --git a/drivers/iio/imu/bmi323/bmi323_core.c b/drivers/iio/imu/bmi323/bmi323_core.c new file mode 100644 index 000000000000..183af482828f --- /dev/null +++ b/drivers/iio/imu/bmi323/bmi323_core.c @@ -0,0 +1,2139 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * IIO core driver for Bosch BMI323 6-Axis IMU. + * + * Copyright (C) 2023, Jagath Jog J <jagathjog1996@gmail.com> + * + * Datasheet: https://www.bosch-sensortec.com/media/boschsensortec/downloads/datasheets/bst-bmi323-ds000.pdf + */ + +#include <linux/bitfield.h> +#include <linux/cleanup.h> +#include <linux/device.h> +#include <linux/interrupt.h> +#include <linux/minmax.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/property.h> +#include <linux/regmap.h> +#include <linux/regulator/consumer.h> +#include <linux/units.h> + +#include <asm/unaligned.h> + +#include <linux/iio/buffer.h> +#include <linux/iio/events.h> +#include <linux/iio/iio.h> +#include <linux/iio/sysfs.h> +#include <linux/iio/trigger.h> +#include <linux/iio/trigger_consumer.h> +#include <linux/iio/triggered_buffer.h> + +#include "bmi323.h" + +enum bmi323_sensor_type { + BMI323_ACCEL, + BMI323_GYRO, + BMI323_SENSORS_CNT, +}; + +enum bmi323_opr_mode { + ACC_GYRO_MODE_DISABLE = 0x00, + GYRO_DRIVE_MODE_ENABLED = 0x01, + ACC_GYRO_MODE_DUTYCYCLE = 0x03, + ACC_GYRO_MODE_CONTINOUS = 0x04, + ACC_GYRO_MODE_HIGH_PERF = 0x07, +}; + +enum bmi323_state { + BMI323_IDLE, + BMI323_BUFFER_DRDY_TRIGGERED, + BMI323_BUFFER_FIFO, +}; + +enum bmi323_irq_pin { + BMI323_IRQ_DISABLED, + BMI323_IRQ_INT1, + BMI323_IRQ_INT2, +}; + +enum bmi323_3db_bw { + BMI323_BW_ODR_BY_2, + BMI323_BW_ODR_BY_4, +}; + +enum bmi323_scan { + BMI323_ACCEL_X, + BMI323_ACCEL_Y, + BMI323_ACCEL_Z, + BMI323_GYRO_X, + BMI323_GYRO_Y, + BMI323_GYRO_Z, + BMI323_CHAN_MAX +}; + +struct bmi323_hw { + u8 data; + u8 config; + const int (*scale_table)[2]; + int scale_table_len; +}; + +/* + * The accelerometer supports +-2G/4G/8G/16G ranges, and the resolution of + * each sample is 16 bits, signed. 
+ * At +-8G the scale can calculated by + * ((8 + 8) * 9.80665 / (2^16 - 1)) * 10^6 = 2394.23819 scale in micro + * + */ +static const int bmi323_accel_scale[][2] = { + { 0, 598 }, + { 0, 1197 }, + { 0, 2394 }, + { 0, 4788 }, +}; + +static const int bmi323_gyro_scale[][2] = { + { 0, 66 }, + { 0, 133 }, + { 0, 266 }, + { 0, 532 }, + { 0, 1065 }, +}; + +static const int bmi323_accel_gyro_avrg[] = {0, 2, 4, 8, 16, 32, 64}; + +static const struct bmi323_hw bmi323_hw[2] = { + [BMI323_ACCEL] = { + .data = BMI323_ACCEL_X_REG, + .config = BMI323_ACC_CONF_REG, + .scale_table = bmi323_accel_scale, + .scale_table_len = ARRAY_SIZE(bmi323_accel_scale), + }, + [BMI323_GYRO] = { + .data = BMI323_GYRO_X_REG, + .config = BMI323_GYRO_CONF_REG, + .scale_table = bmi323_gyro_scale, + .scale_table_len = ARRAY_SIZE(bmi323_gyro_scale), + }, +}; + +struct bmi323_data { + struct device *dev; + struct regmap *regmap; + struct iio_mount_matrix orientation; + enum bmi323_irq_pin irq_pin; + struct iio_trigger *trig; + bool drdy_trigger_enabled; + enum bmi323_state state; + s64 fifo_tstamp, old_fifo_tstamp; + u32 odrns[BMI323_SENSORS_CNT]; + u32 odrhz[BMI323_SENSORS_CNT]; + unsigned int feature_events; + + /* + * Lock to protect the members of device's private data from concurrent + * access and also to serialize the access of extended registers. + * See bmi323_write_ext_reg(..) for more info. + */ + struct mutex mutex; + int watermark; + __le16 fifo_buff[BMI323_FIFO_FULL_IN_WORDS] __aligned(IIO_DMA_MINALIGN); + struct { + __le16 channels[BMI323_CHAN_MAX]; + s64 ts __aligned(8); + } buffer; + __le16 steps_count[BMI323_STEP_LEN]; +}; + +static const struct iio_mount_matrix * +bmi323_get_mount_matrix(const struct iio_dev *idev, + const struct iio_chan_spec *chan) +{ + struct bmi323_data *data = iio_priv(idev); + + return &data->orientation; +} + +static const struct iio_chan_spec_ext_info bmi323_ext_info[] = { + IIO_MOUNT_MATRIX(IIO_SHARED_BY_TYPE, bmi323_get_mount_matrix), + { } +}; + +static const struct iio_event_spec bmi323_step_wtrmrk_event = { + .type = IIO_EV_TYPE_CHANGE, + .dir = IIO_EV_DIR_NONE, + .mask_shared_by_type = BIT(IIO_EV_INFO_ENABLE) | + BIT(IIO_EV_INFO_VALUE), +}; + +static const struct iio_event_spec bmi323_accel_event[] = { + { + .type = IIO_EV_TYPE_MAG, + .dir = IIO_EV_DIR_FALLING, + .mask_shared_by_type = BIT(IIO_EV_INFO_VALUE) | + BIT(IIO_EV_INFO_PERIOD) | + BIT(IIO_EV_INFO_HYSTERESIS) | + BIT(IIO_EV_INFO_ENABLE), + }, + { + .type = IIO_EV_TYPE_MAG, + .dir = IIO_EV_DIR_RISING, + .mask_shared_by_type = BIT(IIO_EV_INFO_VALUE) | + BIT(IIO_EV_INFO_PERIOD) | + BIT(IIO_EV_INFO_HYSTERESIS) | + BIT(IIO_EV_INFO_ENABLE), + }, + { + .type = IIO_EV_TYPE_GESTURE, + .dir = IIO_EV_DIR_SINGLETAP, + .mask_shared_by_type = BIT(IIO_EV_INFO_ENABLE) | + BIT(IIO_EV_INFO_VALUE) | + BIT(IIO_EV_INFO_RESET_TIMEOUT), + }, + { + .type = IIO_EV_TYPE_GESTURE, + .dir = IIO_EV_DIR_DOUBLETAP, + .mask_shared_by_type = BIT(IIO_EV_INFO_ENABLE) | + BIT(IIO_EV_INFO_VALUE) | + BIT(IIO_EV_INFO_RESET_TIMEOUT) | + BIT(IIO_EV_INFO_TAP2_MIN_DELAY), + }, +}; + +#define BMI323_ACCEL_CHANNEL(_type, _axis, _index) { \ + .type = _type, \ + .modified = 1, \ + .channel2 = IIO_MOD_##_axis, \ + .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \ + .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SAMP_FREQ) | \ + BIT(IIO_CHAN_INFO_SCALE) | \ + BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO), \ + .info_mask_shared_by_type_available = \ + BIT(IIO_CHAN_INFO_SAMP_FREQ) | \ + BIT(IIO_CHAN_INFO_SCALE) | \ + BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO), \ + .scan_index = _index, \ 
+ .scan_type = { \ + .sign = 's', \ + .realbits = 16, \ + .storagebits = 16, \ + .endianness = IIO_LE, \ + }, \ + .ext_info = bmi323_ext_info, \ + .event_spec = bmi323_accel_event, \ + .num_event_specs = ARRAY_SIZE(bmi323_accel_event), \ +} + +#define BMI323_GYRO_CHANNEL(_type, _axis, _index) { \ + .type = _type, \ + .modified = 1, \ + .channel2 = IIO_MOD_##_axis, \ + .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \ + .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SAMP_FREQ) | \ + BIT(IIO_CHAN_INFO_SCALE) | \ + BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO), \ + .info_mask_shared_by_type_available = \ + BIT(IIO_CHAN_INFO_SAMP_FREQ) | \ + BIT(IIO_CHAN_INFO_SCALE) | \ + BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO), \ + .scan_index = _index, \ + .scan_type = { \ + .sign = 's', \ + .realbits = 16, \ + .storagebits = 16, \ + .endianness = IIO_LE, \ + }, \ + .ext_info = bmi323_ext_info, \ +} + +static const struct iio_chan_spec bmi323_channels[] = { + BMI323_ACCEL_CHANNEL(IIO_ACCEL, X, BMI323_ACCEL_X), + BMI323_ACCEL_CHANNEL(IIO_ACCEL, Y, BMI323_ACCEL_Y), + BMI323_ACCEL_CHANNEL(IIO_ACCEL, Z, BMI323_ACCEL_Z), + BMI323_GYRO_CHANNEL(IIO_ANGL_VEL, X, BMI323_GYRO_X), + BMI323_GYRO_CHANNEL(IIO_ANGL_VEL, Y, BMI323_GYRO_Y), + BMI323_GYRO_CHANNEL(IIO_ANGL_VEL, Z, BMI323_GYRO_Z), + { + .type = IIO_TEMP, + .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | + BIT(IIO_CHAN_INFO_OFFSET) | + BIT(IIO_CHAN_INFO_SCALE), + .scan_index = -1, + }, + { + .type = IIO_STEPS, + .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED) | + BIT(IIO_CHAN_INFO_ENABLE), + .scan_index = -1, + .event_spec = &bmi323_step_wtrmrk_event, + .num_event_specs = 1, + + }, + IIO_CHAN_SOFT_TIMESTAMP(BMI323_CHAN_MAX), +}; + +static const int bmi323_acc_gyro_odr[][2] = { + { 0, 781250 }, + { 1, 562500 }, + { 3, 125000 }, + { 6, 250000 }, + { 12, 500000 }, + { 25, 0 }, + { 50, 0 }, + { 100, 0 }, + { 200, 0 }, + { 400, 0 }, + { 800, 0 }, +}; + +static const int bmi323_acc_gyro_odrns[] = { + 1280 * MEGA, + 640 * MEGA, + 320 * MEGA, + 160 * MEGA, + 80 * MEGA, + 40 * MEGA, + 20 * MEGA, + 10 * MEGA, + 5 * MEGA, + 2500 * KILO, + 1250 * KILO, +}; + +static enum bmi323_sensor_type bmi323_iio_to_sensor(enum iio_chan_type iio_type) +{ + switch (iio_type) { + case IIO_ACCEL: + return BMI323_ACCEL; + case IIO_ANGL_VEL: + return BMI323_GYRO; + default: + return -EINVAL; + } +} + +static int bmi323_set_mode(struct bmi323_data *data, + enum bmi323_sensor_type sensor, + enum bmi323_opr_mode mode) +{ + guard(mutex)(&data->mutex); + return regmap_update_bits(data->regmap, bmi323_hw[sensor].config, + BMI323_ACC_GYRO_CONF_MODE_MSK, + FIELD_PREP(BMI323_ACC_GYRO_CONF_MODE_MSK, + mode)); +} + +/* + * When writing data to extended register there must be no communication to + * any other register before write transaction is complete. + * See datasheet section 6.2 Extended Register Map Description. + */ +static int bmi323_write_ext_reg(struct bmi323_data *data, unsigned int ext_addr, + unsigned int ext_data) +{ + int ret, feature_status; + + ret = regmap_read(data->regmap, BMI323_FEAT_DATA_STATUS, + &feature_status); + if (ret) + return ret; + + if (!FIELD_GET(BMI323_FEAT_DATA_TX_RDY_MSK, feature_status)) + return -EBUSY; + + ret = regmap_write(data->regmap, BMI323_FEAT_DATA_ADDR, ext_addr); + if (ret) + return ret; + + return regmap_write(data->regmap, BMI323_FEAT_DATA_TX, ext_data); +} + +/* + * When reading data from extended register there must be no communication to + * any other register before read transaction is complete. + * See datasheet section 6.2 Extended Register Map Description. 
+ */ +static int bmi323_read_ext_reg(struct bmi323_data *data, unsigned int ext_addr, + unsigned int *ext_data) +{ + int ret, feature_status; + + ret = regmap_read(data->regmap, BMI323_FEAT_DATA_STATUS, + &feature_status); + if (ret) + return ret; + + if (!FIELD_GET(BMI323_FEAT_DATA_TX_RDY_MSK, feature_status)) + return -EBUSY; + + ret = regmap_write(data->regmap, BMI323_FEAT_DATA_ADDR, ext_addr); + if (ret) + return ret; + + return regmap_read(data->regmap, BMI323_FEAT_DATA_TX, ext_data); +} + +static int bmi323_update_ext_reg(struct bmi323_data *data, + unsigned int ext_addr, + unsigned int mask, unsigned int ext_data) +{ + unsigned int value; + int ret; + + ret = bmi323_read_ext_reg(data, ext_addr, &value); + if (ret) + return ret; + + set_mask_bits(&value, mask, ext_data); + + return bmi323_write_ext_reg(data, ext_addr, value); +} + +static int bmi323_get_error_status(struct bmi323_data *data) +{ + int error, ret; + + guard(mutex)(&data->mutex); + ret = regmap_read(data->regmap, BMI323_ERR_REG, &error); + if (ret) + return ret; + + if (error) + dev_err(data->dev, "Sensor error 0x%x\n", error); + + return error; +} + +static int bmi323_feature_engine_events(struct bmi323_data *data, + const unsigned int event_mask, + bool state) +{ + unsigned int value; + int ret; + + ret = regmap_read(data->regmap, BMI323_FEAT_IO0_REG, &value); + if (ret) + return ret; + + /* Register must be cleared before changing an active config */ + ret = regmap_write(data->regmap, BMI323_FEAT_IO0_REG, 0); + if (ret) + return ret; + + if (state) + value |= event_mask; + else + value &= ~event_mask; + + ret = regmap_write(data->regmap, BMI323_FEAT_IO0_REG, value); + if (ret) + return ret; + + return regmap_write(data->regmap, BMI323_FEAT_IO_STATUS_REG, + BMI323_FEAT_IO_STATUS_MSK); +} + +static int bmi323_step_wtrmrk_en(struct bmi323_data *data, int state) +{ + enum bmi323_irq_pin step_irq; + int ret; + + guard(mutex)(&data->mutex); + if (!FIELD_GET(BMI323_FEAT_IO0_STP_CNT_MSK, data->feature_events)) + return -EINVAL; + + if (state) + step_irq = data->irq_pin; + else + step_irq = BMI323_IRQ_DISABLED; + + ret = bmi323_update_ext_reg(data, BMI323_STEP_SC1_REG, + BMI323_STEP_SC1_WTRMRK_MSK, + FIELD_PREP(BMI323_STEP_SC1_WTRMRK_MSK, + state ? 1 : 0)); + if (ret) + return ret; + + return regmap_update_bits(data->regmap, BMI323_INT_MAP1_REG, + BMI323_STEP_CNT_MSK, + FIELD_PREP(BMI323_STEP_CNT_MSK, step_irq)); +} + +static int bmi323_motion_config_reg(enum iio_event_direction dir) +{ + switch (dir) { + case IIO_EV_DIR_RISING: + return BMI323_ANYMO1_REG; + case IIO_EV_DIR_FALLING: + return BMI323_NOMO1_REG; + default: + return -EINVAL; + } +} + +static int bmi323_motion_event_en(struct bmi323_data *data, + enum iio_event_direction dir, int state) +{ + unsigned int state_value = state ? 
BMI323_FEAT_XYZ_MSK : 0; + int config, ret, msk, raw, field_value; + enum bmi323_irq_pin motion_irq; + int irq_msk, irq_field_val; + + if (state) + motion_irq = data->irq_pin; + else + motion_irq = BMI323_IRQ_DISABLED; + + switch (dir) { + case IIO_EV_DIR_RISING: + msk = BMI323_FEAT_IO0_XYZ_MOTION_MSK; + raw = 512; + config = BMI323_ANYMO1_REG; + irq_msk = BMI323_MOTION_MSK; + irq_field_val = FIELD_PREP(BMI323_MOTION_MSK, motion_irq); + field_value = FIELD_PREP(BMI323_FEAT_IO0_XYZ_MOTION_MSK, + state_value); + break; + case IIO_EV_DIR_FALLING: + msk = BMI323_FEAT_IO0_XYZ_NOMOTION_MSK; + raw = 0; + config = BMI323_NOMO1_REG; + irq_msk = BMI323_NOMOTION_MSK; + irq_field_val = FIELD_PREP(BMI323_NOMOTION_MSK, motion_irq); + field_value = FIELD_PREP(BMI323_FEAT_IO0_XYZ_NOMOTION_MSK, + state_value); + break; + default: + return -EINVAL; + } + + guard(mutex)(&data->mutex); + ret = bmi323_feature_engine_events(data, msk, state); + if (ret) + return ret; + + ret = bmi323_update_ext_reg(data, config, + BMI323_MO1_REF_UP_MSK, + FIELD_PREP(BMI323_MO1_REF_UP_MSK, 0)); + if (ret) + return ret; + + /* Set initial value to avoid interrupts while enabling*/ + ret = bmi323_update_ext_reg(data, config, + BMI323_MO1_SLOPE_TH_MSK, + FIELD_PREP(BMI323_MO1_SLOPE_TH_MSK, raw)); + if (ret) + return ret; + + ret = regmap_update_bits(data->regmap, BMI323_INT_MAP1_REG, irq_msk, + irq_field_val); + if (ret) + return ret; + + set_mask_bits(&data->feature_events, msk, field_value); + + return 0; +} + +static int bmi323_tap_event_en(struct bmi323_data *data, + enum iio_event_direction dir, int state) +{ + enum bmi323_irq_pin tap_irq; + int ret, tap_enabled; + + guard(mutex)(&data->mutex); + + if (data->odrhz[BMI323_ACCEL] < 200) { + dev_err(data->dev, "Invalid accelerometer parameter\n"); + return -EINVAL; + } + + switch (dir) { + case IIO_EV_DIR_SINGLETAP: + ret = bmi323_feature_engine_events(data, + BMI323_FEAT_IO0_S_TAP_MSK, + state); + if (ret) + return ret; + + set_mask_bits(&data->feature_events, BMI323_FEAT_IO0_S_TAP_MSK, + FIELD_PREP(BMI323_FEAT_IO0_S_TAP_MSK, state)); + break; + case IIO_EV_DIR_DOUBLETAP: + ret = bmi323_feature_engine_events(data, + BMI323_FEAT_IO0_D_TAP_MSK, + state); + if (ret) + return ret; + + set_mask_bits(&data->feature_events, BMI323_FEAT_IO0_D_TAP_MSK, + FIELD_PREP(BMI323_FEAT_IO0_D_TAP_MSK, state)); + break; + default: + return -EINVAL; + } + + tap_enabled = FIELD_GET(BMI323_FEAT_IO0_S_TAP_MSK | + BMI323_FEAT_IO0_D_TAP_MSK, + data->feature_events); + + if (tap_enabled) + tap_irq = data->irq_pin; + else + tap_irq = BMI323_IRQ_DISABLED; + + ret = regmap_update_bits(data->regmap, BMI323_INT_MAP2_REG, + BMI323_TAP_MSK, + FIELD_PREP(BMI323_TAP_MSK, tap_irq)); + if (ret) + return ret; + + if (!state) + return 0; + + ret = bmi323_update_ext_reg(data, BMI323_TAP1_REG, + BMI323_TAP1_MAX_PEAKS_MSK, + FIELD_PREP(BMI323_TAP1_MAX_PEAKS_MSK, + 0x04)); + if (ret) + return ret; + + ret = bmi323_update_ext_reg(data, BMI323_TAP1_REG, + BMI323_TAP1_AXIS_SEL_MSK, + FIELD_PREP(BMI323_TAP1_AXIS_SEL_MSK, + BMI323_AXIS_XYZ_MSK)); + if (ret) + return ret; + + return bmi323_update_ext_reg(data, BMI323_TAP1_REG, + BMI323_TAP1_TIMOUT_MSK, + FIELD_PREP(BMI323_TAP1_TIMOUT_MSK, + 0)); +} + +static ssize_t in_accel_gesture_tap_wait_dur_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct iio_dev *indio_dev = dev_to_iio_dev(dev); + struct bmi323_data *data = iio_priv(indio_dev); + unsigned int reg_value, raw; + int ret, val[2]; + + scoped_guard(mutex, &data->mutex) { + ret = 
bmi323_read_ext_reg(data, BMI323_TAP2_REG, ®_value); + if (ret) + return ret; + } + + raw = FIELD_GET(BMI323_TAP2_MAX_DUR_MSK, reg_value); + val[0] = raw / BMI323_MAX_GES_DUR_SCALE; + val[1] = BMI323_RAW_TO_MICRO(raw, BMI323_MAX_GES_DUR_SCALE); + + return iio_format_value(buf, IIO_VAL_INT_PLUS_MICRO, ARRAY_SIZE(val), + val); +} + +static ssize_t in_accel_gesture_tap_wait_dur_store(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t len) +{ + struct iio_dev *indio_dev = dev_to_iio_dev(dev); + struct bmi323_data *data = iio_priv(indio_dev); + int ret, val_int, val_fract, raw; + + ret = iio_str_to_fixpoint(buf, 100000, &val_int, &val_fract); + if (ret) + return ret; + + raw = BMI323_INT_MICRO_TO_RAW(val_int, val_fract, + BMI323_MAX_GES_DUR_SCALE); + if (!in_range(raw, 0, 64)) + return -EINVAL; + + guard(mutex)(&data->mutex); + ret = bmi323_update_ext_reg(data, BMI323_TAP2_REG, + BMI323_TAP2_MAX_DUR_MSK, + FIELD_PREP(BMI323_TAP2_MAX_DUR_MSK, raw)); + if (ret) + return ret; + + return len; +} + +/* + * Maximum duration from first tap within the second tap is expected to happen. + * This timeout is applicable only if gesture_tap_wait_timeout is enabled. + */ +static IIO_DEVICE_ATTR_RW(in_accel_gesture_tap_wait_dur, 0); + +static ssize_t in_accel_gesture_tap_wait_timeout_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct iio_dev *indio_dev = dev_to_iio_dev(dev); + struct bmi323_data *data = iio_priv(indio_dev); + unsigned int reg_value, raw; + int ret; + + scoped_guard(mutex, &data->mutex) { + ret = bmi323_read_ext_reg(data, BMI323_TAP1_REG, ®_value); + if (ret) + return ret; + } + + raw = FIELD_GET(BMI323_TAP1_TIMOUT_MSK, reg_value); + + return iio_format_value(buf, IIO_VAL_INT, 1, &raw); +} + +static ssize_t in_accel_gesture_tap_wait_timeout_store(struct device *dev, + struct device_attribute *attr, + const char *buf, + size_t len) +{ + struct iio_dev *indio_dev = dev_to_iio_dev(dev); + struct bmi323_data *data = iio_priv(indio_dev); + bool val; + int ret; + + ret = kstrtobool(buf, &val); + if (ret) + return ret; + + guard(mutex)(&data->mutex); + ret = bmi323_update_ext_reg(data, BMI323_TAP1_REG, + BMI323_TAP1_TIMOUT_MSK, + FIELD_PREP(BMI323_TAP1_TIMOUT_MSK, val)); + if (ret) + return ret; + + return len; +} + +/* Enable/disable gesture confirmation with wait time */ +static IIO_DEVICE_ATTR_RW(in_accel_gesture_tap_wait_timeout, 0); + +static IIO_CONST_ATTR(in_accel_gesture_tap_wait_dur_available, + "[0.0 0.04 2.52]"); + +static IIO_CONST_ATTR(in_accel_gesture_doubletap_tap2_min_delay_available, + "[0.005 0.005 0.075]"); + +static IIO_CONST_ATTR(in_accel_gesture_tap_reset_timeout_available, + "[0.04 0.04 0.6]"); + +static IIO_CONST_ATTR(in_accel_gesture_tap_value_available, "[0.0 0.002 1.99]"); + +static IIO_CONST_ATTR(in_accel_mag_value_available, "[0.0 0.002 7.99]"); + +static IIO_CONST_ATTR(in_accel_mag_period_available, "[0.0 0.02 162.0]"); + +static IIO_CONST_ATTR(in_accel_mag_hysteresis_available, "[0.0 0.002 1.99]"); + +static struct attribute *bmi323_event_attributes[] = { + &iio_const_attr_in_accel_gesture_tap_value_available.dev_attr.attr, + &iio_const_attr_in_accel_gesture_tap_reset_timeout_available.dev_attr.attr, + &iio_const_attr_in_accel_gesture_doubletap_tap2_min_delay_available.dev_attr.attr, + &iio_const_attr_in_accel_gesture_tap_wait_dur_available.dev_attr.attr, + &iio_dev_attr_in_accel_gesture_tap_wait_timeout.dev_attr.attr, + &iio_dev_attr_in_accel_gesture_tap_wait_dur.dev_attr.attr, + 
&iio_const_attr_in_accel_mag_value_available.dev_attr.attr, + &iio_const_attr_in_accel_mag_period_available.dev_attr.attr, + &iio_const_attr_in_accel_mag_hysteresis_available.dev_attr.attr, + NULL +}; + +static const struct attribute_group bmi323_event_attribute_group = { + .attrs = bmi323_event_attributes, +}; + +static int bmi323_write_event_config(struct iio_dev *indio_dev, + const struct iio_chan_spec *chan, + enum iio_event_type type, + enum iio_event_direction dir, int state) +{ + struct bmi323_data *data = iio_priv(indio_dev); + + switch (type) { + case IIO_EV_TYPE_MAG: + return bmi323_motion_event_en(data, dir, state); + case IIO_EV_TYPE_GESTURE: + return bmi323_tap_event_en(data, dir, state); + case IIO_EV_TYPE_CHANGE: + return bmi323_step_wtrmrk_en(data, state); + default: + return -EINVAL; + } +} + +static int bmi323_read_event_config(struct iio_dev *indio_dev, + const struct iio_chan_spec *chan, + enum iio_event_type type, + enum iio_event_direction dir) +{ + struct bmi323_data *data = iio_priv(indio_dev); + int ret, value, reg_val; + + guard(mutex)(&data->mutex); + + switch (chan->type) { + case IIO_ACCEL: + switch (dir) { + case IIO_EV_DIR_SINGLETAP: + ret = FIELD_GET(BMI323_FEAT_IO0_S_TAP_MSK, + data->feature_events); + break; + case IIO_EV_DIR_DOUBLETAP: + ret = FIELD_GET(BMI323_FEAT_IO0_D_TAP_MSK, + data->feature_events); + break; + case IIO_EV_DIR_RISING: + value = FIELD_GET(BMI323_FEAT_IO0_XYZ_MOTION_MSK, + data->feature_events); + ret = value ? 1 : 0; + break; + case IIO_EV_DIR_FALLING: + value = FIELD_GET(BMI323_FEAT_IO0_XYZ_NOMOTION_MSK, + data->feature_events); + ret = value ? 1 : 0; + break; + default: + ret = -EINVAL; + break; + } + return ret; + case IIO_STEPS: + ret = regmap_read(data->regmap, BMI323_INT_MAP1_REG, ®_val); + if (ret) + return ret; + + return FIELD_GET(BMI323_STEP_CNT_MSK, reg_val) ? 
1 : 0; + default: + return -EINVAL; + } +} + +static int bmi323_write_event_value(struct iio_dev *indio_dev, + const struct iio_chan_spec *chan, + enum iio_event_type type, + enum iio_event_direction dir, + enum iio_event_info info, + int val, int val2) +{ + struct bmi323_data *data = iio_priv(indio_dev); + unsigned int raw; + int reg; + + guard(mutex)(&data->mutex); + + switch (type) { + case IIO_EV_TYPE_GESTURE: + switch (info) { + case IIO_EV_INFO_VALUE: + if (!in_range(val, 0, 2)) + return -EINVAL; + + raw = BMI323_INT_MICRO_TO_RAW(val, val2, + BMI323_TAP_THRES_SCALE); + + return bmi323_update_ext_reg(data, BMI323_TAP2_REG, + BMI323_TAP2_THRES_MSK, + FIELD_PREP(BMI323_TAP2_THRES_MSK, + raw)); + case IIO_EV_INFO_RESET_TIMEOUT: + if (val || !in_range(val2, 40000, 560001)) + return -EINVAL; + + raw = BMI323_INT_MICRO_TO_RAW(val, val2, + BMI323_QUITE_TIM_GES_SCALE); + + return bmi323_update_ext_reg(data, BMI323_TAP3_REG, + BMI323_TAP3_QT_AFT_GES_MSK, + FIELD_PREP(BMI323_TAP3_QT_AFT_GES_MSK, + raw)); + case IIO_EV_INFO_TAP2_MIN_DELAY: + if (val || !in_range(val2, 5000, 70001)) + return -EINVAL; + + raw = BMI323_INT_MICRO_TO_RAW(val, val2, + BMI323_DUR_BW_TAP_SCALE); + + return bmi323_update_ext_reg(data, BMI323_TAP3_REG, + BMI323_TAP3_QT_BW_TAP_MSK, + FIELD_PREP(BMI323_TAP3_QT_BW_TAP_MSK, + raw)); + default: + return -EINVAL; + } + case IIO_EV_TYPE_MAG: + reg = bmi323_motion_config_reg(dir); + if (reg < 0) + return -EINVAL; + + switch (info) { + case IIO_EV_INFO_VALUE: + if (!in_range(val, 0, 8)) + return -EINVAL; + + raw = BMI323_INT_MICRO_TO_RAW(val, val2, + BMI323_MOTION_THRES_SCALE); + + return bmi323_update_ext_reg(data, reg, + BMI323_MO1_SLOPE_TH_MSK, + FIELD_PREP(BMI323_MO1_SLOPE_TH_MSK, + raw)); + case IIO_EV_INFO_PERIOD: + if (!in_range(val, 0, 163)) + return -EINVAL; + + raw = BMI323_INT_MICRO_TO_RAW(val, val2, + BMI323_MOTION_DURAT_SCALE); + + return bmi323_update_ext_reg(data, + reg + BMI323_MO3_OFFSET, + BMI323_MO3_DURA_MSK, + FIELD_PREP(BMI323_MO3_DURA_MSK, + raw)); + case IIO_EV_INFO_HYSTERESIS: + if (!in_range(val, 0, 2)) + return -EINVAL; + + raw = BMI323_INT_MICRO_TO_RAW(val, val2, + BMI323_MOTION_HYSTR_SCALE); + + return bmi323_update_ext_reg(data, + reg + BMI323_MO2_OFFSET, + BMI323_MO2_HYSTR_MSK, + FIELD_PREP(BMI323_MO2_HYSTR_MSK, + raw)); + default: + return -EINVAL; + } + case IIO_EV_TYPE_CHANGE: + if (!in_range(val, 0, 20461)) + return -EINVAL; + + raw = val / 20; + return bmi323_update_ext_reg(data, BMI323_STEP_SC1_REG, + BMI323_STEP_SC1_WTRMRK_MSK, + FIELD_PREP(BMI323_STEP_SC1_WTRMRK_MSK, + raw)); + default: + return -EINVAL; + } +} + +static int bmi323_read_event_value(struct iio_dev *indio_dev, + const struct iio_chan_spec *chan, + enum iio_event_type type, + enum iio_event_direction dir, + enum iio_event_info info, + int *val, int *val2) +{ + struct bmi323_data *data = iio_priv(indio_dev); + unsigned int raw, reg_value; + int ret, reg; + + guard(mutex)(&data->mutex); + + switch (type) { + case IIO_EV_TYPE_GESTURE: + switch (info) { + case IIO_EV_INFO_VALUE: + ret = bmi323_read_ext_reg(data, BMI323_TAP2_REG, + ®_value); + if (ret) + return ret; + + raw = FIELD_GET(BMI323_TAP2_THRES_MSK, reg_value); + *val = raw / BMI323_TAP_THRES_SCALE; + *val2 = BMI323_RAW_TO_MICRO(raw, BMI323_TAP_THRES_SCALE); + return IIO_VAL_INT_PLUS_MICRO; + case IIO_EV_INFO_RESET_TIMEOUT: + ret = bmi323_read_ext_reg(data, BMI323_TAP3_REG, + ®_value); + if (ret) + return ret; + + raw = FIELD_GET(BMI323_TAP3_QT_AFT_GES_MSK, reg_value); + *val = 0; + *val2 = BMI323_RAW_TO_MICRO(raw, + 
BMI323_QUITE_TIM_GES_SCALE); + return IIO_VAL_INT_PLUS_MICRO; + case IIO_EV_INFO_TAP2_MIN_DELAY: + ret = bmi323_read_ext_reg(data, BMI323_TAP3_REG, + ®_value); + if (ret) + return ret; + + raw = FIELD_GET(BMI323_TAP3_QT_BW_TAP_MSK, reg_value); + *val = 0; + *val2 = BMI323_RAW_TO_MICRO(raw, + BMI323_DUR_BW_TAP_SCALE); + return IIO_VAL_INT_PLUS_MICRO; + default: + return -EINVAL; + } + case IIO_EV_TYPE_MAG: + reg = bmi323_motion_config_reg(dir); + if (reg < 0) + return -EINVAL; + + switch (info) { + case IIO_EV_INFO_VALUE: + ret = bmi323_read_ext_reg(data, reg, ®_value); + if (ret) + return ret; + + raw = FIELD_GET(BMI323_MO1_SLOPE_TH_MSK, reg_value); + *val = raw / BMI323_MOTION_THRES_SCALE; + *val2 = BMI323_RAW_TO_MICRO(raw, + BMI323_MOTION_THRES_SCALE); + return IIO_VAL_INT_PLUS_MICRO; + case IIO_EV_INFO_PERIOD: + ret = bmi323_read_ext_reg(data, + reg + BMI323_MO3_OFFSET, + ®_value); + if (ret) + return ret; + + raw = FIELD_GET(BMI323_MO3_DURA_MSK, reg_value); + *val = raw / BMI323_MOTION_DURAT_SCALE; + *val2 = BMI323_RAW_TO_MICRO(raw, + BMI323_MOTION_DURAT_SCALE); + return IIO_VAL_INT_PLUS_MICRO; + case IIO_EV_INFO_HYSTERESIS: + ret = bmi323_read_ext_reg(data, + reg + BMI323_MO2_OFFSET, + ®_value); + if (ret) + return ret; + + raw = FIELD_GET(BMI323_MO2_HYSTR_MSK, reg_value); + *val = raw / BMI323_MOTION_HYSTR_SCALE; + *val2 = BMI323_RAW_TO_MICRO(raw, + BMI323_MOTION_HYSTR_SCALE); + return IIO_VAL_INT_PLUS_MICRO; + default: + return -EINVAL; + } + case IIO_EV_TYPE_CHANGE: + ret = bmi323_read_ext_reg(data, BMI323_STEP_SC1_REG, + ®_value); + if (ret) + return ret; + + raw = FIELD_GET(BMI323_STEP_SC1_WTRMRK_MSK, reg_value); + *val = raw * 20; + return IIO_VAL_INT; + default: + return -EINVAL; + } +} + +static int __bmi323_fifo_flush(struct iio_dev *indio_dev) +{ + struct bmi323_data *data = iio_priv(indio_dev); + int i, ret, fifo_lvl, frame_count, bit, index; + __le16 *frame, *pchannels; + u64 sample_period; + s64 tstamp; + + guard(mutex)(&data->mutex); + ret = regmap_read(data->regmap, BMI323_FIFO_FILL_LEVEL_REG, &fifo_lvl); + if (ret) + return ret; + + fifo_lvl = min(fifo_lvl, BMI323_FIFO_FULL_IN_WORDS); + + frame_count = fifo_lvl / BMI323_FIFO_FRAME_LENGTH; + if (!frame_count) + return -EINVAL; + + if (fifo_lvl % BMI323_FIFO_FRAME_LENGTH) + dev_warn(data->dev, "Bad FIFO alignment\n"); + + /* + * Approximate timestamps for each of the sample based on the sampling + * frequency, timestamp for last sample and number of samples. 
+ */ + if (data->old_fifo_tstamp) { + sample_period = data->fifo_tstamp - data->old_fifo_tstamp; + do_div(sample_period, frame_count); + } else { + sample_period = data->odrns[BMI323_ACCEL]; + } + + tstamp = data->fifo_tstamp - (frame_count - 1) * sample_period; + + ret = regmap_noinc_read(data->regmap, BMI323_FIFO_DATA_REG, + &data->fifo_buff[0], + fifo_lvl * BMI323_BYTES_PER_SAMPLE); + if (ret) + return ret; + + for (i = 0; i < frame_count; i++) { + frame = &data->fifo_buff[i * BMI323_FIFO_FRAME_LENGTH]; + pchannels = &data->buffer.channels[0]; + + index = 0; + for_each_set_bit(bit, indio_dev->active_scan_mask, + BMI323_CHAN_MAX) + pchannels[index++] = frame[bit]; + + iio_push_to_buffers_with_timestamp(indio_dev, &data->buffer, + tstamp); + + tstamp += sample_period; + } + + return frame_count; +} + +static int bmi323_set_watermark(struct iio_dev *indio_dev, unsigned int val) +{ + struct bmi323_data *data = iio_priv(indio_dev); + + val = min(val, (u32)BMI323_FIFO_FULL_IN_FRAMES); + + guard(mutex)(&data->mutex); + data->watermark = val; + + return 0; +} + +static int bmi323_fifo_disable(struct bmi323_data *data) +{ + int ret; + + guard(mutex)(&data->mutex); + ret = regmap_write(data->regmap, BMI323_FIFO_CONF_REG, 0); + if (ret) + return ret; + + ret = regmap_update_bits(data->regmap, BMI323_INT_MAP2_REG, + BMI323_FIFO_WTRMRK_MSK, + FIELD_PREP(BMI323_FIFO_WTRMRK_MSK, 0)); + if (ret) + return ret; + + data->fifo_tstamp = 0; + data->state = BMI323_IDLE; + + return 0; +} + +static int bmi323_buffer_predisable(struct iio_dev *indio_dev) +{ + struct bmi323_data *data = iio_priv(indio_dev); + + if (iio_device_get_current_mode(indio_dev) == INDIO_BUFFER_TRIGGERED) + return 0; + + return bmi323_fifo_disable(data); +} + +static int bmi323_update_watermark(struct bmi323_data *data) +{ + int wtrmrk; + + wtrmrk = data->watermark * BMI323_FIFO_FRAME_LENGTH; + + return regmap_write(data->regmap, BMI323_FIFO_WTRMRK_REG, wtrmrk); +} + +static int bmi323_fifo_enable(struct bmi323_data *data) +{ + int ret; + + guard(mutex)(&data->mutex); + ret = regmap_update_bits(data->regmap, BMI323_FIFO_CONF_REG, + BMI323_FIFO_CONF_ACC_GYR_EN_MSK, + FIELD_PREP(BMI323_FIFO_CONF_ACC_GYR_EN_MSK, + BMI323_FIFO_ACC_GYR_MSK)); + if (ret) + return ret; + + ret = regmap_update_bits(data->regmap, BMI323_INT_MAP2_REG, + BMI323_FIFO_WTRMRK_MSK, + FIELD_PREP(BMI323_FIFO_WTRMRK_MSK, + data->irq_pin)); + if (ret) + return ret; + + ret = bmi323_update_watermark(data); + if (ret) + return ret; + + ret = regmap_write(data->regmap, BMI323_FIFO_CTRL_REG, + BMI323_FIFO_FLUSH_MSK); + if (ret) + return ret; + + data->state = BMI323_BUFFER_FIFO; + + return 0; +} + +static int bmi323_buffer_preenable(struct iio_dev *indio_dev) +{ + struct bmi323_data *data = iio_priv(indio_dev); + + guard(mutex)(&data->mutex); + /* + * When the ODR of the accelerometer and gyroscope do not match, the + * maximum ODR value between the accelerometer and gyroscope is used + * for FIFO and the signal with lower ODR will insert dummy frame. + * So allow buffer read only when ODR's of accelero and gyro are equal. + * See datasheet section 5.7 "FIFO Data Buffering". 
+ */ + if (data->odrns[BMI323_ACCEL] != data->odrns[BMI323_GYRO]) { + dev_err(data->dev, "Accelero and Gyro ODR doesn't match\n"); + return -EINVAL; + } + + return 0; +} + +static int bmi323_buffer_postenable(struct iio_dev *indio_dev) +{ + struct bmi323_data *data = iio_priv(indio_dev); + + if (iio_device_get_current_mode(indio_dev) == INDIO_BUFFER_TRIGGERED) + return 0; + + return bmi323_fifo_enable(data); +} + +static ssize_t hwfifo_watermark_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct iio_dev *indio_dev = dev_to_iio_dev(dev); + struct bmi323_data *data = iio_priv(indio_dev); + int wm; + + scoped_guard(mutex, &data->mutex) + wm = data->watermark; + + return sysfs_emit(buf, "%d\n", wm); +} +static IIO_DEVICE_ATTR_RO(hwfifo_watermark, 0); + +static ssize_t hwfifo_enabled_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + struct iio_dev *indio_dev = dev_to_iio_dev(dev); + struct bmi323_data *data = iio_priv(indio_dev); + bool state; + + scoped_guard(mutex, &data->mutex) + state = data->state == BMI323_BUFFER_FIFO; + + return sysfs_emit(buf, "%d\n", state); +} +static IIO_DEVICE_ATTR_RO(hwfifo_enabled, 0); + +static const struct iio_dev_attr *bmi323_fifo_attributes[] = { + &iio_dev_attr_hwfifo_watermark, + &iio_dev_attr_hwfifo_enabled, + NULL +}; + +static const struct iio_buffer_setup_ops bmi323_buffer_ops = { + .preenable = bmi323_buffer_preenable, + .postenable = bmi323_buffer_postenable, + .predisable = bmi323_buffer_predisable, +}; + +static irqreturn_t bmi323_irq_thread_handler(int irq, void *private) +{ + struct iio_dev *indio_dev = private; + struct bmi323_data *data = iio_priv(indio_dev); + unsigned int status_addr, status, feature_event; + s64 timestamp = iio_get_time_ns(indio_dev); + int ret; + + if (data->irq_pin == BMI323_IRQ_INT1) + status_addr = BMI323_STATUS_INT1_REG; + else + status_addr = BMI323_STATUS_INT2_REG; + + scoped_guard(mutex, &data->mutex) { + ret = regmap_read(data->regmap, status_addr, &status); + if (ret) + return IRQ_NONE; + } + + if (!status || FIELD_GET(BMI323_STATUS_ERROR_MSK, status)) + return IRQ_NONE; + + if (FIELD_GET(BMI323_STATUS_FIFO_WTRMRK_MSK, status)) { + data->old_fifo_tstamp = data->fifo_tstamp; + data->fifo_tstamp = iio_get_time_ns(indio_dev); + ret = __bmi323_fifo_flush(indio_dev); + if (ret < 0) + return IRQ_NONE; + } + + if (FIELD_GET(BMI323_STATUS_ACC_GYR_DRDY_MSK, status)) + iio_trigger_poll_nested(data->trig); + + if (FIELD_GET(BMI323_STATUS_MOTION_MSK, status)) + iio_push_event(indio_dev, IIO_MOD_EVENT_CODE(IIO_ACCEL, 0, + IIO_MOD_X_OR_Y_OR_Z, + IIO_EV_TYPE_MAG, + IIO_EV_DIR_RISING), + timestamp); + + if (FIELD_GET(BMI323_STATUS_NOMOTION_MSK, status)) + iio_push_event(indio_dev, IIO_MOD_EVENT_CODE(IIO_ACCEL, 0, + IIO_MOD_X_OR_Y_OR_Z, + IIO_EV_TYPE_MAG, + IIO_EV_DIR_FALLING), + timestamp); + + if (FIELD_GET(BMI323_STATUS_STP_WTR_MSK, status)) + iio_push_event(indio_dev, IIO_MOD_EVENT_CODE(IIO_STEPS, 0, + IIO_NO_MOD, + IIO_EV_TYPE_CHANGE, + IIO_EV_DIR_NONE), + timestamp); + + if (FIELD_GET(BMI323_STATUS_TAP_MSK, status)) { + scoped_guard(mutex, &data->mutex) { + ret = regmap_read(data->regmap, + BMI323_FEAT_EVNT_EXT_REG, + &feature_event); + if (ret) + return IRQ_NONE; + } + + if (FIELD_GET(BMI323_FEAT_EVNT_EXT_S_MSK, feature_event)) { + iio_push_event(indio_dev, + IIO_MOD_EVENT_CODE(IIO_ACCEL, 0, + IIO_MOD_X_OR_Y_OR_Z, + IIO_EV_TYPE_GESTURE, + IIO_EV_DIR_SINGLETAP), + timestamp); + } + + if (FIELD_GET(BMI323_FEAT_EVNT_EXT_D_MSK, feature_event)) + iio_push_event(indio_dev, + 
IIO_MOD_EVENT_CODE(IIO_ACCEL, 0, + IIO_MOD_X_OR_Y_OR_Z, + IIO_EV_TYPE_GESTURE, + IIO_EV_DIR_DOUBLETAP), + timestamp); + } + + return IRQ_HANDLED; +} + +static int bmi323_set_drdy_irq(struct bmi323_data *data, + enum bmi323_irq_pin irq_pin) +{ + int ret; + + ret = regmap_update_bits(data->regmap, BMI323_INT_MAP2_REG, + BMI323_GYR_DRDY_MSK, + FIELD_PREP(BMI323_GYR_DRDY_MSK, irq_pin)); + if (ret) + return ret; + + return regmap_update_bits(data->regmap, BMI323_INT_MAP2_REG, + BMI323_ACC_DRDY_MSK, + FIELD_PREP(BMI323_ACC_DRDY_MSK, irq_pin)); +} + +static int bmi323_data_rdy_trigger_set_state(struct iio_trigger *trig, + bool state) +{ + struct bmi323_data *data = iio_trigger_get_drvdata(trig); + enum bmi323_irq_pin irq_pin; + + guard(mutex)(&data->mutex); + + if (data->state == BMI323_BUFFER_FIFO) { + dev_warn(data->dev, "Can't set trigger when FIFO enabled\n"); + return -EBUSY; + } + + if (state) { + data->state = BMI323_BUFFER_DRDY_TRIGGERED; + irq_pin = data->irq_pin; + } else { + data->state = BMI323_IDLE; + irq_pin = BMI323_IRQ_DISABLED; + } + + return bmi323_set_drdy_irq(data, irq_pin); +} + +static const struct iio_trigger_ops bmi323_trigger_ops = { + .set_trigger_state = &bmi323_data_rdy_trigger_set_state, +}; + +static irqreturn_t bmi323_trigger_handler(int irq, void *p) +{ + struct iio_poll_func *pf = p; + struct iio_dev *indio_dev = pf->indio_dev; + struct bmi323_data *data = iio_priv(indio_dev); + int ret, bit, index = 0; + + /* Lock to protect the data->buffer */ + guard(mutex)(&data->mutex); + + if (*indio_dev->active_scan_mask == BMI323_ALL_CHAN_MSK) { + ret = regmap_bulk_read(data->regmap, BMI323_ACCEL_X_REG, + &data->buffer.channels, + ARRAY_SIZE(data->buffer.channels)); + if (ret) + return IRQ_NONE; + } else { + for_each_set_bit(bit, indio_dev->active_scan_mask, + BMI323_CHAN_MAX) { + ret = regmap_raw_read(data->regmap, + BMI323_ACCEL_X_REG + bit, + &data->buffer.channels[index++], + BMI323_BYTES_PER_SAMPLE); + if (ret) + return IRQ_NONE; + } + } + + iio_push_to_buffers_with_timestamp(indio_dev, &data->buffer, + iio_get_time_ns(indio_dev)); + + iio_trigger_notify_done(indio_dev->trig); + + return IRQ_HANDLED; +} + +static int bmi323_set_average(struct bmi323_data *data, + enum bmi323_sensor_type sensor, int avg) +{ + int raw = ARRAY_SIZE(bmi323_accel_gyro_avrg); + + while (raw--) + if (avg == bmi323_accel_gyro_avrg[raw]) + break; + if (raw < 0) + return -EINVAL; + + guard(mutex)(&data->mutex); + return regmap_update_bits(data->regmap, bmi323_hw[sensor].config, + BMI323_ACC_GYRO_CONF_AVG_MSK, + FIELD_PREP(BMI323_ACC_GYRO_CONF_AVG_MSK, + raw)); +} + +static int bmi323_get_average(struct bmi323_data *data, + enum bmi323_sensor_type sensor, int *avg) +{ + int ret, value, raw; + + scoped_guard(mutex, &data->mutex) { + ret = regmap_read(data->regmap, bmi323_hw[sensor].config, &value); + if (ret) + return ret; + } + + raw = FIELD_GET(BMI323_ACC_GYRO_CONF_AVG_MSK, value); + *avg = bmi323_accel_gyro_avrg[raw]; + + return IIO_VAL_INT; +} + +static int bmi323_enable_steps(struct bmi323_data *data, int val) +{ + int ret; + + guard(mutex)(&data->mutex); + if (data->odrhz[BMI323_ACCEL] < 200) { + dev_err(data->dev, "Invalid accelerometer parameter\n"); + return -EINVAL; + } + + ret = bmi323_feature_engine_events(data, BMI323_FEAT_IO0_STP_CNT_MSK, + val ? 1 : 0); + if (ret) + return ret; + + set_mask_bits(&data->feature_events, BMI323_FEAT_IO0_STP_CNT_MSK, + FIELD_PREP(BMI323_FEAT_IO0_STP_CNT_MSK, val ? 
1 : 0)); + + return 0; +} + +static int bmi323_read_steps(struct bmi323_data *data, int *val) +{ + int ret; + + guard(mutex)(&data->mutex); + if (!FIELD_GET(BMI323_FEAT_IO0_STP_CNT_MSK, data->feature_events)) + return -EINVAL; + + ret = regmap_bulk_read(data->regmap, BMI323_FEAT_IO2_REG, + data->steps_count, + ARRAY_SIZE(data->steps_count)); + if (ret) + return ret; + + *val = get_unaligned_le32(data->steps_count); + + return IIO_VAL_INT; +} + +static int bmi323_read_axis(struct bmi323_data *data, + struct iio_chan_spec const *chan, int *val) +{ + enum bmi323_sensor_type sensor; + unsigned int value; + u8 addr; + int ret; + + ret = bmi323_get_error_status(data); + if (ret) + return -EINVAL; + + sensor = bmi323_iio_to_sensor(chan->type); + addr = bmi323_hw[sensor].data + (chan->channel2 - IIO_MOD_X); + + scoped_guard(mutex, &data->mutex) { + ret = regmap_read(data->regmap, addr, &value); + if (ret) + return ret; + } + + *val = sign_extend32(value, chan->scan_type.realbits - 1); + + return IIO_VAL_INT; +} + +static int bmi323_get_temp_data(struct bmi323_data *data, int *val) +{ + unsigned int value; + int ret; + + ret = bmi323_get_error_status(data); + if (ret) + return -EINVAL; + + scoped_guard(mutex, &data->mutex) { + ret = regmap_read(data->regmap, BMI323_TEMP_REG, &value); + if (ret) + return ret; + } + + *val = sign_extend32(value, 15); + + return IIO_VAL_INT; +} + +static int bmi323_get_odr(struct bmi323_data *data, + enum bmi323_sensor_type sensor, int *odr, int *uodr) +{ + int ret, value, odr_raw; + + scoped_guard(mutex, &data->mutex) { + ret = regmap_read(data->regmap, bmi323_hw[sensor].config, &value); + if (ret) + return ret; + } + + odr_raw = FIELD_GET(BMI323_ACC_GYRO_CONF_ODR_MSK, value); + *odr = bmi323_acc_gyro_odr[odr_raw - 1][0]; + *uodr = bmi323_acc_gyro_odr[odr_raw - 1][1]; + + return IIO_VAL_INT_PLUS_MICRO; +} + +static int bmi323_configure_power_mode(struct bmi323_data *data, + enum bmi323_sensor_type sensor, + int odr_index) +{ + enum bmi323_opr_mode mode; + + if (bmi323_acc_gyro_odr[odr_index][0] > 25) + mode = ACC_GYRO_MODE_CONTINOUS; + else + mode = ACC_GYRO_MODE_DUTYCYCLE; + + return bmi323_set_mode(data, sensor, mode); +} + +static int bmi323_set_odr(struct bmi323_data *data, + enum bmi323_sensor_type sensor, int odr, int uodr) +{ + int odr_raw, ret; + + odr_raw = ARRAY_SIZE(bmi323_acc_gyro_odr); + + while (odr_raw--) + if (odr == bmi323_acc_gyro_odr[odr_raw][0] && + uodr == bmi323_acc_gyro_odr[odr_raw][1]) + break; + if (odr_raw < 0) + return -EINVAL; + + ret = bmi323_configure_power_mode(data, sensor, odr_raw); + if (ret) + return -EINVAL; + + guard(mutex)(&data->mutex); + data->odrhz[sensor] = bmi323_acc_gyro_odr[odr_raw][0]; + data->odrns[sensor] = bmi323_acc_gyro_odrns[odr_raw]; + + odr_raw++; + + return regmap_update_bits(data->regmap, bmi323_hw[sensor].config, + BMI323_ACC_GYRO_CONF_ODR_MSK, + FIELD_PREP(BMI323_ACC_GYRO_CONF_ODR_MSK, + odr_raw)); +} + +static int bmi323_get_scale(struct bmi323_data *data, + enum bmi323_sensor_type sensor, int *val2) +{ + int ret, value, scale_raw; + + scoped_guard(mutex, &data->mutex) { + ret = regmap_read(data->regmap, bmi323_hw[sensor].config, + &value); + if (ret) + return ret; + } + + scale_raw = FIELD_GET(BMI323_ACC_GYRO_CONF_SCL_MSK, value); + *val2 = bmi323_hw[sensor].scale_table[scale_raw][1]; + + return IIO_VAL_INT_PLUS_MICRO; +} + +static int bmi323_set_scale(struct bmi323_data *data, + enum bmi323_sensor_type sensor, int val, int val2) +{ + int scale_raw; + + scale_raw = bmi323_hw[sensor].scale_table_len; + + 
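
bmi323_get_odr() above indexes the ODR table with the raw field value minus one, and bmi323_set_odr() increments the matched table index before writing it back, so the hardware field is always table index + 1 (presumably because a raw value of zero is reserved; that reading is an assumption, the datasheet is the authority). A stand-alone sketch of that reverse-lookup-plus-offset pattern with a made-up table:

	#include <stdio.h>

	/* Hypothetical ODR table in Hz; register field value = index + 1. */
	static const int odr_table_hz[] = { 1, 3, 6, 12, 25, 50, 100, 200, 400, 800 };
	#define ODR_TABLE_LEN (int)(sizeof(odr_table_hz) / sizeof(odr_table_hz[0]))

	static int odr_hz_to_field(int hz)
	{
		int i = ODR_TABLE_LEN;

		while (i--)
			if (odr_table_hz[i] == hz)
				return i + 1;	/* field values start at 1 */
		return -1;			/* rate not supported */
	}

	static int odr_field_to_hz(int field)
	{
		if (field < 1 || field > ODR_TABLE_LEN)
			return -1;
		return odr_table_hz[field - 1];
	}

	int main(void)
	{
		printf("25 Hz -> field %d\n", odr_hz_to_field(25));
		printf("field 5 -> %d Hz\n", odr_field_to_hz(5));
		return 0;
	}
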
while (scale_raw--) + if (val == bmi323_hw[sensor].scale_table[scale_raw][0] && + val2 == bmi323_hw[sensor].scale_table[scale_raw][1]) + break; + if (scale_raw < 0) + return -EINVAL; + + guard(mutex)(&data->mutex); + return regmap_update_bits(data->regmap, bmi323_hw[sensor].config, + BMI323_ACC_GYRO_CONF_SCL_MSK, + FIELD_PREP(BMI323_ACC_GYRO_CONF_SCL_MSK, + scale_raw)); +} + +static int bmi323_read_avail(struct iio_dev *indio_dev, + struct iio_chan_spec const *chan, + const int **vals, int *type, int *length, + long mask) +{ + enum bmi323_sensor_type sensor; + + switch (mask) { + case IIO_CHAN_INFO_SAMP_FREQ: + *type = IIO_VAL_INT_PLUS_MICRO; + *vals = (const int *)bmi323_acc_gyro_odr; + *length = ARRAY_SIZE(bmi323_acc_gyro_odr) * 2; + return IIO_AVAIL_LIST; + case IIO_CHAN_INFO_SCALE: + sensor = bmi323_iio_to_sensor(chan->type); + *type = IIO_VAL_INT_PLUS_MICRO; + *vals = (const int *)bmi323_hw[sensor].scale_table; + *length = bmi323_hw[sensor].scale_table_len * 2; + return IIO_AVAIL_LIST; + case IIO_CHAN_INFO_OVERSAMPLING_RATIO: + *type = IIO_VAL_INT; + *vals = (const int *)bmi323_accel_gyro_avrg; + *length = ARRAY_SIZE(bmi323_accel_gyro_avrg); + return IIO_AVAIL_LIST; + default: + return -EINVAL; + } +} + +static int bmi323_write_raw(struct iio_dev *indio_dev, + struct iio_chan_spec const *chan, int val, + int val2, long mask) +{ + struct bmi323_data *data = iio_priv(indio_dev); + int ret; + + switch (mask) { + case IIO_CHAN_INFO_SAMP_FREQ: + ret = iio_device_claim_direct_mode(indio_dev); + if (ret) + return ret; + + ret = bmi323_set_odr(data, bmi323_iio_to_sensor(chan->type), + val, val2); + iio_device_release_direct_mode(indio_dev); + return ret; + case IIO_CHAN_INFO_SCALE: + ret = iio_device_claim_direct_mode(indio_dev); + if (ret) + return ret; + + ret = bmi323_set_scale(data, bmi323_iio_to_sensor(chan->type), + val, val2); + iio_device_release_direct_mode(indio_dev); + return ret; + case IIO_CHAN_INFO_OVERSAMPLING_RATIO: + ret = iio_device_claim_direct_mode(indio_dev); + if (ret) + return ret; + + ret = bmi323_set_average(data, bmi323_iio_to_sensor(chan->type), + val); + + iio_device_release_direct_mode(indio_dev); + return ret; + case IIO_CHAN_INFO_ENABLE: + return bmi323_enable_steps(data, val); + case IIO_CHAN_INFO_PROCESSED: + scoped_guard(mutex, &data->mutex) { + if (val || !FIELD_GET(BMI323_FEAT_IO0_STP_CNT_MSK, + data->feature_events)) + return -EINVAL; + + /* Clear step counter value */ + ret = bmi323_update_ext_reg(data, BMI323_STEP_SC1_REG, + BMI323_STEP_SC1_RST_CNT_MSK, + FIELD_PREP(BMI323_STEP_SC1_RST_CNT_MSK, + 1)); + } + return ret; + default: + return -EINVAL; + } +} + +static int bmi323_read_raw(struct iio_dev *indio_dev, + struct iio_chan_spec const *chan, int *val, + int *val2, long mask) +{ + struct bmi323_data *data = iio_priv(indio_dev); + int ret; + + switch (mask) { + case IIO_CHAN_INFO_PROCESSED: + return bmi323_read_steps(data, val); + case IIO_CHAN_INFO_RAW: + switch (chan->type) { + case IIO_ACCEL: + case IIO_ANGL_VEL: + ret = iio_device_claim_direct_mode(indio_dev); + if (ret) + return ret; + + ret = bmi323_read_axis(data, chan, val); + + iio_device_release_direct_mode(indio_dev); + return ret; + case IIO_TEMP: + return bmi323_get_temp_data(data, val); + default: + return -EINVAL; + } + case IIO_CHAN_INFO_SAMP_FREQ: + return bmi323_get_odr(data, bmi323_iio_to_sensor(chan->type), + val, val2); + case IIO_CHAN_INFO_SCALE: + switch (chan->type) { + case IIO_ACCEL: + case IIO_ANGL_VEL: + *val = 0; + return bmi323_get_scale(data, + 
bmi323_iio_to_sensor(chan->type), + val2); + case IIO_TEMP: + *val = BMI323_TEMP_SCALE / MEGA; + *val2 = BMI323_TEMP_SCALE % MEGA; + return IIO_VAL_INT_PLUS_MICRO; + default: + return -EINVAL; + } + case IIO_CHAN_INFO_OVERSAMPLING_RATIO: + return bmi323_get_average(data, + bmi323_iio_to_sensor(chan->type), + val); + case IIO_CHAN_INFO_OFFSET: + switch (chan->type) { + case IIO_TEMP: + *val = BMI323_TEMP_OFFSET; + return IIO_VAL_INT; + default: + return -EINVAL; + } + case IIO_CHAN_INFO_ENABLE: + scoped_guard(mutex, &data->mutex) + *val = FIELD_GET(BMI323_FEAT_IO0_STP_CNT_MSK, + data->feature_events); + return IIO_VAL_INT; + default: + return -EINVAL; + } +} + +static const struct iio_info bmi323_info = { + .read_raw = bmi323_read_raw, + .write_raw = bmi323_write_raw, + .read_avail = bmi323_read_avail, + .hwfifo_set_watermark = bmi323_set_watermark, + .write_event_config = bmi323_write_event_config, + .read_event_config = bmi323_read_event_config, + .write_event_value = bmi323_write_event_value, + .read_event_value = bmi323_read_event_value, + .event_attrs = &bmi323_event_attribute_group, +}; + +#define BMI323_SCAN_MASK_ACCEL_3AXIS \ + (BIT(BMI323_ACCEL_X) | BIT(BMI323_ACCEL_Y) | BIT(BMI323_ACCEL_Z)) + +#define BMI323_SCAN_MASK_GYRO_3AXIS \ + (BIT(BMI323_GYRO_X) | BIT(BMI323_GYRO_Y) | BIT(BMI323_GYRO_Z)) + +static const unsigned long bmi323_avail_scan_masks[] = { + /* 3-axis accel */ + BMI323_SCAN_MASK_ACCEL_3AXIS, + /* 3-axis gyro */ + BMI323_SCAN_MASK_GYRO_3AXIS, + /* 3-axis accel + 3-axis gyro */ + BMI323_SCAN_MASK_ACCEL_3AXIS | BMI323_SCAN_MASK_GYRO_3AXIS, + 0 +}; + +static int bmi323_int_pin_config(struct bmi323_data *data, + enum bmi323_irq_pin irq_pin, + bool active_high, bool open_drain, bool latch) +{ + unsigned int mask, field_value; + int ret; + + ret = regmap_update_bits(data->regmap, BMI323_IO_INT_CONF_REG, + BMI323_IO_INT_LTCH_MSK, + FIELD_PREP(BMI323_IO_INT_LTCH_MSK, latch)); + if (ret) + return ret; + + ret = bmi323_update_ext_reg(data, BMI323_GEN_SET1_REG, + BMI323_GEN_HOLD_DUR_MSK, + FIELD_PREP(BMI323_GEN_HOLD_DUR_MSK, 0)); + if (ret) + return ret; + + switch (irq_pin) { + case BMI323_IRQ_INT1: + mask = BMI323_IO_INT1_LVL_OD_OP_MSK; + + field_value = FIELD_PREP(BMI323_IO_INT1_LVL_MSK, active_high) | + FIELD_PREP(BMI323_IO_INT1_OD_MSK, open_drain) | + FIELD_PREP(BMI323_IO_INT1_OP_EN_MSK, 1); + break; + case BMI323_IRQ_INT2: + mask = BMI323_IO_INT2_LVL_OD_OP_MSK; + + field_value = FIELD_PREP(BMI323_IO_INT2_LVL_MSK, active_high) | + FIELD_PREP(BMI323_IO_INT2_OD_MSK, open_drain) | + FIELD_PREP(BMI323_IO_INT2_OP_EN_MSK, 1); + break; + default: + return -EINVAL; + } + + return regmap_update_bits(data->regmap, BMI323_IO_INT_CTR_REG, mask, + field_value); +} + +static int bmi323_trigger_probe(struct bmi323_data *data, + struct iio_dev *indio_dev) +{ + bool open_drain, active_high, latch; + struct fwnode_handle *fwnode; + enum bmi323_irq_pin irq_pin; + int ret, irq, irq_type; + struct irq_data *desc; + + fwnode = dev_fwnode(data->dev); + if (!fwnode) + return -ENODEV; + + irq = fwnode_irq_get_byname(fwnode, "INT1"); + if (irq > 0) { + irq_pin = BMI323_IRQ_INT1; + } else { + irq = fwnode_irq_get_byname(fwnode, "INT2"); + if (irq < 0) + return 0; + + irq_pin = BMI323_IRQ_INT2; + } + + desc = irq_get_irq_data(irq); + if (!desc) + return dev_err_probe(data->dev, -EINVAL, + "Could not find IRQ %d\n", irq); + + irq_type = irqd_get_trigger_type(desc); + switch (irq_type) { + case IRQF_TRIGGER_RISING: + latch = false; + active_high = true; + break; + case IRQF_TRIGGER_HIGH: + latch = true; 
+ active_high = true; + break; + case IRQF_TRIGGER_FALLING: + latch = false; + active_high = false; + break; + case IRQF_TRIGGER_LOW: + latch = true; + active_high = false; + break; + default: + return dev_err_probe(data->dev, -EINVAL, + "Invalid interrupt type 0x%x specified\n", + irq_type); + } + + open_drain = fwnode_property_read_bool(fwnode, "drive-open-drain"); + + ret = bmi323_int_pin_config(data, irq_pin, active_high, open_drain, + latch); + if (ret) + return dev_err_probe(data->dev, ret, + "Failed to configure irq line\n"); + + data->trig = devm_iio_trigger_alloc(data->dev, "%s-trig-%d", + indio_dev->name, irq_pin); + if (!data->trig) + return -ENOMEM; + + data->trig->ops = &bmi323_trigger_ops; + iio_trigger_set_drvdata(data->trig, data); + + ret = devm_request_threaded_irq(data->dev, irq, NULL, + bmi323_irq_thread_handler, + IRQF_ONESHOT, "bmi323-int", indio_dev); + if (ret) + return dev_err_probe(data->dev, ret, "Failed to request IRQ\n"); + + ret = devm_iio_trigger_register(data->dev, data->trig); + if (ret) + return dev_err_probe(data->dev, ret, + "Trigger registration failed\n"); + + data->irq_pin = irq_pin; + + return 0; +} + +static int bmi323_feature_engine_enable(struct bmi323_data *data, bool en) +{ + unsigned int feature_status; + int ret; + + if (!en) + return regmap_write(data->regmap, BMI323_FEAT_CTRL_REG, 0); + + ret = regmap_write(data->regmap, BMI323_FEAT_IO2_REG, 0x012c); + if (ret) + return ret; + + ret = regmap_write(data->regmap, BMI323_FEAT_IO_STATUS_REG, + BMI323_FEAT_IO_STATUS_MSK); + if (ret) + return ret; + + ret = regmap_write(data->regmap, BMI323_FEAT_CTRL_REG, + BMI323_FEAT_ENG_EN_MSK); + if (ret) + return ret; + + /* + * It takes around 4 msec to enable the Feature engine, so check + * the status of the feature engine every 2 msec for a maximum + * of 5 trials. + */ + ret = regmap_read_poll_timeout(data->regmap, BMI323_FEAT_IO1_REG, + feature_status, + FIELD_GET(BMI323_FEAT_IO1_ERR_MSK, + feature_status) == 1, + BMI323_FEAT_ENG_POLL, + BMI323_FEAT_ENG_TIMEOUT); + if (ret) + return dev_err_probe(data->dev, -EINVAL, + "Failed to enable feature engine\n"); + + return 0; +} + +static void bmi323_disable(void *data_ptr) +{ + struct bmi323_data *data = data_ptr; + + bmi323_set_mode(data, BMI323_ACCEL, ACC_GYRO_MODE_DISABLE); + bmi323_set_mode(data, BMI323_GYRO, ACC_GYRO_MODE_DISABLE); +} + +static int bmi323_set_bw(struct bmi323_data *data, + enum bmi323_sensor_type sensor, enum bmi323_3db_bw bw) +{ + return regmap_update_bits(data->regmap, bmi323_hw[sensor].config, + BMI323_ACC_GYRO_CONF_BW_MSK, + FIELD_PREP(BMI323_ACC_GYRO_CONF_BW_MSK, bw)); +} + +static int bmi323_init(struct bmi323_data *data) +{ + int ret, val; + + /* + * Perform soft reset to make sure the device is in a known state after + * start up. A delay of 1.5 ms is required after reset. + * See datasheet section 5.17 "Soft Reset". + */ + ret = regmap_write(data->regmap, BMI323_CMD_REG, BMI323_RST_VAL); + if (ret) + return ret; + + usleep_range(1500, 2000); + + /* + * Dummy read is required to enable SPI interface after reset. + * See datasheet section 7.2.1 "Protocol Selection". 
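
The feature-engine bring-up above polls BMI323_FEAT_IO1_REG with regmap_read_poll_timeout(), checking roughly every 2 ms for at most about 5 tries, per the comment. A minimal user-space rendering of the same sleep/poll/timeout pattern, with the register read stubbed out and all names hypothetical:

	#include <stdio.h>
	#include <unistd.h>

	#define POLL_US		2000	/* poll every 2 ms */
	#define TIMEOUT_US	10000	/* give up after 10 ms (~5 tries) */

	/* Stand-in for reading the feature-engine status register. */
	static int read_ready_flag(void)
	{
		static int calls;

		return ++calls >= 3;	/* pretend the engine is ready on call 3 */
	}

	static int wait_for_ready(void)
	{
		int waited = 0;

		while (!read_ready_flag()) {
			if (waited >= TIMEOUT_US)
				return -1;	/* timed out */
			usleep(POLL_US);
			waited += POLL_US;
		}
		return 0;
	}

	int main(void)
	{
		printf("feature engine %s\n", wait_for_ready() ? "timed out" : "ready");
		return 0;
	}
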
+ */ + regmap_read(data->regmap, BMI323_CHIP_ID_REG, &val); + + ret = regmap_read(data->regmap, BMI323_STATUS_REG, &val); + if (ret) + return ret; + + if (!FIELD_GET(BMI323_STATUS_POR_MSK, val)) + return dev_err_probe(data->dev, -EINVAL, + "Sensor initialization error\n"); + + ret = regmap_read(data->regmap, BMI323_CHIP_ID_REG, &val); + if (ret) + return ret; + + if (FIELD_GET(BMI323_CHIP_ID_MSK, val) != BMI323_CHIP_ID_VAL) + return dev_err_probe(data->dev, -EINVAL, "Chip ID mismatch\n"); + + ret = bmi323_feature_engine_enable(data, true); + if (ret) + return ret; + + ret = regmap_read(data->regmap, BMI323_ERR_REG, &val); + if (ret) + return ret; + + if (val) + return dev_err_probe(data->dev, -EINVAL, + "Sensor power error = 0x%x\n", val); + + /* + * Set the Bandwidth coefficient which defines the 3 dB cutoff + * frequency in relation to the ODR. + */ + ret = bmi323_set_bw(data, BMI323_ACCEL, BMI323_BW_ODR_BY_2); + if (ret) + return ret; + + ret = bmi323_set_bw(data, BMI323_GYRO, BMI323_BW_ODR_BY_2); + if (ret) + return ret; + + ret = bmi323_set_odr(data, BMI323_ACCEL, 25, 0); + if (ret) + return ret; + + ret = bmi323_set_odr(data, BMI323_GYRO, 25, 0); + if (ret) + return ret; + + return devm_add_action_or_reset(data->dev, bmi323_disable, data); +} + +int bmi323_core_probe(struct device *dev) +{ + static const char * const regulator_names[] = { "vdd", "vddio" }; + struct iio_dev *indio_dev; + struct bmi323_data *data; + struct regmap *regmap; + int ret; + + regmap = dev_get_regmap(dev, NULL); + if (!regmap) + return dev_err_probe(dev, -ENODEV, "Failed to get regmap\n"); + + indio_dev = devm_iio_device_alloc(dev, sizeof(*data)); + if (!indio_dev) + return dev_err_probe(dev, -ENOMEM, + "Failed to allocate device\n"); + + ret = devm_regulator_bulk_get_enable(dev, ARRAY_SIZE(regulator_names), + regulator_names); + if (ret) + return dev_err_probe(dev, ret, "Failed to enable regulators\n"); + + data = iio_priv(indio_dev); + data->dev = dev; + data->regmap = regmap; + mutex_init(&data->mutex); + + ret = bmi323_init(data); + if (ret) + return -EINVAL; + + ret = iio_read_mount_matrix(dev, &data->orientation); + if (ret) + return ret; + + indio_dev->name = "bmi323-imu"; + indio_dev->info = &bmi323_info; + indio_dev->channels = bmi323_channels; + indio_dev->num_channels = ARRAY_SIZE(bmi323_channels); + indio_dev->available_scan_masks = bmi323_avail_scan_masks; + indio_dev->modes = INDIO_DIRECT_MODE | INDIO_BUFFER_SOFTWARE; + dev_set_drvdata(data->dev, indio_dev); + + ret = bmi323_trigger_probe(data, indio_dev); + if (ret) + return -EINVAL; + + ret = devm_iio_triggered_buffer_setup_ext(data->dev, indio_dev, + &iio_pollfunc_store_time, + bmi323_trigger_handler, + IIO_BUFFER_DIRECTION_IN, + &bmi323_buffer_ops, + bmi323_fifo_attributes); + if (ret) + return dev_err_probe(data->dev, ret, + "Failed to setup trigger buffer\n"); + + ret = devm_iio_device_register(data->dev, indio_dev); + if (ret) + return dev_err_probe(data->dev, ret, + "Unable to register iio device\n"); + + return 0; +} +EXPORT_SYMBOL_NS_GPL(bmi323_core_probe, IIO_BMI323); + +MODULE_DESCRIPTION("Bosch BMI323 IMU driver"); +MODULE_AUTHOR("Jagath Jog J <jagathjog1996@gmail.com>"); +MODULE_LICENSE("GPL"); diff --git a/drivers/iio/imu/bmi323/bmi323_i2c.c b/drivers/iio/imu/bmi323/bmi323_i2c.c new file mode 100644 index 000000000000..20a8001b9956 --- /dev/null +++ b/drivers/iio/imu/bmi323/bmi323_i2c.c @@ -0,0 +1,121 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * I2C driver for Bosch BMI323 6-Axis IMU. 
+ * + * Copyright (C) 2023, Jagath Jog J <jagathjog1996@gmail.com> + */ + +#include <linux/i2c.h> +#include <linux/mod_devicetable.h> +#include <linux/module.h> +#include <linux/regmap.h> + +#include "bmi323.h" + +struct bmi323_i2c_priv { + struct i2c_client *i2c; + u8 i2c_rx_buffer[BMI323_FIFO_LENGTH_IN_BYTES + BMI323_I2C_DUMMY]; +}; + +/* + * From BMI323 datasheet section 4: Notes on the Serial Interface Support. + * Each I2C register read operation requires to read two dummy bytes before + * the actual payload. + */ +static int bmi323_regmap_i2c_read(void *context, const void *reg_buf, + size_t reg_size, void *val_buf, + size_t val_size) +{ + struct bmi323_i2c_priv *priv = context; + struct i2c_msg msgs[2]; + int ret; + + msgs[0].addr = priv->i2c->addr; + msgs[0].flags = priv->i2c->flags; + msgs[0].len = reg_size; + msgs[0].buf = (u8 *)reg_buf; + + msgs[1].addr = priv->i2c->addr; + msgs[1].len = val_size + BMI323_I2C_DUMMY; + msgs[1].buf = priv->i2c_rx_buffer; + msgs[1].flags = priv->i2c->flags | I2C_M_RD; + + ret = i2c_transfer(priv->i2c->adapter, msgs, ARRAY_SIZE(msgs)); + if (ret < 0) + return -EIO; + + memcpy(val_buf, priv->i2c_rx_buffer + BMI323_I2C_DUMMY, val_size); + + return 0; +} + +static int bmi323_regmap_i2c_write(void *context, const void *data, + size_t count) +{ + struct bmi323_i2c_priv *priv = context; + u8 reg; + + reg = *(u8 *)data; + return i2c_smbus_write_i2c_block_data(priv->i2c, reg, + count - sizeof(u8), + data + sizeof(u8)); +} + +static struct regmap_bus bmi323_regmap_bus = { + .read = bmi323_regmap_i2c_read, + .write = bmi323_regmap_i2c_write, +}; + +static const struct regmap_config bmi323_i2c_regmap_config = { + .reg_bits = 8, + .val_bits = 16, + .max_register = BMI323_CFG_RES_REG, + .val_format_endian = REGMAP_ENDIAN_LITTLE, +}; + +static int bmi323_i2c_probe(struct i2c_client *i2c) +{ + struct device *dev = &i2c->dev; + struct bmi323_i2c_priv *priv; + struct regmap *regmap; + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + priv->i2c = i2c; + regmap = devm_regmap_init(dev, &bmi323_regmap_bus, priv, + &bmi323_i2c_regmap_config); + if (IS_ERR(regmap)) + return dev_err_probe(dev, PTR_ERR(regmap), + "Failed to initialize I2C Regmap\n"); + + return bmi323_core_probe(dev); +} + +static const struct i2c_device_id bmi323_i2c_ids[] = { + { "bmi323" }, + { } +}; +MODULE_DEVICE_TABLE(i2c, bmi323_i2c_ids); + +static const struct of_device_id bmi323_of_i2c_match[] = { + { .compatible = "bosch,bmi323" }, + { } +}; +MODULE_DEVICE_TABLE(of, bmi323_of_i2c_match); + +static struct i2c_driver bmi323_i2c_driver = { + .driver = { + .name = "bmi323", + .of_match_table = bmi323_of_i2c_match, + }, + .probe = bmi323_i2c_probe, + .id_table = bmi323_i2c_ids, +}; +module_i2c_driver(bmi323_i2c_driver); + +MODULE_DESCRIPTION("Bosch BMI323 IMU driver"); +MODULE_AUTHOR("Jagath Jog J <jagathjog1996@gmail.com>"); +MODULE_LICENSE("GPL"); +MODULE_IMPORT_NS(IIO_BMI323); diff --git a/drivers/iio/imu/bmi323/bmi323_spi.c b/drivers/iio/imu/bmi323/bmi323_spi.c new file mode 100644 index 000000000000..7b1e8127d0dd --- /dev/null +++ b/drivers/iio/imu/bmi323/bmi323_spi.c @@ -0,0 +1,92 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * SPI driver for Bosch BMI323 6-Axis IMU. 
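
The I2C regmap bus read above reserves val_size + BMI323_I2C_DUMMY bytes in a scratch buffer and copies the payload out from behind the two dummy bytes the device clocks out first; the regmap config declares 16-bit little-endian register values. A stand-alone sketch of that framing with the bus transfer replaced by a canned byte array (everything here is illustrative, not the driver's code):

	#include <stdio.h>
	#include <string.h>
	#include <stdint.h>

	#define DUMMY_BYTES 2	/* BMI323 prepends two dummy bytes on I2C reads */

	/* Pretend this is what the bus returned for a 2-byte register read. */
	static const uint8_t wire[DUMMY_BYTES + 2] = { 0x00, 0x00, 0x43, 0x00 };

	static void read_payload(uint8_t *dst, size_t len)
	{
		/* Real driver: i2c_transfer() fills a scratch buffer of len + 2. */
		memcpy(dst, wire + DUMMY_BYTES, len);
	}

	int main(void)
	{
		uint8_t val[2];

		read_payload(val, sizeof(val));
		/* Register values are little-endian 16-bit words. */
		printf("reg = 0x%04x\n", (unsigned)(val[0] | (val[1] << 8)));
		return 0;
	}
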
+ * + * Copyright (C) 2023, Jagath Jog J <jagathjog1996@gmail.com> + */ + +#include <linux/mod_devicetable.h> +#include <linux/module.h> +#include <linux/regmap.h> +#include <linux/spi/spi.h> + +#include "bmi323.h" + +/* + * From BMI323 datasheet section 4: Notes on the Serial Interface Support. + * Each SPI register read operation requires to read one dummy byte before + * the actual payload. + */ +static int bmi323_regmap_spi_read(void *context, const void *reg_buf, + size_t reg_size, void *val_buf, + size_t val_size) +{ + struct spi_device *spi = context; + + return spi_write_then_read(spi, reg_buf, reg_size, val_buf, val_size); +} + +static int bmi323_regmap_spi_write(void *context, const void *data, + size_t count) +{ + struct spi_device *spi = context; + u8 *data_buff = (u8 *)data; + + data_buff[1] = data_buff[0]; + return spi_write(spi, data_buff + 1, count - 1); +} + +static struct regmap_bus bmi323_regmap_bus = { + .read = bmi323_regmap_spi_read, + .write = bmi323_regmap_spi_write, +}; + +static const struct regmap_config bmi323_spi_regmap_config = { + .reg_bits = 8, + .val_bits = 16, + .pad_bits = 8, + .read_flag_mask = BIT(7), + .max_register = BMI323_CFG_RES_REG, + .val_format_endian = REGMAP_ENDIAN_LITTLE, +}; + +static int bmi323_spi_probe(struct spi_device *spi) +{ + struct device *dev = &spi->dev; + struct regmap *regmap; + + regmap = devm_regmap_init(dev, &bmi323_regmap_bus, dev, + &bmi323_spi_regmap_config); + if (IS_ERR(regmap)) + return dev_err_probe(dev, PTR_ERR(regmap), + "Failed to initialize SPI Regmap\n"); + + return bmi323_core_probe(dev); +} + +static const struct spi_device_id bmi323_spi_ids[] = { + { "bmi323" }, + { } +}; +MODULE_DEVICE_TABLE(spi, bmi323_spi_ids); + +static const struct of_device_id bmi323_of_spi_match[] = { + { .compatible = "bosch,bmi323" }, + { } +}; +MODULE_DEVICE_TABLE(of, bmi323_of_spi_match); + +static struct spi_driver bmi323_spi_driver = { + .driver = { + .name = "bmi323", + .of_match_table = bmi323_of_spi_match, + }, + .probe = bmi323_spi_probe, + .id_table = bmi323_spi_ids, +}; +module_spi_driver(bmi323_spi_driver); + +MODULE_DESCRIPTION("Bosch BMI323 IMU driver"); +MODULE_AUTHOR("Jagath Jog J <jagathjog1996@gmail.com>"); +MODULE_LICENSE("GPL"); +MODULE_IMPORT_NS(IIO_BMI323); diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600_accel.c b/drivers/iio/imu/inv_icm42600/inv_icm42600_accel.c index b1e4fde27d25..f67bd5a39beb 100644 --- a/drivers/iio/imu/inv_icm42600/inv_icm42600_accel.c +++ b/drivers/iio/imu/inv_icm42600/inv_icm42600_accel.c @@ -137,10 +137,7 @@ static int inv_icm42600_accel_update_scan_mode(struct iio_dev *indio_dev, out_unlock: mutex_unlock(&st->lock); /* sleep maximum required time */ - if (sleep_accel > sleep_temp) - sleep = sleep_accel; - else - sleep = sleep_temp; + sleep = max(sleep_accel, sleep_temp); if (sleep) msleep(sleep); return ret; diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600_buffer.c b/drivers/iio/imu/inv_icm42600/inv_icm42600_buffer.c index 6ef1df9d60b7..b52f328fd26c 100644 --- a/drivers/iio/imu/inv_icm42600/inv_icm42600_buffer.c +++ b/drivers/iio/imu/inv_icm42600/inv_icm42600_buffer.c @@ -424,10 +424,7 @@ out_unlock: mutex_unlock(&st->lock); /* sleep maximum required time */ - if (sleep_sensor > sleep_temp) - sleep = sleep_sensor; - else - sleep = sleep_temp; + sleep = max(sleep_sensor, sleep_temp); if (sleep) msleep(sleep); diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600_gyro.c b/drivers/iio/imu/inv_icm42600/inv_icm42600_gyro.c index 3bf946e56e1d..3df0a715e885 100644 --- 
a/drivers/iio/imu/inv_icm42600/inv_icm42600_gyro.c +++ b/drivers/iio/imu/inv_icm42600/inv_icm42600_gyro.c @@ -137,10 +137,7 @@ static int inv_icm42600_gyro_update_scan_mode(struct iio_dev *indio_dev, out_unlock: mutex_unlock(&st->lock); /* sleep maximum required time */ - if (sleep_gyro > sleep_temp) - sleep = sleep_gyro; - else - sleep = sleep_temp; + sleep = max(sleep_gyro, sleep_temp); if (sleep) msleep(sleep); return ret; diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c index 6b034dccc3b1..0e94e5335e93 100644 --- a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c +++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c @@ -567,15 +567,12 @@ static int inv_mpu6050_init_config(struct iio_dev *indio_dev) static int inv_mpu6050_sensor_set(struct inv_mpu6050_state *st, int reg, int axis, int val) { - int ind, result; + int ind; __be16 d = cpu_to_be16(val); ind = (axis - IIO_MOD_X) * 2; - result = regmap_bulk_write(st->map, reg + ind, &d, sizeof(d)); - if (result) - return -EINVAL; - return 0; + return regmap_bulk_write(st->map, reg + ind, &d, sizeof(d)); } static int inv_mpu6050_sensor_show(struct inv_mpu6050_state *st, int reg, @@ -587,7 +584,7 @@ static int inv_mpu6050_sensor_show(struct inv_mpu6050_state *st, int reg, ind = (axis - IIO_MOD_X) * 2; result = regmap_bulk_read(st->map, reg + ind, &d, sizeof(d)); if (result) - return -EINVAL; + return result; *val = (short)be16_to_cpup(&d); return IIO_VAL_INT; diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c index 176d31d9f9d8..b581a7e80566 100644 --- a/drivers/iio/industrialio-buffer.c +++ b/drivers/iio/industrialio-buffer.c @@ -413,6 +413,22 @@ static const unsigned long *iio_scan_mask_match(const unsigned long *av_masks, { if (bitmap_empty(mask, masklength)) return NULL; + /* + * The condition here do not handle multi-long masks correctly. + * It only checks the first long to be zero, and will use such mask + * as a terminator even if there was bits set after the first long. + * + * Correct check would require using: + * while (!bitmap_empty(av_masks, masklength)) + * instead. This is potentially hazardous because the + * avaliable_scan_masks is a zero terminated array of longs - and + * using the proper bitmap_empty() check for multi-long wide masks + * would require the array to be terminated with multiple zero longs - + * which is not such an usual pattern. + * + * As writing of this no multi-long wide masks were found in-tree, so + * the simple while (*av_masks) check is working. 
+ */ while (*av_masks) { if (strict) { if (bitmap_equal(mask, av_masks, masklength)) @@ -600,7 +616,7 @@ static int iio_buffer_add_channel_sysfs(struct iio_dev *indio_dev, &iio_show_fixed_type, NULL, 0, - 0, + IIO_SEPARATE, &indio_dev->dev, buffer, &buffer->buffer_attr_list); @@ -613,7 +629,7 @@ static int iio_buffer_add_channel_sysfs(struct iio_dev *indio_dev, &iio_scan_el_show, &iio_scan_el_store, chan->scan_index, - 0, + IIO_SEPARATE, &indio_dev->dev, buffer, &buffer->buffer_attr_list); @@ -623,7 +639,7 @@ static int iio_buffer_add_channel_sysfs(struct iio_dev *indio_dev, &iio_scan_el_ts_show, &iio_scan_el_ts_store, chan->scan_index, - 0, + IIO_SEPARATE, &indio_dev->dev, buffer, &buffer->buffer_attr_list); diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c index c77745b594bd..9a85752124dd 100644 --- a/drivers/iio/industrialio-core.c +++ b/drivers/iio/industrialio-core.c @@ -117,6 +117,8 @@ static const char * const iio_modifier_names[] = { [IIO_MOD_LIGHT_GREEN] = "green", [IIO_MOD_LIGHT_BLUE] = "blue", [IIO_MOD_LIGHT_UV] = "uv", + [IIO_MOD_LIGHT_UVA] = "uva", + [IIO_MOD_LIGHT_UVB] = "uvb", [IIO_MOD_LIGHT_DUV] = "duv", [IIO_MOD_QUATERNION] = "quaternion", [IIO_MOD_TEMP_AMBIENT] = "ambient", @@ -182,6 +184,7 @@ static const char * const iio_chan_info_postfix[] = { [IIO_CHAN_INFO_THERMOCOUPLE_TYPE] = "thermocouple_type", [IIO_CHAN_INFO_CALIBAMBIENT] = "calibambient", [IIO_CHAN_INFO_ZEROPOINT] = "zeropoint", + [IIO_CHAN_INFO_TROUGH] = "trough_raw", }; /** * iio_device_id() - query the unique ID for the device @@ -1896,6 +1899,66 @@ static int iio_check_extended_name(const struct iio_dev *indio_dev) static const struct iio_buffer_setup_ops noop_ring_setup_ops; +static void iio_sanity_check_avail_scan_masks(struct iio_dev *indio_dev) +{ + unsigned int num_masks, masklength, longs_per_mask; + const unsigned long *av_masks; + int i; + + av_masks = indio_dev->available_scan_masks; + masklength = indio_dev->masklength; + longs_per_mask = BITS_TO_LONGS(masklength); + + /* + * The code determining how many available_scan_masks is in the array + * will be assuming the end of masks when first long with all bits + * zeroed is encountered. This is incorrect for masks where mask + * consists of more than one long, and where some of the available masks + * has long worth of bits zeroed (but has subsequent bit(s) set). This + * is a safety measure against bug where array of masks is terminated by + * a single zero while mask width is greater than width of a long. + */ + if (longs_per_mask > 1) + dev_warn(indio_dev->dev.parent, + "multi long available scan masks not fully supported\n"); + + if (bitmap_empty(av_masks, masklength)) + dev_warn(indio_dev->dev.parent, "empty scan mask\n"); + + for (num_masks = 0; *av_masks; num_masks++) + av_masks += longs_per_mask; + + if (num_masks < 2) + return; + + av_masks = indio_dev->available_scan_masks; + + /* + * Go through all the masks from first to one before the last, and see + * that no mask found later from the available_scan_masks array is a + * subset of mask found earlier. If this happens, then the mask found + * later will never get used because scanning the array is stopped when + * the first suitable mask is found. Drivers should order the array of + * available masks in the order of preference (presumably the least + * costy to access masks first). 
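
The comment just above spells out the ordering rule that the loop which follows enforces with a warning: since scan-mask matching returns the first acceptable entry, a later mask that is a subset of an earlier one can never be selected, so drivers should list the cheaper-to-read masks first. A small stand-alone illustration using single-long masks, with bitmap_subset() modelled by plain bit operations and made-up channel indices:

	#include <stdio.h>

	/* Hypothetical channels 0..5; each mask is one scan configuration. */
	static const unsigned long good_order[] = {
		0x07,		/* channels 0-2 only: cheapest, listed first */
		0x38,		/* channels 3-5 only */
		0x3f,		/* all six channels */
		0		/* terminator */
	};

	static int is_subset(unsigned long a, unsigned long b)
	{
		return (a & ~b) == 0;	/* every bit of a is also set in b */
	}

	int main(void)
	{
		/* Warn if a later mask is a subset of an earlier one. */
		for (int i = 0; good_order[i]; i++)
			for (int j = i + 1; good_order[j]; j++)
				if (is_subset(good_order[j], good_order[i]))
					printf("mask %d subset of %d, never used\n", j, i);
		printf("check done\n");
		return 0;
	}

With this ordering nothing is flagged; swapping the all-channels mask to the front would make both smaller masks unreachable.
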
+ */ + for (i = 0; i < num_masks - 1; i++) { + const unsigned long *mask1; + int j; + + mask1 = av_masks + i * longs_per_mask; + for (j = i + 1; j < num_masks; j++) { + const unsigned long *mask2; + + mask2 = av_masks + j * longs_per_mask; + if (bitmap_subset(mask2, mask1, masklength)) + dev_warn(indio_dev->dev.parent, + "available_scan_mask %d subset of %d. Never used\n", + j, i); + } + } +} + int __iio_device_register(struct iio_dev *indio_dev, struct module *this_mod) { struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); @@ -1934,6 +1997,9 @@ int __iio_device_register(struct iio_dev *indio_dev, struct module *this_mod) goto error_unreg_debugfs; } + if (indio_dev->available_scan_masks) + iio_sanity_check_avail_scan_masks(indio_dev); + ret = iio_device_register_sysfs(indio_dev); if (ret) { dev_err(indio_dev->dev.parent, diff --git a/drivers/iio/light/Kconfig b/drivers/iio/light/Kconfig index 45edba797e4c..143003232d1c 100644 --- a/drivers/iio/light/Kconfig +++ b/drivers/iio/light/Kconfig @@ -252,6 +252,21 @@ config ISL29125 To compile this driver as a module, choose M here: the module will be called isl29125. +config ISL76682 + tristate "Intersil ISL76682 Light Sensor" + depends on I2C + select REGMAP_I2C + help + Say Y here if you want to build a driver for the Intersil ISL76682 + Ambient Light Sensor and IR Intensity sensor. This driver provides + the readouts via standard IIO sysfs and device interface. Both ALS + illuminance and IR illuminance are provided raw with separate scale + setting which can be configured via sysfs, the default scale is 1000 + lux, other options are 4000/16000/64000 lux. + + To compile this driver as a module, choose M here: the module will be + called isl76682. + config HID_SENSOR_ALS depends on HID_SENSOR_HUB select IIO_BUFFER @@ -347,6 +362,17 @@ config SENSORS_LM3533 changes. The ALS-control output values can be set per zone for the three current output channels. +config LTR390 + tristate "LTR-390UV-01 ambient light and UV sensor" + depends on I2C + select REGMAP_I2C + help + If you say yes here you get support for the Lite-On LTR-390UV-01 + ambient light and UV sensor. + + This driver can also be built as a module. If so, the module + will be called ltr390. + config LTR501 tristate "LTR-501ALS-01 light sensor" depends on I2C @@ -637,6 +663,17 @@ config VEML6070 To compile this driver as a module, choose M here: the module will be called veml6070. +config VEML6075 + tristate "VEML6075 UVA and UVB light sensor" + select REGMAP_I2C + depends on I2C + help + Say Y here if you want to build a driver for the Vishay VEML6075 UVA + and UVB light sensor. + + To compile this driver as a module, choose M here: the + module will be called veml6075. 
+ config VL6180 tristate "VL6180 ALS, range and proximity sensor" depends on I2C diff --git a/drivers/iio/light/Makefile b/drivers/iio/light/Makefile index c0db4c4c36ec..2e5fdb33e0e9 100644 --- a/drivers/iio/light/Makefile +++ b/drivers/iio/light/Makefile @@ -28,8 +28,10 @@ obj-$(CONFIG_IQS621_ALS) += iqs621-als.o obj-$(CONFIG_SENSORS_ISL29018) += isl29018.o obj-$(CONFIG_SENSORS_ISL29028) += isl29028.o obj-$(CONFIG_ISL29125) += isl29125.o +obj-$(CONFIG_ISL76682) += isl76682.o obj-$(CONFIG_JSA1212) += jsa1212.o obj-$(CONFIG_SENSORS_LM3533) += lm3533-als.o +obj-$(CONFIG_LTR390) += ltr390.o obj-$(CONFIG_LTR501) += ltr501.o obj-$(CONFIG_LTRF216A) += ltrf216a.o obj-$(CONFIG_LV0104CS) += lv0104cs.o @@ -60,5 +62,6 @@ obj-$(CONFIG_VCNL4000) += vcnl4000.o obj-$(CONFIG_VCNL4035) += vcnl4035.o obj-$(CONFIG_VEML6030) += veml6030.o obj-$(CONFIG_VEML6070) += veml6070.o +obj-$(CONFIG_VEML6075) += veml6075.o obj-$(CONFIG_VL6180) += vl6180.o obj-$(CONFIG_ZOPT2201) += zopt2201.o diff --git a/drivers/iio/light/isl76682.c b/drivers/iio/light/isl76682.c new file mode 100644 index 000000000000..cf6ddee44ffc --- /dev/null +++ b/drivers/iio/light/isl76682.c @@ -0,0 +1,345 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * IIO driver for the light sensor ISL76682. + * ISL76682 is Ambient Light Sensor + * + * Copyright (c) 2023 Marek Vasut <marex@denx.de> + */ + +#include <linux/array_size.h> +#include <linux/bits.h> +#include <linux/cleanup.h> +#include <linux/delay.h> +#include <linux/err.h> +#include <linux/i2c.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/regmap.h> +#include <linux/types.h> + +#include <linux/iio/iio.h> + +#define ISL76682_REG_COMMAND 0x00 + +#define ISL76682_COMMAND_EN BIT(7) +#define ISL76682_COMMAND_MODE_CONTINUOUS BIT(6) +#define ISL76682_COMMAND_LIGHT_IR BIT(5) + +#define ISL76682_COMMAND_RANGE_LUX_1K 0x0 +#define ISL76682_COMMAND_RANGE_LUX_4K 0x1 +#define ISL76682_COMMAND_RANGE_LUX_16K 0x2 +#define ISL76682_COMMAND_RANGE_LUX_64K 0x3 +#define ISL76682_COMMAND_RANGE_LUX_MASK GENMASK(1, 0) + +#define ISL76682_REG_ALSIR_L 0x01 + +#define ISL76682_REG_ALSIR_U 0x02 + +#define ISL76682_NUM_REGS (ISL76682_REG_ALSIR_U + 1) + +#define ISL76682_CONV_TIME_MS 100 +#define ISL76682_INT_TIME_US 90000 + +#define ISL76682_ADC_MAX (BIT(16) - 1) + +struct isl76682_chip { + /* + * Lock to synchronize access to device command register + * and the content of range variable below. 
+ */ + struct mutex lock; + struct regmap *regmap; + u8 range; + u8 command; +}; + +struct isl76682_range { + u8 range; + u32 als; + u32 ir; +}; + +static struct isl76682_range isl76682_range_table[] = { + { ISL76682_COMMAND_RANGE_LUX_1K, 15000, 10500 }, + { ISL76682_COMMAND_RANGE_LUX_4K, 60000, 42000 }, + { ISL76682_COMMAND_RANGE_LUX_16K, 240000, 168000 }, + { ISL76682_COMMAND_RANGE_LUX_64K, 960000, 673000 } +}; + +static int isl76682_get(struct isl76682_chip *chip, bool mode_ir, int *data) +{ + u8 command; + int ret; + + command = ISL76682_COMMAND_EN | ISL76682_COMMAND_MODE_CONTINUOUS | + chip->range; + + if (mode_ir) + command |= ISL76682_COMMAND_LIGHT_IR; + + if (command != chip->command) { + ret = regmap_write(chip->regmap, ISL76682_REG_COMMAND, command); + if (ret) + return ret; + + /* Need to wait for conversion time if ALS/IR mode enabled */ + msleep(ISL76682_CONV_TIME_MS); + + chip->command = command; + } + + ret = regmap_bulk_read(chip->regmap, ISL76682_REG_ALSIR_L, data, 2); + *data &= ISL76682_ADC_MAX; + return ret; +} + +static int isl76682_write_raw(struct iio_dev *indio_dev, + struct iio_chan_spec const *chan, + int val, int val2, long mask) +{ + struct isl76682_chip *chip = iio_priv(indio_dev); + int i; + + if (mask != IIO_CHAN_INFO_SCALE) + return -EINVAL; + + if (val != 0) + return -EINVAL; + + for (i = 0; i < ARRAY_SIZE(isl76682_range_table); i++) { + if (chan->type == IIO_LIGHT && val2 != isl76682_range_table[i].als) + continue; + if (chan->type == IIO_INTENSITY && val2 != isl76682_range_table[i].ir) + continue; + + scoped_guard(mutex, &chip->lock) + chip->range = isl76682_range_table[i].range; + return 0; + } + + return -EINVAL; +} + +static int isl76682_read_raw(struct iio_dev *indio_dev, + struct iio_chan_spec const *chan, + int *val, int *val2, long mask) +{ + struct isl76682_chip *chip = iio_priv(indio_dev); + int ret; + int i; + + guard(mutex)(&chip->lock); + + switch (mask) { + case IIO_CHAN_INFO_RAW: + switch (chan->type) { + case IIO_LIGHT: + ret = isl76682_get(chip, false, val); + return (ret < 0) ? ret : IIO_VAL_INT; + case IIO_INTENSITY: + ret = isl76682_get(chip, true, val); + return (ret < 0) ? 
ret : IIO_VAL_INT; + default: + return -EINVAL; + } + case IIO_CHAN_INFO_SCALE: + for (i = 0; i < ARRAY_SIZE(isl76682_range_table); i++) { + if (chip->range != isl76682_range_table[i].range) + continue; + + *val = 0; + switch (chan->type) { + case IIO_LIGHT: + *val2 = isl76682_range_table[i].als; + return IIO_VAL_INT_PLUS_MICRO; + case IIO_INTENSITY: + *val2 = isl76682_range_table[i].ir; + return IIO_VAL_INT_PLUS_MICRO; + default: + return -EINVAL; + } + } + return -EINVAL; + case IIO_CHAN_INFO_INT_TIME: + *val = 0; + *val2 = ISL76682_INT_TIME_US; + return IIO_VAL_INT_PLUS_MICRO; + default: + return -EINVAL; + } +} + +static int illuminance_scale_available[] = { + 0, 15000, + 0, 60000, + 0, 240000, + 0, 960000, +}; + +static int intensity_scale_available[] = { + 0, 10500, + 0, 42000, + 0, 168000, + 0, 673000, +}; + +static int isl76682_read_avail(struct iio_dev *indio_dev, + struct iio_chan_spec const *chan, + const int **vals, int *type, + int *length, long mask) +{ + switch (mask) { + case IIO_CHAN_INFO_SCALE: + switch (chan->type) { + case IIO_LIGHT: + *vals = illuminance_scale_available; + *length = ARRAY_SIZE(illuminance_scale_available); + *type = IIO_VAL_INT_PLUS_MICRO; + return IIO_AVAIL_LIST; + case IIO_INTENSITY: + *vals = intensity_scale_available; + *length = ARRAY_SIZE(intensity_scale_available); + *type = IIO_VAL_INT_PLUS_MICRO; + return IIO_AVAIL_LIST; + default: + return -EINVAL; + } + default: + return -EINVAL; + } +} + +static const struct iio_chan_spec isl76682_channels[] = { + { + .type = IIO_LIGHT, + .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | + BIT(IIO_CHAN_INFO_SCALE), + .info_mask_shared_by_type_available = BIT(IIO_CHAN_INFO_SCALE), + .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_INT_TIME), + }, { + .type = IIO_INTENSITY, + .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | + BIT(IIO_CHAN_INFO_SCALE), + .info_mask_shared_by_type_available = BIT(IIO_CHAN_INFO_SCALE), + .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_INT_TIME), + } +}; + +static const struct iio_info isl76682_info = { + .read_avail = isl76682_read_avail, + .read_raw = isl76682_read_raw, + .write_raw = isl76682_write_raw, +}; + +static int isl76682_clear_configure_reg(struct isl76682_chip *chip) +{ + struct device *dev = regmap_get_device(chip->regmap); + int ret; + + ret = regmap_write(chip->regmap, ISL76682_REG_COMMAND, 0x0); + if (ret < 0) + dev_err(dev, "Error %d clearing the CONFIGURE register\n", ret); + + /* + * In the success case, the command register was zeroed out. + * + * In the error case, we do not know in which state the command + * register is, so we assume it is zeroed out, so that it would + * be reprogrammed at the next data read out, and at that time + * we hope it would be reprogrammed successfully. That is very + * much a best effort approach. 
+ */ + chip->command = 0; + + return ret; +} + +static void isl76682_reset_action(void *chip) +{ + isl76682_clear_configure_reg(chip); +} + +static bool isl76682_is_volatile_reg(struct device *dev, unsigned int reg) +{ + switch (reg) { + case ISL76682_REG_ALSIR_L: + case ISL76682_REG_ALSIR_U: + return true; + default: + return false; + } +} + +static const struct regmap_config isl76682_regmap_config = { + .reg_bits = 8, + .val_bits = 8, + .volatile_reg = isl76682_is_volatile_reg, + .max_register = ISL76682_NUM_REGS - 1, + .num_reg_defaults_raw = ISL76682_NUM_REGS, + .cache_type = REGCACHE_FLAT, +}; + +static int isl76682_probe(struct i2c_client *client) +{ + struct device *dev = &client->dev; + struct isl76682_chip *chip; + struct iio_dev *indio_dev; + int ret; + + indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*chip)); + if (!indio_dev) + return -ENOMEM; + + chip = iio_priv(indio_dev); + + mutex_init(&chip->lock); + + chip->regmap = devm_regmap_init_i2c(client, &isl76682_regmap_config); + ret = PTR_ERR_OR_ZERO(chip->regmap); + if (ret) + return dev_err_probe(dev, ret, "Error initializing regmap\n"); + + chip->range = ISL76682_COMMAND_RANGE_LUX_1K; + + ret = isl76682_clear_configure_reg(chip); + if (ret < 0) + return ret; + + ret = devm_add_action_or_reset(dev, isl76682_reset_action, chip); + if (ret) + return ret; + + indio_dev->info = &isl76682_info; + indio_dev->channels = isl76682_channels; + indio_dev->num_channels = ARRAY_SIZE(isl76682_channels); + indio_dev->name = "isl76682"; + indio_dev->modes = INDIO_DIRECT_MODE; + + return devm_iio_device_register(dev, indio_dev); +} + +static const struct i2c_device_id isl76682_id[] = { + { "isl76682" }, + { } +}; +MODULE_DEVICE_TABLE(i2c, isl76682_id); + +static const struct of_device_id isl76682_of_match[] = { + { .compatible = "isil,isl76682" }, + { } +}; +MODULE_DEVICE_TABLE(of, isl76682_of_match); + +static struct i2c_driver isl76682_driver = { + .driver = { + .name = "isl76682", + .of_match_table = isl76682_of_match, + }, + .probe = isl76682_probe, + .id_table = isl76682_id, +}; +module_i2c_driver(isl76682_driver); + +MODULE_DESCRIPTION("ISL76682 Ambient Light Sensor driver"); +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Marek Vasut <marex@denx.de>"); diff --git a/drivers/iio/light/ltr390.c b/drivers/iio/light/ltr390.c new file mode 100644 index 000000000000..fff1e899097d --- /dev/null +++ b/drivers/iio/light/ltr390.c @@ -0,0 +1,196 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * IIO driver for Lite-On LTR390 ALS and UV sensor + * (7-bit I2C slave address 0x53) + * + * Based on the work of: + * Shreeya Patel and Shi Zhigang (LTRF216 Driver) + * + * Copyright (C) 2023 Anshul Dalal <anshulusr@gmail.com> + * + * Datasheet: + * https://optoelectronics.liteon.com/upload/download/DS86-2015-0004/LTR-390UV_Final_%20DS_V1%201.pdf + * + * TODO: + * - Support for configurable gain and resolution + * - Sensor suspend/resume support + * - Add support for reading the ALS + * - Interrupt support + */ + +#include <linux/i2c.h> +#include <linux/math.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/regmap.h> + +#include <linux/iio/iio.h> + +#include <asm/unaligned.h> + +#define LTR390_MAIN_CTRL 0x00 +#define LTR390_PART_ID 0x06 +#define LTR390_UVS_DATA 0x10 + +#define LTR390_SW_RESET BIT(4) +#define LTR390_UVS_MODE BIT(3) +#define LTR390_SENSOR_ENABLE BIT(1) + +#define LTR390_PART_NUMBER_ID 0xb + +/* + * At 20-bit resolution (integration time: 400ms) and 18x gain, 2300 counts of + * the sensor are equal to 1 UV Index 
[Datasheet Page#8]. + * + * For the default resolution of 18-bit (integration time: 100ms) and default + * gain of 3x, the counts/uvi are calculated as follows: + * 2300 / ((3/18) * (100/400)) = 95.83 + */ +#define LTR390_COUNTS_PER_UVI 96 + +/* + * Window Factor is needed when the device is under Window glass with coated + * tinted ink. This is to compensate for the light loss due to the lower + * transmission rate of the window glass and helps * in calculating lux. + */ +#define LTR390_WINDOW_FACTOR 1 + +struct ltr390_data { + struct regmap *regmap; + struct i2c_client *client; + /* Protects device from simulataneous reads */ + struct mutex lock; +}; + +static const struct regmap_config ltr390_regmap_config = { + .name = "ltr390", + .reg_bits = 8, + .reg_stride = 1, + .val_bits = 8, +}; + +static int ltr390_register_read(struct ltr390_data *data, u8 register_address) +{ + struct device *dev = &data->client->dev; + int ret; + u8 recieve_buffer[3]; + + guard(mutex)(&data->lock); + + ret = regmap_bulk_read(data->regmap, register_address, recieve_buffer, + sizeof(recieve_buffer)); + if (ret) { + dev_err(dev, "failed to read measurement data"); + return ret; + } + + return get_unaligned_le24(recieve_buffer); +} + +static int ltr390_read_raw(struct iio_dev *iio_device, + struct iio_chan_spec const *chan, int *val, + int *val2, long mask) +{ + int ret; + struct ltr390_data *data = iio_priv(iio_device); + + switch (mask) { + case IIO_CHAN_INFO_RAW: + ret = ltr390_register_read(data, LTR390_UVS_DATA); + if (ret < 0) + return ret; + *val = ret; + return IIO_VAL_INT; + case IIO_CHAN_INFO_SCALE: + *val = LTR390_WINDOW_FACTOR; + *val2 = LTR390_COUNTS_PER_UVI; + return IIO_VAL_FRACTIONAL; + default: + return -EINVAL; + } +} + +static const struct iio_info ltr390_info = { + .read_raw = ltr390_read_raw, +}; + +static const struct iio_chan_spec ltr390_channel = { + .type = IIO_UVINDEX, + .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE) +}; + +static int ltr390_probe(struct i2c_client *client) +{ + struct ltr390_data *data; + struct iio_dev *indio_dev; + struct device *dev; + int ret, part_number; + + dev = &client->dev; + indio_dev = devm_iio_device_alloc(dev, sizeof(*data)); + if (!indio_dev) + return -ENOMEM; + + data = iio_priv(indio_dev); + + data->regmap = devm_regmap_init_i2c(client, <r390_regmap_config); + if (IS_ERR(data->regmap)) + return dev_err_probe(dev, PTR_ERR(data->regmap), + "regmap initialization failed\n"); + + data->client = client; + mutex_init(&data->lock); + + indio_dev->info = <r390_info; + indio_dev->channels = <r390_channel; + indio_dev->num_channels = 1; + indio_dev->name = "ltr390"; + + ret = regmap_read(data->regmap, LTR390_PART_ID, &part_number); + if (ret) + return dev_err_probe(dev, ret, + "failed to get sensor's part id\n"); + /* Lower 4 bits of `part_number` change with hardware revisions */ + if (part_number >> 4 != LTR390_PART_NUMBER_ID) + dev_info(dev, "received invalid product id: 0x%x", part_number); + dev_dbg(dev, "LTR390, product id: 0x%x\n", part_number); + + /* reset sensor, chip fails to respond to this, so ignore any errors */ + regmap_set_bits(data->regmap, LTR390_MAIN_CTRL, LTR390_SW_RESET); + + /* Wait for the registers to reset before proceeding */ + usleep_range(1000, 2000); + + ret = regmap_set_bits(data->regmap, LTR390_MAIN_CTRL, + LTR390_SENSOR_ENABLE | LTR390_UVS_MODE); + if (ret) + return dev_err_probe(dev, ret, "failed to enable the sensor\n"); + + return devm_iio_device_register(dev, indio_dev); +} + +static const struct 
i2c_device_id ltr390_id[] = { + { "ltr390" }, + { /* Sentinel */ } +}; +MODULE_DEVICE_TABLE(i2c, ltr390_id); + +static const struct of_device_id ltr390_of_table[] = { + { .compatible = "liteon,ltr390" }, + { /* Sentinel */ } +}; +MODULE_DEVICE_TABLE(of, ltr390_of_table); + +static struct i2c_driver ltr390_driver = { + .driver = { + .name = "ltr390", + .of_match_table = ltr390_of_table, + }, + .probe = ltr390_probe, + .id_table = ltr390_id, +}; +module_i2c_driver(ltr390_driver); + +MODULE_AUTHOR("Anshul Dalal <anshulusr@gmail.com>"); +MODULE_DESCRIPTION("Lite-On LTR390 ALS and UV sensor Driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/iio/light/ltrf216a.c b/drivers/iio/light/ltrf216a.c index 8de4dd849936..68dc48420a88 100644 --- a/drivers/iio/light/ltrf216a.c +++ b/drivers/iio/light/ltrf216a.c @@ -234,7 +234,7 @@ static int ltrf216a_read_data(struct ltrf216a_data *data, u8 addr) static int ltrf216a_get_lux(struct ltrf216a_data *data) { int ret, greendata; - u64 lux, div; + u64 lux; ret = ltrf216a_set_power_state(data, true); if (ret) @@ -246,10 +246,9 @@ static int ltrf216a_get_lux(struct ltrf216a_data *data) ltrf216a_set_power_state(data, false); - lux = greendata * 45 * LTRF216A_WIN_FAC * 100; - div = data->als_gain_fac * data->int_time_fac * 100; + lux = greendata * 45 * LTRF216A_WIN_FAC; - return div_u64(lux, div); + return lux; } static int ltrf216a_read_raw(struct iio_dev *indio_dev, @@ -279,7 +278,8 @@ static int ltrf216a_read_raw(struct iio_dev *indio_dev, if (ret < 0) return ret; *val = ret; - return IIO_VAL_INT; + *val2 = data->als_gain_fac * data->int_time_fac; + return IIO_VAL_FRACTIONAL; case IIO_CHAN_INFO_INT_TIME: mutex_lock(&data->lock); ret = ltrf216a_get_int_time(data, val, val2); diff --git a/drivers/iio/light/pa12203001.c b/drivers/iio/light/pa12203001.c index ed241598aefb..636432c45651 100644 --- a/drivers/iio/light/pa12203001.c +++ b/drivers/iio/light/pa12203001.c @@ -472,7 +472,7 @@ static struct i2c_driver pa12203001_driver = { .driver = { .name = PA12203001_DRIVER_NAME, .pm = &pa12203001_pm_ops, - .acpi_match_table = ACPI_PTR(pa12203001_acpi_match), + .acpi_match_table = pa12203001_acpi_match, }, .probe = pa12203001_probe, .remove = pa12203001_remove, diff --git a/drivers/iio/light/rohm-bu27008.c b/drivers/iio/light/rohm-bu27008.c index 6a6d77805091..0f010eff1981 100644 --- a/drivers/iio/light/rohm-bu27008.c +++ b/drivers/iio/light/rohm-bu27008.c @@ -130,6 +130,7 @@ * @BU27008_BLUE: Blue channel. Via data2 (when used). * @BU27008_CLEAR: Clear channel. Via data2 or data3 (when used). * @BU27008_IR: IR channel. Via data3 (when used). + * @BU27008_LUX: Illuminance channel, computed using RGB and IR. * @BU27008_NUM_CHANS: Number of channel types. */ enum bu27008_chan_type { @@ -138,6 +139,7 @@ enum bu27008_chan_type { BU27008_BLUE, BU27008_CLEAR, BU27008_IR, + BU27008_LUX, BU27008_NUM_CHANS }; @@ -172,6 +174,8 @@ static const unsigned long bu27008_scan_masks[] = { ALWAYS_SCANNABLE | BIT(BU27008_CLEAR) | BIT(BU27008_IR), /* buffer is R, G, B, IR */ ALWAYS_SCANNABLE | BIT(BU27008_BLUE) | BIT(BU27008_IR), + /* buffer is R, G, B, IR, LUX */ + ALWAYS_SCANNABLE | BIT(BU27008_BLUE) | BIT(BU27008_IR) | BIT(BU27008_LUX), 0 }; @@ -331,6 +335,19 @@ static const struct iio_chan_spec bu27008_channels[] = { * Hence we don't advertise available ones either. 
*/ BU27008_CHAN(IR, DATA3, 0), + { + .type = IIO_LIGHT, + .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | + BIT(IIO_CHAN_INFO_SCALE), + .channel = BU27008_LUX, + .scan_index = BU27008_LUX, + .scan_type = { + .sign = 'u', + .realbits = 64, + .storagebits = 64, + .endianness = IIO_CPU, + }, + }, IIO_CHAN_SOFT_TIMESTAMP(BU27008_NUM_CHANS), }; @@ -1004,6 +1021,169 @@ static int bu27008_read_one(struct bu27008_data *data, struct iio_dev *idev, return ret; } +#define BU27008_LUX_DATA_RED 0 +#define BU27008_LUX_DATA_GREEN 1 +#define BU27008_LUX_DATA_BLUE 2 +#define BU27008_LUX_DATA_IR 3 +#define LUX_DATA_SIZE (BU27008_NUM_HW_CHANS * sizeof(__le16)) + +static int bu27008_read_lux_chans(struct bu27008_data *data, unsigned int time, + __le16 *chan_data) +{ + int ret, chan_sel, tmpret, valid; + + chan_sel = BU27008_BLUE2_IR3 << (ffs(data->cd->chan_sel_mask) - 1); + + ret = regmap_update_bits(data->regmap, data->cd->chan_sel_reg, + data->cd->chan_sel_mask, chan_sel); + if (ret) + return ret; + + ret = bu27008_meas_set(data, true); + if (ret) + return ret; + + msleep(time / USEC_PER_MSEC); + + ret = regmap_read_poll_timeout(data->regmap, data->cd->valid_reg, + valid, (valid & BU27008_MASK_VALID), + BU27008_VALID_RESULT_WAIT_QUANTA_US, + BU27008_MAX_VALID_RESULT_WAIT_US); + if (ret) + goto out; + + ret = regmap_bulk_read(data->regmap, BU27008_REG_DATA0_LO, chan_data, + LUX_DATA_SIZE); + if (ret) + goto out; +out: + tmpret = bu27008_meas_set(data, false); + if (tmpret) + dev_warn(data->dev, "Stopping measurement failed\n"); + + return ret; +} + +/* + * Following equation for computing lux out of register values was given by + * ROHM HW colleagues; + * + * Red = RedData*1024 / Gain * 20 / meas_mode + * Green = GreenData* 1024 / Gain * 20 / meas_mode + * Blue = BlueData* 1024 / Gain * 20 / meas_mode + * IR = IrData* 1024 / Gain * 20 / meas_mode + * + * where meas_mode is the integration time in mS / 10 + * + * IRratio = (IR > 0.18 * Green) ? 0 : 1 + * + * Lx = max(c1*Red + c2*Green + c3*Blue,0) + * + * for + * IRratio 0: c1 = -0.00002237, c2 = 0.0003219, c3 = -0.000120371 + * IRratio 1: c1 = -0.00001074, c2 = 0.000305415, c3 = -0.000129367 + */ + +/* + * The max chan data is 0xffff. When we multiply it by 1024 * 20, we'll get + * 0x4FFFB000 which still fits in 32-bit integer. This won't overflow. 
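
The formula block above is implemented just below: each channel is normalized as data * 1024 * 20 / gain / meas_mode (meas_mode being the integration time in ms divided by 10), the coefficient set is picked by comparing IR against 18 % of green, and the coefficients are pre-scaled so the weighted sum comes out in nanolux. A worked stand-alone version with invented register values; only the coefficients are taken from the comment above, the rest is illustrative:

	#include <stdio.h>
	#include <stdint.h>

	/* data * 1024 * 20 / gain / meas_mode, meas_mode = integration time in ms / 10 */
	static unsigned int norm(unsigned int data, unsigned int gain,
				 unsigned int meas_mode)
	{
		return data * 1024 * 20 / gain / meas_mode;
	}

	int main(void)
	{
		/* Made-up 16-bit channel readings, 1x gain, 55 ms integration. */
		unsigned int gain = 1, gain_ir = 1, meas_mode = 55 / 10;
		unsigned int r = norm(1000, gain, meas_mode);
		unsigned int g = norm(2000, gain, meas_mode);
		unsigned int b = norm(500, gain, meas_mode);
		unsigned int ir = norm(100, gain_ir, meas_mode);
		int64_t c1, c2, c3, nlux;

		if ((uint64_t)ir * 100 > (uint64_t)g * 18) {	/* IR-heavy light */
			c1 = -22370; c2 = 321900; c3 = -120371;
		} else {
			c1 = -10740; c2 = 305415; c3 = -129367;
		}

		nlux = c1 * r + c2 * g + c3 * b;
		if (nlux < 0)
			nlux = 0;

		printf("%lld nlux (~%lld lx)\n", (long long)nlux,
		       (long long)(nlux / 1000000000LL));
		return 0;
	}
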
+ */ +#define NORM_CHAN_DATA_FOR_LX_CALC(chan, gain, time) (le16_to_cpu(chan) * \ + 1024 * 20 / (gain) / (time)) +static u64 bu27008_calc_nlux(struct bu27008_data *data, __le16 *lux_data, + unsigned int gain, unsigned int gain_ir, unsigned int time) +{ + unsigned int red, green, blue, ir; + s64 c1, c2, c3, nlux; + + time /= 10000; + ir = NORM_CHAN_DATA_FOR_LX_CALC(lux_data[BU27008_LUX_DATA_IR], gain_ir, time); + red = NORM_CHAN_DATA_FOR_LX_CALC(lux_data[BU27008_LUX_DATA_RED], gain, time); + green = NORM_CHAN_DATA_FOR_LX_CALC(lux_data[BU27008_LUX_DATA_GREEN], gain, time); + blue = NORM_CHAN_DATA_FOR_LX_CALC(lux_data[BU27008_LUX_DATA_BLUE], gain, time); + + if ((u64)ir * 100LLU > (u64)green * 18LLU) { + c1 = -22370; + c2 = 321900; + c3 = -120371; + } else { + c1 = -10740; + c2 = 305415; + c3 = -129367; + } + nlux = c1 * red + c2 * green + c3 * blue; + + return max_t(s64, 0, nlux); +} + +static int bu27008_get_time_n_gains(struct bu27008_data *data, + unsigned int *gain, unsigned int *gain_ir, unsigned int *time) +{ + int ret; + + ret = bu27008_get_gain(data, &data->gts, gain); + if (ret < 0) + return ret; + + ret = bu27008_get_gain(data, &data->gts_ir, gain_ir); + if (ret < 0) + return ret; + + ret = bu27008_get_int_time_us(data); + if (ret < 0) + return ret; + + /* Max integration time is 400000. Fits in signed int. */ + *time = ret; + + return 0; +} + +struct bu27008_buf { + __le16 chan[BU27008_NUM_HW_CHANS]; + u64 lux __aligned(8); + s64 ts __aligned(8); +}; + +static int bu27008_buffer_fill_lux(struct bu27008_data *data, + struct bu27008_buf *raw) +{ + unsigned int gain, gain_ir, time; + int ret; + + ret = bu27008_get_time_n_gains(data, &gain, &gain_ir, &time); + if (ret) + return ret; + + raw->lux = bu27008_calc_nlux(data, raw->chan, gain, gain_ir, time); + + return 0; +} + +static int bu27008_read_lux(struct bu27008_data *data, struct iio_dev *idev, + struct iio_chan_spec const *chan, + int *val, int *val2) +{ + __le16 lux_data[BU27008_NUM_HW_CHANS]; + unsigned int gain, gain_ir, time; + u64 nlux; + int ret; + + ret = bu27008_get_time_n_gains(data, &gain, &gain_ir, &time); + if (ret) + return ret; + + ret = bu27008_read_lux_chans(data, time, lux_data); + if (ret) + return ret; + + nlux = bu27008_calc_nlux(data, lux_data, gain, gain_ir, time); + *val = (int)nlux; + *val2 = nlux >> 32LLU; + + return IIO_VAL_INT_64; +} + static int bu27008_read_raw(struct iio_dev *idev, struct iio_chan_spec const *chan, int *val, int *val2, long mask) @@ -1018,7 +1198,10 @@ static int bu27008_read_raw(struct iio_dev *idev, return -EBUSY; mutex_lock(&data->mutex); - ret = bu27008_read_one(data, idev, chan, val, val2); + if (chan->type == IIO_LIGHT) + ret = bu27008_read_lux(data, idev, chan, val, val2); + else + ret = bu27008_read_one(data, idev, chan, val, val2); mutex_unlock(&data->mutex); iio_device_release_direct_mode(idev); @@ -1026,6 +1209,11 @@ static int bu27008_read_raw(struct iio_dev *idev, return ret; case IIO_CHAN_INFO_SCALE: + if (chan->type == IIO_LIGHT) { + *val = 0; + *val2 = 1; + return IIO_VAL_INT_PLUS_NANO; + } ret = bu27008_get_scale(data, chan->scan_index == BU27008_IR, val, val2); if (ret) @@ -1236,10 +1424,7 @@ static irqreturn_t bu27008_trigger_handler(int irq, void *p) struct iio_poll_func *pf = p; struct iio_dev *idev = pf->indio_dev; struct bu27008_data *data = iio_priv(idev); - struct { - __le16 chan[BU27008_NUM_HW_CHANS]; - s64 ts __aligned(8); - } raw; + struct bu27008_buf raw; int ret, dummy; memset(&raw, 0, sizeof(raw)); @@ -1257,6 +1442,12 @@ static irqreturn_t 
bu27008_trigger_handler(int irq, void *p) if (ret < 0) goto err_read; + if (test_bit(BU27008_LUX, idev->active_scan_mask)) { + ret = bu27008_buffer_fill_lux(data, &raw); + if (ret) + goto err_read; + } + iio_push_to_buffers_with_timestamp(idev, &raw, pf->timestamp); err_read: iio_trigger_notify_done(idev->trig); diff --git a/drivers/iio/light/veml6075.c b/drivers/iio/light/veml6075.c new file mode 100644 index 000000000000..05d4c0e9015d --- /dev/null +++ b/drivers/iio/light/veml6075.c @@ -0,0 +1,474 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Vishay VEML6075 UVA and UVB light sensor + * + * Copyright 2023 Javier Carrasco <javier.carrasco.cruz@gmail.com> + * + * 7-bit I2C slave, address 0x10 + */ + +#include <linux/bitfield.h> +#include <linux/delay.h> +#include <linux/err.h> +#include <linux/i2c.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/regmap.h> +#include <linux/units.h> + +#include <linux/iio/iio.h> + +#define VEML6075_CMD_CONF 0x00 /* configuration register */ +#define VEML6075_CMD_UVA 0x07 /* UVA channel */ +#define VEML6075_CMD_UVB 0x09 /* UVB channel */ +#define VEML6075_CMD_COMP1 0x0A /* visible light compensation */ +#define VEML6075_CMD_COMP2 0x0B /* infrarred light compensation */ +#define VEML6075_CMD_ID 0x0C /* device ID */ + +#define VEML6075_CONF_IT GENMASK(6, 4) /* intregration time */ +#define VEML6075_CONF_HD BIT(3) /* dynamic setting */ +#define VEML6075_CONF_TRIG BIT(2) /* trigger */ +#define VEML6075_CONF_AF BIT(1) /* active force enable */ +#define VEML6075_CONF_SD BIT(0) /* shutdown */ + +#define VEML6075_IT_50_MS 0x00 +#define VEML6075_IT_100_MS 0x01 +#define VEML6075_IT_200_MS 0x02 +#define VEML6075_IT_400_MS 0x03 +#define VEML6075_IT_800_MS 0x04 + +#define VEML6075_AF_DISABLE 0x00 +#define VEML6075_AF_ENABLE 0x01 + +#define VEML6075_SD_DISABLE 0x00 +#define VEML6075_SD_ENABLE 0x01 + +/* Open-air coefficients and responsivity */ +#define VEML6075_A_COEF 2220 +#define VEML6075_B_COEF 1330 +#define VEML6075_C_COEF 2950 +#define VEML6075_D_COEF 1740 +#define VEML6075_UVA_RESP 1461 +#define VEML6075_UVB_RESP 2591 + +static const int veml6075_it_ms[] = { 50, 100, 200, 400, 800 }; + +struct veml6075_data { + struct i2c_client *client; + struct regmap *regmap; + /* + * prevent integration time modification and triggering + * measurements while a measurement is underway. 
+ */ + struct mutex lock; +}; + +/* channel number */ +enum veml6075_chan { + CH_UVA, + CH_UVB, +}; + +static const struct iio_chan_spec veml6075_channels[] = { + { + .type = IIO_INTENSITY, + .channel = CH_UVA, + .modified = 1, + .channel2 = IIO_MOD_LIGHT_UVA, + .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | + BIT(IIO_CHAN_INFO_SCALE), + .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_INT_TIME), + .info_mask_shared_by_all_available = BIT(IIO_CHAN_INFO_INT_TIME), + }, + { + .type = IIO_INTENSITY, + .channel = CH_UVB, + .modified = 1, + .channel2 = IIO_MOD_LIGHT_UVB, + .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | + BIT(IIO_CHAN_INFO_SCALE), + .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_INT_TIME), + .info_mask_shared_by_all_available = BIT(IIO_CHAN_INFO_INT_TIME), + }, + { + .type = IIO_UVINDEX, + .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED), + .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_INT_TIME), + .info_mask_shared_by_all_available = BIT(IIO_CHAN_INFO_INT_TIME), + }, +}; + +static int veml6075_request_measurement(struct veml6075_data *data) +{ + int ret, conf, int_time; + + ret = regmap_read(data->regmap, VEML6075_CMD_CONF, &conf); + if (ret < 0) + return ret; + + /* disable shutdown and trigger measurement */ + ret = regmap_write(data->regmap, VEML6075_CMD_CONF, + (conf | VEML6075_CONF_TRIG) & ~VEML6075_CONF_SD); + if (ret < 0) + return ret; + + /* + * A measurement requires between 1.30 and 1.40 times the integration + * time for all possible configurations. Using a 1.50 factor simplifies + * operations and ensures reliability under all circumstances. + */ + int_time = veml6075_it_ms[FIELD_GET(VEML6075_CONF_IT, conf)]; + msleep(int_time + (int_time / 2)); + + /* shutdown again, data registers are still accessible */ + return regmap_update_bits(data->regmap, VEML6075_CMD_CONF, + VEML6075_CONF_SD, VEML6075_CONF_SD); +} + +static int veml6075_uva_comp(int raw_uva, int comp1, int comp2) +{ + int comp1a_c, comp2a_c, uva_comp; + + comp1a_c = (comp1 * VEML6075_A_COEF) / 1000U; + comp2a_c = (comp2 * VEML6075_B_COEF) / 1000U; + uva_comp = raw_uva - comp1a_c - comp2a_c; + + return clamp_val(uva_comp, 0, U16_MAX); +} + +static int veml6075_uvb_comp(int raw_uvb, int comp1, int comp2) +{ + int comp1b_c, comp2b_c, uvb_comp; + + comp1b_c = (comp1 * VEML6075_C_COEF) / 1000U; + comp2b_c = (comp2 * VEML6075_D_COEF) / 1000U; + uvb_comp = raw_uvb - comp1b_c - comp2b_c; + + return clamp_val(uvb_comp, 0, U16_MAX); +} + +static int veml6075_read_comp(struct veml6075_data *data, int *c1, int *c2) +{ + int ret; + + ret = regmap_read(data->regmap, VEML6075_CMD_COMP1, c1); + if (ret < 0) + return ret; + + return regmap_read(data->regmap, VEML6075_CMD_COMP2, c2); +} + +static int veml6075_read_uv_direct(struct veml6075_data *data, int chan, + int *val) +{ + int c1, c2, ret; + + guard(mutex)(&data->lock); + + ret = veml6075_request_measurement(data); + if (ret < 0) + return ret; + + ret = veml6075_read_comp(data, &c1, &c2); + if (ret < 0) + return ret; + + switch (chan) { + case CH_UVA: + ret = regmap_read(data->regmap, VEML6075_CMD_UVA, val); + if (ret < 0) + return ret; + + *val = veml6075_uva_comp(*val, c1, c2); + return IIO_VAL_INT; + case CH_UVB: + ret = regmap_read(data->regmap, VEML6075_CMD_UVB, val); + if (ret < 0) + return ret; + + *val = veml6075_uvb_comp(*val, c1, c2); + return IIO_VAL_INT; + default: + return -EINVAL; + } +} + +static int veml6075_read_int_time_index(struct veml6075_data *data) +{ + int ret, conf; + + ret = regmap_read(data->regmap, VEML6075_CMD_CONF, &conf); + if (ret < 0) + 
return ret; + + return FIELD_GET(VEML6075_CONF_IT, conf); +} + +static int veml6075_read_int_time_ms(struct veml6075_data *data, int *val) +{ + int int_index; + + guard(mutex)(&data->lock); + int_index = veml6075_read_int_time_index(data); + if (int_index < 0) + return int_index; + + *val = veml6075_it_ms[int_index]; + + return IIO_VAL_INT; +} + +static int veml6075_get_uvi_micro(struct veml6075_data *data, int uva_comp, + int uvb_comp) +{ + int uvia_micro = uva_comp * VEML6075_UVA_RESP; + int uvib_micro = uvb_comp * VEML6075_UVB_RESP; + int int_index; + + int_index = veml6075_read_int_time_index(data); + if (int_index < 0) + return int_index; + + switch (int_index) { + case VEML6075_IT_50_MS: + return uvia_micro + uvib_micro; + case VEML6075_IT_100_MS: + case VEML6075_IT_200_MS: + case VEML6075_IT_400_MS: + case VEML6075_IT_800_MS: + return (uvia_micro + uvib_micro) / (2 << int_index); + default: + return -EINVAL; + } +} + +static int veml6075_read_uvi(struct veml6075_data *data, int *val, int *val2) +{ + int ret, c1, c2, uva, uvb, uvi_micro; + + guard(mutex)(&data->lock); + + ret = veml6075_request_measurement(data); + if (ret < 0) + return ret; + + ret = veml6075_read_comp(data, &c1, &c2); + if (ret < 0) + return ret; + + ret = regmap_read(data->regmap, VEML6075_CMD_UVA, &uva); + if (ret < 0) + return ret; + + ret = regmap_read(data->regmap, VEML6075_CMD_UVB, &uvb); + if (ret < 0) + return ret; + + uvi_micro = veml6075_get_uvi_micro(data, veml6075_uva_comp(uva, c1, c2), + veml6075_uvb_comp(uvb, c1, c2)); + if (uvi_micro < 0) + return uvi_micro; + + *val = uvi_micro / MICRO; + *val2 = uvi_micro % MICRO; + + return IIO_VAL_INT_PLUS_MICRO; +} + +static int veml6075_read_responsivity(int chan, int *val, int *val2) +{ + /* scale = 1 / resp */ + switch (chan) { + case CH_UVA: + /* resp = 0.93 c/uW/cm2: scale = 1.75268817 */ + *val = 1; + *val2 = 75268817; + return IIO_VAL_INT_PLUS_NANO; + case CH_UVB: + /* resp = 2.1 c/uW/cm2: scale = 0.476190476 */ + *val = 0; + *val2 = 476190476; + return IIO_VAL_INT_PLUS_NANO; + default: + return -EINVAL; + } +} + +static int veml6075_read_avail(struct iio_dev *indio_dev, + struct iio_chan_spec const *chan, + const int **vals, int *type, int *length, + long mask) +{ + switch (mask) { + case IIO_CHAN_INFO_INT_TIME: + *length = ARRAY_SIZE(veml6075_it_ms); + *vals = veml6075_it_ms; + *type = IIO_VAL_INT; + return IIO_AVAIL_LIST; + + default: + return -EINVAL; + } +} + +static int veml6075_read_raw(struct iio_dev *indio_dev, + struct iio_chan_spec const *chan, + int *val, int *val2, long mask) +{ + struct veml6075_data *data = iio_priv(indio_dev); + + switch (mask) { + case IIO_CHAN_INFO_RAW: + return veml6075_read_uv_direct(data, chan->channel, val); + case IIO_CHAN_INFO_PROCESSED: + return veml6075_read_uvi(data, val, val2); + case IIO_CHAN_INFO_INT_TIME: + return veml6075_read_int_time_ms(data, val); + case IIO_CHAN_INFO_SCALE: + return veml6075_read_responsivity(chan->channel, val, val2); + default: + return -EINVAL; + } +} + +static int veml6075_write_int_time_ms(struct veml6075_data *data, int val) +{ + int i = ARRAY_SIZE(veml6075_it_ms); + + guard(mutex)(&data->lock); + + while (i-- > 0) { + if (val == veml6075_it_ms[i]) + break; + } + if (i < 0) + return -EINVAL; + + return regmap_update_bits(data->regmap, VEML6075_CMD_CONF, + VEML6075_CONF_IT, + FIELD_PREP(VEML6075_CONF_IT, i)); +} + +static int veml6075_write_raw(struct iio_dev *indio_dev, + struct iio_chan_spec const *chan, + int val, int val2, long mask) +{ + struct veml6075_data *data = 
iio_priv(indio_dev); + + switch (mask) { + case IIO_CHAN_INFO_INT_TIME: + return veml6075_write_int_time_ms(data, val); + default: + return -EINVAL; + } +} + +static const struct iio_info veml6075_info = { + .read_avail = veml6075_read_avail, + .read_raw = veml6075_read_raw, + .write_raw = veml6075_write_raw, +}; + +static bool veml6075_readable_reg(struct device *dev, unsigned int reg) +{ + switch (reg) { + case VEML6075_CMD_CONF: + case VEML6075_CMD_UVA: + case VEML6075_CMD_UVB: + case VEML6075_CMD_COMP1: + case VEML6075_CMD_COMP2: + case VEML6075_CMD_ID: + return true; + default: + return false; + } +} + +static bool veml6075_writable_reg(struct device *dev, unsigned int reg) +{ + switch (reg) { + case VEML6075_CMD_CONF: + return true; + default: + return false; + } +} + +static const struct regmap_config veml6075_regmap_config = { + .name = "veml6075", + .reg_bits = 8, + .val_bits = 16, + .max_register = VEML6075_CMD_ID, + .readable_reg = veml6075_readable_reg, + .writeable_reg = veml6075_writable_reg, + .val_format_endian = REGMAP_ENDIAN_LITTLE, +}; + +static int veml6075_probe(struct i2c_client *client) +{ + struct veml6075_data *data; + struct iio_dev *indio_dev; + struct regmap *regmap; + int config, ret; + + indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data)); + if (!indio_dev) + return -ENOMEM; + + regmap = devm_regmap_init_i2c(client, &veml6075_regmap_config); + if (IS_ERR(regmap)) + return PTR_ERR(regmap); + + data = iio_priv(indio_dev); + data->client = client; + data->regmap = regmap; + + mutex_init(&data->lock); + + indio_dev->name = "veml6075"; + indio_dev->info = &veml6075_info; + indio_dev->channels = veml6075_channels; + indio_dev->num_channels = ARRAY_SIZE(veml6075_channels); + indio_dev->modes = INDIO_DIRECT_MODE; + + ret = devm_regulator_get_enable(&client->dev, "vdd"); + if (ret < 0) + return ret; + + /* default: 100ms integration time, active force enable, shutdown */ + config = FIELD_PREP(VEML6075_CONF_IT, VEML6075_IT_100_MS) | + FIELD_PREP(VEML6075_CONF_AF, VEML6075_AF_ENABLE) | + FIELD_PREP(VEML6075_CONF_SD, VEML6075_SD_ENABLE); + ret = regmap_write(data->regmap, VEML6075_CMD_CONF, config); + if (ret < 0) + return ret; + + return devm_iio_device_register(&client->dev, indio_dev); +} + +static const struct i2c_device_id veml6075_id[] = { + { "veml6075" }, + { } +}; +MODULE_DEVICE_TABLE(i2c, veml6075_id); + +static const struct of_device_id veml6075_of_match[] = { + { .compatible = "vishay,veml6075" }, + {} +}; +MODULE_DEVICE_TABLE(of, veml6075_of_match); + +static struct i2c_driver veml6075_driver = { + .driver = { + .name = "veml6075", + .of_match_table = veml6075_of_match, + }, + .probe = veml6075_probe, + .id_table = veml6075_id, +}; + +module_i2c_driver(veml6075_driver); + +MODULE_AUTHOR("Javier Carrasco <javier.carrasco.cruz@gmail.com>"); +MODULE_DESCRIPTION("Vishay VEML6075 UVA and UVB light sensor driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/iio/magnetometer/tmag5273.c b/drivers/iio/magnetometer/tmag5273.c index e8c4ca142d21..218b1ce076c1 100644 --- a/drivers/iio/magnetometer/tmag5273.c +++ b/drivers/iio/magnetometer/tmag5273.c @@ -497,17 +497,13 @@ static int tmag5273_set_operating_mode(struct tmag5273_data *data, static void tmag5273_read_device_property(struct tmag5273_data *data) { struct device *dev = data->dev; - const char *str; int ret; data->angle_measurement = TMAG5273_ANGLE_EN_X_Y; - ret = device_property_read_string(dev, "ti,angle-measurement", &str); - if (ret) - return; - - ret = match_string(tmag5273_angle_names, - 
ARRAY_SIZE(tmag5273_angle_names), str); + ret = device_property_match_property_string(dev, "ti,angle-measurement", + tmag5273_angle_names, + ARRAY_SIZE(tmag5273_angle_names)); if (ret >= 0) data->angle_measurement = ret; } diff --git a/drivers/iio/pressure/Kconfig b/drivers/iio/pressure/Kconfig index 95efa32e4289..79adfd059c3a 100644 --- a/drivers/iio/pressure/Kconfig +++ b/drivers/iio/pressure/Kconfig @@ -109,6 +109,28 @@ config HP03 To compile this driver as a module, choose M here: the module will be called hp03. +config HSC030PA + tristate "Honeywell HSC/SSC TruStability pressure sensor series" + depends on (I2C || SPI_MASTER) + select HSC030PA_I2C if I2C + select HSC030PA_SPI if SPI_MASTER + help + Say Y here to build support for the Honeywell TruStability + HSC and SSC pressure and temperature sensor series. + + To compile this driver as a module, choose M here: the module + will be called hsc030pa. + +config HSC030PA_I2C + tristate + depends on HSC030PA + depends on I2C + +config HSC030PA_SPI + tristate + depends on HSC030PA + depends on SPI_MASTER + config ICP10100 tristate "InvenSense ICP-101xx pressure and temperature sensor" depends on I2C diff --git a/drivers/iio/pressure/Makefile b/drivers/iio/pressure/Makefile index 436aec7e65f3..b0f8b94662f2 100644 --- a/drivers/iio/pressure/Makefile +++ b/drivers/iio/pressure/Makefile @@ -15,6 +15,9 @@ obj-$(CONFIG_DPS310) += dps310.o obj-$(CONFIG_IIO_CROS_EC_BARO) += cros_ec_baro.o obj-$(CONFIG_HID_SENSOR_PRESS) += hid-sensor-press.o obj-$(CONFIG_HP03) += hp03.o +obj-$(CONFIG_HSC030PA) += hsc030pa.o +obj-$(CONFIG_HSC030PA_I2C) += hsc030pa_i2c.o +obj-$(CONFIG_HSC030PA_SPI) += hsc030pa_spi.o obj-$(CONFIG_ICP10100) += icp10100.o obj-$(CONFIG_MPL115) += mpl115.o obj-$(CONFIG_MPL115_I2C) += mpl115_i2c.o diff --git a/drivers/iio/pressure/bmp280-core.c b/drivers/iio/pressure/bmp280-core.c index a2ef1373a274..fe8734468ed3 100644 --- a/drivers/iio/pressure/bmp280-core.c +++ b/drivers/iio/pressure/bmp280-core.c @@ -13,6 +13,7 @@ * https://www.bosch-sensortec.com/media/boschsensortec/downloads/datasheets/bst-bmp280-ds001.pdf * https://www.bosch-sensortec.com/media/boschsensortec/downloads/datasheets/bst-bme280-ds002.pdf * https://www.bosch-sensortec.com/media/boschsensortec/downloads/datasheets/bst-bmp388-ds001.pdf + * https://www.bosch-sensortec.com/media/boschsensortec/downloads/datasheets/bst-bmp390-ds002.pdf * https://www.bosch-sensortec.com/media/boschsensortec/downloads/datasheets/bst-bmp581-ds004.pdf * * Notice: @@ -794,10 +795,12 @@ static int bmp280_chip_config(struct bmp280_data *data) } static const int bmp280_oversampling_avail[] = { 1, 2, 4, 8, 16 }; +static const u8 bmp280_chip_ids[] = { BMP280_CHIP_ID }; const struct bmp280_chip_info bmp280_chip_info = { .id_reg = BMP280_REG_ID, - .chip_id = BMP280_CHIP_ID, + .chip_id = bmp280_chip_ids, + .num_chip_id = ARRAY_SIZE(bmp280_chip_ids), .regmap_config = &bmp280_regmap_config, .start_up_time = 2000, .channels = bmp280_channels, @@ -846,9 +849,12 @@ static int bme280_chip_config(struct bmp280_data *data) return bmp280_chip_config(data); } +static const u8 bme280_chip_ids[] = { BME280_CHIP_ID }; + const struct bmp280_chip_info bme280_chip_info = { .id_reg = BMP280_REG_ID, - .chip_id = BME280_CHIP_ID, + .chip_id = bme280_chip_ids, + .num_chip_id = ARRAY_SIZE(bme280_chip_ids), .regmap_config = &bmp280_regmap_config, .start_up_time = 2000, .channels = bmp280_channels, @@ -920,7 +926,7 @@ static int bmp380_cmd(struct bmp280_data *data, u8 cmd) } /* - * Returns temperature in Celsius dregrees, 
resolution is 0.01º C. Output value of + * Returns temperature in Celsius degrees, resolution is 0.01º C. Output value of * "5123" equals 51.2º C. t_fine carries fine temperature as global value. * * Taken from datasheet, Section Appendix 9, "Compensation formula" and repo @@ -1220,10 +1226,12 @@ static int bmp380_chip_config(struct bmp280_data *data) static const int bmp380_oversampling_avail[] = { 1, 2, 4, 8, 16, 32 }; static const int bmp380_iir_filter_coeffs_avail[] = { 1, 2, 4, 8, 16, 32, 64, 128}; +static const u8 bmp380_chip_ids[] = { BMP380_CHIP_ID, BMP390_CHIP_ID }; const struct bmp280_chip_info bmp380_chip_info = { .id_reg = BMP380_REG_ID, - .chip_id = BMP380_CHIP_ID, + .chip_id = bmp380_chip_ids, + .num_chip_id = ARRAY_SIZE(bmp380_chip_ids), .regmap_config = &bmp380_regmap_config, .start_up_time = 2000, .channels = bmp380_channels, @@ -1385,7 +1393,7 @@ static int bmp580_read_temp(struct bmp280_data *data, int *val, int *val2) /* * Temperature is returned in Celsius degrees in fractional - * form down 2^16. We reescale by x1000 to return milli Celsius + * form down 2^16. We rescale by x1000 to return milli Celsius * to respect IIO ABI. */ *val = raw_temp * 1000; @@ -1412,7 +1420,7 @@ static int bmp580_read_press(struct bmp280_data *data, int *val, int *val2) } /* * Pressure is returned in Pascals in fractional form down 2^16. - * We reescale /1000 to convert to kilopascal to respect IIO ABI. + * We rescale /1000 to convert to kilopascal to respect IIO ABI. */ *val = raw_press; *val2 = 64000; /* 2^6 * 1000 */ @@ -1720,10 +1728,12 @@ static int bmp580_chip_config(struct bmp280_data *data) } static const int bmp580_oversampling_avail[] = { 1, 2, 4, 8, 16, 32, 64, 128 }; +static const u8 bmp580_chip_ids[] = { BMP580_CHIP_ID, BMP580_CHIP_ID_ALT }; const struct bmp280_chip_info bmp580_chip_info = { .id_reg = BMP580_REG_CHIP_ID, - .chip_id = BMP580_CHIP_ID, + .chip_id = bmp580_chip_ids, + .num_chip_id = ARRAY_SIZE(bmp580_chip_ids), .regmap_config = &bmp580_regmap_config, .start_up_time = 2000, .channels = bmp380_channels, @@ -1983,10 +1993,12 @@ static int bmp180_chip_config(struct bmp280_data *data) static const int bmp180_oversampling_temp_avail[] = { 1 }; static const int bmp180_oversampling_press_avail[] = { 1, 2, 4, 8 }; +static const u8 bmp180_chip_ids[] = { BMP180_CHIP_ID }; const struct bmp280_chip_info bmp180_chip_info = { .id_reg = BMP280_REG_ID, - .chip_id = BMP180_CHIP_ID, + .chip_id = bmp180_chip_ids, + .num_chip_id = ARRAY_SIZE(bmp180_chip_ids), .regmap_config = &bmp180_regmap_config, .start_up_time = 2000, .channels = bmp280_channels, @@ -2077,6 +2089,7 @@ int bmp280_common_probe(struct device *dev, struct bmp280_data *data; struct gpio_desc *gpiod; unsigned int chip_id; + unsigned int i; int ret; indio_dev = devm_iio_device_alloc(dev, sizeof(*data)); @@ -2142,12 +2155,17 @@ int bmp280_common_probe(struct device *dev, ret = regmap_read(regmap, data->chip_info->id_reg, &chip_id); if (ret < 0) return ret; - if (chip_id != data->chip_info->chip_id) { - dev_err(dev, "bad chip id: expected %x got %x\n", - data->chip_info->chip_id, chip_id); - return -EINVAL; + + for (i = 0; i < data->chip_info->num_chip_id; i++) { + if (chip_id == data->chip_info->chip_id[i]) { + dev_info(dev, "0x%x is a known chip id for %s\n", chip_id, name); + break; + } } + if (i == data->chip_info->num_chip_id) + dev_warn(dev, "bad chip id: 0x%x is not a known chip id\n", chip_id); + if (data->chip_info->preinit) { ret = data->chip_info->preinit(data); if (ret) diff --git 
a/drivers/iio/pressure/bmp280-i2c.c b/drivers/iio/pressure/bmp280-i2c.c index dbe630ad05b5..34e3bc758493 100644 --- a/drivers/iio/pressure/bmp280-i2c.c +++ b/drivers/iio/pressure/bmp280-i2c.c @@ -7,13 +7,11 @@ static int bmp280_i2c_probe(struct i2c_client *client) { - struct regmap *regmap; - const struct bmp280_chip_info *chip_info; const struct i2c_device_id *id = i2c_client_get_device_id(client); + const struct bmp280_chip_info *chip_info; + struct regmap *regmap; - chip_info = device_get_match_data(&client->dev); - if (!chip_info) - chip_info = (const struct bmp280_chip_info *) id->driver_data; + chip_info = i2c_get_match_data(client); regmap = devm_regmap_init_i2c(client, chip_info->regmap_config); if (IS_ERR(regmap)) { diff --git a/drivers/iio/pressure/bmp280-spi.c b/drivers/iio/pressure/bmp280-spi.c index 1dff9bb7c4e9..433d6fac83c4 100644 --- a/drivers/iio/pressure/bmp280-spi.c +++ b/drivers/iio/pressure/bmp280-spi.c @@ -14,8 +14,7 @@ static int bmp280_regmap_spi_write(void *context, const void *data, size_t count) { - struct device *dev = context; - struct spi_device *spi = to_spi_device(dev); + struct spi_device *spi = to_spi_device(context); u8 buf[2]; memcpy(buf, data, 2); @@ -31,8 +30,7 @@ static int bmp280_regmap_spi_write(void *context, const void *data, static int bmp280_regmap_spi_read(void *context, const void *reg, size_t reg_size, void *val, size_t val_size) { - struct device *dev = context; - struct spi_device *spi = to_spi_device(dev); + struct spi_device *spi = to_spi_device(context); return spi_write_then_read(spi, reg, reg_size, val, val_size); } @@ -58,9 +56,7 @@ static int bmp280_spi_probe(struct spi_device *spi) return ret; } - chip_info = device_get_match_data(&spi->dev); - if (!chip_info) - chip_info = (const struct bmp280_chip_info *) id->driver_data; + chip_info = spi_get_device_match_data(spi); regmap = devm_regmap_init(&spi->dev, &bmp280_regmap_bus, diff --git a/drivers/iio/pressure/bmp280.h b/drivers/iio/pressure/bmp280.h index 5c0563ce7572..4012387d7956 100644 --- a/drivers/iio/pressure/bmp280.h +++ b/drivers/iio/pressure/bmp280.h @@ -292,6 +292,7 @@ #define BMP580_CHIP_ID_ALT 0x51 #define BMP180_CHIP_ID 0x55 #define BMP280_CHIP_ID 0x58 +#define BMP390_CHIP_ID 0x60 #define BME280_CHIP_ID 0x60 #define BMP280_SOFT_RESET_VAL 0xB6 @@ -410,7 +411,7 @@ struct bmp280_data { __le16 bmp280_cal_buf[BMP280_CONTIGUOUS_CALIB_REGS / 2]; __be16 bmp180_cal_buf[BMP180_REG_CALIB_COUNT / 2]; u8 bmp380_cal_buf[BMP380_CALIB_REG_COUNT]; - /* Miscellaneous, endianess-aware data buffers */ + /* Miscellaneous, endianness-aware data buffers */ __le16 le16; __be16 be16; } __aligned(IIO_DMA_MINALIGN); @@ -418,7 +419,8 @@ struct bmp280_data { struct bmp280_chip_info { unsigned int id_reg; - const unsigned int chip_id; + const u8 *chip_id; + int num_chip_id; const struct regmap_config *regmap_config; diff --git a/drivers/iio/pressure/hsc030pa.c b/drivers/iio/pressure/hsc030pa.c new file mode 100644 index 000000000000..d6a51f0c335f --- /dev/null +++ b/drivers/iio/pressure/hsc030pa.c @@ -0,0 +1,494 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Honeywell TruStability HSC Series pressure/temperature sensor + * + * Copyright (c) 2023 Petre Rodan <petre.rodan@subdimension.ro> + * + * Datasheet: https://prod-edam.honeywell.com/content/dam/honeywell-edam/sps/siot/en-us/products/sensors/pressure-sensors/board-mount-pressure-sensors/trustability-hsc-series/documents/sps-siot-trustability-hsc-series-high-accuracy-board-mount-pressure-sensors-50099148-a-en-ciid-151133.pdf + */ + +#include 
<linux/array_size.h> +#include <linux/bitfield.h> +#include <linux/bits.h> +#include <linux/cleanup.h> +#include <linux/init.h> +#include <linux/math64.h> +#include <linux/mod_devicetable.h> +#include <linux/module.h> +#include <linux/printk.h> +#include <linux/property.h> +#include <linux/regulator/consumer.h> +#include <linux/string.h> +#include <linux/types.h> +#include <linux/units.h> + +#include <linux/iio/iio.h> +#include <linux/iio/sysfs.h> + +#include <asm/unaligned.h> + +#include "hsc030pa.h" + +/* + * HSC_PRESSURE_TRIPLET_LEN - length for the string that defines the + * pressure range, measurement unit and type as per the part nomenclature. + * Consult honeywell,pressure-triplet in the bindings file for details. + */ +#define HSC_PRESSURE_TRIPLET_LEN 6 +#define HSC_STATUS_MASK GENMASK(7, 6) +#define HSC_TEMPERATURE_MASK GENMASK(15, 5) +#define HSC_PRESSURE_MASK GENMASK(29, 16) + +struct hsc_func_spec { + u32 output_min; + u32 output_max; +}; + +/* + * function A: 10% - 90% of 2^14 + * function B: 5% - 95% of 2^14 + * function C: 5% - 85% of 2^14 + * function F: 4% - 94% of 2^14 + */ +static const struct hsc_func_spec hsc_func_spec[] = { + [HSC_FUNCTION_A] = { .output_min = 1638, .output_max = 14746 }, + [HSC_FUNCTION_B] = { .output_min = 819, .output_max = 15565 }, + [HSC_FUNCTION_C] = { .output_min = 819, .output_max = 13926 }, + [HSC_FUNCTION_F] = { .output_min = 655, .output_max = 15401 }, +}; + +enum hsc_variants { + HSC001BA = 0x00, HSC1_6BA = 0x01, HSC2_5BA = 0x02, HSC004BA = 0x03, + HSC006BA = 0x04, HSC010BA = 0x05, HSC1_6MD = 0x06, HSC2_5MD = 0x07, + HSC004MD = 0x08, HSC006MD = 0x09, HSC010MD = 0x0a, HSC016MD = 0x0b, + HSC025MD = 0x0c, HSC040MD = 0x0d, HSC060MD = 0x0e, HSC100MD = 0x0f, + HSC160MD = 0x10, HSC250MD = 0x11, HSC400MD = 0x12, HSC600MD = 0x13, + HSC001BD = 0x14, HSC1_6BD = 0x15, HSC2_5BD = 0x16, HSC004BD = 0x17, + HSC2_5MG = 0x18, HSC004MG = 0x19, HSC006MG = 0x1a, HSC010MG = 0x1b, + HSC016MG = 0x1c, HSC025MG = 0x1d, HSC040MG = 0x1e, HSC060MG = 0x1f, + HSC100MG = 0x20, HSC160MG = 0x21, HSC250MG = 0x22, HSC400MG = 0x23, + HSC600MG = 0x24, HSC001BG = 0x25, HSC1_6BG = 0x26, HSC2_5BG = 0x27, + HSC004BG = 0x28, HSC006BG = 0x29, HSC010BG = 0x2a, HSC100KA = 0x2b, + HSC160KA = 0x2c, HSC250KA = 0x2d, HSC400KA = 0x2e, HSC600KA = 0x2f, + HSC001GA = 0x30, HSC160LD = 0x31, HSC250LD = 0x32, HSC400LD = 0x33, + HSC600LD = 0x34, HSC001KD = 0x35, HSC1_6KD = 0x36, HSC2_5KD = 0x37, + HSC004KD = 0x38, HSC006KD = 0x39, HSC010KD = 0x3a, HSC016KD = 0x3b, + HSC025KD = 0x3c, HSC040KD = 0x3d, HSC060KD = 0x3e, HSC100KD = 0x3f, + HSC160KD = 0x40, HSC250KD = 0x41, HSC400KD = 0x42, HSC250LG = 0x43, + HSC400LG = 0x44, HSC600LG = 0x45, HSC001KG = 0x46, HSC1_6KG = 0x47, + HSC2_5KG = 0x48, HSC004KG = 0x49, HSC006KG = 0x4a, HSC010KG = 0x4b, + HSC016KG = 0x4c, HSC025KG = 0x4d, HSC040KG = 0x4e, HSC060KG = 0x4f, + HSC100KG = 0x50, HSC160KG = 0x51, HSC250KG = 0x52, HSC400KG = 0x53, + HSC600KG = 0x54, HSC001GG = 0x55, HSC015PA = 0x56, HSC030PA = 0x57, + HSC060PA = 0x58, HSC100PA = 0x59, HSC150PA = 0x5a, HSC0_5ND = 0x5b, + HSC001ND = 0x5c, HSC002ND = 0x5d, HSC004ND = 0x5e, HSC005ND = 0x5f, + HSC010ND = 0x60, HSC020ND = 0x61, HSC030ND = 0x62, HSC001PD = 0x63, + HSC005PD = 0x64, HSC015PD = 0x65, HSC030PD = 0x66, HSC060PD = 0x67, + HSC001NG = 0x68, HSC002NG = 0x69, HSC004NG = 0x6a, HSC005NG = 0x6b, + HSC010NG = 0x6c, HSC020NG = 0x6d, HSC030NG = 0x6e, HSC001PG = 0x6f, + HSC005PG = 0x70, HSC015PG = 0x71, HSC030PG = 0x72, HSC060PG = 0x73, + HSC100PG = 0x74, HSC150PG = 0x75, HSC_VARIANTS_MAX +}; + +static 
const char * const hsc_triplet_variants[HSC_VARIANTS_MAX] = { + [HSC001BA] = "001BA", [HSC1_6BA] = "1.6BA", [HSC2_5BA] = "2.5BA", + [HSC004BA] = "004BA", [HSC006BA] = "006BA", [HSC010BA] = "010BA", + [HSC1_6MD] = "1.6MD", [HSC2_5MD] = "2.5MD", [HSC004MD] = "004MD", + [HSC006MD] = "006MD", [HSC010MD] = "010MD", [HSC016MD] = "016MD", + [HSC025MD] = "025MD", [HSC040MD] = "040MD", [HSC060MD] = "060MD", + [HSC100MD] = "100MD", [HSC160MD] = "160MD", [HSC250MD] = "250MD", + [HSC400MD] = "400MD", [HSC600MD] = "600MD", [HSC001BD] = "001BD", + [HSC1_6BD] = "1.6BD", [HSC2_5BD] = "2.5BD", [HSC004BD] = "004BD", + [HSC2_5MG] = "2.5MG", [HSC004MG] = "004MG", [HSC006MG] = "006MG", + [HSC010MG] = "010MG", [HSC016MG] = "016MG", [HSC025MG] = "025MG", + [HSC040MG] = "040MG", [HSC060MG] = "060MG", [HSC100MG] = "100MG", + [HSC160MG] = "160MG", [HSC250MG] = "250MG", [HSC400MG] = "400MG", + [HSC600MG] = "600MG", [HSC001BG] = "001BG", [HSC1_6BG] = "1.6BG", + [HSC2_5BG] = "2.5BG", [HSC004BG] = "004BG", [HSC006BG] = "006BG", + [HSC010BG] = "010BG", [HSC100KA] = "100KA", [HSC160KA] = "160KA", + [HSC250KA] = "250KA", [HSC400KA] = "400KA", [HSC600KA] = "600KA", + [HSC001GA] = "001GA", [HSC160LD] = "160LD", [HSC250LD] = "250LD", + [HSC400LD] = "400LD", [HSC600LD] = "600LD", [HSC001KD] = "001KD", + [HSC1_6KD] = "1.6KD", [HSC2_5KD] = "2.5KD", [HSC004KD] = "004KD", + [HSC006KD] = "006KD", [HSC010KD] = "010KD", [HSC016KD] = "016KD", + [HSC025KD] = "025KD", [HSC040KD] = "040KD", [HSC060KD] = "060KD", + [HSC100KD] = "100KD", [HSC160KD] = "160KD", [HSC250KD] = "250KD", + [HSC400KD] = "400KD", [HSC250LG] = "250LG", [HSC400LG] = "400LG", + [HSC600LG] = "600LG", [HSC001KG] = "001KG", [HSC1_6KG] = "1.6KG", + [HSC2_5KG] = "2.5KG", [HSC004KG] = "004KG", [HSC006KG] = "006KG", + [HSC010KG] = "010KG", [HSC016KG] = "016KG", [HSC025KG] = "025KG", + [HSC040KG] = "040KG", [HSC060KG] = "060KG", [HSC100KG] = "100KG", + [HSC160KG] = "160KG", [HSC250KG] = "250KG", [HSC400KG] = "400KG", + [HSC600KG] = "600KG", [HSC001GG] = "001GG", [HSC015PA] = "015PA", + [HSC030PA] = "030PA", [HSC060PA] = "060PA", [HSC100PA] = "100PA", + [HSC150PA] = "150PA", [HSC0_5ND] = "0.5ND", [HSC001ND] = "001ND", + [HSC002ND] = "002ND", [HSC004ND] = "004ND", [HSC005ND] = "005ND", + [HSC010ND] = "010ND", [HSC020ND] = "020ND", [HSC030ND] = "030ND", + [HSC001PD] = "001PD", [HSC005PD] = "005PD", [HSC015PD] = "015PD", + [HSC030PD] = "030PD", [HSC060PD] = "060PD", [HSC001NG] = "001NG", + [HSC002NG] = "002NG", [HSC004NG] = "004NG", [HSC005NG] = "005NG", + [HSC010NG] = "010NG", [HSC020NG] = "020NG", [HSC030NG] = "030NG", + [HSC001PG] = "001PG", [HSC005PG] = "005PG", [HSC015PG] = "015PG", + [HSC030PG] = "030PG", [HSC060PG] = "060PG", [HSC100PG] = "100PG", + [HSC150PG] = "150PG", +}; + +/** + * struct hsc_range_config - list of pressure ranges based on nomenclature + * @pmin: lowest pressure that can be measured + * @pmax: highest pressure that can be measured + */ +struct hsc_range_config { + const s32 pmin; + const s32 pmax; +}; + +/* All min max limits have been converted to pascals */ +static const struct hsc_range_config hsc_range_config[HSC_VARIANTS_MAX] = { + [HSC001BA] = { .pmin = 0, .pmax = 100000 }, + [HSC1_6BA] = { .pmin = 0, .pmax = 160000 }, + [HSC2_5BA] = { .pmin = 0, .pmax = 250000 }, + [HSC004BA] = { .pmin = 0, .pmax = 400000 }, + [HSC006BA] = { .pmin = 0, .pmax = 600000 }, + [HSC010BA] = { .pmin = 0, .pmax = 1000000 }, + [HSC1_6MD] = { .pmin = -160, .pmax = 160 }, + [HSC2_5MD] = { .pmin = -250, .pmax = 250 }, + [HSC004MD] = { .pmin = -400, .pmax = 400 }, + 
[HSC006MD] = { .pmin = -600, .pmax = 600 }, + [HSC010MD] = { .pmin = -1000, .pmax = 1000 }, + [HSC016MD] = { .pmin = -1600, .pmax = 1600 }, + [HSC025MD] = { .pmin = -2500, .pmax = 2500 }, + [HSC040MD] = { .pmin = -4000, .pmax = 4000 }, + [HSC060MD] = { .pmin = -6000, .pmax = 6000 }, + [HSC100MD] = { .pmin = -10000, .pmax = 10000 }, + [HSC160MD] = { .pmin = -16000, .pmax = 16000 }, + [HSC250MD] = { .pmin = -25000, .pmax = 25000 }, + [HSC400MD] = { .pmin = -40000, .pmax = 40000 }, + [HSC600MD] = { .pmin = -60000, .pmax = 60000 }, + [HSC001BD] = { .pmin = -100000, .pmax = 100000 }, + [HSC1_6BD] = { .pmin = -160000, .pmax = 160000 }, + [HSC2_5BD] = { .pmin = -250000, .pmax = 250000 }, + [HSC004BD] = { .pmin = -400000, .pmax = 400000 }, + [HSC2_5MG] = { .pmin = 0, .pmax = 250 }, + [HSC004MG] = { .pmin = 0, .pmax = 400 }, + [HSC006MG] = { .pmin = 0, .pmax = 600 }, + [HSC010MG] = { .pmin = 0, .pmax = 1000 }, + [HSC016MG] = { .pmin = 0, .pmax = 1600 }, + [HSC025MG] = { .pmin = 0, .pmax = 2500 }, + [HSC040MG] = { .pmin = 0, .pmax = 4000 }, + [HSC060MG] = { .pmin = 0, .pmax = 6000 }, + [HSC100MG] = { .pmin = 0, .pmax = 10000 }, + [HSC160MG] = { .pmin = 0, .pmax = 16000 }, + [HSC250MG] = { .pmin = 0, .pmax = 25000 }, + [HSC400MG] = { .pmin = 0, .pmax = 40000 }, + [HSC600MG] = { .pmin = 0, .pmax = 60000 }, + [HSC001BG] = { .pmin = 0, .pmax = 100000 }, + [HSC1_6BG] = { .pmin = 0, .pmax = 160000 }, + [HSC2_5BG] = { .pmin = 0, .pmax = 250000 }, + [HSC004BG] = { .pmin = 0, .pmax = 400000 }, + [HSC006BG] = { .pmin = 0, .pmax = 600000 }, + [HSC010BG] = { .pmin = 0, .pmax = 1000000 }, + [HSC100KA] = { .pmin = 0, .pmax = 100000 }, + [HSC160KA] = { .pmin = 0, .pmax = 160000 }, + [HSC250KA] = { .pmin = 0, .pmax = 250000 }, + [HSC400KA] = { .pmin = 0, .pmax = 400000 }, + [HSC600KA] = { .pmin = 0, .pmax = 600000 }, + [HSC001GA] = { .pmin = 0, .pmax = 1000000 }, + [HSC160LD] = { .pmin = -160, .pmax = 160 }, + [HSC250LD] = { .pmin = -250, .pmax = 250 }, + [HSC400LD] = { .pmin = -400, .pmax = 400 }, + [HSC600LD] = { .pmin = -600, .pmax = 600 }, + [HSC001KD] = { .pmin = -1000, .pmax = 1000 }, + [HSC1_6KD] = { .pmin = -1600, .pmax = 1600 }, + [HSC2_5KD] = { .pmin = -2500, .pmax = 2500 }, + [HSC004KD] = { .pmin = -4000, .pmax = 4000 }, + [HSC006KD] = { .pmin = -6000, .pmax = 6000 }, + [HSC010KD] = { .pmin = -10000, .pmax = 10000 }, + [HSC016KD] = { .pmin = -16000, .pmax = 16000 }, + [HSC025KD] = { .pmin = -25000, .pmax = 25000 }, + [HSC040KD] = { .pmin = -40000, .pmax = 40000 }, + [HSC060KD] = { .pmin = -60000, .pmax = 60000 }, + [HSC100KD] = { .pmin = -100000, .pmax = 100000 }, + [HSC160KD] = { .pmin = -160000, .pmax = 160000 }, + [HSC250KD] = { .pmin = -250000, .pmax = 250000 }, + [HSC400KD] = { .pmin = -400000, .pmax = 400000 }, + [HSC250LG] = { .pmin = 0, .pmax = 250 }, + [HSC400LG] = { .pmin = 0, .pmax = 400 }, + [HSC600LG] = { .pmin = 0, .pmax = 600 }, + [HSC001KG] = { .pmin = 0, .pmax = 1000 }, + [HSC1_6KG] = { .pmin = 0, .pmax = 1600 }, + [HSC2_5KG] = { .pmin = 0, .pmax = 2500 }, + [HSC004KG] = { .pmin = 0, .pmax = 4000 }, + [HSC006KG] = { .pmin = 0, .pmax = 6000 }, + [HSC010KG] = { .pmin = 0, .pmax = 10000 }, + [HSC016KG] = { .pmin = 0, .pmax = 16000 }, + [HSC025KG] = { .pmin = 0, .pmax = 25000 }, + [HSC040KG] = { .pmin = 0, .pmax = 40000 }, + [HSC060KG] = { .pmin = 0, .pmax = 60000 }, + [HSC100KG] = { .pmin = 0, .pmax = 100000 }, + [HSC160KG] = { .pmin = 0, .pmax = 160000 }, + [HSC250KG] = { .pmin = 0, .pmax = 250000 }, + [HSC400KG] = { .pmin = 0, .pmax = 400000 }, + [HSC600KG] = { .pmin = 0, .pmax = 600000 
}, + [HSC001GG] = { .pmin = 0, .pmax = 1000000 }, + [HSC015PA] = { .pmin = 0, .pmax = 103421 }, + [HSC030PA] = { .pmin = 0, .pmax = 206843 }, + [HSC060PA] = { .pmin = 0, .pmax = 413685 }, + [HSC100PA] = { .pmin = 0, .pmax = 689476 }, + [HSC150PA] = { .pmin = 0, .pmax = 1034214 }, + [HSC0_5ND] = { .pmin = -125, .pmax = 125 }, + [HSC001ND] = { .pmin = -249, .pmax = 249 }, + [HSC002ND] = { .pmin = -498, .pmax = 498 }, + [HSC004ND] = { .pmin = -996, .pmax = 996 }, + [HSC005ND] = { .pmin = -1245, .pmax = 1245 }, + [HSC010ND] = { .pmin = -2491, .pmax = 2491 }, + [HSC020ND] = { .pmin = -4982, .pmax = 4982 }, + [HSC030ND] = { .pmin = -7473, .pmax = 7473 }, + [HSC001PD] = { .pmin = -6895, .pmax = 6895 }, + [HSC005PD] = { .pmin = -34474, .pmax = 34474 }, + [HSC015PD] = { .pmin = -103421, .pmax = 103421 }, + [HSC030PD] = { .pmin = -206843, .pmax = 206843 }, + [HSC060PD] = { .pmin = -413685, .pmax = 413685 }, + [HSC001NG] = { .pmin = 0, .pmax = 249 }, + [HSC002NG] = { .pmin = 0, .pmax = 498 }, + [HSC004NG] = { .pmin = 0, .pmax = 996 }, + [HSC005NG] = { .pmin = 0, .pmax = 1245 }, + [HSC010NG] = { .pmin = 0, .pmax = 2491 }, + [HSC020NG] = { .pmin = 0, .pmax = 4982 }, + [HSC030NG] = { .pmin = 0, .pmax = 7473 }, + [HSC001PG] = { .pmin = 0, .pmax = 6895 }, + [HSC005PG] = { .pmin = 0, .pmax = 34474 }, + [HSC015PG] = { .pmin = 0, .pmax = 103421 }, + [HSC030PG] = { .pmin = 0, .pmax = 206843 }, + [HSC060PG] = { .pmin = 0, .pmax = 413685 }, + [HSC100PG] = { .pmin = 0, .pmax = 689476 }, + [HSC150PG] = { .pmin = 0, .pmax = 1034214 }, +}; + +/** + * hsc_measurement_is_valid() - validate last conversion via status bits + * @data: structure containing instantiated sensor data + * Return: true only if both status bits are zero + * + * the two MSB from the first transfered byte contain a status code + * 00 - normal operation, valid data + * 01 - device in factory programming mode + * 10 - stale data + * 11 - diagnostic condition + */ +static bool hsc_measurement_is_valid(struct hsc_data *data) +{ + return !(data->buffer[0] & HSC_STATUS_MASK); +} + +static int hsc_get_measurement(struct hsc_data *data) +{ + const struct hsc_chip_data *chip = data->chip; + int ret; + + ret = data->recv_cb(data); + if (ret < 0) + return ret; + + data->is_valid = chip->valid(data); + if (!data->is_valid) + return -EAGAIN; + + return 0; +} + +/* + * IIO ABI expects + * value = (conv + offset) * scale + * + * datasheet provides the following formula for determining the temperature + * temp[C] = conv * a + b + * where a = 200/2047; b = -50 + * + * temp[C] = (conv + (b/a)) * a * (1000) + * => + * scale = a * 1000 = .097703957 * 1000 = 97.703957 + * offset = b/a = -50 / .097703957 = -50000000 / 97704 + * + * based on the datasheet + * pressure = (conv - Omin) * Q + Pmin = + * ((conv - Omin) + Pmin/Q) * Q + * => + * scale = Q = (Pmax - Pmin) / (Omax - Omin) + * offset = Pmin/Q - Omin = Pmin * (Omax - Omin) / (Pmax - Pmin) - Omin + */ +static int hsc_read_raw(struct iio_dev *indio_dev, + struct iio_chan_spec const *channel, int *val, + int *val2, long mask) +{ + struct hsc_data *data = iio_priv(indio_dev); + int ret; + u32 recvd; + + switch (mask) { + case IIO_CHAN_INFO_RAW: + ret = hsc_get_measurement(data); + if (ret) + return ret; + + recvd = get_unaligned_be32(data->buffer); + switch (channel->type) { + case IIO_PRESSURE: + *val = FIELD_GET(HSC_PRESSURE_MASK, recvd); + return IIO_VAL_INT; + case IIO_TEMP: + *val = FIELD_GET(HSC_TEMPERATURE_MASK, recvd); + return IIO_VAL_INT; + default: + return -EINVAL; + } + + case IIO_CHAN_INFO_SCALE: + 
switch (channel->type) { + case IIO_TEMP: + *val = 97; + *val2 = 703957; + return IIO_VAL_INT_PLUS_MICRO; + case IIO_PRESSURE: + *val = data->p_scale; + *val2 = data->p_scale_dec; + return IIO_VAL_INT_PLUS_NANO; + default: + return -EINVAL; + } + + case IIO_CHAN_INFO_OFFSET: + switch (channel->type) { + case IIO_TEMP: + *val = -50000000; + *val2 = 97704; + return IIO_VAL_FRACTIONAL; + case IIO_PRESSURE: + *val = data->p_offset; + *val2 = data->p_offset_dec; + return IIO_VAL_INT_PLUS_MICRO; + default: + return -EINVAL; + } + + default: + return -EINVAL; + } +} + +static const struct iio_chan_spec hsc_channels[] = { + { + .type = IIO_PRESSURE, + .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | + BIT(IIO_CHAN_INFO_SCALE) | + BIT(IIO_CHAN_INFO_OFFSET), + }, + { + .type = IIO_TEMP, + .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | + BIT(IIO_CHAN_INFO_SCALE) | + BIT(IIO_CHAN_INFO_OFFSET), + }, +}; + +static const struct iio_info hsc_info = { + .read_raw = hsc_read_raw, +}; + +static const struct hsc_chip_data hsc_chip = { + .valid = hsc_measurement_is_valid, + .channels = hsc_channels, + .num_channels = ARRAY_SIZE(hsc_channels), +}; + +int hsc_common_probe(struct device *dev, hsc_recv_fn recv) +{ + struct hsc_data *hsc; + struct iio_dev *indio_dev; + const char *triplet; + u64 tmp; + int ret; + + indio_dev = devm_iio_device_alloc(dev, sizeof(*hsc)); + if (!indio_dev) + return -ENOMEM; + + hsc = iio_priv(indio_dev); + + hsc->chip = &hsc_chip; + hsc->recv_cb = recv; + hsc->dev = dev; + + ret = device_property_read_u32(dev, "honeywell,transfer-function", + &hsc->function); + if (ret) + return dev_err_probe(dev, ret, + "honeywell,transfer-function could not be read\n"); + if (hsc->function > HSC_FUNCTION_F) + return dev_err_probe(dev, -EINVAL, + "honeywell,transfer-function %d invalid\n", + hsc->function); + + ret = device_property_read_string(dev, "honeywell,pressure-triplet", + &triplet); + if (ret) + return dev_err_probe(dev, ret, + "honeywell,pressure-triplet could not be read\n"); + + if (str_has_prefix(triplet, "NA")) { + ret = device_property_read_u32(dev, "honeywell,pmin-pascal", + &hsc->pmin); + if (ret) + return dev_err_probe(dev, ret, + "honeywell,pmin-pascal could not be read\n"); + + ret = device_property_read_u32(dev, "honeywell,pmax-pascal", + &hsc->pmax); + if (ret) + return dev_err_probe(dev, ret, + "honeywell,pmax-pascal could not be read\n"); + } else { + ret = device_property_match_property_string(dev, + "honeywell,pressure-triplet", + hsc_triplet_variants, + HSC_VARIANTS_MAX); + if (ret < 0) + return dev_err_probe(dev, -EINVAL, + "honeywell,pressure-triplet is invalid\n"); + + hsc->pmin = hsc_range_config[ret].pmin; + hsc->pmax = hsc_range_config[ret].pmax; + } + + if (hsc->pmin >= hsc->pmax) + return dev_err_probe(dev, -EINVAL, + "pressure limits are invalid\n"); + + ret = devm_regulator_get_enable(dev, "vdd"); + if (ret) + return dev_err_probe(dev, ret, "can't get vdd supply\n"); + + hsc->outmin = hsc_func_spec[hsc->function].output_min; + hsc->outmax = hsc_func_spec[hsc->function].output_max; + + tmp = div_s64(((s64)(hsc->pmax - hsc->pmin)) * MICRO, + hsc->outmax - hsc->outmin); + hsc->p_scale = div_s64_rem(tmp, NANO, &hsc->p_scale_dec); + tmp = div_s64(((s64)hsc->pmin * (s64)(hsc->outmax - hsc->outmin)) * MICRO, + hsc->pmax - hsc->pmin); + tmp -= (s64)hsc->outmin * MICRO; + hsc->p_offset = div_s64_rem(tmp, MICRO, &hsc->p_offset_dec); + + indio_dev->name = "hsc030pa"; + indio_dev->modes = INDIO_DIRECT_MODE; + indio_dev->info = &hsc_info; + indio_dev->channels = 
hsc->chip->channels; + indio_dev->num_channels = hsc->chip->num_channels; + + return devm_iio_device_register(dev, indio_dev); +} +EXPORT_SYMBOL_NS(hsc_common_probe, IIO_HONEYWELL_HSC030PA); + +MODULE_AUTHOR("Petre Rodan <petre.rodan@subdimension.ro>"); +MODULE_DESCRIPTION("Honeywell HSC and SSC pressure sensor core driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/iio/pressure/hsc030pa.h b/drivers/iio/pressure/hsc030pa.h new file mode 100644 index 000000000000..d20420dba4f6 --- /dev/null +++ b/drivers/iio/pressure/hsc030pa.h @@ -0,0 +1,74 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Honeywell TruStability HSC Series pressure/temperature sensor + * + * Copyright (c) 2023 Petre Rodan <petre.rodan@subdimension.ro> + */ + +#ifndef _HSC030PA_H +#define _HSC030PA_H + +#include <linux/types.h> + +#define HSC_REG_MEASUREMENT_RD_SIZE 4 + +struct device; + +struct iio_chan_spec; +struct iio_dev; + +struct hsc_data; +struct hsc_chip_data; + +typedef int (*hsc_recv_fn)(struct hsc_data *); + +/** + * struct hsc_data + * @dev: current device structure + * @chip: structure containing chip's channel properties + * @recv_cb: function that implements the chip reads + * @is_valid: true if last transfer has been validated + * @pmin: minimum measurable pressure limit + * @pmax: maximum measurable pressure limit + * @outmin: minimum raw pressure in counts (based on transfer function) + * @outmax: maximum raw pressure in counts (based on transfer function) + * @function: transfer function + * @p_scale: pressure scale + * @p_scale_dec: pressure scale, decimal places + * @p_offset: pressure offset + * @p_offset_dec: pressure offset, decimal places + * @buffer: raw conversion data + */ +struct hsc_data { + struct device *dev; + const struct hsc_chip_data *chip; + hsc_recv_fn recv_cb; + bool is_valid; + s32 pmin; + s32 pmax; + u32 outmin; + u32 outmax; + u32 function; + s64 p_scale; + s32 p_scale_dec; + s64 p_offset; + s32 p_offset_dec; + u8 buffer[HSC_REG_MEASUREMENT_RD_SIZE] __aligned(IIO_DMA_MINALIGN); +}; + +struct hsc_chip_data { + bool (*valid)(struct hsc_data *data); + const struct iio_chan_spec *channels; + u8 num_channels; +}; + +enum hsc_func_id { + HSC_FUNCTION_A, + HSC_FUNCTION_B, + HSC_FUNCTION_C, + HSC_FUNCTION_F, +}; + +int hsc_common_probe(struct device *dev, hsc_recv_fn recv); + +#endif diff --git a/drivers/iio/pressure/hsc030pa_i2c.c b/drivers/iio/pressure/hsc030pa_i2c.c new file mode 100644 index 000000000000..e2b524b36417 --- /dev/null +++ b/drivers/iio/pressure/hsc030pa_i2c.c @@ -0,0 +1,69 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Honeywell TruStability HSC Series pressure/temperature sensor + * + * Copyright (c) 2023 Petre Rodan <petre.rodan@subdimension.ro> + * + * Datasheet: https://prod-edam.honeywell.com/content/dam/honeywell-edam/sps/siot/en-us/products/sensors/pressure-sensors/board-mount-pressure-sensors/trustability-hsc-series/documents/sps-siot-trustability-hsc-series-high-accuracy-board-mount-pressure-sensors-50099148-a-en-ciid-151133.pdf [hsc] + * Datasheet: https://prod-edam.honeywell.com/content/dam/honeywell-edam/sps/siot/en-us/products/sensors/pressure-sensors/board-mount-pressure-sensors/common/documents/sps-siot-i2c-comms-digital-output-pressure-sensors-tn-008201-3-en-ciid-45841.pdf [i2c related] + */ + +#include <linux/errno.h> +#include <linux/i2c.h> +#include <linux/mod_devicetable.h> +#include <linux/module.h> + +#include <linux/iio/iio.h> + +#include "hsc030pa.h" + +static int hsc_i2c_recv(struct hsc_data *data) +{ + struct i2c_client *client = 
to_i2c_client(data->dev); + struct i2c_msg msg; + int ret; + + msg.addr = client->addr; + msg.flags = client->flags | I2C_M_RD; + msg.len = HSC_REG_MEASUREMENT_RD_SIZE; + msg.buf = data->buffer; + + ret = i2c_transfer(client->adapter, &msg, 1); + + return (ret == 2) ? 0 : ret; +} + +static int hsc_i2c_probe(struct i2c_client *client) +{ + if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) + return -EOPNOTSUPP; + + return hsc_common_probe(&client->dev, hsc_i2c_recv); +} + +static const struct of_device_id hsc_i2c_match[] = { + { .compatible = "honeywell,hsc030pa" }, + {} +}; +MODULE_DEVICE_TABLE(of, hsc_i2c_match); + +static const struct i2c_device_id hsc_i2c_id[] = { + { "hsc030pa" }, + {} +}; +MODULE_DEVICE_TABLE(i2c, hsc_i2c_id); + +static struct i2c_driver hsc_i2c_driver = { + .driver = { + .name = "hsc030pa", + .of_match_table = hsc_i2c_match, + }, + .probe = hsc_i2c_probe, + .id_table = hsc_i2c_id, +}; +module_i2c_driver(hsc_i2c_driver); + +MODULE_AUTHOR("Petre Rodan <petre.rodan@subdimension.ro>"); +MODULE_DESCRIPTION("Honeywell HSC and SSC pressure sensor i2c driver"); +MODULE_LICENSE("GPL"); +MODULE_IMPORT_NS(IIO_HONEYWELL_HSC030PA); diff --git a/drivers/iio/pressure/hsc030pa_spi.c b/drivers/iio/pressure/hsc030pa_spi.c new file mode 100644 index 000000000000..a719bade8326 --- /dev/null +++ b/drivers/iio/pressure/hsc030pa_spi.c @@ -0,0 +1,61 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Honeywell TruStability HSC Series pressure/temperature sensor + * + * Copyright (c) 2023 Petre Rodan <petre.rodan@subdimension.ro> + * + * Datasheet: https://prod-edam.honeywell.com/content/dam/honeywell-edam/sps/siot/en-us/products/sensors/pressure-sensors/board-mount-pressure-sensors/trustability-hsc-series/documents/sps-siot-trustability-hsc-series-high-accuracy-board-mount-pressure-sensors-50099148-a-en-ciid-151133.pdf + */ + +#include <linux/mod_devicetable.h> +#include <linux/module.h> +#include <linux/spi/spi.h> +#include <linux/stddef.h> + +#include <linux/iio/iio.h> + +#include "hsc030pa.h" + +static int hsc_spi_recv(struct hsc_data *data) +{ + struct spi_device *spi = to_spi_device(data->dev); + struct spi_transfer xfer = { + .tx_buf = NULL, + .rx_buf = data->buffer, + .len = HSC_REG_MEASUREMENT_RD_SIZE, + }; + + return spi_sync_transfer(spi, &xfer, 1); +} + +static int hsc_spi_probe(struct spi_device *spi) +{ + return hsc_common_probe(&spi->dev, hsc_spi_recv); +} + +static const struct of_device_id hsc_spi_match[] = { + { .compatible = "honeywell,hsc030pa" }, + {} +}; +MODULE_DEVICE_TABLE(of, hsc_spi_match); + +static const struct spi_device_id hsc_spi_id[] = { + { "hsc030pa" }, + {} +}; +MODULE_DEVICE_TABLE(spi, hsc_spi_id); + +static struct spi_driver hsc_spi_driver = { + .driver = { + .name = "hsc030pa", + .of_match_table = hsc_spi_match, + }, + .probe = hsc_spi_probe, + .id_table = hsc_spi_id, +}; +module_spi_driver(hsc_spi_driver); + +MODULE_AUTHOR("Petre Rodan <petre.rodan@subdimension.ro>"); +MODULE_DESCRIPTION("Honeywell HSC and SSC pressure sensor spi driver"); +MODULE_LICENSE("GPL"); +MODULE_IMPORT_NS(IIO_HONEYWELL_HSC030PA); diff --git a/drivers/iio/proximity/irsd200.c b/drivers/iio/proximity/irsd200.c index bdff91f6b1a3..323ac6dac90e 100644 --- a/drivers/iio/proximity/irsd200.c +++ b/drivers/iio/proximity/irsd200.c @@ -7,7 +7,6 @@ #include <asm/unaligned.h> #include <linux/bitfield.h> -#include <linux/gpio.h> #include <linux/i2c.h> #include <linux/module.h> #include <linux/regmap.h> diff --git a/drivers/iio/proximity/sx9324.c b/drivers/iio/proximity/sx9324.c 
index 438f9c9aba6e..ac2ed2da21cc 100644 --- a/drivers/iio/proximity/sx9324.c +++ b/drivers/iio/proximity/sx9324.c @@ -888,7 +888,6 @@ sx9324_get_default_reg(struct device *dev, int idx, char prop[] = SX9324_PROXRAW_DEF; u32 start = 0, raw = 0, pos = 0; int ret, count, ph, pin; - const char *res; memcpy(reg_def, &sx9324_default_regs[idx], sizeof(*reg_def)); @@ -915,24 +914,21 @@ sx9324_get_default_reg(struct device *dev, int idx, reg_def->def = raw; break; case SX9324_REG_AFE_CTRL0: - ret = device_property_read_string(dev, - "semtech,cs-idle-sleep", &res); - if (!ret) - ret = match_string(sx9324_csidle, ARRAY_SIZE(sx9324_csidle), res); + ret = device_property_match_property_string(dev, "semtech,cs-idle-sleep", + sx9324_csidle, + ARRAY_SIZE(sx9324_csidle)); if (ret >= 0) { reg_def->def &= ~SX9324_REG_AFE_CTRL0_CSIDLE_MASK; reg_def->def |= ret << SX9324_REG_AFE_CTRL0_CSIDLE_SHIFT; } - ret = device_property_read_string(dev, - "semtech,int-comp-resistor", &res); - if (ret) - break; - ret = match_string(sx9324_rints, ARRAY_SIZE(sx9324_rints), res); - if (ret < 0) - break; - reg_def->def &= ~SX9324_REG_AFE_CTRL0_RINT_MASK; - reg_def->def |= ret << SX9324_REG_AFE_CTRL0_RINT_SHIFT; + ret = device_property_match_property_string(dev, "semtech,int-comp-resistor", + sx9324_rints, + ARRAY_SIZE(sx9324_rints)); + if (ret >= 0) { + reg_def->def &= ~SX9324_REG_AFE_CTRL0_RINT_MASK; + reg_def->def |= ret << SX9324_REG_AFE_CTRL0_RINT_SHIFT; + } break; case SX9324_REG_AFE_CTRL4: case SX9324_REG_AFE_CTRL7: diff --git a/drivers/iio/resolver/ad2s1210.c b/drivers/iio/resolver/ad2s1210.c index 1bd1b950e7cc..a414eef12e5e 100644 --- a/drivers/iio/resolver/ad2s1210.c +++ b/drivers/iio/resolver/ad2s1210.c @@ -141,7 +141,7 @@ struct ad2s1210_state { struct spi_device *sdev; /** GPIO pin connected to SAMPLE line. */ struct gpio_desc *sample_gpio; - /** GPIO pins connected to A0 and A1 lines. */ + /** GPIO pins connected to A0 and A1 lines (optional). */ struct gpio_descs *mode_gpios; /** Used to access config registers. */ struct regmap *regmap; @@ -149,6 +149,8 @@ struct ad2s1210_state { unsigned long clkin_hz; /** Available raw hysteresis values based on resolution. */ int hysteresis_available[2]; + /* adi,fixed-mode property - only valid when mode_gpios == NULL. */ + enum ad2s1210_mode fixed_mode; /** The selected resolution */ enum ad2s1210_resolution resolution; /** Copy of fault register from the previous read. */ @@ -175,6 +177,9 @@ static int ad2s1210_set_mode(struct ad2s1210_state *st, enum ad2s1210_mode mode) struct gpio_descs *gpios = st->mode_gpios; DECLARE_BITMAP(bitmap, 2); + if (!gpios) + return mode == st->fixed_mode ? 0 : -EOPNOTSUPP; + bitmap[0] = mode; return gpiod_set_array_value(gpios->ndescs, gpios->desc, gpios->info, @@ -276,7 +281,8 @@ static int ad2s1210_regmap_reg_read(void *context, unsigned int reg, * parity error. The fault register is read-only and the D7 bit means * something else there. 
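The sx9324 and tmag5273 hunks above both switch from the device_property_read_string() plus match_string() pair to the new device_property_match_property_string() helper, which returns the index of the matching table entry or a negative errno. A minimal usage sketch, with the property name, table and struct invented purely for illustration:

        #include <linux/device.h>
        #include <linux/kernel.h>
        #include <linux/property.h>

        /* illustrative only: "vendor,demo-mode" and demo_modes[] are made up */
        static const char * const demo_modes[] = { "slow", "fast" };

        struct demo_cfg {
                int mode_index;
        };

        static void demo_apply_mode(struct device *dev, struct demo_cfg *cfg)
        {
                int ret;

                ret = device_property_match_property_string(dev, "vendor,demo-mode",
                                                            demo_modes,
                                                            ARRAY_SIZE(demo_modes));
                if (ret >= 0)
                        cfg->mode_index = ret;  /* index into demo_modes[] */
                /* a negative return means the property is absent or unrecognized,
                 * so the caller keeps its default, as the hunks above do */
        }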
*/ - if (reg != AD2S1210_REG_FAULT && st->rx[1] & AD2S1210_ADDRESS_DATA) + if ((reg > AD2S1210_REG_VELOCITY_LSB && reg != AD2S1210_REG_FAULT) + && st->rx[1] & AD2S1210_ADDRESS_DATA) return -EBADMSG; *val = st->rx[1]; @@ -450,21 +456,53 @@ static int ad2s1210_single_conversion(struct iio_dev *indio_dev, ad2s1210_toggle_sample_line(st); timestamp = iio_get_time_ns(indio_dev); - switch (chan->type) { - case IIO_ANGL: - ret = ad2s1210_set_mode(st, MOD_POS); - break; - case IIO_ANGL_VEL: - ret = ad2s1210_set_mode(st, MOD_VEL); - break; - default: - return -EINVAL; + if (st->fixed_mode == MOD_CONFIG) { + unsigned int reg_val; + + switch (chan->type) { + case IIO_ANGL: + ret = regmap_bulk_read(st->regmap, + AD2S1210_REG_POSITION_MSB, + &st->sample.raw, 2); + if (ret < 0) + return ret; + + break; + case IIO_ANGL_VEL: + ret = regmap_bulk_read(st->regmap, + AD2S1210_REG_VELOCITY_MSB, + &st->sample.raw, 2); + if (ret < 0) + return ret; + + break; + default: + return -EINVAL; + } + + ret = regmap_read(st->regmap, AD2S1210_REG_FAULT, ®_val); + if (ret < 0) + return ret; + + st->sample.fault = reg_val; + } else { + switch (chan->type) { + case IIO_ANGL: + ret = ad2s1210_set_mode(st, MOD_POS); + break; + case IIO_ANGL_VEL: + ret = ad2s1210_set_mode(st, MOD_VEL); + break; + default: + return -EINVAL; + } + if (ret < 0) + return ret; + + ret = spi_read(st->sdev, &st->sample, 3); + if (ret < 0) + return ret; } - if (ret < 0) - return ret; - ret = spi_read(st->sdev, &st->sample, 3); - if (ret < 0) - return ret; switch (chan->type) { case IIO_ANGL: @@ -1252,27 +1290,53 @@ static irqreturn_t ad2s1210_trigger_handler(int irq, void *p) ad2s1210_toggle_sample_line(st); if (test_bit(0, indio_dev->active_scan_mask)) { - ret = ad2s1210_set_mode(st, MOD_POS); - if (ret < 0) - goto error_ret; - - ret = spi_read(st->sdev, &st->sample, 3); - if (ret < 0) - goto error_ret; + if (st->fixed_mode == MOD_CONFIG) { + ret = regmap_bulk_read(st->regmap, + AD2S1210_REG_POSITION_MSB, + &st->sample.raw, 2); + if (ret < 0) + goto error_ret; + } else { + ret = ad2s1210_set_mode(st, MOD_POS); + if (ret < 0) + goto error_ret; + + ret = spi_read(st->sdev, &st->sample, 3); + if (ret < 0) + goto error_ret; + } memcpy(&st->scan.chan[chan++], &st->sample.raw, 2); } if (test_bit(1, indio_dev->active_scan_mask)) { - ret = ad2s1210_set_mode(st, MOD_VEL); - if (ret < 0) - goto error_ret; + if (st->fixed_mode == MOD_CONFIG) { + ret = regmap_bulk_read(st->regmap, + AD2S1210_REG_VELOCITY_MSB, + &st->sample.raw, 2); + if (ret < 0) + goto error_ret; + } else { + ret = ad2s1210_set_mode(st, MOD_VEL); + if (ret < 0) + goto error_ret; + + ret = spi_read(st->sdev, &st->sample, 3); + if (ret < 0) + goto error_ret; + } - ret = spi_read(st->sdev, &st->sample, 3); + memcpy(&st->scan.chan[chan++], &st->sample.raw, 2); + } + + if (st->fixed_mode == MOD_CONFIG) { + unsigned int reg_val; + + ret = regmap_read(st->regmap, AD2S1210_REG_FAULT, ®_val); if (ret < 0) - goto error_ret; + return ret; - memcpy(&st->scan.chan[chan++], &st->sample.raw, 2); + st->sample.fault = reg_val; } ad2s1210_push_events(indio_dev, st->sample.fault, pf->timestamp); @@ -1299,9 +1363,24 @@ static const struct iio_info ad2s1210_info = { static int ad2s1210_setup_properties(struct ad2s1210_state *st) { struct device *dev = &st->sdev->dev; + const char *str_val; u32 val; int ret; + ret = device_property_read_string(dev, "adi,fixed-mode", &str_val); + if (ret == -EINVAL) + st->fixed_mode = -1; + else if (ret < 0) + return dev_err_probe(dev, ret, + "failed to read adi,fixed-mode 
property\n"); + else { + if (strcmp(str_val, "config")) + return dev_err_probe(dev, -EINVAL, + "only adi,fixed-mode=\"config\" is supported\n"); + + st->fixed_mode = MOD_CONFIG; + } + ret = device_property_read_u32(dev, "assigned-resolution-bits", &val); if (ret < 0) return dev_err_probe(dev, ret, @@ -1347,6 +1426,7 @@ static int ad2s1210_setup_gpios(struct ad2s1210_state *st) { struct device *dev = &st->sdev->dev; struct gpio_descs *resolution_gpios; + struct gpio_desc *reset_gpio; DECLARE_BITMAP(bitmap, 2); int ret; @@ -1357,12 +1437,21 @@ static int ad2s1210_setup_gpios(struct ad2s1210_state *st) "failed to request sample GPIO\n"); /* both pins high means that we start in config mode */ - st->mode_gpios = devm_gpiod_get_array(dev, "mode", GPIOD_OUT_HIGH); + st->mode_gpios = devm_gpiod_get_array_optional(dev, "mode", + GPIOD_OUT_HIGH); if (IS_ERR(st->mode_gpios)) return dev_err_probe(dev, PTR_ERR(st->mode_gpios), "failed to request mode GPIOs\n"); - if (st->mode_gpios->ndescs != 2) + if (!st->mode_gpios && st->fixed_mode == -1) + return dev_err_probe(dev, -EINVAL, + "must specify either adi,fixed-mode or mode-gpios\n"); + + if (st->mode_gpios && st->fixed_mode != -1) + return dev_err_probe(dev, -EINVAL, + "must specify only one of adi,fixed-mode or mode-gpios\n"); + + if (st->mode_gpios && st->mode_gpios->ndescs != 2) return dev_err_probe(dev, -EINVAL, "requires exactly 2 mode-gpios\n"); @@ -1393,6 +1482,17 @@ static int ad2s1210_setup_gpios(struct ad2s1210_state *st) "failed to set resolution gpios\n"); } + /* If the optional reset GPIO is present, toggle it to do a hard reset. */ + reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH); + if (IS_ERR(reset_gpio)) + return dev_err_probe(dev, PTR_ERR(reset_gpio), + "failed to request reset GPIO\n"); + + if (reset_gpio) { + udelay(10); + gpiod_set_value(reset_gpio, 0); + } + return 0; } diff --git a/drivers/iio/temperature/Kconfig b/drivers/iio/temperature/Kconfig index ed384f33e0c7..ed0e4963362f 100644 --- a/drivers/iio/temperature/Kconfig +++ b/drivers/iio/temperature/Kconfig @@ -76,6 +76,18 @@ config MLX90632 This driver can also be built as a module. If so, the module will be called mlx90632. +config MLX90635 + tristate "MLX90635 contact-less infrared sensor with medical accuracy" + depends on I2C + select REGMAP_I2C + help + If you say yes here you get support for the Melexis + MLX90635 contact-less infrared sensor with medical accuracy + connected with I2C. + + This driver can also be built as a module. If so, the module will + be called mlx90635. + config TMP006 tristate "TMP006 infrared thermopile sensor" depends on I2C @@ -158,4 +170,14 @@ config MAX31865 This driver can also be build as a module. If so, the module will be called max31865. +config MCP9600 + tristate "MCP9600 thermocouple EMF converter" + depends on I2C + help + If you say yes here you get support for MCP9600 + thermocouple EMF converter connected via I2C. + + This driver can also be built as a module. If so, the module + will be called mcp9600. 
+ endmenu diff --git a/drivers/iio/temperature/Makefile b/drivers/iio/temperature/Makefile index dfec8c6d3019..07d6e65709f7 100644 --- a/drivers/iio/temperature/Makefile +++ b/drivers/iio/temperature/Makefile @@ -10,8 +10,10 @@ obj-$(CONFIG_MAXIM_THERMOCOUPLE) += maxim_thermocouple.o obj-$(CONFIG_MAX30208) += max30208.o obj-$(CONFIG_MAX31856) += max31856.o obj-$(CONFIG_MAX31865) += max31865.o +obj-$(CONFIG_MCP9600) += mcp9600.o obj-$(CONFIG_MLX90614) += mlx90614.o obj-$(CONFIG_MLX90632) += mlx90632.o +obj-$(CONFIG_MLX90635) += mlx90635.o obj-$(CONFIG_TMP006) += tmp006.o obj-$(CONFIG_TMP007) += tmp007.o obj-$(CONFIG_TMP117) += tmp117.o diff --git a/drivers/iio/temperature/mcp9600.c b/drivers/iio/temperature/mcp9600.c new file mode 100644 index 000000000000..46845804292b --- /dev/null +++ b/drivers/iio/temperature/mcp9600.c @@ -0,0 +1,139 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * mcp9600.c - Support for Microchip MCP9600 thermocouple EMF converter + * + * Copyright (c) 2022 Andrew Hepp + * Author: <andrew.hepp@ahepp.dev> + */ + +#include <linux/err.h> +#include <linux/i2c.h> +#include <linux/init.h> +#include <linux/mod_devicetable.h> +#include <linux/module.h> + +#include <linux/iio/iio.h> + +/* MCP9600 registers */ +#define MCP9600_HOT_JUNCTION 0x0 +#define MCP9600_COLD_JUNCTION 0x2 +#define MCP9600_DEVICE_ID 0x20 + +/* MCP9600 device id value */ +#define MCP9600_DEVICE_ID_MCP9600 0x40 + +static const struct iio_chan_spec mcp9600_channels[] = { + { + .type = IIO_TEMP, + .address = MCP9600_HOT_JUNCTION, + .info_mask_separate = + BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE), + }, + { + .type = IIO_TEMP, + .address = MCP9600_COLD_JUNCTION, + .channel2 = IIO_MOD_TEMP_AMBIENT, + .modified = 1, + .info_mask_separate = + BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE), + }, +}; + +struct mcp9600_data { + struct i2c_client *client; +}; + +static int mcp9600_read(struct mcp9600_data *data, + struct iio_chan_spec const *chan, int *val) +{ + int ret; + + ret = i2c_smbus_read_word_swapped(data->client, chan->address); + + if (ret < 0) + return ret; + *val = ret; + + return 0; +} + +static int mcp9600_read_raw(struct iio_dev *indio_dev, + struct iio_chan_spec const *chan, int *val, + int *val2, long mask) +{ + struct mcp9600_data *data = iio_priv(indio_dev); + int ret; + + switch (mask) { + case IIO_CHAN_INFO_RAW: + ret = mcp9600_read(data, chan, val); + if (ret) + return ret; + return IIO_VAL_INT; + case IIO_CHAN_INFO_SCALE: + *val = 62; + *val2 = 500000; + return IIO_VAL_INT_PLUS_MICRO; + default: + return -EINVAL; + } +} + +static const struct iio_info mcp9600_info = { + .read_raw = mcp9600_read_raw, +}; + +static int mcp9600_probe(struct i2c_client *client) +{ + struct iio_dev *indio_dev; + struct mcp9600_data *data; + int ret; + + ret = i2c_smbus_read_byte_data(client, MCP9600_DEVICE_ID); + if (ret < 0) + return dev_err_probe(&client->dev, ret, "Failed to read device ID\n"); + if (ret != MCP9600_DEVICE_ID_MCP9600) + dev_warn(&client->dev, "Expected ID %x, got %x\n", + MCP9600_DEVICE_ID_MCP9600, ret); + + indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data)); + if (!indio_dev) + return -ENOMEM; + + data = iio_priv(indio_dev); + data->client = client; + + indio_dev->info = &mcp9600_info; + indio_dev->name = "mcp9600"; + indio_dev->modes = INDIO_DIRECT_MODE; + indio_dev->channels = mcp9600_channels; + indio_dev->num_channels = ARRAY_SIZE(mcp9600_channels); + + return devm_iio_device_register(&client->dev, indio_dev); +} + +static const struct i2c_device_id mcp9600_id[] = { + {
"mcp9600" }, + {} +}; +MODULE_DEVICE_TABLE(i2c, mcp9600_id); + +static const struct of_device_id mcp9600_of_match[] = { + { .compatible = "microchip,mcp9600" }, + {} +}; +MODULE_DEVICE_TABLE(of, mcp9600_of_match); + +static struct i2c_driver mcp9600_driver = { + .driver = { + .name = "mcp9600", + .of_match_table = mcp9600_of_match, + }, + .probe = mcp9600_probe, + .id_table = mcp9600_id +}; +module_i2c_driver(mcp9600_driver); + +MODULE_AUTHOR("Andrew Hepp <andrew.hepp@ahepp.dev>"); +MODULE_DESCRIPTION("Microchip MCP9600 thermocouple EMF converter driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/iio/temperature/mlx90635.c b/drivers/iio/temperature/mlx90635.c new file mode 100644 index 000000000000..1f5c962c1818 --- /dev/null +++ b/drivers/iio/temperature/mlx90635.c @@ -0,0 +1,1097 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * mlx90635.c - Melexis MLX90635 contactless IR temperature sensor + * + * Copyright (c) 2023 Melexis <cmo@melexis.com> + * + * Driver for the Melexis MLX90635 I2C 16-bit IR thermopile sensor + */ +#include <linux/bitfield.h> +#include <linux/delay.h> +#include <linux/device.h> +#include <linux/err.h> +#include <linux/gpio/consumer.h> +#include <linux/i2c.h> +#include <linux/iopoll.h> +#include <linux/jiffies.h> +#include <linux/kernel.h> +#include <linux/limits.h> +#include <linux/mod_devicetable.h> +#include <linux/module.h> +#include <linux/math64.h> +#include <linux/pm_runtime.h> +#include <linux/regmap.h> +#include <linux/regulator/consumer.h> + +#include <linux/iio/iio.h> + +/* Memory sections addresses */ +#define MLX90635_ADDR_RAM 0x0000 /* Start address of ram */ +#define MLX90635_ADDR_EEPROM 0x0018 /* Start address of user eeprom */ + +/* EEPROM addresses - used at startup */ +#define MLX90635_EE_I2C_CFG 0x0018 /* I2C address register initial value */ +#define MLX90635_EE_CTRL1 0x001A /* Control register1 initial value */ +#define MLX90635_EE_CTRL2 0x001C /* Control register2 initial value */ + +#define MLX90635_EE_Ha 0x001E /* Ha customer calib value reg 16bit */ +#define MLX90635_EE_Hb 0x0020 /* Hb customer calib value reg 16bit */ +#define MLX90635_EE_Fa 0x0026 /* Fa calibration register 32bit */ +#define MLX90635_EE_FASCALE 0x002A /* Scaling coefficient for Fa register 16bit */ +#define MLX90635_EE_Ga 0x002C /* Ga calibration register 16bit */ +#define MLX90635_EE_Fb 0x002E /* Fb calibration register 16bit */ +#define MLX90635_EE_Ea 0x0030 /* Ea calibration register 32bit */ +#define MLX90635_EE_Eb 0x0034 /* Eb calibration register 32bit */ +#define MLX90635_EE_P_G 0x0038 /* P_G calibration register 16bit */ +#define MLX90635_EE_P_O 0x003A /* P_O calibration register 16bit */ +#define MLX90635_EE_Aa 0x003C /* Aa calibration register 16bit */ +#define MLX90635_EE_VERSION 0x003E /* Version bits 4:7 and 12:15 */ +#define MLX90635_EE_Gb 0x0040 /* Gb calibration register 16bit */ + +/* Device status register - volatile */ +#define MLX90635_REG_STATUS 0x0000 +#define MLX90635_STAT_BUSY BIT(6) /* Device busy indicator */ +#define MLX90635_STAT_BRST BIT(5) /* Brown out reset indicator */ +#define MLX90635_STAT_CYCLE_POS GENMASK(4, 2) /* Data position */ +#define MLX90635_STAT_END_CONV BIT(1) /* End of conversion indicator */ +#define MLX90635_STAT_DATA_RDY BIT(0) /* Data ready indicator */ + +/* EEPROM control register address - volatile */ +#define MLX90635_REG_EE 0x000C +#define MLX90635_EE_ACTIVE BIT(4) /* Power-on EEPROM */ +#define MLX90635_EE_BUSY_MASK BIT(15) + +#define MLX90635_REG_CMD 0x0010 /* Command register address */ + +/* Control register1 
address - volatile */ +#define MLX90635_REG_CTRL1 0x0014 +#define MLX90635_CTRL1_REFRESH_RATE_MASK GENMASK(2, 0) +#define MLX90635_CTRL1_RES_CTRL_MASK GENMASK(4, 3) +#define MLX90635_CTRL1_TABLE_MASK BIT(15) /* Table select */ + +/* Control register2 address - volatile */ +#define MLX90635_REG_CTRL2 0x0016 +#define MLX90635_CTRL2_BURST_CNT_MASK GENMASK(10, 6) /* Burst count */ +#define MLX90635_CTRL2_MODE_MASK GENMASK(12, 11) /* Power mode */ +#define MLX90635_CTRL2_SOB_MASK BIT(15) + +/* PowerModes statuses */ +#define MLX90635_PWR_STATUS_HALT 0 +#define MLX90635_PWR_STATUS_SLEEP_STEP 1 +#define MLX90635_PWR_STATUS_STEP 2 +#define MLX90635_PWR_STATUS_CONTINUOUS 3 + +/* Measurement data addresses */ +#define MLX90635_RESULT_1 0x0002 +#define MLX90635_RESULT_2 0x0004 +#define MLX90635_RESULT_3 0x0006 +#define MLX90635_RESULT_4 0x0008 +#define MLX90635_RESULT_5 0x000A + +/* Timings (ms) */ +#define MLX90635_TIMING_RST_MIN 200 /* Minimum time after addressed reset command */ +#define MLX90635_TIMING_RST_MAX 250 /* Maximum time after addressed reset command */ +#define MLX90635_TIMING_POLLING 10000 /* Time between bit polling*/ +#define MLX90635_TIMING_EE_ACTIVE_MIN 100 /* Minimum time after activating the EEPROM for read */ +#define MLX90635_TIMING_EE_ACTIVE_MAX 150 /* Maximum time after activating the EEPROM for read */ + +/* Magic constants */ +#define MLX90635_ID_DSPv1 0x01 /* EEPROM DSP version */ +#define MLX90635_RESET_CMD 0x0006 /* Reset sensor (address or global) */ +#define MLX90635_MAX_MEAS_NUM 31 /* Maximum number of measurements in list */ +#define MLX90635_PTAT_DIV 12 /* Used to divide the PTAT value in pre-processing */ +#define MLX90635_IR_DIV 24 /* Used to divide the IR value in pre-processing */ +#define MLX90635_SLEEP_DELAY_MS 6000 /* Autosleep delay */ +#define MLX90635_MEAS_MAX_TIME 2000 /* Max measurement time in ms for the lowest refresh rate */ +#define MLX90635_READ_RETRIES 100 /* Number of read retries before quitting with timeout error */ +#define MLX90635_VERSION_MASK (GENMASK(15, 12) | GENMASK(7, 4)) +#define MLX90635_DSP_VERSION(reg) (((reg & GENMASK(14, 12)) >> 9) | ((reg & GENMASK(6, 4)) >> 4)) +#define MLX90635_DSP_FIXED BIT(15) + + +/** + * struct mlx90635_data - private data for the MLX90635 device + * @client: I2C client of the device + * @lock: Internal mutex because multiple reads are needed for single triggered + * measurement to ensure data consistency + * @regmap: Regmap of the device registers + * @regmap_ee: Regmap of the device EEPROM which can be cached + * @emissivity: Object emissivity from 0 to 1000 where 1000 = 1 + * @regulator: Regulator of the device + * @powerstatus: Current POWER status of the device + * @interaction_ts: Timestamp of the last temperature read that is used + * for power management in jiffies + */ +struct mlx90635_data { + struct i2c_client *client; + struct mutex lock; + struct regmap *regmap; + struct regmap *regmap_ee; + u16 emissivity; + struct regulator *regulator; + int powerstatus; + unsigned long interaction_ts; +}; + +static const struct regmap_range mlx90635_volatile_reg_range[] = { + regmap_reg_range(MLX90635_REG_STATUS, MLX90635_REG_STATUS), + regmap_reg_range(MLX90635_RESULT_1, MLX90635_RESULT_5), + regmap_reg_range(MLX90635_REG_EE, MLX90635_REG_EE), + regmap_reg_range(MLX90635_REG_CMD, MLX90635_REG_CMD), + regmap_reg_range(MLX90635_REG_CTRL1, MLX90635_REG_CTRL2), +}; + +static const struct regmap_access_table mlx90635_volatile_regs_tbl = { + .yes_ranges = mlx90635_volatile_reg_range, + .n_yes_ranges = 
ARRAY_SIZE(mlx90635_volatile_reg_range), +}; + +static const struct regmap_range mlx90635_read_reg_range[] = { + regmap_reg_range(MLX90635_REG_STATUS, MLX90635_REG_STATUS), + regmap_reg_range(MLX90635_RESULT_1, MLX90635_RESULT_5), + regmap_reg_range(MLX90635_REG_EE, MLX90635_REG_EE), + regmap_reg_range(MLX90635_REG_CMD, MLX90635_REG_CMD), + regmap_reg_range(MLX90635_REG_CTRL1, MLX90635_REG_CTRL2), +}; + +static const struct regmap_access_table mlx90635_readable_regs_tbl = { + .yes_ranges = mlx90635_read_reg_range, + .n_yes_ranges = ARRAY_SIZE(mlx90635_read_reg_range), +}; + +static const struct regmap_range mlx90635_no_write_reg_range[] = { + regmap_reg_range(MLX90635_RESULT_1, MLX90635_RESULT_5), +}; + +static const struct regmap_access_table mlx90635_writeable_regs_tbl = { + .no_ranges = mlx90635_no_write_reg_range, + .n_no_ranges = ARRAY_SIZE(mlx90635_no_write_reg_range), +}; + +static const struct regmap_config mlx90635_regmap = { + .name = "mlx90635-registers", + .reg_stride = 1, + .reg_bits = 16, + .val_bits = 16, + + .volatile_table = &mlx90635_volatile_regs_tbl, + .rd_table = &mlx90635_readable_regs_tbl, + .wr_table = &mlx90635_writeable_regs_tbl, + + .use_single_read = true, + .use_single_write = true, + .can_multi_write = false, + .reg_format_endian = REGMAP_ENDIAN_BIG, + .val_format_endian = REGMAP_ENDIAN_BIG, + .cache_type = REGCACHE_RBTREE, +}; + +static const struct regmap_range mlx90635_read_ee_range[] = { + regmap_reg_range(MLX90635_EE_I2C_CFG, MLX90635_EE_CTRL2), + regmap_reg_range(MLX90635_EE_Ha, MLX90635_EE_Gb), +}; + +static const struct regmap_access_table mlx90635_readable_ees_tbl = { + .yes_ranges = mlx90635_read_ee_range, + .n_yes_ranges = ARRAY_SIZE(mlx90635_read_ee_range), +}; + +static const struct regmap_range mlx90635_no_write_ee_range[] = { + regmap_reg_range(MLX90635_ADDR_EEPROM, MLX90635_EE_Gb), +}; + +static const struct regmap_access_table mlx90635_writeable_ees_tbl = { + .no_ranges = mlx90635_no_write_ee_range, + .n_no_ranges = ARRAY_SIZE(mlx90635_no_write_ee_range), +}; + +static const struct regmap_config mlx90635_regmap_ee = { + .name = "mlx90635-eeprom", + .reg_stride = 1, + .reg_bits = 16, + .val_bits = 16, + + .volatile_table = NULL, + .rd_table = &mlx90635_readable_ees_tbl, + .wr_table = &mlx90635_writeable_ees_tbl, + + .use_single_read = true, + .use_single_write = true, + .can_multi_write = false, + .reg_format_endian = REGMAP_ENDIAN_BIG, + .val_format_endian = REGMAP_ENDIAN_BIG, + .cache_type = REGCACHE_RBTREE, +}; + +/** + * mlx90635_reset_delay() - Give the mlx90635 some time to reset properly + * If this is not done, the following I2C command(s) will not be accepted. 
+ */ +static void mlx90635_reset_delay(void) +{ + usleep_range(MLX90635_TIMING_RST_MIN, MLX90635_TIMING_RST_MAX); +} + +static int mlx90635_pwr_sleep_step(struct mlx90635_data *data) +{ + int ret; + + if (data->powerstatus == MLX90635_PWR_STATUS_SLEEP_STEP) + return 0; + + ret = regmap_write_bits(data->regmap, MLX90635_REG_CTRL2, MLX90635_CTRL2_MODE_MASK, + FIELD_PREP(MLX90635_CTRL2_MODE_MASK, MLX90635_PWR_STATUS_SLEEP_STEP)); + if (ret < 0) + return ret; + + data->powerstatus = MLX90635_PWR_STATUS_SLEEP_STEP; + return 0; +} + +static int mlx90635_pwr_continuous(struct mlx90635_data *data) +{ + int ret; + + if (data->powerstatus == MLX90635_PWR_STATUS_CONTINUOUS) + return 0; + + ret = regmap_write_bits(data->regmap, MLX90635_REG_CTRL2, MLX90635_CTRL2_MODE_MASK, + FIELD_PREP(MLX90635_CTRL2_MODE_MASK, MLX90635_PWR_STATUS_CONTINUOUS)); + if (ret < 0) + return ret; + + data->powerstatus = MLX90635_PWR_STATUS_CONTINUOUS; + return 0; +} + +static int mlx90635_read_ee_register(struct regmap *regmap, u16 reg_lsb, + s32 *reg_value) +{ + unsigned int read; + u32 value; + int ret; + + ret = regmap_read(regmap, reg_lsb + 2, &read); + if (ret < 0) + return ret; + + value = read; + + ret = regmap_read(regmap, reg_lsb, &read); + if (ret < 0) + return ret; + + *reg_value = (read << 16) | (value & 0xffff); + + return 0; +} + +static int mlx90635_read_ee_ambient(struct regmap *regmap, s16 *PG, s16 *PO, s16 *Gb) +{ + unsigned int read_tmp; + int ret; + + ret = regmap_read(regmap, MLX90635_EE_P_O, &read_tmp); + if (ret < 0) + return ret; + *PO = (s16)read_tmp; + + ret = regmap_read(regmap, MLX90635_EE_P_G, &read_tmp); + if (ret < 0) + return ret; + *PG = (s16)read_tmp; + + ret = regmap_read(regmap, MLX90635_EE_Gb, &read_tmp); + if (ret < 0) + return ret; + *Gb = (u16)read_tmp; + + return 0; +} + +static int mlx90635_read_ee_object(struct regmap *regmap, u32 *Ea, u32 *Eb, u32 *Fa, s16 *Fb, + s16 *Ga, s16 *Gb, s16 *Ha, s16 *Hb, u16 *Fa_scale) +{ + unsigned int read_tmp; + int ret; + + ret = mlx90635_read_ee_register(regmap, MLX90635_EE_Ea, Ea); + if (ret < 0) + return ret; + + ret = mlx90635_read_ee_register(regmap, MLX90635_EE_Eb, Eb); + if (ret < 0) + return ret; + + ret = mlx90635_read_ee_register(regmap, MLX90635_EE_Fa, Fa); + if (ret < 0) + return ret; + + ret = regmap_read(regmap, MLX90635_EE_Ha, &read_tmp); + if (ret < 0) + return ret; + *Ha = (s16)read_tmp; + + ret = regmap_read(regmap, MLX90635_EE_Hb, &read_tmp); + if (ret < 0) + return ret; + *Hb = (s16)read_tmp; + + ret = regmap_read(regmap, MLX90635_EE_Ga, &read_tmp); + if (ret < 0) + return ret; + *Ga = (s16)read_tmp; + + ret = regmap_read(regmap, MLX90635_EE_Gb, &read_tmp); + if (ret < 0) + return ret; + *Gb = (s16)read_tmp; + + ret = regmap_read(regmap, MLX90635_EE_Fb, &read_tmp); + if (ret < 0) + return ret; + *Fb = (s16)read_tmp; + + ret = regmap_read(regmap, MLX90635_EE_FASCALE, &read_tmp); + if (ret < 0) + return ret; + *Fa_scale = (u16)read_tmp; + + return 0; +} + +static int mlx90635_calculate_dataset_ready_time(struct mlx90635_data *data, int *refresh_time) +{ + unsigned int reg; + int ret; + + ret = regmap_read(data->regmap, MLX90635_REG_CTRL1, ®); + if (ret < 0) + return ret; + + *refresh_time = 2 * (MLX90635_MEAS_MAX_TIME >> FIELD_GET(MLX90635_CTRL1_REFRESH_RATE_MASK, reg)) + 80; + + return 0; +} + +static int mlx90635_perform_measurement_burst(struct mlx90635_data *data) +{ + unsigned int reg_status; + int refresh_time; + int ret; + + ret = regmap_write_bits(data->regmap, MLX90635_REG_STATUS, + MLX90635_STAT_END_CONV, 
MLX90635_STAT_END_CONV); + if (ret < 0) + return ret; + + ret = mlx90635_calculate_dataset_ready_time(data, &refresh_time); + if (ret < 0) + return ret; + + ret = regmap_write_bits(data->regmap, MLX90635_REG_CTRL2, + FIELD_PREP(MLX90635_CTRL2_SOB_MASK, 1), + FIELD_PREP(MLX90635_CTRL2_SOB_MASK, 1)); + if (ret < 0) + return ret; + + msleep(refresh_time); /* Wait minimum time for dataset to be ready */ + + ret = regmap_read_poll_timeout(data->regmap, MLX90635_REG_STATUS, reg_status, + (!(reg_status & MLX90635_STAT_END_CONV)) == 0, + MLX90635_TIMING_POLLING, MLX90635_READ_RETRIES * 10000); + if (ret < 0) { + dev_err(&data->client->dev, "data not ready"); + return -ETIMEDOUT; + } + + return 0; +} + +static int mlx90635_read_ambient_raw(struct regmap *regmap, + s16 *ambient_new_raw, s16 *ambient_old_raw) +{ + unsigned int read_tmp; + int ret; + + ret = regmap_read(regmap, MLX90635_RESULT_2, &read_tmp); + if (ret < 0) + return ret; + *ambient_new_raw = (s16)read_tmp; + + ret = regmap_read(regmap, MLX90635_RESULT_3, &read_tmp); + if (ret < 0) + return ret; + *ambient_old_raw = (s16)read_tmp; + + return 0; +} + +static int mlx90635_read_object_raw(struct regmap *regmap, s16 *object_raw) +{ + unsigned int read_tmp; + s16 read; + int ret; + + ret = regmap_read(regmap, MLX90635_RESULT_1, &read_tmp); + if (ret < 0) + return ret; + + read = (s16)read_tmp; + + ret = regmap_read(regmap, MLX90635_RESULT_4, &read_tmp); + if (ret < 0) + return ret; + *object_raw = (read - (s16)read_tmp) / 2; + + return 0; +} + +static int mlx90635_read_all_channel(struct mlx90635_data *data, + s16 *ambient_new_raw, s16 *ambient_old_raw, + s16 *object_raw) +{ + int ret; + + mutex_lock(&data->lock); + if (data->powerstatus == MLX90635_PWR_STATUS_SLEEP_STEP) { + /* Trigger measurement in Sleep Step mode */ + ret = mlx90635_perform_measurement_burst(data); + if (ret < 0) + goto read_unlock; + } + + ret = mlx90635_read_ambient_raw(data->regmap, ambient_new_raw, + ambient_old_raw); + if (ret < 0) + goto read_unlock; + + ret = mlx90635_read_object_raw(data->regmap, object_raw); +read_unlock: + mutex_unlock(&data->lock); + return ret; +} + +static s64 mlx90635_preprocess_temp_amb(s16 ambient_new_raw, + s16 ambient_old_raw, s16 Gb) +{ + s64 VR_Ta, kGb, tmp; + + kGb = ((s64)Gb * 1000LL) >> 10ULL; + VR_Ta = (s64)ambient_old_raw * 1000000LL + + kGb * div64_s64(((s64)ambient_new_raw * 1000LL), + (MLX90635_PTAT_DIV)); + tmp = div64_s64( + div64_s64(((s64)ambient_new_raw * 1000000000000LL), + (MLX90635_PTAT_DIV)), VR_Ta); + return div64_s64(tmp << 19ULL, 1000LL); +} + +static s64 mlx90635_preprocess_temp_obj(s16 object_raw, + s16 ambient_new_raw, + s16 ambient_old_raw, s16 Gb) +{ + s64 VR_IR, kGb, tmp; + + kGb = ((s64)Gb * 1000LL) >> 10ULL; + VR_IR = (s64)ambient_old_raw * 1000000LL + + kGb * (div64_s64((s64)ambient_new_raw * 1000LL, + MLX90635_PTAT_DIV)); + tmp = div64_s64( + div64_s64((s64)(object_raw * 1000000LL), + MLX90635_IR_DIV) * 1000000LL, + VR_IR); + return div64_s64((tmp << 19ULL), 1000LL); +} + +static s32 mlx90635_calc_temp_ambient(s16 ambient_new_raw, s16 ambient_old_raw, + u16 P_G, u16 P_O, s16 Gb) +{ + s64 kPG, kPO, AMB; + + AMB = mlx90635_preprocess_temp_amb(ambient_new_raw, ambient_old_raw, + Gb); + kPG = ((s64)P_G * 1000000LL) >> 9ULL; + kPO = AMB - (((s64)P_O * 1000LL) >> 1ULL); + + return 30 * 1000LL + div64_s64(kPO * 1000000LL, kPG); +} + +static s32 mlx90635_calc_temp_object_iteration(s32 prev_object_temp, s64 object, + s64 TAdut, s64 TAdut4, s16 Ga, + u32 Fa, u16 Fa_scale, s16 Fb, + s16 Ha, s16 Hb, u16 emissivity) 
+{ + s64 calcedGa, calcedGb, calcedFa, Alpha_corr; + s64 Ha_customer, Hb_customer; + + Ha_customer = ((s64)Ha * 1000000LL) >> 14ULL; + Hb_customer = ((s64)Hb * 100) >> 10ULL; + + calcedGa = ((s64)((s64)Ga * (prev_object_temp - 35 * 1000LL) + * 1000LL)) >> 24LL; + calcedGb = ((s64)(Fb * (TAdut - 30 * 1000000LL))) >> 24LL; + + Alpha_corr = ((s64)((s64)Fa * Ha_customer * 10000LL) >> Fa_scale); + Alpha_corr *= ((s64)(1 * 1000000LL + calcedGa + calcedGb)); + + Alpha_corr = div64_s64(Alpha_corr, 1000LL); + Alpha_corr *= emissivity; + Alpha_corr = div64_s64(Alpha_corr, 100LL); + calcedFa = div64_s64((s64)object * 100000000000LL, Alpha_corr); + + return (int_sqrt64(int_sqrt64(calcedFa * 100000000LL + TAdut4)) + - 27315 - Hb_customer) * 10; +} + +static s64 mlx90635_calc_ta4(s64 TAdut, s64 scale) +{ + return (div64_s64(TAdut, scale) + 27315) * + (div64_s64(TAdut, scale) + 27315) * + (div64_s64(TAdut, scale) + 27315) * + (div64_s64(TAdut, scale) + 27315); +} + +static s32 mlx90635_calc_temp_object(s64 object, s64 ambient, u32 Ea, u32 Eb, + s16 Ga, u32 Fa, u16 Fa_scale, s16 Fb, s16 Ha, s16 Hb, + u16 tmp_emi) +{ + s64 kTA, kTA0, TAdut, TAdut4; + s64 temp = 35000; + s8 i; + + kTA = (Ea * 1000LL) >> 16LL; + kTA0 = (Eb * 1000LL) >> 8LL; + TAdut = div64_s64(((ambient - kTA0) * 1000000LL), kTA) + 30 * 1000000LL; + TAdut4 = mlx90635_calc_ta4(TAdut, 10000LL); + + /* Iterations of calculation as described in datasheet */ + for (i = 0; i < 5; ++i) { + temp = mlx90635_calc_temp_object_iteration(temp, object, TAdut, TAdut4, + Ga, Fa, Fa_scale, Fb, Ha, Hb, + tmp_emi); + } + return temp; +} + +static int mlx90635_calc_object(struct mlx90635_data *data, int *val) +{ + s16 ambient_new_raw, ambient_old_raw, object_raw; + s16 Fb, Ga, Gb, Ha, Hb; + s64 object, ambient; + u32 Ea, Eb, Fa; + u16 Fa_scale; + int ret; + + ret = mlx90635_read_ee_object(data->regmap_ee, &Ea, &Eb, &Fa, &Fb, &Ga, &Gb, &Ha, &Hb, &Fa_scale); + if (ret < 0) + return ret; + + ret = mlx90635_read_all_channel(data, + &ambient_new_raw, &ambient_old_raw, + &object_raw); + if (ret < 0) + return ret; + + ambient = mlx90635_preprocess_temp_amb(ambient_new_raw, + ambient_old_raw, Gb); + object = mlx90635_preprocess_temp_obj(object_raw, + ambient_new_raw, + ambient_old_raw, Gb); + + *val = mlx90635_calc_temp_object(object, ambient, Ea, Eb, Ga, Fa, Fa_scale, Fb, + Ha, Hb, data->emissivity); + return 0; +} + +static int mlx90635_calc_ambient(struct mlx90635_data *data, int *val) +{ + s16 ambient_new_raw, ambient_old_raw; + s16 PG, PO, Gb; + int ret; + + ret = mlx90635_read_ee_ambient(data->regmap_ee, &PG, &PO, &Gb); + if (ret < 0) + return ret; + + mutex_lock(&data->lock); + if (data->powerstatus == MLX90635_PWR_STATUS_SLEEP_STEP) { + ret = mlx90635_perform_measurement_burst(data); + if (ret < 0) + goto read_ambient_unlock; + } + + ret = mlx90635_read_ambient_raw(data->regmap, &ambient_new_raw, + &ambient_old_raw); +read_ambient_unlock: + mutex_unlock(&data->lock); + if (ret < 0) + return ret; + + *val = mlx90635_calc_temp_ambient(ambient_new_raw, ambient_old_raw, + PG, PO, Gb); + return ret; +} + +static int mlx90635_get_refresh_rate(struct mlx90635_data *data, + unsigned int *refresh_rate) +{ + unsigned int reg; + int ret; + + ret = regmap_read(data->regmap, MLX90635_REG_CTRL1, ®); + if (ret < 0) + return ret; + + *refresh_rate = FIELD_GET(MLX90635_CTRL1_REFRESH_RATE_MASK, reg); + + return 0; +} + +static const struct { + int val; + int val2; +} mlx90635_freqs[] = { + { 0, 200000 }, + { 0, 500000 }, + { 0, 900000 }, + { 1, 700000 }, + { 3, 0 }, + { 4, 
800000 }, + { 6, 900000 }, + { 8, 900000 } +}; + +/** + * mlx90635_pm_interaction_wakeup() - Measure time between user interactions to change powermode + * @data: pointer to mlx90635_data object containing interaction_ts information + * + * Switch to continuous mode when interaction is faster than MLX90635_MEAS_MAX_TIME. Update the + * interaction_ts for each function call with the jiffies to enable measurement between function + * calls. Initial value of the interaction_ts needs to be set before this function call. + */ +static int mlx90635_pm_interaction_wakeup(struct mlx90635_data *data) +{ + unsigned long now; + int ret; + + now = jiffies; + if (time_in_range(now, data->interaction_ts, + data->interaction_ts + + msecs_to_jiffies(MLX90635_MEAS_MAX_TIME + 100))) { + ret = mlx90635_pwr_continuous(data); + if (ret < 0) + return ret; + } + + data->interaction_ts = now; + + return 0; +} + +static int mlx90635_read_raw(struct iio_dev *indio_dev, + struct iio_chan_spec const *channel, int *val, + int *val2, long mask) +{ + struct mlx90635_data *data = iio_priv(indio_dev); + int ret; + int cr; + + pm_runtime_get_sync(&data->client->dev); + ret = mlx90635_pm_interaction_wakeup(data); + if (ret < 0) + goto mlx90635_read_raw_pm; + + switch (mask) { + case IIO_CHAN_INFO_PROCESSED: + switch (channel->channel2) { + case IIO_MOD_TEMP_AMBIENT: + ret = mlx90635_calc_ambient(data, val); + if (ret < 0) + goto mlx90635_read_raw_pm; + + ret = IIO_VAL_INT; + break; + case IIO_MOD_TEMP_OBJECT: + ret = mlx90635_calc_object(data, val); + if (ret < 0) + goto mlx90635_read_raw_pm; + + ret = IIO_VAL_INT; + break; + default: + ret = -EINVAL; + break; + } + break; + case IIO_CHAN_INFO_CALIBEMISSIVITY: + if (data->emissivity == 1000) { + *val = 1; + *val2 = 0; + } else { + *val = 0; + *val2 = data->emissivity * 1000; + } + ret = IIO_VAL_INT_PLUS_MICRO; + break; + case IIO_CHAN_INFO_SAMP_FREQ: + ret = mlx90635_get_refresh_rate(data, &cr); + if (ret < 0) + goto mlx90635_read_raw_pm; + + *val = mlx90635_freqs[cr].val; + *val2 = mlx90635_freqs[cr].val2; + ret = IIO_VAL_INT_PLUS_MICRO; + break; + default: + ret = -EINVAL; + break; + } + +mlx90635_read_raw_pm: + pm_runtime_mark_last_busy(&data->client->dev); + pm_runtime_put_autosuspend(&data->client->dev); + return ret; +} + +static int mlx90635_write_raw(struct iio_dev *indio_dev, + struct iio_chan_spec const *channel, int val, + int val2, long mask) +{ + struct mlx90635_data *data = iio_priv(indio_dev); + int ret; + int i; + + switch (mask) { + case IIO_CHAN_INFO_CALIBEMISSIVITY: + /* Confirm we are within 0 and 1.0 */ + if (val < 0 || val2 < 0 || val > 1 || + (val == 1 && val2 != 0)) + return -EINVAL; + data->emissivity = val * 1000 + val2 / 1000; + return 0; + case IIO_CHAN_INFO_SAMP_FREQ: + for (i = 0; i < ARRAY_SIZE(mlx90635_freqs); i++) { + if (val == mlx90635_freqs[i].val && + val2 == mlx90635_freqs[i].val2) + break; + } + if (i == ARRAY_SIZE(mlx90635_freqs)) + return -EINVAL; + + ret = regmap_write_bits(data->regmap, MLX90635_REG_CTRL1, + MLX90635_CTRL1_REFRESH_RATE_MASK, i); + + return ret; + default: + return -EINVAL; + } +} + +static int mlx90635_read_avail(struct iio_dev *indio_dev, + struct iio_chan_spec const *chan, + const int **vals, int *type, int *length, + long mask) +{ + switch (mask) { + case IIO_CHAN_INFO_SAMP_FREQ: + *vals = (int *)mlx90635_freqs; + *type = IIO_VAL_INT_PLUS_MICRO; + *length = 2 * ARRAY_SIZE(mlx90635_freqs); + return IIO_AVAIL_LIST; + default: + return -EINVAL; + } +} + +static const struct iio_chan_spec mlx90635_channels[] = { + { 
+ .type = IIO_TEMP, + .modified = 1, + .channel2 = IIO_MOD_TEMP_AMBIENT, + .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED), + .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ), + .info_mask_shared_by_all_available = BIT(IIO_CHAN_INFO_SAMP_FREQ), + }, + { + .type = IIO_TEMP, + .modified = 1, + .channel2 = IIO_MOD_TEMP_OBJECT, + .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED) | + BIT(IIO_CHAN_INFO_CALIBEMISSIVITY), + .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ), + .info_mask_shared_by_all_available = BIT(IIO_CHAN_INFO_SAMP_FREQ), + }, +}; + +static const struct iio_info mlx90635_info = { + .read_raw = mlx90635_read_raw, + .write_raw = mlx90635_write_raw, + .read_avail = mlx90635_read_avail, +}; + +static void mlx90635_sleep(void *_data) +{ + struct mlx90635_data *data = _data; + + mlx90635_pwr_sleep_step(data); +} + +static int mlx90635_suspend(struct mlx90635_data *data) +{ + return mlx90635_pwr_sleep_step(data); +} + +static int mlx90635_wakeup(struct mlx90635_data *data) +{ + s16 Fb, Ga, Gb, Ha, Hb, PG, PO; + unsigned int dsp_version; + u32 Ea, Eb, Fa; + u16 Fa_scale; + int ret; + + regcache_cache_bypass(data->regmap_ee, false); + regcache_cache_only(data->regmap_ee, false); + regcache_cache_only(data->regmap, false); + + ret = mlx90635_pwr_continuous(data); + if (ret < 0) { + dev_err(&data->client->dev, "Switch to continuous mode failed\n"); + return ret; + } + ret = regmap_write_bits(data->regmap, MLX90635_REG_EE, + MLX90635_EE_ACTIVE, MLX90635_EE_ACTIVE); + if (ret < 0) { + dev_err(&data->client->dev, "Powering EEPROM failed\n"); + return ret; + } + usleep_range(MLX90635_TIMING_EE_ACTIVE_MIN, MLX90635_TIMING_EE_ACTIVE_MAX); + + regcache_mark_dirty(data->regmap_ee); + + ret = regcache_sync(data->regmap_ee); + if (ret < 0) { + dev_err(&data->client->dev, + "Failed to sync cache: %d\n", ret); + return ret; + } + + ret = mlx90635_read_ee_ambient(data->regmap_ee, &PG, &PO, &Gb); + if (ret < 0) { + dev_err(&data->client->dev, + "Failed to read to cache Ambient coefficients EEPROM region: %d\n", ret); + return ret; + } + + ret = mlx90635_read_ee_object(data->regmap_ee, &Ea, &Eb, &Fa, &Fb, &Ga, &Gb, &Ha, &Hb, &Fa_scale); + if (ret < 0) { + dev_err(&data->client->dev, + "Failed to read to cache Object coefficients EEPROM region: %d\n", ret); + return ret; + } + + ret = regmap_read(data->regmap_ee, MLX90635_EE_VERSION, &dsp_version); + if (ret < 0) { + dev_err(&data->client->dev, + "Failed to read to cache of EEPROM version: %d\n", ret); + return ret; + } + + regcache_cache_only(data->regmap_ee, true); + + return ret; +} + +static void mlx90635_disable_regulator(void *_data) +{ + struct mlx90635_data *data = _data; + int ret; + + ret = regulator_disable(data->regulator); + if (ret < 0) + dev_err(regmap_get_device(data->regmap), + "Failed to disable power regulator: %d\n", ret); +} + +static int mlx90635_enable_regulator(struct mlx90635_data *data) +{ + int ret; + + ret = regulator_enable(data->regulator); + if (ret < 0) { + dev_err(regmap_get_device(data->regmap), "Failed to enable power regulator!\n"); + return ret; + } + + mlx90635_reset_delay(); + + return ret; +} + +static int mlx90635_probe(struct i2c_client *client) +{ + struct mlx90635_data *mlx90635; + struct iio_dev *indio_dev; + unsigned int dsp_version; + struct regmap *regmap; + struct regmap *regmap_ee; + int ret; + + indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*mlx90635)); + if (!indio_dev) + return dev_err_probe(&client->dev, -ENOMEM, "failed to allocate device\n"); + + regmap = 
devm_regmap_init_i2c(client, &mlx90635_regmap); + if (IS_ERR(regmap)) + return dev_err_probe(&client->dev, PTR_ERR(regmap), + "failed to allocate regmap\n"); + + regmap_ee = devm_regmap_init_i2c(client, &mlx90635_regmap_ee); + if (IS_ERR(regmap)) + return dev_err_probe(&client->dev, PTR_ERR(regmap), + "failed to allocate regmap\n"); + + mlx90635 = iio_priv(indio_dev); + i2c_set_clientdata(client, indio_dev); + mlx90635->client = client; + mlx90635->regmap = regmap; + mlx90635->regmap_ee = regmap_ee; + mlx90635->powerstatus = MLX90635_PWR_STATUS_SLEEP_STEP; + + mutex_init(&mlx90635->lock); + indio_dev->name = "mlx90635"; + indio_dev->modes = INDIO_DIRECT_MODE; + indio_dev->info = &mlx90635_info; + indio_dev->channels = mlx90635_channels; + indio_dev->num_channels = ARRAY_SIZE(mlx90635_channels); + + mlx90635->regulator = devm_regulator_get(&client->dev, "vdd"); + if (IS_ERR(mlx90635->regulator)) + return dev_err_probe(&client->dev, PTR_ERR(mlx90635->regulator), + "failed to get vdd regulator"); + + ret = mlx90635_enable_regulator(mlx90635); + if (ret < 0) + return ret; + + ret = devm_add_action_or_reset(&client->dev, mlx90635_disable_regulator, + mlx90635); + if (ret < 0) + return dev_err_probe(&client->dev, ret, + "failed to setup regulator cleanup action\n"); + + ret = mlx90635_wakeup(mlx90635); + if (ret < 0) + return dev_err_probe(&client->dev, ret, "wakeup failed\n"); + + ret = devm_add_action_or_reset(&client->dev, mlx90635_sleep, mlx90635); + if (ret < 0) + return dev_err_probe(&client->dev, ret, + "failed to setup low power cleanup\n"); + + ret = regmap_read(mlx90635->regmap_ee, MLX90635_EE_VERSION, &dsp_version); + if (ret < 0) + return dev_err_probe(&client->dev, ret, "read of version failed\n"); + + dsp_version = dsp_version & MLX90635_VERSION_MASK; + + if (FIELD_GET(MLX90635_DSP_FIXED, dsp_version)) { + if (MLX90635_DSP_VERSION(dsp_version) == MLX90635_ID_DSPv1) { + dev_dbg(&client->dev, + "Detected DSP v1 calibration %x\n", dsp_version); + } else { + dev_dbg(&client->dev, + "Detected Unknown EEPROM calibration %lx\n", + MLX90635_DSP_VERSION(dsp_version)); + } + } else { + return dev_err_probe(&client->dev, -EPROTONOSUPPORT, + "Wrong fixed top bit %x (expected 0x8X0X)\n", + dsp_version); + } + + mlx90635->emissivity = 1000; + mlx90635->interaction_ts = jiffies; /* Set initial value */ + + pm_runtime_get_noresume(&client->dev); + pm_runtime_set_active(&client->dev); + + ret = devm_pm_runtime_enable(&client->dev); + if (ret) + return dev_err_probe(&client->dev, ret, + "failed to enable powermanagement\n"); + + pm_runtime_set_autosuspend_delay(&client->dev, MLX90635_SLEEP_DELAY_MS); + pm_runtime_use_autosuspend(&client->dev); + pm_runtime_put_autosuspend(&client->dev); + + return devm_iio_device_register(&client->dev, indio_dev); +} + +static const struct i2c_device_id mlx90635_id[] = { + { "mlx90635" }, + { } +}; +MODULE_DEVICE_TABLE(i2c, mlx90635_id); + +static const struct of_device_id mlx90635_of_match[] = { + { .compatible = "melexis,mlx90635" }, + { } +}; +MODULE_DEVICE_TABLE(of, mlx90635_of_match); + +static int mlx90635_pm_suspend(struct device *dev) +{ + struct mlx90635_data *data = iio_priv(dev_get_drvdata(dev)); + int ret; + + ret = mlx90635_suspend(data); + if (ret < 0) + return ret; + + ret = regulator_disable(data->regulator); + if (ret < 0) + dev_err(regmap_get_device(data->regmap), + "Failed to disable power regulator: %d\n", ret); + + return ret; +} + +static int mlx90635_pm_resume(struct device *dev) +{ + struct mlx90635_data *data = 
iio_priv(dev_get_drvdata(dev)); + int ret; + + ret = mlx90635_enable_regulator(data); + if (ret < 0) + return ret; + + return mlx90635_wakeup(data); +} + +static int mlx90635_pm_runtime_suspend(struct device *dev) +{ + struct mlx90635_data *data = iio_priv(dev_get_drvdata(dev)); + + return mlx90635_pwr_sleep_step(data); +} + +static const struct dev_pm_ops mlx90635_pm_ops = { + SYSTEM_SLEEP_PM_OPS(mlx90635_pm_suspend, mlx90635_pm_resume) + RUNTIME_PM_OPS(mlx90635_pm_runtime_suspend, NULL, NULL) +}; + +static struct i2c_driver mlx90635_driver = { + .driver = { + .name = "mlx90635", + .of_match_table = mlx90635_of_match, + .pm = pm_ptr(&mlx90635_pm_ops), + }, + .probe = mlx90635_probe, + .id_table = mlx90635_id, +}; +module_i2c_driver(mlx90635_driver); + +MODULE_AUTHOR("Crt Mori <cmo@melexis.com>"); +MODULE_DESCRIPTION("Melexis MLX90635 contactless Infra Red temperature sensor driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/interconnect/imx/imx8mm.c b/drivers/interconnect/imx/imx8mm.c index b43325364aa3..8c40f4182263 100644 --- a/drivers/interconnect/imx/imx8mm.c +++ b/drivers/interconnect/imx/imx8mm.c @@ -86,16 +86,9 @@ static int imx8mm_icc_probe(struct platform_device *pdev) return imx_icc_register(pdev, nodes, ARRAY_SIZE(nodes), NULL); } -static int imx8mm_icc_remove(struct platform_device *pdev) -{ - imx_icc_unregister(pdev); - - return 0; -} - static struct platform_driver imx8mm_icc_driver = { .probe = imx8mm_icc_probe, - .remove = imx8mm_icc_remove, + .remove_new = imx_icc_unregister, .driver = { .name = "imx8mm-interconnect", }, diff --git a/drivers/interconnect/imx/imx8mn.c b/drivers/interconnect/imx/imx8mn.c index 8ce6d8e4bf5e..fa3d4f97dfa4 100644 --- a/drivers/interconnect/imx/imx8mn.c +++ b/drivers/interconnect/imx/imx8mn.c @@ -75,16 +75,9 @@ static int imx8mn_icc_probe(struct platform_device *pdev) return imx_icc_register(pdev, nodes, ARRAY_SIZE(nodes), NULL); } -static int imx8mn_icc_remove(struct platform_device *pdev) -{ - imx_icc_unregister(pdev); - - return 0; -} - static struct platform_driver imx8mn_icc_driver = { .probe = imx8mn_icc_probe, - .remove = imx8mn_icc_remove, + .remove_new = imx_icc_unregister, .driver = { .name = "imx8mn-interconnect", }, diff --git a/drivers/interconnect/imx/imx8mp.c b/drivers/interconnect/imx/imx8mp.c index a66ae3638b18..d218bb47757a 100644 --- a/drivers/interconnect/imx/imx8mp.c +++ b/drivers/interconnect/imx/imx8mp.c @@ -239,16 +239,9 @@ static int imx8mp_icc_probe(struct platform_device *pdev) return imx_icc_register(pdev, nodes, ARRAY_SIZE(nodes), noc_setting_nodes); } -static int imx8mp_icc_remove(struct platform_device *pdev) -{ - imx_icc_unregister(pdev); - - return 0; -} - static struct platform_driver imx8mp_icc_driver = { .probe = imx8mp_icc_probe, - .remove = imx8mp_icc_remove, + .remove_new = imx_icc_unregister, .driver = { .name = "imx8mp-interconnect", }, diff --git a/drivers/interconnect/imx/imx8mq.c b/drivers/interconnect/imx/imx8mq.c index b6fb71305c99..8bbd672b346e 100644 --- a/drivers/interconnect/imx/imx8mq.c +++ b/drivers/interconnect/imx/imx8mq.c @@ -85,16 +85,9 @@ static int imx8mq_icc_probe(struct platform_device *pdev) return imx_icc_register(pdev, nodes, ARRAY_SIZE(nodes), NULL); } -static int imx8mq_icc_remove(struct platform_device *pdev) -{ - imx_icc_unregister(pdev); - - return 0; -} - static struct platform_driver imx8mq_icc_driver = { .probe = imx8mq_icc_probe, - .remove = imx8mq_icc_remove, + .remove_new = imx_icc_unregister, .driver = { .name = "imx8mq-interconnect", .sync_state = icc_sync_state, 
diff --git a/drivers/interconnect/qcom/Kconfig b/drivers/interconnect/qcom/Kconfig index 62b516d38d03..697f96c49f6f 100644 --- a/drivers/interconnect/qcom/Kconfig +++ b/drivers/interconnect/qcom/Kconfig @@ -191,6 +191,15 @@ config INTERCONNECT_QCOM_SDX75 This is a driver for the Qualcomm Network-on-Chip on sdx75-based platforms. +config INTERCONNECT_QCOM_SM6115 + tristate "Qualcomm SM6115 interconnect driver" + depends on INTERCONNECT_QCOM + depends on QCOM_SMD_RPM + select INTERCONNECT_QCOM_SMD_RPM + help + This is a driver for the Qualcomm Network-on-Chip on sm6115-based + platforms. + config INTERCONNECT_QCOM_SM6350 tristate "Qualcomm SM6350 interconnect driver" depends on INTERCONNECT_QCOM_RPMH_POSSIBLE @@ -245,5 +254,23 @@ config INTERCONNECT_QCOM_SM8550 This is a driver for the Qualcomm Network-on-Chip on SM8550-based platforms. +config INTERCONNECT_QCOM_SM8650 + tristate "Qualcomm SM8650 interconnect driver" + depends on INTERCONNECT_QCOM_RPMH_POSSIBLE + select INTERCONNECT_QCOM_RPMH + select INTERCONNECT_QCOM_BCM_VOTER + help + This is a driver for the Qualcomm Network-on-Chip on SM8650-based + platforms. + +config INTERCONNECT_QCOM_X1E80100 + tristate "Qualcomm X1E80100 interconnect driver" + depends on INTERCONNECT_QCOM_RPMH_POSSIBLE + select INTERCONNECT_QCOM_RPMH + select INTERCONNECT_QCOM_BCM_VOTER + help + This is a driver for the Qualcomm Network-on-Chip on X1E80100-based + platforms. + config INTERCONNECT_QCOM_SMD_RPM tristate diff --git a/drivers/interconnect/qcom/Makefile b/drivers/interconnect/qcom/Makefile index c5320e293960..704846165022 100644 --- a/drivers/interconnect/qcom/Makefile +++ b/drivers/interconnect/qcom/Makefile @@ -24,12 +24,15 @@ qnoc-sdm845-objs := sdm845.o qnoc-sdx55-objs := sdx55.o qnoc-sdx65-objs := sdx65.o qnoc-sdx75-objs := sdx75.o +qnoc-sm6115-objs := sm6115.o qnoc-sm6350-objs := sm6350.o qnoc-sm8150-objs := sm8150.o qnoc-sm8250-objs := sm8250.o qnoc-sm8350-objs := sm8350.o qnoc-sm8450-objs := sm8450.o qnoc-sm8550-objs := sm8550.o +qnoc-sm8650-objs := sm8650.o +qnoc-x1e80100-objs := x1e80100.o icc-smd-rpm-objs := smd-rpm.o icc-rpm.o icc-rpm-clocks.o obj-$(CONFIG_INTERCONNECT_QCOM_BCM_VOTER) += icc-bcm-voter.o @@ -53,10 +56,13 @@ obj-$(CONFIG_INTERCONNECT_QCOM_SDM845) += qnoc-sdm845.o obj-$(CONFIG_INTERCONNECT_QCOM_SDX55) += qnoc-sdx55.o obj-$(CONFIG_INTERCONNECT_QCOM_SDX65) += qnoc-sdx65.o obj-$(CONFIG_INTERCONNECT_QCOM_SDX75) += qnoc-sdx75.o +obj-$(CONFIG_INTERCONNECT_QCOM_SM6115) += qnoc-sm6115.o obj-$(CONFIG_INTERCONNECT_QCOM_SM6350) += qnoc-sm6350.o obj-$(CONFIG_INTERCONNECT_QCOM_SM8150) += qnoc-sm8150.o obj-$(CONFIG_INTERCONNECT_QCOM_SM8250) += qnoc-sm8250.o obj-$(CONFIG_INTERCONNECT_QCOM_SM8350) += qnoc-sm8350.o obj-$(CONFIG_INTERCONNECT_QCOM_SM8450) += qnoc-sm8450.o obj-$(CONFIG_INTERCONNECT_QCOM_SM8550) += qnoc-sm8550.o +obj-$(CONFIG_INTERCONNECT_QCOM_SM8650) += qnoc-sm8650.o +obj-$(CONFIG_INTERCONNECT_QCOM_X1E80100) += qnoc-x1e80100.o obj-$(CONFIG_INTERCONNECT_QCOM_SMD_RPM) += icc-smd-rpm.o diff --git a/drivers/interconnect/qcom/icc-rpm.c b/drivers/interconnect/qcom/icc-rpm.c index dbacb2a7af50..a8ed435f696c 100644 --- a/drivers/interconnect/qcom/icc-rpm.c +++ b/drivers/interconnect/qcom/icc-rpm.c @@ -627,14 +627,12 @@ err_disable_unprepare_clk: } EXPORT_SYMBOL(qnoc_probe); -int qnoc_remove(struct platform_device *pdev) +void qnoc_remove(struct platform_device *pdev) { struct qcom_icc_provider *qp = platform_get_drvdata(pdev); icc_provider_deregister(&qp->provider); icc_nodes_remove(&qp->provider); clk_disable_unprepare(qp->bus_clk); - - 
return 0; } EXPORT_SYMBOL(qnoc_remove); diff --git a/drivers/interconnect/qcom/icc-rpm.h b/drivers/interconnect/qcom/icc-rpm.h index a13768cfd231..f4883d43eae4 100644 --- a/drivers/interconnect/qcom/icc-rpm.h +++ b/drivers/interconnect/qcom/icc-rpm.h @@ -161,7 +161,7 @@ extern const struct rpm_clk_resource aggre1_branch_clk; extern const struct rpm_clk_resource aggre2_branch_clk; int qnoc_probe(struct platform_device *pdev); -int qnoc_remove(struct platform_device *pdev); +void qnoc_remove(struct platform_device *pdev); bool qcom_icc_rpm_smd_available(void); int qcom_icc_rpm_smd_send(int ctx, int rsc_type, int id, u32 val); diff --git a/drivers/interconnect/qcom/msm8916.c b/drivers/interconnect/qcom/msm8916.c index 35148880b3e8..499b1a9ac413 100644 --- a/drivers/interconnect/qcom/msm8916.c +++ b/drivers/interconnect/qcom/msm8916.c @@ -1344,7 +1344,7 @@ MODULE_DEVICE_TABLE(of, msm8916_noc_of_match); static struct platform_driver msm8916_noc_driver = { .probe = qnoc_probe, - .remove = qnoc_remove, + .remove_new = qnoc_remove, .driver = { .name = "qnoc-msm8916", .of_match_table = msm8916_noc_of_match, diff --git a/drivers/interconnect/qcom/msm8939.c b/drivers/interconnect/qcom/msm8939.c index b52c5ac1175c..8ff2c23b1ca0 100644 --- a/drivers/interconnect/qcom/msm8939.c +++ b/drivers/interconnect/qcom/msm8939.c @@ -1421,7 +1421,7 @@ MODULE_DEVICE_TABLE(of, msm8939_noc_of_match); static struct platform_driver msm8939_noc_driver = { .probe = qnoc_probe, - .remove = qnoc_remove, + .remove_new = qnoc_remove, .driver = { .name = "qnoc-msm8939", .of_match_table = msm8939_noc_of_match, diff --git a/drivers/interconnect/qcom/msm8974.c b/drivers/interconnect/qcom/msm8974.c index 21f6c852141e..241076b5f36b 100644 --- a/drivers/interconnect/qcom/msm8974.c +++ b/drivers/interconnect/qcom/msm8974.c @@ -740,15 +740,13 @@ err_remove_nodes: return ret; } -static int msm8974_icc_remove(struct platform_device *pdev) +static void msm8974_icc_remove(struct platform_device *pdev) { struct msm8974_icc_provider *qp = platform_get_drvdata(pdev); icc_provider_deregister(&qp->provider); icc_nodes_remove(&qp->provider); clk_bulk_disable_unprepare(qp->num_clks, qp->bus_clks); - - return 0; } static const struct of_device_id msm8974_noc_of_match[] = { @@ -764,7 +762,7 @@ MODULE_DEVICE_TABLE(of, msm8974_noc_of_match); static struct platform_driver msm8974_noc_driver = { .probe = msm8974_icc_probe, - .remove = msm8974_icc_remove, + .remove_new = msm8974_icc_remove, .driver = { .name = "qnoc-msm8974", .of_match_table = msm8974_noc_of_match, diff --git a/drivers/interconnect/qcom/msm8996.c b/drivers/interconnect/qcom/msm8996.c index b73566c9b21f..788131400cd1 100644 --- a/drivers/interconnect/qcom/msm8996.c +++ b/drivers/interconnect/qcom/msm8996.c @@ -2108,7 +2108,7 @@ MODULE_DEVICE_TABLE(of, qnoc_of_match); static struct platform_driver qnoc_driver = { .probe = qnoc_probe, - .remove = qnoc_remove, + .remove_new = qnoc_remove, .driver = { .name = "qnoc-msm8996", .of_match_table = qnoc_of_match, diff --git a/drivers/interconnect/qcom/osm-l3.c b/drivers/interconnect/qcom/osm-l3.c index e97478bbc282..61a8695a9adc 100644 --- a/drivers/interconnect/qcom/osm-l3.c +++ b/drivers/interconnect/qcom/osm-l3.c @@ -148,14 +148,12 @@ static int qcom_osm_l3_set(struct icc_node *src, struct icc_node *dst) return 0; } -static int qcom_osm_l3_remove(struct platform_device *pdev) +static void qcom_osm_l3_remove(struct platform_device *pdev) { struct qcom_osm_l3_icc_provider *qp = platform_get_drvdata(pdev); icc_provider_deregister(&qp->provider); 
icc_nodes_remove(&qp->provider); - - return 0; } static int qcom_osm_l3_probe(struct platform_device *pdev) @@ -292,7 +290,7 @@ MODULE_DEVICE_TABLE(of, osm_l3_of_match); static struct platform_driver osm_l3_driver = { .probe = qcom_osm_l3_probe, - .remove = qcom_osm_l3_remove, + .remove_new = qcom_osm_l3_remove, .driver = { .name = "osm-l3", .of_match_table = osm_l3_of_match, diff --git a/drivers/interconnect/qcom/qcm2290.c b/drivers/interconnect/qcom/qcm2290.c index b88cf9a022e0..96735800b13c 100644 --- a/drivers/interconnect/qcom/qcm2290.c +++ b/drivers/interconnect/qcom/qcm2290.c @@ -1367,7 +1367,7 @@ MODULE_DEVICE_TABLE(of, qcm2290_noc_of_match); static struct platform_driver qcm2290_noc_driver = { .probe = qnoc_probe, - .remove = qnoc_remove, + .remove_new = qnoc_remove, .driver = { .name = "qnoc-qcm2290", .of_match_table = qcm2290_noc_of_match, diff --git a/drivers/interconnect/qcom/qcs404.c b/drivers/interconnect/qcom/qcs404.c index 9fa1da70c843..11b49a89c03d 100644 --- a/drivers/interconnect/qcom/qcs404.c +++ b/drivers/interconnect/qcom/qcs404.c @@ -1083,7 +1083,7 @@ MODULE_DEVICE_TABLE(of, qcs404_noc_of_match); static struct platform_driver qcs404_noc_driver = { .probe = qnoc_probe, - .remove = qnoc_remove, + .remove_new = qnoc_remove, .driver = { .name = "qnoc-qcs404", .of_match_table = qcs404_noc_of_match, diff --git a/drivers/interconnect/qcom/sdm660.c b/drivers/interconnect/qcom/sdm660.c index 7392bebba334..ab91de446da8 100644 --- a/drivers/interconnect/qcom/sdm660.c +++ b/drivers/interconnect/qcom/sdm660.c @@ -1714,7 +1714,7 @@ MODULE_DEVICE_TABLE(of, sdm660_noc_of_match); static struct platform_driver sdm660_noc_driver = { .probe = qnoc_probe, - .remove = qnoc_remove, + .remove_new = qnoc_remove, .driver = { .name = "qnoc-sdm660", .of_match_table = sdm660_noc_of_match, diff --git a/drivers/interconnect/qcom/sm6115.c b/drivers/interconnect/qcom/sm6115.c new file mode 100644 index 000000000000..88b67634aa2f --- /dev/null +++ b/drivers/interconnect/qcom/sm6115.c @@ -0,0 +1,1423 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved. 
+ * Copyright (c) 2023, Linaro Limited + */ + +#include <dt-bindings/interconnect/qcom,sm6115.h> +#include <linux/device.h> +#include <linux/interconnect-provider.h> +#include <linux/mod_devicetable.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/regmap.h> + +#include "icc-rpm.h" + +static const char * const snoc_intf_clocks[] = { + "cpu_axi", + "ufs_axi", + "usb_axi", + "ipa", /* Required by qxm_ipa */ +}; + +static const char * const cnoc_intf_clocks[] = { + "usb_axi", +}; + +enum { + SM6115_MASTER_AMPSS_M0, + SM6115_MASTER_ANOC_SNOC, + SM6115_MASTER_BIMC_SNOC, + SM6115_MASTER_CAMNOC_HF, + SM6115_MASTER_CAMNOC_SF, + SM6115_MASTER_CRYPTO_CORE0, + SM6115_MASTER_GRAPHICS_3D, + SM6115_MASTER_IPA, + SM6115_MASTER_MDP_PORT0, + SM6115_MASTER_PIMEM, + SM6115_MASTER_QDSS_BAM, + SM6115_MASTER_QDSS_DAP, + SM6115_MASTER_QDSS_ETR, + SM6115_MASTER_QPIC, + SM6115_MASTER_QUP_0, + SM6115_MASTER_QUP_CORE_0, + SM6115_MASTER_SDCC_1, + SM6115_MASTER_SDCC_2, + SM6115_MASTER_SNOC_BIMC_NRT, + SM6115_MASTER_SNOC_BIMC_RT, + SM6115_MASTER_SNOC_BIMC, + SM6115_MASTER_SNOC_CFG, + SM6115_MASTER_SNOC_CNOC, + SM6115_MASTER_TCU_0, + SM6115_MASTER_TIC, + SM6115_MASTER_USB3, + SM6115_MASTER_VIDEO_P0, + SM6115_MASTER_VIDEO_PROC, + + SM6115_SLAVE_AHB2PHY_USB, + SM6115_SLAVE_ANOC_SNOC, + SM6115_SLAVE_APPSS, + SM6115_SLAVE_APSS_THROTTLE_CFG, + SM6115_SLAVE_BIMC_CFG, + SM6115_SLAVE_BIMC_SNOC, + SM6115_SLAVE_BOOT_ROM, + SM6115_SLAVE_CAMERA_CFG, + SM6115_SLAVE_CAMERA_NRT_THROTTLE_CFG, + SM6115_SLAVE_CAMERA_RT_THROTTLE_CFG, + SM6115_SLAVE_CLK_CTL, + SM6115_SLAVE_CNOC_MSS, + SM6115_SLAVE_CRYPTO_0_CFG, + SM6115_SLAVE_DCC_CFG, + SM6115_SLAVE_DDR_PHY_CFG, + SM6115_SLAVE_DDR_SS_CFG, + SM6115_SLAVE_DISPLAY_CFG, + SM6115_SLAVE_DISPLAY_THROTTLE_CFG, + SM6115_SLAVE_EBI_CH0, + SM6115_SLAVE_GPU_CFG, + SM6115_SLAVE_GPU_THROTTLE_CFG, + SM6115_SLAVE_HWKM_CORE, + SM6115_SLAVE_IMEM_CFG, + SM6115_SLAVE_IPA_CFG, + SM6115_SLAVE_LPASS, + SM6115_SLAVE_MAPSS, + SM6115_SLAVE_MDSP_MPU_CFG, + SM6115_SLAVE_MESSAGE_RAM, + SM6115_SLAVE_OCIMEM, + SM6115_SLAVE_PDM, + SM6115_SLAVE_PIMEM_CFG, + SM6115_SLAVE_PIMEM, + SM6115_SLAVE_PKA_CORE, + SM6115_SLAVE_PMIC_ARB, + SM6115_SLAVE_QDSS_CFG, + SM6115_SLAVE_QDSS_STM, + SM6115_SLAVE_QM_CFG, + SM6115_SLAVE_QM_MPU_CFG, + SM6115_SLAVE_QPIC, + SM6115_SLAVE_QUP_0, + SM6115_SLAVE_QUP_CORE_0, + SM6115_SLAVE_RBCPR_CX_CFG, + SM6115_SLAVE_RBCPR_MX_CFG, + SM6115_SLAVE_RPM, + SM6115_SLAVE_SDCC_1, + SM6115_SLAVE_SDCC_2, + SM6115_SLAVE_SECURITY, + SM6115_SLAVE_SERVICE_CNOC, + SM6115_SLAVE_SERVICE_SNOC, + SM6115_SLAVE_SNOC_BIMC_NRT, + SM6115_SLAVE_SNOC_BIMC_RT, + SM6115_SLAVE_SNOC_BIMC, + SM6115_SLAVE_SNOC_CFG, + SM6115_SLAVE_SNOC_CNOC, + SM6115_SLAVE_TCSR, + SM6115_SLAVE_TCU, + SM6115_SLAVE_TLMM, + SM6115_SLAVE_USB3, + SM6115_SLAVE_VENUS_CFG, + SM6115_SLAVE_VENUS_THROTTLE_CFG, + SM6115_SLAVE_VSENSE_CTRL_CFG, +}; + +static const u16 slv_ebi_slv_bimc_snoc_links[] = { + SM6115_SLAVE_EBI_CH0, + SM6115_SLAVE_BIMC_SNOC, +}; + +static struct qcom_icc_node apps_proc = { + .name = "apps_proc", + .id = SM6115_MASTER_AMPSS_M0, + .channels = 1, + .buswidth = 16, + .qos.qos_port = 0, + .qos.qos_mode = NOC_QOS_MODE_FIXED, + .qos.prio_level = 0, + .qos.areq_prio = 0, + .mas_rpm_id = 0, + .slv_rpm_id = -1, + .num_links = ARRAY_SIZE(slv_ebi_slv_bimc_snoc_links), + .links = slv_ebi_slv_bimc_snoc_links, +}; + +static const u16 link_slv_ebi[] = { + SM6115_SLAVE_EBI_CH0, +}; + +static struct qcom_icc_node mas_snoc_bimc_rt = { + .name = "mas_snoc_bimc_rt", + .id = SM6115_MASTER_SNOC_BIMC_RT, + .channels = 1, + 
.buswidth = 16, + .qos.qos_port = 2, + .qos.qos_mode = NOC_QOS_MODE_BYPASS, + .qos.areq_prio = 0, + .qos.prio_level = 0, + .mas_rpm_id = -1, + .slv_rpm_id = -1, + .num_links = ARRAY_SIZE(link_slv_ebi), + .links = link_slv_ebi, +}; + +static struct qcom_icc_node mas_snoc_bimc_nrt = { + .name = "mas_snoc_bimc_nrt", + .id = SM6115_MASTER_SNOC_BIMC_NRT, + .channels = 1, + .buswidth = 16, + .qos.qos_port = 3, + .qos.qos_mode = NOC_QOS_MODE_BYPASS, + .qos.areq_prio = 0, + .qos.prio_level = 0, + .mas_rpm_id = -1, + .slv_rpm_id = -1, + .num_links = ARRAY_SIZE(link_slv_ebi), + .links = link_slv_ebi, +}; + +static struct qcom_icc_node mas_snoc_bimc = { + .name = "mas_snoc_bimc", + .id = SM6115_MASTER_SNOC_BIMC, + .channels = 1, + .buswidth = 16, + .qos.qos_port = 6, + .qos.qos_mode = NOC_QOS_MODE_BYPASS, + .qos.areq_prio = 0, + .qos.prio_level = 0, + .mas_rpm_id = 3, + .slv_rpm_id = -1, + .num_links = ARRAY_SIZE(link_slv_ebi), + .links = link_slv_ebi, +}; + +static struct qcom_icc_node qnm_gpu = { + .name = "qnm_gpu", + .id = SM6115_MASTER_GRAPHICS_3D, + .channels = 1, + .buswidth = 32, + .qos.qos_port = 1, + .qos.qos_mode = NOC_QOS_MODE_FIXED, + .qos.prio_level = 0, + .qos.areq_prio = 0, + .mas_rpm_id = -1, + .slv_rpm_id = -1, + .num_links = ARRAY_SIZE(slv_ebi_slv_bimc_snoc_links), + .links = slv_ebi_slv_bimc_snoc_links, +}; + +static struct qcom_icc_node tcu_0 = { + .name = "tcu_0", + .id = SM6115_MASTER_TCU_0, + .channels = 1, + .buswidth = 8, + .qos.qos_port = 4, + .qos.qos_mode = NOC_QOS_MODE_FIXED, + .qos.prio_level = 6, + .qos.areq_prio = 6, + .mas_rpm_id = -1, + .slv_rpm_id = -1, + .num_links = ARRAY_SIZE(slv_ebi_slv_bimc_snoc_links), + .links = slv_ebi_slv_bimc_snoc_links, +}; + +static const u16 qup_core_0_links[] = { + SM6115_SLAVE_QUP_CORE_0, +}; + +static struct qcom_icc_node qup0_core_master = { + .name = "qup0_core_master", + .id = SM6115_MASTER_QUP_CORE_0, + .channels = 1, + .buswidth = 4, + .mas_rpm_id = 170, + .slv_rpm_id = -1, + .num_links = ARRAY_SIZE(qup_core_0_links), + .links = qup_core_0_links, +}; + +static const u16 link_slv_anoc_snoc[] = { + SM6115_SLAVE_ANOC_SNOC, +}; + +static struct qcom_icc_node crypto_c0 = { + .name = "crypto_c0", + .id = SM6115_MASTER_CRYPTO_CORE0, + .channels = 1, + .buswidth = 8, + .qos.qos_port = 43, + .qos.qos_mode = NOC_QOS_MODE_FIXED, + .qos.areq_prio = 2, + .mas_rpm_id = 23, + .slv_rpm_id = -1, + .num_links = ARRAY_SIZE(link_slv_anoc_snoc), + .links = link_slv_anoc_snoc, +}; + +static const u16 mas_snoc_cnoc_links[] = { + SM6115_SLAVE_AHB2PHY_USB, + SM6115_SLAVE_APSS_THROTTLE_CFG, + SM6115_SLAVE_BIMC_CFG, + SM6115_SLAVE_BOOT_ROM, + SM6115_SLAVE_CAMERA_CFG, + SM6115_SLAVE_CAMERA_NRT_THROTTLE_CFG, + SM6115_SLAVE_CAMERA_RT_THROTTLE_CFG, + SM6115_SLAVE_CLK_CTL, + SM6115_SLAVE_CNOC_MSS, + SM6115_SLAVE_CRYPTO_0_CFG, + SM6115_SLAVE_DCC_CFG, + SM6115_SLAVE_DDR_PHY_CFG, + SM6115_SLAVE_DDR_SS_CFG, + SM6115_SLAVE_DISPLAY_CFG, + SM6115_SLAVE_DISPLAY_THROTTLE_CFG, + SM6115_SLAVE_GPU_CFG, + SM6115_SLAVE_GPU_THROTTLE_CFG, + SM6115_SLAVE_HWKM_CORE, + SM6115_SLAVE_IMEM_CFG, + SM6115_SLAVE_IPA_CFG, + SM6115_SLAVE_LPASS, + SM6115_SLAVE_MAPSS, + SM6115_SLAVE_MDSP_MPU_CFG, + SM6115_SLAVE_MESSAGE_RAM, + SM6115_SLAVE_PDM, + SM6115_SLAVE_PIMEM_CFG, + SM6115_SLAVE_PKA_CORE, + SM6115_SLAVE_PMIC_ARB, + SM6115_SLAVE_QDSS_CFG, + SM6115_SLAVE_QM_CFG, + SM6115_SLAVE_QM_MPU_CFG, + SM6115_SLAVE_QPIC, + SM6115_SLAVE_QUP_0, + SM6115_SLAVE_RBCPR_CX_CFG, + SM6115_SLAVE_RBCPR_MX_CFG, + SM6115_SLAVE_RPM, + SM6115_SLAVE_SDCC_1, + SM6115_SLAVE_SDCC_2, + SM6115_SLAVE_SECURITY, + 
SM6115_SLAVE_SERVICE_CNOC, + SM6115_SLAVE_SNOC_CFG, + SM6115_SLAVE_TCSR, + SM6115_SLAVE_TLMM, + SM6115_SLAVE_USB3, + SM6115_SLAVE_VENUS_CFG, + SM6115_SLAVE_VENUS_THROTTLE_CFG, + SM6115_SLAVE_VSENSE_CTRL_CFG, +}; + +static struct qcom_icc_node mas_snoc_cnoc = { + .name = "mas_snoc_cnoc", + .id = SM6115_MASTER_SNOC_CNOC, + .channels = 1, + .buswidth = 8, + .mas_rpm_id = -1, + .slv_rpm_id = -1, + .num_links = ARRAY_SIZE(mas_snoc_cnoc_links), + .links = mas_snoc_cnoc_links, +}; + +static struct qcom_icc_node xm_dap = { + .name = "xm_dap", + .id = SM6115_MASTER_QDSS_DAP, + .channels = 1, + .buswidth = 8, + .mas_rpm_id = -1, + .slv_rpm_id = -1, + .num_links = ARRAY_SIZE(mas_snoc_cnoc_links), + .links = mas_snoc_cnoc_links, +}; + +static const u16 link_slv_snoc_bimc_nrt[] = { + SM6115_SLAVE_SNOC_BIMC_NRT, +}; + +static struct qcom_icc_node qnm_camera_nrt = { + .name = "qnm_camera_nrt", + .id = SM6115_MASTER_CAMNOC_SF, + .channels = 1, + .buswidth = 32, + .qos.qos_port = 25, + .qos.qos_mode = NOC_QOS_MODE_FIXED, + .qos.areq_prio = 3, + .mas_rpm_id = -1, + .slv_rpm_id = -1, + .num_links = ARRAY_SIZE(link_slv_snoc_bimc_nrt), + .links = link_slv_snoc_bimc_nrt, +}; + +static struct qcom_icc_node qxm_venus0 = { + .name = "qxm_venus0", + .id = SM6115_MASTER_VIDEO_P0, + .channels = 1, + .buswidth = 16, + .qos.qos_port = 30, + .qos.qos_mode = NOC_QOS_MODE_FIXED, + .qos.areq_prio = 3, + .qos.urg_fwd_en = true, + .mas_rpm_id = -1, + .slv_rpm_id = -1, + .num_links = ARRAY_SIZE(link_slv_snoc_bimc_nrt), + .links = link_slv_snoc_bimc_nrt, +}; + +static struct qcom_icc_node qxm_venus_cpu = { + .name = "qxm_venus_cpu", + .id = SM6115_MASTER_VIDEO_PROC, + .channels = 1, + .buswidth = 8, + .qos.qos_port = 34, + .qos.qos_mode = NOC_QOS_MODE_FIXED, + .qos.areq_prio = 4, + .mas_rpm_id = -1, + .slv_rpm_id = -1, + .num_links = ARRAY_SIZE(link_slv_snoc_bimc_nrt), + .links = link_slv_snoc_bimc_nrt, +}; + +static const u16 link_slv_snoc_bimc_rt[] = { + SM6115_SLAVE_SNOC_BIMC_RT, +}; + +static struct qcom_icc_node qnm_camera_rt = { + .name = "qnm_camera_rt", + .id = SM6115_MASTER_CAMNOC_HF, + .channels = 1, + .buswidth = 32, + .qos.qos_port = 31, + .qos.qos_mode = NOC_QOS_MODE_FIXED, + .qos.areq_prio = 3, + .qos.urg_fwd_en = true, + .mas_rpm_id = -1, + .slv_rpm_id = -1, + .num_links = ARRAY_SIZE(link_slv_snoc_bimc_rt), + .links = link_slv_snoc_bimc_rt, +}; + +static struct qcom_icc_node qxm_mdp0 = { + .name = "qxm_mdp0", + .id = SM6115_MASTER_MDP_PORT0, + .channels = 1, + .buswidth = 16, + .qos.qos_port = 26, + .qos.qos_mode = NOC_QOS_MODE_FIXED, + .qos.areq_prio = 3, + .qos.urg_fwd_en = true, + .mas_rpm_id = -1, + .slv_rpm_id = -1, + .num_links = ARRAY_SIZE(link_slv_snoc_bimc_rt), + .links = link_slv_snoc_bimc_rt, +}; + +static const u16 slv_service_snoc_links[] = { + SM6115_SLAVE_SERVICE_SNOC, +}; + +static struct qcom_icc_node qhm_snoc_cfg = { + .name = "qhm_snoc_cfg", + .id = SM6115_MASTER_SNOC_CFG, + .channels = 1, + .buswidth = 4, + .mas_rpm_id = -1, + .slv_rpm_id = -1, + .num_links = ARRAY_SIZE(slv_service_snoc_links), + .links = slv_service_snoc_links, +}; + +static const u16 mas_tic_links[] = { + SM6115_SLAVE_APPSS, + SM6115_SLAVE_OCIMEM, + SM6115_SLAVE_PIMEM, + SM6115_SLAVE_QDSS_STM, + SM6115_SLAVE_TCU, + SM6115_SLAVE_SNOC_BIMC, + SM6115_SLAVE_SNOC_CNOC, +}; + +static struct qcom_icc_node qhm_tic = { + .name = "qhm_tic", + .id = SM6115_MASTER_TIC, + .channels = 1, + .buswidth = 4, + .qos.qos_port = 29, + .qos.qos_mode = NOC_QOS_MODE_FIXED, + .qos.areq_prio = 2, + .mas_rpm_id = -1, + .slv_rpm_id = -1, + .num_links 
= ARRAY_SIZE(mas_tic_links), + .links = mas_tic_links, +}; + +static struct qcom_icc_node mas_anoc_snoc = { + .name = "mas_anoc_snoc", + .id = SM6115_MASTER_ANOC_SNOC, + .channels = 1, + .buswidth = 16, + .mas_rpm_id = -1, + .slv_rpm_id = -1, + .num_links = ARRAY_SIZE(mas_tic_links), + .links = mas_tic_links, +}; + +static const u16 mas_bimc_snoc_links[] = { + SM6115_SLAVE_APPSS, + SM6115_SLAVE_SNOC_CNOC, + SM6115_SLAVE_OCIMEM, + SM6115_SLAVE_PIMEM, + SM6115_SLAVE_QDSS_STM, + SM6115_SLAVE_TCU, +}; + +static struct qcom_icc_node mas_bimc_snoc = { + .name = "mas_bimc_snoc", + .id = SM6115_MASTER_BIMC_SNOC, + .channels = 1, + .buswidth = 8, + .mas_rpm_id = 21, + .slv_rpm_id = -1, + .num_links = ARRAY_SIZE(mas_bimc_snoc_links), + .links = mas_bimc_snoc_links, +}; + +static const u16 mas_pimem_links[] = { + SM6115_SLAVE_OCIMEM, + SM6115_SLAVE_SNOC_BIMC, +}; + +static struct qcom_icc_node qxm_pimem = { + .name = "qxm_pimem", + .id = SM6115_MASTER_PIMEM, + .channels = 1, + .buswidth = 8, + .qos.qos_port = 41, + .qos.qos_mode = NOC_QOS_MODE_FIXED, + .qos.areq_prio = 2, + .mas_rpm_id = -1, + .slv_rpm_id = -1, + .num_links = ARRAY_SIZE(mas_pimem_links), + .links = mas_pimem_links, +}; + +static struct qcom_icc_node qhm_qdss_bam = { + .name = "qhm_qdss_bam", + .id = SM6115_MASTER_QDSS_BAM, + .channels = 1, + .buswidth = 4, + .qos.qos_port = 23, + .qos.qos_mode = NOC_QOS_MODE_FIXED, + .qos.areq_prio = 2, + .mas_rpm_id = -1, + .slv_rpm_id = -1, + .num_links = ARRAY_SIZE(link_slv_anoc_snoc), + .links = link_slv_anoc_snoc, +}; + +static struct qcom_icc_node qhm_qpic = { + .name = "qhm_qpic", + .id = SM6115_MASTER_QPIC, + .channels = 1, + .buswidth = 4, + .mas_rpm_id = -1, + .slv_rpm_id = -1, + .num_links = ARRAY_SIZE(link_slv_anoc_snoc), + .links = link_slv_anoc_snoc, +}; + +static struct qcom_icc_node qhm_qup0 = { + .name = "qhm_qup0", + .id = SM6115_MASTER_QUP_0, + .channels = 1, + .buswidth = 4, + .qos.qos_port = 21, + .qos.qos_mode = NOC_QOS_MODE_FIXED, + .qos.areq_prio = 2, + .mas_rpm_id = 166, + .slv_rpm_id = -1, + .num_links = ARRAY_SIZE(link_slv_anoc_snoc), + .links = link_slv_anoc_snoc, +}; + +static struct qcom_icc_node qxm_ipa = { + .name = "qxm_ipa", + .id = SM6115_MASTER_IPA, + .channels = 1, + .buswidth = 8, + .qos.qos_port = 24, + .qos.qos_mode = NOC_QOS_MODE_FIXED, + .qos.areq_prio = 2, + .mas_rpm_id = 59, + .slv_rpm_id = -1, + .num_links = ARRAY_SIZE(link_slv_anoc_snoc), + .links = link_slv_anoc_snoc, +}; + +static struct qcom_icc_node xm_qdss_etr = { + .name = "xm_qdss_etr", + .id = SM6115_MASTER_QDSS_ETR, + .channels = 1, + .buswidth = 8, + .qos.qos_port = 33, + .qos.qos_mode = NOC_QOS_MODE_FIXED, + .qos.areq_prio = 2, + .mas_rpm_id = -1, + .slv_rpm_id = -1, + .num_links = ARRAY_SIZE(link_slv_anoc_snoc), + .links = link_slv_anoc_snoc, +}; + +static struct qcom_icc_node xm_sdc1 = { + .name = "xm_sdc1", + .id = SM6115_MASTER_SDCC_1, + .channels = 1, + .buswidth = 8, + .qos.qos_port = 38, + .qos.qos_mode = NOC_QOS_MODE_FIXED, + .qos.areq_prio = 2, + .mas_rpm_id = 33, + .slv_rpm_id = -1, + .num_links = ARRAY_SIZE(link_slv_anoc_snoc), + .links = link_slv_anoc_snoc, +}; + +static struct qcom_icc_node xm_sdc2 = { + .name = "xm_sdc2", + .id = SM6115_MASTER_SDCC_2, + .channels = 1, + .buswidth = 8, + .qos.qos_port = 44, + .qos.qos_mode = NOC_QOS_MODE_FIXED, + .qos.areq_prio = 2, + .mas_rpm_id = 35, + .slv_rpm_id = -1, + .num_links = ARRAY_SIZE(link_slv_anoc_snoc), + .links = link_slv_anoc_snoc, +}; + +static struct qcom_icc_node xm_usb3_0 = { + .name = "xm_usb3_0", + .id = SM6115_MASTER_USB3, + 
.channels = 1, + .buswidth = 8, + .qos.qos_port = 45, + .qos.qos_mode = NOC_QOS_MODE_FIXED, + .qos.areq_prio = 2, + .mas_rpm_id = -1, + .slv_rpm_id = -1, + .num_links = ARRAY_SIZE(link_slv_anoc_snoc), + .links = link_slv_anoc_snoc, +}; + +static struct qcom_icc_node ebi = { + .name = "ebi", + .id = SM6115_SLAVE_EBI_CH0, + .channels = 2, + .buswidth = 4, + .mas_rpm_id = -1, + .slv_rpm_id = 0, +}; + +static const u16 slv_bimc_snoc_links[] = { + SM6115_MASTER_BIMC_SNOC, +}; + +static struct qcom_icc_node slv_bimc_snoc = { + .name = "slv_bimc_snoc", + .id = SM6115_SLAVE_BIMC_SNOC, + .channels = 1, + .buswidth = 16, + .mas_rpm_id = -1, + .slv_rpm_id = 2, + .num_links = ARRAY_SIZE(slv_bimc_snoc_links), + .links = slv_bimc_snoc_links, +}; + +static struct qcom_icc_node qup0_core_slave = { + .name = "qup0_core_slave", + .id = SM6115_SLAVE_QUP_CORE_0, + .channels = 1, + .buswidth = 4, + .mas_rpm_id = -1, + .slv_rpm_id = -1, +}; + +static struct qcom_icc_node qhs_ahb2phy_usb = { + .name = "qhs_ahb2phy_usb", + .id = SM6115_SLAVE_AHB2PHY_USB, + .channels = 1, + .buswidth = 4, + .mas_rpm_id = -1, + .slv_rpm_id = -1, +}; + +static struct qcom_icc_node qhs_apss_throttle_cfg = { + .name = "qhs_apss_throttle_cfg", + .id = SM6115_SLAVE_APSS_THROTTLE_CFG, + .channels = 1, + .buswidth = 4, + .mas_rpm_id = -1, + .slv_rpm_id = -1, +}; + +static struct qcom_icc_node qhs_bimc_cfg = { + .name = "qhs_bimc_cfg", + .id = SM6115_SLAVE_BIMC_CFG, + .channels = 1, + .buswidth = 4, + .mas_rpm_id = -1, + .slv_rpm_id = -1, +}; + +static struct qcom_icc_node qhs_boot_rom = { + .name = "qhs_boot_rom", + .id = SM6115_SLAVE_BOOT_ROM, + .channels = 1, + .buswidth = 4, + .mas_rpm_id = -1, + .slv_rpm_id = -1, +}; + +static struct qcom_icc_node qhs_camera_nrt_throttle_cfg = { + .name = "qhs_camera_nrt_throttle_cfg", + .id = SM6115_SLAVE_CAMERA_NRT_THROTTLE_CFG, + .channels = 1, + .buswidth = 4, + .mas_rpm_id = -1, + .slv_rpm_id = -1, +}; + +static struct qcom_icc_node qhs_camera_rt_throttle_cfg = { + .name = "qhs_camera_rt_throttle_cfg", + .id = SM6115_SLAVE_CAMERA_RT_THROTTLE_CFG, + .channels = 1, + .buswidth = 4, + .mas_rpm_id = -1, + .slv_rpm_id = -1, +}; + +static struct qcom_icc_node qhs_camera_ss_cfg = { + .name = "qhs_camera_ss_cfg", + .id = SM6115_SLAVE_CAMERA_CFG, + .channels = 1, + .buswidth = 4, + .mas_rpm_id = -1, + .slv_rpm_id = -1, +}; + +static struct qcom_icc_node qhs_clk_ctl = { + .name = "qhs_clk_ctl", + .id = SM6115_SLAVE_CLK_CTL, + .channels = 1, + .buswidth = 4, + .mas_rpm_id = -1, + .slv_rpm_id = -1, +}; + +static struct qcom_icc_node qhs_cpr_cx = { + .name = "qhs_cpr_cx", + .id = SM6115_SLAVE_RBCPR_CX_CFG, + .channels = 1, + .buswidth = 4, + .mas_rpm_id = -1, + .slv_rpm_id = -1, +}; + +static struct qcom_icc_node qhs_cpr_mx = { + .name = "qhs_cpr_mx", + .id = SM6115_SLAVE_RBCPR_MX_CFG, + .channels = 1, + .buswidth = 4, + .mas_rpm_id = -1, + .slv_rpm_id = -1, +}; + +static struct qcom_icc_node qhs_crypto0_cfg = { + .name = "qhs_crypto0_cfg", + .id = SM6115_SLAVE_CRYPTO_0_CFG, + .channels = 1, + .buswidth = 4, + .mas_rpm_id = -1, + .slv_rpm_id = -1, +}; + +static struct qcom_icc_node qhs_dcc_cfg = { + .name = "qhs_dcc_cfg", + .id = SM6115_SLAVE_DCC_CFG, + .channels = 1, + .buswidth = 4, + .mas_rpm_id = -1, + .slv_rpm_id = -1, +}; + +static struct qcom_icc_node qhs_ddr_phy_cfg = { + .name = "qhs_ddr_phy_cfg", + .id = SM6115_SLAVE_DDR_PHY_CFG, + .channels = 1, + .buswidth = 4, + .mas_rpm_id = -1, + .slv_rpm_id = -1, +}; + +static struct qcom_icc_node qhs_ddr_ss_cfg = { + .name = "qhs_ddr_ss_cfg", + .id = 
SM6115_SLAVE_DDR_SS_CFG, + .channels = 1, + .buswidth = 4, + .mas_rpm_id = -1, + .slv_rpm_id = -1, +}; + +static struct qcom_icc_node qhs_disp_ss_cfg = { + .name = "qhs_disp_ss_cfg", + .id = SM6115_SLAVE_DISPLAY_CFG, + .channels = 1, + .buswidth = 4, + .mas_rpm_id = -1, + .slv_rpm_id = -1, +}; + +static struct qcom_icc_node qhs_display_throttle_cfg = { + .name = "qhs_display_throttle_cfg", + .id = SM6115_SLAVE_DISPLAY_THROTTLE_CFG, + .channels = 1, + .buswidth = 4, + .mas_rpm_id = -1, + .slv_rpm_id = -1, +}; + +static struct qcom_icc_node qhs_gpu_cfg = { + .name = "qhs_gpu_cfg", + .id = SM6115_SLAVE_GPU_CFG, + .channels = 1, + .buswidth = 8, + .mas_rpm_id = -1, + .slv_rpm_id = -1, +}; + +static struct qcom_icc_node qhs_gpu_throttle_cfg = { + .name = "qhs_gpu_throttle_cfg", + .id = SM6115_SLAVE_GPU_THROTTLE_CFG, + .channels = 1, + .buswidth = 4, + .mas_rpm_id = -1, + .slv_rpm_id = -1, +}; + +static struct qcom_icc_node qhs_hwkm = { + .name = "qhs_hwkm", + .id = SM6115_SLAVE_HWKM_CORE, + .channels = 1, + .buswidth = 4, + .mas_rpm_id = -1, + .slv_rpm_id = -1, +}; + +static struct qcom_icc_node qhs_imem_cfg = { + .name = "qhs_imem_cfg", + .id = SM6115_SLAVE_IMEM_CFG, + .channels = 1, + .buswidth = 4, + .mas_rpm_id = -1, + .slv_rpm_id = -1, +}; + +static struct qcom_icc_node qhs_ipa_cfg = { + .name = "qhs_ipa_cfg", + .id = SM6115_SLAVE_IPA_CFG, + .channels = 1, + .buswidth = 4, + .mas_rpm_id = -1, + .slv_rpm_id = -1, +}; + +static struct qcom_icc_node qhs_lpass = { + .name = "qhs_lpass", + .id = SM6115_SLAVE_LPASS, + .channels = 1, + .buswidth = 4, + .mas_rpm_id = -1, + .slv_rpm_id = -1, +}; + +static struct qcom_icc_node qhs_mapss = { + .name = "qhs_mapss", + .id = SM6115_SLAVE_MAPSS, + .channels = 1, + .buswidth = 4, + .mas_rpm_id = -1, + .slv_rpm_id = -1, +}; + +static struct qcom_icc_node qhs_mdsp_mpu_cfg = { + .name = "qhs_mdsp_mpu_cfg", + .id = SM6115_SLAVE_MDSP_MPU_CFG, + .channels = 1, + .buswidth = 4, + .mas_rpm_id = -1, + .slv_rpm_id = -1, +}; + +static struct qcom_icc_node qhs_mesg_ram = { + .name = "qhs_mesg_ram", + .id = SM6115_SLAVE_MESSAGE_RAM, + .channels = 1, + .buswidth = 4, + .mas_rpm_id = -1, + .slv_rpm_id = -1, +}; + +static struct qcom_icc_node qhs_mss = { + .name = "qhs_mss", + .id = SM6115_SLAVE_CNOC_MSS, + .channels = 1, + .buswidth = 4, + .mas_rpm_id = -1, + .slv_rpm_id = -1, +}; + +static struct qcom_icc_node qhs_pdm = { + .name = "qhs_pdm", + .id = SM6115_SLAVE_PDM, + .channels = 1, + .buswidth = 4, + .mas_rpm_id = -1, + .slv_rpm_id = -1, +}; + +static struct qcom_icc_node qhs_pimem_cfg = { + .name = "qhs_pimem_cfg", + .id = SM6115_SLAVE_PIMEM_CFG, + .channels = 1, + .buswidth = 4, + .mas_rpm_id = -1, + .slv_rpm_id = -1, +}; + +static struct qcom_icc_node qhs_pka_wrapper = { + .name = "qhs_pka_wrapper", + .id = SM6115_SLAVE_PKA_CORE, + .channels = 1, + .buswidth = 4, + .mas_rpm_id = -1, + .slv_rpm_id = -1, +}; + +static struct qcom_icc_node qhs_pmic_arb = { + .name = "qhs_pmic_arb", + .id = SM6115_SLAVE_PMIC_ARB, + .channels = 1, + .buswidth = 4, + .mas_rpm_id = -1, + .slv_rpm_id = -1, +}; + +static struct qcom_icc_node qhs_qdss_cfg = { + .name = "qhs_qdss_cfg", + .id = SM6115_SLAVE_QDSS_CFG, + .channels = 1, + .buswidth = 4, + .mas_rpm_id = -1, + .slv_rpm_id = -1, +}; + +static struct qcom_icc_node qhs_qm_cfg = { + .name = "qhs_qm_cfg", + .id = SM6115_SLAVE_QM_CFG, + .channels = 1, + .buswidth = 4, + .mas_rpm_id = -1, + .slv_rpm_id = -1, +}; + +static struct qcom_icc_node qhs_qm_mpu_cfg = { + .name = "qhs_qm_mpu_cfg", + .id = SM6115_SLAVE_QM_MPU_CFG, + .channels = 
1, + .buswidth = 4, + .mas_rpm_id = -1, + .slv_rpm_id = -1, +}; + +static struct qcom_icc_node qhs_qpic = { + .name = "qhs_qpic", + .id = SM6115_SLAVE_QPIC, + .channels = 1, + .buswidth = 4, + .mas_rpm_id = -1, + .slv_rpm_id = -1, +}; + +static struct qcom_icc_node qhs_qup0 = { + .name = "qhs_qup0", + .id = SM6115_SLAVE_QUP_0, + .channels = 1, + .buswidth = 4, + .mas_rpm_id = -1, + .slv_rpm_id = -1, +}; + +static struct qcom_icc_node qhs_rpm = { + .name = "qhs_rpm", + .id = SM6115_SLAVE_RPM, + .channels = 1, + .buswidth = 4, + .mas_rpm_id = -1, + .slv_rpm_id = -1, +}; + +static struct qcom_icc_node qhs_sdc1 = { + .name = "qhs_sdc1", + .id = SM6115_SLAVE_SDCC_1, + .channels = 1, + .buswidth = 4, + .mas_rpm_id = -1, + .slv_rpm_id = -1, +}; + +static struct qcom_icc_node qhs_sdc2 = { + .name = "qhs_sdc2", + .id = SM6115_SLAVE_SDCC_2, + .channels = 1, + .buswidth = 4, + .mas_rpm_id = -1, + .slv_rpm_id = -1, +}; + +static struct qcom_icc_node qhs_security = { + .name = "qhs_security", + .id = SM6115_SLAVE_SECURITY, + .channels = 1, + .buswidth = 4, + .mas_rpm_id = -1, + .slv_rpm_id = -1, +}; + +static const u16 slv_snoc_cfg_links[] = { + SM6115_MASTER_SNOC_CFG, +}; + +static struct qcom_icc_node qhs_snoc_cfg = { + .name = "qhs_snoc_cfg", + .id = SM6115_SLAVE_SNOC_CFG, + .channels = 1, + .buswidth = 4, + .mas_rpm_id = -1, + .slv_rpm_id = -1, + .num_links = ARRAY_SIZE(slv_snoc_cfg_links), + .links = slv_snoc_cfg_links, +}; + +static struct qcom_icc_node qhs_tcsr = { + .name = "qhs_tcsr", + .id = SM6115_SLAVE_TCSR, + .channels = 1, + .buswidth = 4, + .mas_rpm_id = -1, + .slv_rpm_id = -1, +}; + +static struct qcom_icc_node qhs_tlmm = { + .name = "qhs_tlmm", + .id = SM6115_SLAVE_TLMM, + .channels = 1, + .buswidth = 4, + .mas_rpm_id = -1, + .slv_rpm_id = -1, +}; + +static struct qcom_icc_node qhs_usb3 = { + .name = "qhs_usb3", + .id = SM6115_SLAVE_USB3, + .channels = 1, + .buswidth = 4, + .mas_rpm_id = -1, + .slv_rpm_id = -1, +}; + +static struct qcom_icc_node qhs_venus_cfg = { + .name = "qhs_venus_cfg", + .id = SM6115_SLAVE_VENUS_CFG, + .channels = 1, + .buswidth = 4, + .mas_rpm_id = -1, + .slv_rpm_id = -1, +}; + +static struct qcom_icc_node qhs_venus_throttle_cfg = { + .name = "qhs_venus_throttle_cfg", + .id = SM6115_SLAVE_VENUS_THROTTLE_CFG, + .channels = 1, + .buswidth = 4, + .mas_rpm_id = -1, + .slv_rpm_id = -1, +}; + +static struct qcom_icc_node qhs_vsense_ctrl_cfg = { + .name = "qhs_vsense_ctrl_cfg", + .id = SM6115_SLAVE_VSENSE_CTRL_CFG, + .channels = 1, + .buswidth = 4, + .mas_rpm_id = -1, + .slv_rpm_id = -1, +}; + +static struct qcom_icc_node srvc_cnoc = { + .name = "srvc_cnoc", + .id = SM6115_SLAVE_SERVICE_CNOC, + .channels = 1, + .buswidth = 4, + .mas_rpm_id = -1, + .slv_rpm_id = -1, +}; + +static const u16 slv_snoc_bimc_nrt_links[] = { + SM6115_MASTER_SNOC_BIMC_NRT, +}; + +static struct qcom_icc_node slv_snoc_bimc_nrt = { + .name = "slv_snoc_bimc_nrt", + .id = SM6115_SLAVE_SNOC_BIMC_NRT, + .channels = 1, + .buswidth = 16, + .mas_rpm_id = -1, + .slv_rpm_id = -1, + .num_links = ARRAY_SIZE(slv_snoc_bimc_nrt_links), + .links = slv_snoc_bimc_nrt_links, +}; + +static const u16 slv_snoc_bimc_rt_links[] = { + SM6115_MASTER_SNOC_BIMC_RT, +}; + +static struct qcom_icc_node slv_snoc_bimc_rt = { + .name = "slv_snoc_bimc_rt", + .id = SM6115_SLAVE_SNOC_BIMC_RT, + .channels = 1, + .buswidth = 16, + .mas_rpm_id = -1, + .slv_rpm_id = -1, + .num_links = ARRAY_SIZE(slv_snoc_bimc_rt_links), + .links = slv_snoc_bimc_rt_links, +}; + +static struct qcom_icc_node qhs_apss = { + .name = "qhs_apss", + .id = 
SM6115_SLAVE_APPSS, + .channels = 1, + .buswidth = 8, + .mas_rpm_id = -1, + .slv_rpm_id = -1, +}; + +static const u16 slv_snoc_cnoc_links[] = { + SM6115_MASTER_SNOC_CNOC +}; + +static struct qcom_icc_node slv_snoc_cnoc = { + .name = "slv_snoc_cnoc", + .id = SM6115_SLAVE_SNOC_CNOC, + .channels = 1, + .buswidth = 8, + .mas_rpm_id = -1, + .slv_rpm_id = 25, + .num_links = ARRAY_SIZE(slv_snoc_cnoc_links), + .links = slv_snoc_cnoc_links, +}; + +static struct qcom_icc_node qxs_imem = { + .name = "qxs_imem", + .id = SM6115_SLAVE_OCIMEM, + .channels = 1, + .buswidth = 8, + .mas_rpm_id = -1, + .slv_rpm_id = 26, +}; + +static struct qcom_icc_node qxs_pimem = { + .name = "qxs_pimem", + .id = SM6115_SLAVE_PIMEM, + .channels = 1, + .buswidth = 8, + .mas_rpm_id = -1, + .slv_rpm_id = -1, +}; + +static const u16 slv_snoc_bimc_links[] = { + SM6115_MASTER_SNOC_BIMC, +}; + +static struct qcom_icc_node slv_snoc_bimc = { + .name = "slv_snoc_bimc", + .id = SM6115_SLAVE_SNOC_BIMC, + .channels = 1, + .buswidth = 16, + .mas_rpm_id = -1, + .slv_rpm_id = 24, + .num_links = ARRAY_SIZE(slv_snoc_bimc_links), + .links = slv_snoc_bimc_links, +}; + +static struct qcom_icc_node srvc_snoc = { + .name = "srvc_snoc", + .id = SM6115_SLAVE_SERVICE_SNOC, + .channels = 1, + .buswidth = 4, + .mas_rpm_id = -1, + .slv_rpm_id = -1, +}; + +static struct qcom_icc_node xs_qdss_stm = { + .name = "xs_qdss_stm", + .id = SM6115_SLAVE_QDSS_STM, + .channels = 1, + .buswidth = 4, + .mas_rpm_id = -1, + .slv_rpm_id = 30, +}; + +static struct qcom_icc_node xs_sys_tcu_cfg = { + .name = "xs_sys_tcu_cfg", + .id = SM6115_SLAVE_TCU, + .channels = 1, + .buswidth = 8, + .mas_rpm_id = -1, + .slv_rpm_id = -1, +}; + +static const u16 slv_anoc_snoc_links[] = { + SM6115_MASTER_ANOC_SNOC, +}; + +static struct qcom_icc_node slv_anoc_snoc = { + .name = "slv_anoc_snoc", + .id = SM6115_SLAVE_ANOC_SNOC, + .channels = 1, + .buswidth = 16, + .mas_rpm_id = -1, + .slv_rpm_id = -1, + .num_links = ARRAY_SIZE(slv_anoc_snoc_links), + .links = slv_anoc_snoc_links, +}; + +static struct qcom_icc_node *bimc_nodes[] = { + [MASTER_AMPSS_M0] = &apps_proc, + [MASTER_SNOC_BIMC_RT] = &mas_snoc_bimc_rt, + [MASTER_SNOC_BIMC_NRT] = &mas_snoc_bimc_nrt, + [SNOC_BIMC_MAS] = &mas_snoc_bimc, + [MASTER_GRAPHICS_3D] = &qnm_gpu, + [MASTER_TCU_0] = &tcu_0, + [SLAVE_EBI_CH0] = &ebi, + [BIMC_SNOC_SLV] = &slv_bimc_snoc, +}; + +static const struct regmap_config bimc_regmap_config = { + .reg_bits = 32, + .reg_stride = 4, + .val_bits = 32, + .max_register = 0x80000, + .fast_io = true, +}; + +static const struct qcom_icc_desc sm6115_bimc = { + .type = QCOM_ICC_BIMC, + .nodes = bimc_nodes, + .num_nodes = ARRAY_SIZE(bimc_nodes), + .regmap_cfg = &bimc_regmap_config, + .bus_clk_desc = &bimc_clk, + .keep_alive = true, + .qos_offset = 0x8000, + .ab_coeff = 153, +}; + +static struct qcom_icc_node *config_noc_nodes[] = { + [SNOC_CNOC_MAS] = &mas_snoc_cnoc, + [MASTER_QDSS_DAP] = &xm_dap, + [SLAVE_AHB2PHY_USB] = &qhs_ahb2phy_usb, + [SLAVE_APSS_THROTTLE_CFG] = &qhs_apss_throttle_cfg, + [SLAVE_BIMC_CFG] = &qhs_bimc_cfg, + [SLAVE_BOOT_ROM] = &qhs_boot_rom, + [SLAVE_CAMERA_NRT_THROTTLE_CFG] = &qhs_camera_nrt_throttle_cfg, + [SLAVE_CAMERA_RT_THROTTLE_CFG] = &qhs_camera_rt_throttle_cfg, + [SLAVE_CAMERA_CFG] = &qhs_camera_ss_cfg, + [SLAVE_CLK_CTL] = &qhs_clk_ctl, + [SLAVE_RBCPR_CX_CFG] = &qhs_cpr_cx, + [SLAVE_RBCPR_MX_CFG] = &qhs_cpr_mx, + [SLAVE_CRYPTO_0_CFG] = &qhs_crypto0_cfg, + [SLAVE_DCC_CFG] = &qhs_dcc_cfg, + [SLAVE_DDR_PHY_CFG] = &qhs_ddr_phy_cfg, + [SLAVE_DDR_SS_CFG] = &qhs_ddr_ss_cfg, + [SLAVE_DISPLAY_CFG] = 
&qhs_disp_ss_cfg, + [SLAVE_DISPLAY_THROTTLE_CFG] = &qhs_display_throttle_cfg, + [SLAVE_GPU_CFG] = &qhs_gpu_cfg, + [SLAVE_GPU_THROTTLE_CFG] = &qhs_gpu_throttle_cfg, + [SLAVE_HWKM_CORE] = &qhs_hwkm, + [SLAVE_IMEM_CFG] = &qhs_imem_cfg, + [SLAVE_IPA_CFG] = &qhs_ipa_cfg, + [SLAVE_LPASS] = &qhs_lpass, + [SLAVE_MAPSS] = &qhs_mapss, + [SLAVE_MDSP_MPU_CFG] = &qhs_mdsp_mpu_cfg, + [SLAVE_MESSAGE_RAM] = &qhs_mesg_ram, + [SLAVE_CNOC_MSS] = &qhs_mss, + [SLAVE_PDM] = &qhs_pdm, + [SLAVE_PIMEM_CFG] = &qhs_pimem_cfg, + [SLAVE_PKA_CORE] = &qhs_pka_wrapper, + [SLAVE_PMIC_ARB] = &qhs_pmic_arb, + [SLAVE_QDSS_CFG] = &qhs_qdss_cfg, + [SLAVE_QM_CFG] = &qhs_qm_cfg, + [SLAVE_QM_MPU_CFG] = &qhs_qm_mpu_cfg, + [SLAVE_QPIC] = &qhs_qpic, + [SLAVE_QUP_0] = &qhs_qup0, + [SLAVE_RPM] = &qhs_rpm, + [SLAVE_SDCC_1] = &qhs_sdc1, + [SLAVE_SDCC_2] = &qhs_sdc2, + [SLAVE_SECURITY] = &qhs_security, + [SLAVE_SNOC_CFG] = &qhs_snoc_cfg, + [SLAVE_TCSR] = &qhs_tcsr, + [SLAVE_TLMM] = &qhs_tlmm, + [SLAVE_USB3] = &qhs_usb3, + [SLAVE_VENUS_CFG] = &qhs_venus_cfg, + [SLAVE_VENUS_THROTTLE_CFG] = &qhs_venus_throttle_cfg, + [SLAVE_VSENSE_CTRL_CFG] = &qhs_vsense_ctrl_cfg, + [SLAVE_SERVICE_CNOC] = &srvc_cnoc, +}; + +static const struct regmap_config cnoc_regmap_config = { + .reg_bits = 32, + .reg_stride = 4, + .val_bits = 32, + .max_register = 0x6200, + .fast_io = true, +}; + +static const struct qcom_icc_desc sm6115_config_noc = { + .type = QCOM_ICC_QNOC, + .nodes = config_noc_nodes, + .num_nodes = ARRAY_SIZE(config_noc_nodes), + .regmap_cfg = &cnoc_regmap_config, + .intf_clocks = cnoc_intf_clocks, + .num_intf_clocks = ARRAY_SIZE(cnoc_intf_clocks), + .bus_clk_desc = &bus_1_clk, + .keep_alive = true, +}; + +static struct qcom_icc_node *sys_noc_nodes[] = { + [MASTER_CRYPTO_CORE0] = &crypto_c0, + [MASTER_SNOC_CFG] = &qhm_snoc_cfg, + [MASTER_TIC] = &qhm_tic, + [MASTER_ANOC_SNOC] = &mas_anoc_snoc, + [BIMC_SNOC_MAS] = &mas_bimc_snoc, + [MASTER_PIMEM] = &qxm_pimem, + [MASTER_QDSS_BAM] = &qhm_qdss_bam, + [MASTER_QPIC] = &qhm_qpic, + [MASTER_QUP_0] = &qhm_qup0, + [MASTER_IPA] = &qxm_ipa, + [MASTER_QDSS_ETR] = &xm_qdss_etr, + [MASTER_SDCC_1] = &xm_sdc1, + [MASTER_SDCC_2] = &xm_sdc2, + [MASTER_USB3] = &xm_usb3_0, + [SLAVE_APPSS] = &qhs_apss, + [SNOC_CNOC_SLV] = &slv_snoc_cnoc, + [SLAVE_OCIMEM] = &qxs_imem, + [SLAVE_PIMEM] = &qxs_pimem, + [SNOC_BIMC_SLV] = &slv_snoc_bimc, + [SLAVE_SERVICE_SNOC] = &srvc_snoc, + [SLAVE_QDSS_STM] = &xs_qdss_stm, + [SLAVE_TCU] = &xs_sys_tcu_cfg, + [SLAVE_ANOC_SNOC] = &slv_anoc_snoc, +}; + +static const struct regmap_config sys_noc_regmap_config = { + .reg_bits = 32, + .reg_stride = 4, + .val_bits = 32, + .max_register = 0x5f080, + .fast_io = true, +}; + +static const struct qcom_icc_desc sm6115_sys_noc = { + .type = QCOM_ICC_QNOC, + .nodes = sys_noc_nodes, + .num_nodes = ARRAY_SIZE(sys_noc_nodes), + .regmap_cfg = &sys_noc_regmap_config, + .intf_clocks = snoc_intf_clocks, + .num_intf_clocks = ARRAY_SIZE(snoc_intf_clocks), + .bus_clk_desc = &bus_2_clk, + .keep_alive = true, +}; + +static struct qcom_icc_node *clk_virt_nodes[] = { + [MASTER_QUP_CORE_0] = &qup0_core_master, + [SLAVE_QUP_CORE_0] = &qup0_core_slave, +}; + +static const struct qcom_icc_desc sm6115_clk_virt = { + .type = QCOM_ICC_QNOC, + .nodes = clk_virt_nodes, + .num_nodes = ARRAY_SIZE(clk_virt_nodes), + .regmap_cfg = &sys_noc_regmap_config, + .bus_clk_desc = &qup_clk, + .keep_alive = true, +}; + +static struct qcom_icc_node *mmnrt_virt_nodes[] = { + [MASTER_CAMNOC_SF] = &qnm_camera_nrt, + [MASTER_VIDEO_P0] = &qxm_venus0, + [MASTER_VIDEO_PROC] = &qxm_venus_cpu, + 
[SLAVE_SNOC_BIMC_NRT] = &slv_snoc_bimc_nrt, +}; + +static const struct qcom_icc_desc sm6115_mmnrt_virt = { + .type = QCOM_ICC_QNOC, + .nodes = mmnrt_virt_nodes, + .num_nodes = ARRAY_SIZE(mmnrt_virt_nodes), + .regmap_cfg = &sys_noc_regmap_config, + .bus_clk_desc = &mmaxi_0_clk, + .keep_alive = true, + .ab_coeff = 142, +}; + +static struct qcom_icc_node *mmrt_virt_nodes[] = { + [MASTER_CAMNOC_HF] = &qnm_camera_rt, + [MASTER_MDP_PORT0] = &qxm_mdp0, + [SLAVE_SNOC_BIMC_RT] = &slv_snoc_bimc_rt, +}; + +static const struct qcom_icc_desc sm6115_mmrt_virt = { + .type = QCOM_ICC_QNOC, + .nodes = mmrt_virt_nodes, + .num_nodes = ARRAY_SIZE(mmrt_virt_nodes), + .regmap_cfg = &sys_noc_regmap_config, + .bus_clk_desc = &mmaxi_1_clk, + .keep_alive = true, + .ab_coeff = 139, +}; + +static const struct of_device_id qnoc_of_match[] = { + { .compatible = "qcom,sm6115-bimc", .data = &sm6115_bimc }, + { .compatible = "qcom,sm6115-clk-virt", .data = &sm6115_clk_virt }, + { .compatible = "qcom,sm6115-cnoc", .data = &sm6115_config_noc }, + { .compatible = "qcom,sm6115-mmrt-virt", .data = &sm6115_mmrt_virt }, + { .compatible = "qcom,sm6115-mmnrt-virt", .data = &sm6115_mmnrt_virt }, + { .compatible = "qcom,sm6115-snoc", .data = &sm6115_sys_noc }, + { } +}; +MODULE_DEVICE_TABLE(of, qnoc_of_match); + +static struct platform_driver qnoc_driver = { + .probe = qnoc_probe, + .remove_new = qnoc_remove, + .driver = { + .name = "qnoc-sm6115", + .of_match_table = qnoc_of_match, + .sync_state = icc_sync_state, + }, +}; + +static int __init qnoc_driver_init(void) +{ + return platform_driver_register(&qnoc_driver); +} +core_initcall(qnoc_driver_init); + +static void __exit qnoc_driver_exit(void) +{ + platform_driver_unregister(&qnoc_driver); +} +module_exit(qnoc_driver_exit); + +MODULE_DESCRIPTION("SM6115 NoC driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/interconnect/qcom/sm8650.c b/drivers/interconnect/qcom/sm8650.c new file mode 100644 index 000000000000..b83de54577b6 --- /dev/null +++ b/drivers/interconnect/qcom/sm8650.c @@ -0,0 +1,1674 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2021, The Linux Foundation. All rights reserved. + * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved. 
+ * Copyright (c) 2023, Linaro Limited + */ + +#include <linux/device.h> +#include <linux/interconnect.h> +#include <linux/interconnect-provider.h> +#include <linux/module.h> +#include <linux/of_platform.h> +#include <dt-bindings/interconnect/qcom,sm8650-rpmh.h> + +#include "bcm-voter.h" +#include "icc-common.h" +#include "icc-rpmh.h" +#include "sm8650.h" + +static struct qcom_icc_node qhm_qspi = { + .name = "qhm_qspi", + .id = SM8650_MASTER_QSPI_0, + .channels = 1, + .buswidth = 4, + .num_links = 1, + .links = { SM8650_SLAVE_A1NOC_SNOC }, +}; + +static struct qcom_icc_node qhm_qup1 = { + .name = "qhm_qup1", + .id = SM8650_MASTER_QUP_1, + .channels = 1, + .buswidth = 4, + .num_links = 1, + .links = { SM8650_SLAVE_A1NOC_SNOC }, +}; + +static struct qcom_icc_node qxm_qup02 = { + .name = "qxm_qup02", + .id = SM8650_MASTER_QUP_3, + .channels = 1, + .buswidth = 8, + .num_links = 1, + .links = { SM8650_SLAVE_A1NOC_SNOC }, +}; + +static struct qcom_icc_node xm_sdc4 = { + .name = "xm_sdc4", + .id = SM8650_MASTER_SDCC_4, + .channels = 1, + .buswidth = 8, + .num_links = 1, + .links = { SM8650_SLAVE_A1NOC_SNOC }, +}; + +static struct qcom_icc_node xm_ufs_mem = { + .name = "xm_ufs_mem", + .id = SM8650_MASTER_UFS_MEM, + .channels = 1, + .buswidth = 16, + .num_links = 1, + .links = { SM8650_SLAVE_A1NOC_SNOC }, +}; + +static struct qcom_icc_node xm_usb3_0 = { + .name = "xm_usb3_0", + .id = SM8650_MASTER_USB3_0, + .channels = 1, + .buswidth = 8, + .num_links = 1, + .links = { SM8650_SLAVE_A1NOC_SNOC }, +}; + +static struct qcom_icc_node qhm_qdss_bam = { + .name = "qhm_qdss_bam", + .id = SM8650_MASTER_QDSS_BAM, + .channels = 1, + .buswidth = 4, + .num_links = 1, + .links = { SM8650_SLAVE_A2NOC_SNOC }, +}; + +static struct qcom_icc_node qhm_qup2 = { + .name = "qhm_qup2", + .id = SM8650_MASTER_QUP_2, + .channels = 1, + .buswidth = 4, + .num_links = 1, + .links = { SM8650_SLAVE_A2NOC_SNOC }, +}; + +static struct qcom_icc_node qxm_crypto = { + .name = "qxm_crypto", + .id = SM8650_MASTER_CRYPTO, + .channels = 1, + .buswidth = 8, + .num_links = 1, + .links = { SM8650_SLAVE_A2NOC_SNOC }, +}; + +static struct qcom_icc_node qxm_ipa = { + .name = "qxm_ipa", + .id = SM8650_MASTER_IPA, + .channels = 1, + .buswidth = 8, + .num_links = 1, + .links = { SM8650_SLAVE_A2NOC_SNOC }, +}; + +static struct qcom_icc_node qxm_sp = { + .name = "qxm_sp", + .id = SM8650_MASTER_SP, + .channels = 1, + .buswidth = 8, + .num_links = 1, + .links = { SM8650_SLAVE_A2NOC_SNOC }, +}; + +static struct qcom_icc_node xm_qdss_etr_0 = { + .name = "xm_qdss_etr_0", + .id = SM8650_MASTER_QDSS_ETR, + .channels = 1, + .buswidth = 8, + .num_links = 1, + .links = { SM8650_SLAVE_A2NOC_SNOC }, +}; + +static struct qcom_icc_node xm_qdss_etr_1 = { + .name = "xm_qdss_etr_1", + .id = SM8650_MASTER_QDSS_ETR_1, + .channels = 1, + .buswidth = 8, + .num_links = 1, + .links = { SM8650_SLAVE_A2NOC_SNOC }, +}; + +static struct qcom_icc_node xm_sdc2 = { + .name = "xm_sdc2", + .id = SM8650_MASTER_SDCC_2, + .channels = 1, + .buswidth = 8, + .num_links = 1, + .links = { SM8650_SLAVE_A2NOC_SNOC }, +}; + +static struct qcom_icc_node qup0_core_master = { + .name = "qup0_core_master", + .id = SM8650_MASTER_QUP_CORE_0, + .channels = 1, + .buswidth = 4, + .num_links = 1, + .links = { SM8650_SLAVE_QUP_CORE_0 }, +}; + +static struct qcom_icc_node qup1_core_master = { + .name = "qup1_core_master", + .id = SM8650_MASTER_QUP_CORE_1, + .channels = 1, + .buswidth = 4, + .num_links = 1, + .links = { SM8650_SLAVE_QUP_CORE_1 }, +}; + +static struct qcom_icc_node qup2_core_master = { + 
.name = "qup2_core_master", + .id = SM8650_MASTER_QUP_CORE_2, + .channels = 1, + .buswidth = 4, + .num_links = 1, + .links = { SM8650_SLAVE_QUP_CORE_2 }, +}; + +static struct qcom_icc_node qsm_cfg = { + .name = "qsm_cfg", + .id = SM8650_MASTER_CNOC_CFG, + .channels = 1, + .buswidth = 4, + .num_links = 46, + .links = { SM8650_SLAVE_AHB2PHY_SOUTH, SM8650_SLAVE_AHB2PHY_NORTH, + SM8650_SLAVE_CAMERA_CFG, SM8650_SLAVE_CLK_CTL, + SM8650_SLAVE_RBCPR_CX_CFG, SM8650_SLAVE_CPR_HMX, + SM8650_SLAVE_RBCPR_MMCX_CFG, SM8650_SLAVE_RBCPR_MXA_CFG, + SM8650_SLAVE_RBCPR_MXC_CFG, SM8650_SLAVE_CPR_NSPCX, + SM8650_SLAVE_CRYPTO_0_CFG, SM8650_SLAVE_CX_RDPM, + SM8650_SLAVE_DISPLAY_CFG, SM8650_SLAVE_GFX3D_CFG, + SM8650_SLAVE_I2C, SM8650_SLAVE_I3C_IBI0_CFG, + SM8650_SLAVE_I3C_IBI1_CFG, SM8650_SLAVE_IMEM_CFG, + SM8650_SLAVE_CNOC_MSS, SM8650_SLAVE_MX_2_RDPM, + SM8650_SLAVE_MX_RDPM, SM8650_SLAVE_PCIE_0_CFG, + SM8650_SLAVE_PCIE_1_CFG, SM8650_SLAVE_PCIE_RSCC, + SM8650_SLAVE_PDM, SM8650_SLAVE_PRNG, + SM8650_SLAVE_QDSS_CFG, SM8650_SLAVE_QSPI_0, + SM8650_SLAVE_QUP_3, SM8650_SLAVE_QUP_1, + SM8650_SLAVE_QUP_2, SM8650_SLAVE_SDCC_2, + SM8650_SLAVE_SDCC_4, SM8650_SLAVE_SPSS_CFG, + SM8650_SLAVE_TCSR, SM8650_SLAVE_TLMM, + SM8650_SLAVE_UFS_MEM_CFG, SM8650_SLAVE_USB3_0, + SM8650_SLAVE_VENUS_CFG, SM8650_SLAVE_VSENSE_CTRL_CFG, + SM8650_SLAVE_CNOC_MNOC_CFG, SM8650_SLAVE_NSP_QTB_CFG, + SM8650_SLAVE_PCIE_ANOC_CFG, SM8650_SLAVE_SERVICE_CNOC_CFG, + SM8650_SLAVE_QDSS_STM, SM8650_SLAVE_TCU }, +}; + +static struct qcom_icc_node qnm_gemnoc_cnoc = { + .name = "qnm_gemnoc_cnoc", + .id = SM8650_MASTER_GEM_NOC_CNOC, + .channels = 1, + .buswidth = 16, + .num_links = 9, + .links = { SM8650_SLAVE_AOSS, SM8650_SLAVE_IPA_CFG, + SM8650_SLAVE_IPC_ROUTER_CFG, SM8650_SLAVE_TME_CFG, + SM8650_SLAVE_APPSS, SM8650_SLAVE_CNOC_CFG, + SM8650_SLAVE_DDRSS_CFG, SM8650_SLAVE_IMEM, + SM8650_SLAVE_SERVICE_CNOC }, +}; + +static struct qcom_icc_node qnm_gemnoc_pcie = { + .name = "qnm_gemnoc_pcie", + .id = SM8650_MASTER_GEM_NOC_PCIE_SNOC, + .channels = 1, + .buswidth = 16, + .num_links = 2, + .links = { SM8650_SLAVE_PCIE_0, SM8650_SLAVE_PCIE_1 }, +}; + +static struct qcom_icc_node alm_gpu_tcu = { + .name = "alm_gpu_tcu", + .id = SM8650_MASTER_GPU_TCU, + .channels = 1, + .buswidth = 8, + .num_links = 2, + .links = { SM8650_SLAVE_GEM_NOC_CNOC, SM8650_SLAVE_LLCC }, +}; + +static struct qcom_icc_node alm_sys_tcu = { + .name = "alm_sys_tcu", + .id = SM8650_MASTER_SYS_TCU, + .channels = 1, + .buswidth = 8, + .num_links = 2, + .links = { SM8650_SLAVE_GEM_NOC_CNOC, SM8650_SLAVE_LLCC }, +}; + +static struct qcom_icc_node alm_ubwc_p_tcu = { + .name = "alm_ubwc_p_tcu", + .id = SM8650_MASTER_UBWC_P_TCU, + .channels = 1, + .buswidth = 8, + .num_links = 2, + .links = { SM8650_SLAVE_GEM_NOC_CNOC, SM8650_SLAVE_LLCC }, +}; + +static struct qcom_icc_node chm_apps = { + .name = "chm_apps", + .id = SM8650_MASTER_APPSS_PROC, + .channels = 3, + .buswidth = 32, + .num_links = 3, + .links = { SM8650_SLAVE_GEM_NOC_CNOC, SM8650_SLAVE_LLCC, + SM8650_SLAVE_MEM_NOC_PCIE_SNOC }, +}; + +static struct qcom_icc_node qnm_gpu = { + .name = "qnm_gpu", + .id = SM8650_MASTER_GFX3D, + .channels = 2, + .buswidth = 32, + .num_links = 2, + .links = { SM8650_SLAVE_GEM_NOC_CNOC, SM8650_SLAVE_LLCC }, +}; + +static struct qcom_icc_node qnm_lpass_gemnoc = { + .name = "qnm_lpass_gemnoc", + .id = SM8650_MASTER_LPASS_GEM_NOC, + .channels = 1, + .buswidth = 16, + .num_links = 3, + .links = { SM8650_SLAVE_GEM_NOC_CNOC, SM8650_SLAVE_LLCC, + SM8650_SLAVE_MEM_NOC_PCIE_SNOC }, +}; + +static struct qcom_icc_node qnm_mdsp = { + 
.name = "qnm_mdsp", + .id = SM8650_MASTER_MSS_PROC, + .channels = 1, + .buswidth = 16, + .num_links = 3, + .links = { SM8650_SLAVE_GEM_NOC_CNOC, SM8650_SLAVE_LLCC, + SM8650_SLAVE_MEM_NOC_PCIE_SNOC }, +}; + +static struct qcom_icc_node qnm_mnoc_hf = { + .name = "qnm_mnoc_hf", + .id = SM8650_MASTER_MNOC_HF_MEM_NOC, + .channels = 2, + .buswidth = 32, + .num_links = 2, + .links = { SM8650_SLAVE_GEM_NOC_CNOC, SM8650_SLAVE_LLCC }, +}; + +static struct qcom_icc_node qnm_mnoc_sf = { + .name = "qnm_mnoc_sf", + .id = SM8650_MASTER_MNOC_SF_MEM_NOC, + .channels = 2, + .buswidth = 32, + .num_links = 2, + .links = { SM8650_SLAVE_GEM_NOC_CNOC, SM8650_SLAVE_LLCC }, +}; + +static struct qcom_icc_node qnm_nsp_gemnoc = { + .name = "qnm_nsp_gemnoc", + .id = SM8650_MASTER_COMPUTE_NOC, + .channels = 2, + .buswidth = 32, + .num_links = 3, + .links = { SM8650_SLAVE_GEM_NOC_CNOC, SM8650_SLAVE_LLCC, + SM8650_SLAVE_MEM_NOC_PCIE_SNOC }, +}; + +static struct qcom_icc_node qnm_pcie = { + .name = "qnm_pcie", + .id = SM8650_MASTER_ANOC_PCIE_GEM_NOC, + .channels = 1, + .buswidth = 16, + .num_links = 2, + .links = { SM8650_SLAVE_GEM_NOC_CNOC, SM8650_SLAVE_LLCC }, +}; + +static struct qcom_icc_node qnm_snoc_sf = { + .name = "qnm_snoc_sf", + .id = SM8650_MASTER_SNOC_SF_MEM_NOC, + .channels = 1, + .buswidth = 16, + .num_links = 3, + .links = { SM8650_SLAVE_GEM_NOC_CNOC, SM8650_SLAVE_LLCC, + SM8650_SLAVE_MEM_NOC_PCIE_SNOC }, +}; + +static struct qcom_icc_node qnm_ubwc_p = { + .name = "qnm_ubwc_p", + .id = SM8650_MASTER_UBWC_P, + .channels = 1, + .buswidth = 32, + .num_links = 1, + .links = { SM8650_SLAVE_LLCC }, +}; + +static struct qcom_icc_node xm_gic = { + .name = "xm_gic", + .id = SM8650_MASTER_GIC, + .channels = 1, + .buswidth = 8, + .num_links = 1, + .links = { SM8650_SLAVE_LLCC }, +}; + +static struct qcom_icc_node qnm_lpiaon_noc = { + .name = "qnm_lpiaon_noc", + .id = SM8650_MASTER_LPIAON_NOC, + .channels = 1, + .buswidth = 16, + .num_links = 1, + .links = { SM8650_SLAVE_LPASS_GEM_NOC }, +}; + +static struct qcom_icc_node qnm_lpass_lpinoc = { + .name = "qnm_lpass_lpinoc", + .id = SM8650_MASTER_LPASS_LPINOC, + .channels = 1, + .buswidth = 16, + .num_links = 1, + .links = { SM8650_SLAVE_LPIAON_NOC_LPASS_AG_NOC }, +}; + +static struct qcom_icc_node qxm_lpinoc_dsp_axim = { + .name = "qxm_lpinoc_dsp_axim", + .id = SM8650_MASTER_LPASS_PROC, + .channels = 1, + .buswidth = 16, + .num_links = 1, + .links = { SM8650_SLAVE_LPICX_NOC_LPIAON_NOC }, +}; + +static struct qcom_icc_node llcc_mc = { + .name = "llcc_mc", + .id = SM8650_MASTER_LLCC, + .channels = 4, + .buswidth = 4, + .num_links = 1, + .links = { SM8650_SLAVE_EBI1 }, +}; + +static struct qcom_icc_node qnm_camnoc_hf = { + .name = "qnm_camnoc_hf", + .id = SM8650_MASTER_CAMNOC_HF, + .channels = 2, + .buswidth = 32, + .num_links = 1, + .links = { SM8650_SLAVE_MNOC_HF_MEM_NOC }, +}; + +static struct qcom_icc_node qnm_camnoc_icp = { + .name = "qnm_camnoc_icp", + .id = SM8650_MASTER_CAMNOC_ICP, + .channels = 1, + .buswidth = 8, + .num_links = 1, + .links = { SM8650_SLAVE_MNOC_SF_MEM_NOC }, +}; + +static struct qcom_icc_node qnm_camnoc_sf = { + .name = "qnm_camnoc_sf", + .id = SM8650_MASTER_CAMNOC_SF, + .channels = 2, + .buswidth = 32, + .num_links = 1, + .links = { SM8650_SLAVE_MNOC_SF_MEM_NOC }, +}; + +static struct qcom_icc_node qnm_mdp = { + .name = "qnm_mdp", + .id = SM8650_MASTER_MDP, + .channels = 2, + .buswidth = 32, + .num_links = 1, + .links = { SM8650_SLAVE_MNOC_HF_MEM_NOC }, +}; + +static struct qcom_icc_node qnm_vapss_hcp = { + .name = "qnm_vapss_hcp", + .id = 
SM8650_MASTER_CDSP_HCP, + .channels = 1, + .buswidth = 32, + .num_links = 1, + .links = { SM8650_SLAVE_MNOC_SF_MEM_NOC }, +}; + +static struct qcom_icc_node qnm_video = { + .name = "qnm_video", + .id = SM8650_MASTER_VIDEO, + .channels = 2, + .buswidth = 32, + .num_links = 1, + .links = { SM8650_SLAVE_MNOC_SF_MEM_NOC }, +}; + +static struct qcom_icc_node qnm_video_cv_cpu = { + .name = "qnm_video_cv_cpu", + .id = SM8650_MASTER_VIDEO_CV_PROC, + .channels = 1, + .buswidth = 8, + .num_links = 1, + .links = { SM8650_SLAVE_MNOC_SF_MEM_NOC }, +}; + +static struct qcom_icc_node qnm_video_cvp = { + .name = "qnm_video_cvp", + .id = SM8650_MASTER_VIDEO_PROC, + .channels = 2, + .buswidth = 32, + .num_links = 1, + .links = { SM8650_SLAVE_MNOC_SF_MEM_NOC }, +}; + +static struct qcom_icc_node qnm_video_v_cpu = { + .name = "qnm_video_v_cpu", + .id = SM8650_MASTER_VIDEO_V_PROC, + .channels = 1, + .buswidth = 8, + .num_links = 1, + .links = { SM8650_SLAVE_MNOC_SF_MEM_NOC }, +}; + +static struct qcom_icc_node qsm_mnoc_cfg = { + .name = "qsm_mnoc_cfg", + .id = SM8650_MASTER_CNOC_MNOC_CFG, + .channels = 1, + .buswidth = 4, + .num_links = 1, + .links = { SM8650_SLAVE_SERVICE_MNOC }, +}; + +static struct qcom_icc_node qnm_nsp = { + .name = "qnm_nsp", + .id = SM8650_MASTER_CDSP_PROC, + .channels = 2, + .buswidth = 32, + .num_links = 1, + .links = { SM8650_SLAVE_CDSP_MEM_NOC }, +}; + +static struct qcom_icc_node qsm_pcie_anoc_cfg = { + .name = "qsm_pcie_anoc_cfg", + .id = SM8650_MASTER_PCIE_ANOC_CFG, + .channels = 1, + .buswidth = 4, + .num_links = 1, + .links = { SM8650_SLAVE_SERVICE_PCIE_ANOC }, +}; + +static struct qcom_icc_node xm_pcie3_0 = { + .name = "xm_pcie3_0", + .id = SM8650_MASTER_PCIE_0, + .channels = 1, + .buswidth = 8, + .num_links = 1, + .links = { SM8650_SLAVE_ANOC_PCIE_GEM_NOC }, +}; + +static struct qcom_icc_node xm_pcie3_1 = { + .name = "xm_pcie3_1", + .id = SM8650_MASTER_PCIE_1, + .channels = 1, + .buswidth = 16, + .num_links = 1, + .links = { SM8650_SLAVE_ANOC_PCIE_GEM_NOC }, +}; + +static struct qcom_icc_node qnm_aggre1_noc = { + .name = "qnm_aggre1_noc", + .id = SM8650_MASTER_A1NOC_SNOC, + .channels = 1, + .buswidth = 16, + .num_links = 1, + .links = { SM8650_SLAVE_SNOC_GEM_NOC_SF }, +}; + +static struct qcom_icc_node qnm_aggre2_noc = { + .name = "qnm_aggre2_noc", + .id = SM8650_MASTER_A2NOC_SNOC, + .channels = 1, + .buswidth = 16, + .num_links = 1, + .links = { SM8650_SLAVE_SNOC_GEM_NOC_SF }, +}; + +static struct qcom_icc_node qns_a1noc_snoc = { + .name = "qns_a1noc_snoc", + .id = SM8650_SLAVE_A1NOC_SNOC, + .channels = 1, + .buswidth = 16, + .num_links = 1, + .links = { SM8650_MASTER_A1NOC_SNOC }, +}; + +static struct qcom_icc_node qns_a2noc_snoc = { + .name = "qns_a2noc_snoc", + .id = SM8650_SLAVE_A2NOC_SNOC, + .channels = 1, + .buswidth = 16, + .num_links = 1, + .links = { SM8650_MASTER_A2NOC_SNOC }, +}; + +static struct qcom_icc_node qup0_core_slave = { + .name = "qup0_core_slave", + .id = SM8650_SLAVE_QUP_CORE_0, + .channels = 1, + .buswidth = 4, + .num_links = 0, +}; + +static struct qcom_icc_node qup1_core_slave = { + .name = "qup1_core_slave", + .id = SM8650_SLAVE_QUP_CORE_1, + .channels = 1, + .buswidth = 4, + .num_links = 0, +}; + +static struct qcom_icc_node qup2_core_slave = { + .name = "qup2_core_slave", + .id = SM8650_SLAVE_QUP_CORE_2, + .channels = 1, + .buswidth = 4, + .num_links = 0, +}; + +static struct qcom_icc_node qhs_ahb2phy0 = { + .name = "qhs_ahb2phy0", + .id = SM8650_SLAVE_AHB2PHY_SOUTH, + .channels = 1, + .buswidth = 4, + .num_links = 0, +}; + +static struct 
qcom_icc_node qhs_ahb2phy1 = { + .name = "qhs_ahb2phy1", + .id = SM8650_SLAVE_AHB2PHY_NORTH, + .channels = 1, + .buswidth = 4, + .num_links = 0, +}; + +static struct qcom_icc_node qhs_camera_cfg = { + .name = "qhs_camera_cfg", + .id = SM8650_SLAVE_CAMERA_CFG, + .channels = 1, + .buswidth = 4, + .num_links = 0, +}; + +static struct qcom_icc_node qhs_clk_ctl = { + .name = "qhs_clk_ctl", + .id = SM8650_SLAVE_CLK_CTL, + .channels = 1, + .buswidth = 4, + .num_links = 0, +}; + +static struct qcom_icc_node qhs_cpr_cx = { + .name = "qhs_cpr_cx", + .id = SM8650_SLAVE_RBCPR_CX_CFG, + .channels = 1, + .buswidth = 4, + .num_links = 0, +}; + +static struct qcom_icc_node qhs_cpr_hmx = { + .name = "qhs_cpr_hmx", + .id = SM8650_SLAVE_CPR_HMX, + .channels = 1, + .buswidth = 4, + .num_links = 0, +}; + +static struct qcom_icc_node qhs_cpr_mmcx = { + .name = "qhs_cpr_mmcx", + .id = SM8650_SLAVE_RBCPR_MMCX_CFG, + .channels = 1, + .buswidth = 4, + .num_links = 0, +}; + +static struct qcom_icc_node qhs_cpr_mxa = { + .name = "qhs_cpr_mxa", + .id = SM8650_SLAVE_RBCPR_MXA_CFG, + .channels = 1, + .buswidth = 4, + .num_links = 0, +}; + +static struct qcom_icc_node qhs_cpr_mxc = { + .name = "qhs_cpr_mxc", + .id = SM8650_SLAVE_RBCPR_MXC_CFG, + .channels = 1, + .buswidth = 4, + .num_links = 0, +}; + +static struct qcom_icc_node qhs_cpr_nspcx = { + .name = "qhs_cpr_nspcx", + .id = SM8650_SLAVE_CPR_NSPCX, + .channels = 1, + .buswidth = 4, + .num_links = 0, +}; + +static struct qcom_icc_node qhs_crypto0_cfg = { + .name = "qhs_crypto0_cfg", + .id = SM8650_SLAVE_CRYPTO_0_CFG, + .channels = 1, + .buswidth = 4, + .num_links = 0, +}; + +static struct qcom_icc_node qhs_cx_rdpm = { + .name = "qhs_cx_rdpm", + .id = SM8650_SLAVE_CX_RDPM, + .channels = 1, + .buswidth = 4, + .num_links = 0, +}; + +static struct qcom_icc_node qhs_display_cfg = { + .name = "qhs_display_cfg", + .id = SM8650_SLAVE_DISPLAY_CFG, + .channels = 1, + .buswidth = 4, + .num_links = 0, +}; + +static struct qcom_icc_node qhs_gpuss_cfg = { + .name = "qhs_gpuss_cfg", + .id = SM8650_SLAVE_GFX3D_CFG, + .channels = 1, + .buswidth = 8, + .num_links = 0, +}; + +static struct qcom_icc_node qhs_i2c = { + .name = "qhs_i2c", + .id = SM8650_SLAVE_I2C, + .channels = 1, + .buswidth = 4, + .num_links = 0, +}; + +static struct qcom_icc_node qhs_i3c_ibi0_cfg = { + .name = "qhs_i3c_ibi0_cfg", + .id = SM8650_SLAVE_I3C_IBI0_CFG, + .channels = 1, + .buswidth = 4, + .num_links = 0, +}; + +static struct qcom_icc_node qhs_i3c_ibi1_cfg = { + .name = "qhs_i3c_ibi1_cfg", + .id = SM8650_SLAVE_I3C_IBI1_CFG, + .channels = 1, + .buswidth = 4, + .num_links = 0, +}; + +static struct qcom_icc_node qhs_imem_cfg = { + .name = "qhs_imem_cfg", + .id = SM8650_SLAVE_IMEM_CFG, + .channels = 1, + .buswidth = 4, + .num_links = 0, +}; + +static struct qcom_icc_node qhs_mss_cfg = { + .name = "qhs_mss_cfg", + .id = SM8650_SLAVE_CNOC_MSS, + .channels = 1, + .buswidth = 4, + .num_links = 0, +}; + +static struct qcom_icc_node qhs_mx_2_rdpm = { + .name = "qhs_mx_2_rdpm", + .id = SM8650_SLAVE_MX_2_RDPM, + .channels = 1, + .buswidth = 4, + .num_links = 0, +}; + +static struct qcom_icc_node qhs_mx_rdpm = { + .name = "qhs_mx_rdpm", + .id = SM8650_SLAVE_MX_RDPM, + .channels = 1, + .buswidth = 4, + .num_links = 0, +}; + +static struct qcom_icc_node qhs_pcie0_cfg = { + .name = "qhs_pcie0_cfg", + .id = SM8650_SLAVE_PCIE_0_CFG, + .channels = 1, + .buswidth = 4, + .num_links = 0, +}; + +static struct qcom_icc_node qhs_pcie1_cfg = { + .name = "qhs_pcie1_cfg", + .id = SM8650_SLAVE_PCIE_1_CFG, + .channels = 1, + .buswidth = 
4, + .num_links = 0, +}; + +static struct qcom_icc_node qhs_pcie_rscc = { + .name = "qhs_pcie_rscc", + .id = SM8650_SLAVE_PCIE_RSCC, + .channels = 1, + .buswidth = 4, + .num_links = 0, +}; + +static struct qcom_icc_node qhs_pdm = { + .name = "qhs_pdm", + .id = SM8650_SLAVE_PDM, + .channels = 1, + .buswidth = 4, + .num_links = 0, +}; + +static struct qcom_icc_node qhs_prng = { + .name = "qhs_prng", + .id = SM8650_SLAVE_PRNG, + .channels = 1, + .buswidth = 4, + .num_links = 0, +}; + +static struct qcom_icc_node qhs_qdss_cfg = { + .name = "qhs_qdss_cfg", + .id = SM8650_SLAVE_QDSS_CFG, + .channels = 1, + .buswidth = 4, + .num_links = 0, +}; + +static struct qcom_icc_node qhs_qspi = { + .name = "qhs_qspi", + .id = SM8650_SLAVE_QSPI_0, + .channels = 1, + .buswidth = 4, + .num_links = 0, +}; + +static struct qcom_icc_node qhs_qup02 = { + .name = "qhs_qup02", + .id = SM8650_SLAVE_QUP_3, + .channels = 1, + .buswidth = 4, + .num_links = 0, +}; + +static struct qcom_icc_node qhs_qup1 = { + .name = "qhs_qup1", + .id = SM8650_SLAVE_QUP_1, + .channels = 1, + .buswidth = 4, + .num_links = 0, +}; + +static struct qcom_icc_node qhs_qup2 = { + .name = "qhs_qup2", + .id = SM8650_SLAVE_QUP_2, + .channels = 1, + .buswidth = 4, + .num_links = 0, +}; + +static struct qcom_icc_node qhs_sdc2 = { + .name = "qhs_sdc2", + .id = SM8650_SLAVE_SDCC_2, + .channels = 1, + .buswidth = 4, + .num_links = 0, +}; + +static struct qcom_icc_node qhs_sdc4 = { + .name = "qhs_sdc4", + .id = SM8650_SLAVE_SDCC_4, + .channels = 1, + .buswidth = 4, + .num_links = 0, +}; + +static struct qcom_icc_node qhs_spss_cfg = { + .name = "qhs_spss_cfg", + .id = SM8650_SLAVE_SPSS_CFG, + .channels = 1, + .buswidth = 4, + .num_links = 0, +}; + +static struct qcom_icc_node qhs_tcsr = { + .name = "qhs_tcsr", + .id = SM8650_SLAVE_TCSR, + .channels = 1, + .buswidth = 4, + .num_links = 0, +}; + +static struct qcom_icc_node qhs_tlmm = { + .name = "qhs_tlmm", + .id = SM8650_SLAVE_TLMM, + .channels = 1, + .buswidth = 4, + .num_links = 0, +}; + +static struct qcom_icc_node qhs_ufs_mem_cfg = { + .name = "qhs_ufs_mem_cfg", + .id = SM8650_SLAVE_UFS_MEM_CFG, + .channels = 1, + .buswidth = 4, + .num_links = 0, +}; + +static struct qcom_icc_node qhs_usb3_0 = { + .name = "qhs_usb3_0", + .id = SM8650_SLAVE_USB3_0, + .channels = 1, + .buswidth = 4, + .num_links = 0, +}; + +static struct qcom_icc_node qhs_venus_cfg = { + .name = "qhs_venus_cfg", + .id = SM8650_SLAVE_VENUS_CFG, + .channels = 1, + .buswidth = 4, + .num_links = 0, +}; + +static struct qcom_icc_node qhs_vsense_ctrl_cfg = { + .name = "qhs_vsense_ctrl_cfg", + .id = SM8650_SLAVE_VSENSE_CTRL_CFG, + .channels = 1, + .buswidth = 4, + .num_links = 0, +}; + +static struct qcom_icc_node qss_mnoc_cfg = { + .name = "qss_mnoc_cfg", + .id = SM8650_SLAVE_CNOC_MNOC_CFG, + .channels = 1, + .buswidth = 4, + .num_links = 1, + .links = { SM8650_MASTER_CNOC_MNOC_CFG }, +}; + +static struct qcom_icc_node qss_nsp_qtb_cfg = { + .name = "qss_nsp_qtb_cfg", + .id = SM8650_SLAVE_NSP_QTB_CFG, + .channels = 1, + .buswidth = 4, + .num_links = 0, +}; + +static struct qcom_icc_node qss_pcie_anoc_cfg = { + .name = "qss_pcie_anoc_cfg", + .id = SM8650_SLAVE_PCIE_ANOC_CFG, + .channels = 1, + .buswidth = 4, + .num_links = 1, + .links = { SM8650_MASTER_PCIE_ANOC_CFG }, +}; + +static struct qcom_icc_node srvc_cnoc_cfg = { + .name = "srvc_cnoc_cfg", + .id = SM8650_SLAVE_SERVICE_CNOC_CFG, + .channels = 1, + .buswidth = 4, + .num_links = 0, +}; + +static struct qcom_icc_node xs_qdss_stm = { + .name = "xs_qdss_stm", + .id = SM8650_SLAVE_QDSS_STM, 
+ .channels = 1, + .buswidth = 4, + .num_links = 0, +}; + +static struct qcom_icc_node xs_sys_tcu_cfg = { + .name = "xs_sys_tcu_cfg", + .id = SM8650_SLAVE_TCU, + .channels = 1, + .buswidth = 8, + .num_links = 0, +}; + +static struct qcom_icc_node qhs_aoss = { + .name = "qhs_aoss", + .id = SM8650_SLAVE_AOSS, + .channels = 1, + .buswidth = 4, + .num_links = 0, +}; + +static struct qcom_icc_node qhs_ipa = { + .name = "qhs_ipa", + .id = SM8650_SLAVE_IPA_CFG, + .channels = 1, + .buswidth = 4, + .num_links = 0, +}; + +static struct qcom_icc_node qhs_ipc_router = { + .name = "qhs_ipc_router", + .id = SM8650_SLAVE_IPC_ROUTER_CFG, + .channels = 1, + .buswidth = 4, + .num_links = 0, +}; + +static struct qcom_icc_node qhs_tme_cfg = { + .name = "qhs_tme_cfg", + .id = SM8650_SLAVE_TME_CFG, + .channels = 1, + .buswidth = 4, + .num_links = 0, +}; + +static struct qcom_icc_node qss_apss = { + .name = "qss_apss", + .id = SM8650_SLAVE_APPSS, + .channels = 1, + .buswidth = 4, + .num_links = 0, +}; + +static struct qcom_icc_node qss_cfg = { + .name = "qss_cfg", + .id = SM8650_SLAVE_CNOC_CFG, + .channels = 1, + .buswidth = 4, + .num_links = 1, + .links = { SM8650_MASTER_CNOC_CFG }, +}; + +static struct qcom_icc_node qss_ddrss_cfg = { + .name = "qss_ddrss_cfg", + .id = SM8650_SLAVE_DDRSS_CFG, + .channels = 1, + .buswidth = 4, + .num_links = 0, +}; + +static struct qcom_icc_node qxs_imem = { + .name = "qxs_imem", + .id = SM8650_SLAVE_IMEM, + .channels = 1, + .buswidth = 8, + .num_links = 0, +}; + +static struct qcom_icc_node srvc_cnoc_main = { + .name = "srvc_cnoc_main", + .id = SM8650_SLAVE_SERVICE_CNOC, + .channels = 1, + .buswidth = 4, + .num_links = 0, +}; + +static struct qcom_icc_node xs_pcie_0 = { + .name = "xs_pcie_0", + .id = SM8650_SLAVE_PCIE_0, + .channels = 1, + .buswidth = 8, + .num_links = 0, +}; + +static struct qcom_icc_node xs_pcie_1 = { + .name = "xs_pcie_1", + .id = SM8650_SLAVE_PCIE_1, + .channels = 1, + .buswidth = 16, + .num_links = 0, +}; + +static struct qcom_icc_node qns_gem_noc_cnoc = { + .name = "qns_gem_noc_cnoc", + .id = SM8650_SLAVE_GEM_NOC_CNOC, + .channels = 1, + .buswidth = 16, + .num_links = 1, + .links = { SM8650_MASTER_GEM_NOC_CNOC }, +}; + +static struct qcom_icc_node qns_llcc = { + .name = "qns_llcc", + .id = SM8650_SLAVE_LLCC, + .channels = 4, + .buswidth = 16, + .num_links = 1, + .links = { SM8650_MASTER_LLCC }, +}; + +static struct qcom_icc_node qns_pcie = { + .name = "qns_pcie", + .id = SM8650_SLAVE_MEM_NOC_PCIE_SNOC, + .channels = 1, + .buswidth = 8, + .num_links = 1, + .links = { SM8650_MASTER_GEM_NOC_PCIE_SNOC }, +}; + +static struct qcom_icc_node qns_lpass_ag_noc_gemnoc = { + .name = "qns_lpass_ag_noc_gemnoc", + .id = SM8650_SLAVE_LPASS_GEM_NOC, + .channels = 1, + .buswidth = 16, + .num_links = 1, + .links = { SM8650_MASTER_LPASS_GEM_NOC }, +}; + +static struct qcom_icc_node qns_lpass_aggnoc = { + .name = "qns_lpass_aggnoc", + .id = SM8650_SLAVE_LPIAON_NOC_LPASS_AG_NOC, + .channels = 1, + .buswidth = 16, + .num_links = 1, + .links = { SM8650_MASTER_LPIAON_NOC }, +}; + +static struct qcom_icc_node qns_lpi_aon_noc = { + .name = "qns_lpi_aon_noc", + .id = SM8650_SLAVE_LPICX_NOC_LPIAON_NOC, + .channels = 1, + .buswidth = 16, + .num_links = 1, + .links = { SM8650_MASTER_LPASS_LPINOC }, +}; + +static struct qcom_icc_node ebi = { + .name = "ebi", + .id = SM8650_SLAVE_EBI1, + .channels = 4, + .buswidth = 4, + .num_links = 0, +}; + +static struct qcom_icc_node qns_mem_noc_hf = { + .name = "qns_mem_noc_hf", + .id = SM8650_SLAVE_MNOC_HF_MEM_NOC, + .channels = 2, + .buswidth = 
32, + .num_links = 1, + .links = { SM8650_MASTER_MNOC_HF_MEM_NOC }, +}; + +static struct qcom_icc_node qns_mem_noc_sf = { + .name = "qns_mem_noc_sf", + .id = SM8650_SLAVE_MNOC_SF_MEM_NOC, + .channels = 2, + .buswidth = 32, + .num_links = 1, + .links = { SM8650_MASTER_MNOC_SF_MEM_NOC }, +}; + +static struct qcom_icc_node srvc_mnoc = { + .name = "srvc_mnoc", + .id = SM8650_SLAVE_SERVICE_MNOC, + .channels = 1, + .buswidth = 4, + .num_links = 0, +}; + +static struct qcom_icc_node qns_nsp_gemnoc = { + .name = "qns_nsp_gemnoc", + .id = SM8650_SLAVE_CDSP_MEM_NOC, + .channels = 2, + .buswidth = 32, + .num_links = 1, + .links = { SM8650_MASTER_COMPUTE_NOC }, +}; + +static struct qcom_icc_node qns_pcie_mem_noc = { + .name = "qns_pcie_mem_noc", + .id = SM8650_SLAVE_ANOC_PCIE_GEM_NOC, + .channels = 1, + .buswidth = 16, + .num_links = 1, + .links = { SM8650_MASTER_ANOC_PCIE_GEM_NOC }, +}; + +static struct qcom_icc_node srvc_pcie_aggre_noc = { + .name = "srvc_pcie_aggre_noc", + .id = SM8650_SLAVE_SERVICE_PCIE_ANOC, + .channels = 1, + .buswidth = 4, + .num_links = 0, +}; + +static struct qcom_icc_node qns_gemnoc_sf = { + .name = "qns_gemnoc_sf", + .id = SM8650_SLAVE_SNOC_GEM_NOC_SF, + .channels = 1, + .buswidth = 16, + .num_links = 1, + .links = { SM8650_MASTER_SNOC_SF_MEM_NOC }, +}; + +static struct qcom_icc_bcm bcm_acv = { + .name = "ACV", + .enable_mask = BIT(3), + .num_nodes = 1, + .nodes = { &ebi }, +}; + +static struct qcom_icc_bcm bcm_ce0 = { + .name = "CE0", + .num_nodes = 1, + .nodes = { &qxm_crypto }, +}; + +static struct qcom_icc_bcm bcm_cn0 = { + .name = "CN0", + .enable_mask = BIT(0), + .keepalive = true, + .num_nodes = 59, + .nodes = { &qsm_cfg, &qhs_ahb2phy0, + &qhs_ahb2phy1, &qhs_camera_cfg, + &qhs_clk_ctl, &qhs_cpr_cx, + &qhs_cpr_hmx, &qhs_cpr_mmcx, + &qhs_cpr_mxa, &qhs_cpr_mxc, + &qhs_cpr_nspcx, &qhs_crypto0_cfg, + &qhs_cx_rdpm, &qhs_display_cfg, + &qhs_gpuss_cfg, &qhs_i2c, + &qhs_i3c_ibi0_cfg, &qhs_i3c_ibi1_cfg, + &qhs_imem_cfg, &qhs_mss_cfg, + &qhs_mx_2_rdpm, &qhs_mx_rdpm, + &qhs_pcie0_cfg, &qhs_pcie1_cfg, + &qhs_pcie_rscc, &qhs_pdm, + &qhs_prng, &qhs_qdss_cfg, + &qhs_qspi, &qhs_qup02, + &qhs_qup1, &qhs_qup2, + &qhs_sdc2, &qhs_sdc4, + &qhs_spss_cfg, &qhs_tcsr, + &qhs_tlmm, &qhs_ufs_mem_cfg, + &qhs_usb3_0, &qhs_venus_cfg, + &qhs_vsense_ctrl_cfg, &qss_mnoc_cfg, + &qss_nsp_qtb_cfg, &qss_pcie_anoc_cfg, + &srvc_cnoc_cfg, &xs_qdss_stm, + &xs_sys_tcu_cfg, &qnm_gemnoc_cnoc, + &qnm_gemnoc_pcie, &qhs_aoss, + &qhs_ipa, &qhs_ipc_router, + &qhs_tme_cfg, &qss_apss, + &qss_cfg, &qss_ddrss_cfg, + &qxs_imem, &srvc_cnoc_main, + &xs_pcie_0, &xs_pcie_1 }, +}; + +static struct qcom_icc_bcm bcm_co0 = { + .name = "CO0", + .enable_mask = BIT(0), + .num_nodes = 2, + .nodes = { &qnm_nsp, &qns_nsp_gemnoc }, +}; + +static struct qcom_icc_bcm bcm_lp0 = { + .name = "LP0", + .num_nodes = 2, + .nodes = { &qnm_lpass_lpinoc, &qns_lpass_aggnoc }, +}; + +static struct qcom_icc_bcm bcm_mc0 = { + .name = "MC0", + .keepalive = true, + .num_nodes = 1, + .nodes = { &ebi }, +}; + +static struct qcom_icc_bcm bcm_mm0 = { + .name = "MM0", + .num_nodes = 1, + .nodes = { &qns_mem_noc_hf }, +}; + +static struct qcom_icc_bcm bcm_mm1 = { + .name = "MM1", + .enable_mask = BIT(0), + .num_nodes = 8, + .nodes = { &qnm_camnoc_hf, &qnm_camnoc_icp, + &qnm_camnoc_sf, &qnm_vapss_hcp, + &qnm_video_cv_cpu, &qnm_video_cvp, + &qnm_video_v_cpu, &qns_mem_noc_sf }, +}; + +static struct qcom_icc_bcm bcm_qup0 = { + .name = "QUP0", + .keepalive = true, + .vote_scale = 1, + .num_nodes = 1, + .nodes = { &qup0_core_slave }, +}; + +static struct 
qcom_icc_bcm bcm_qup1 = { + .name = "QUP1", + .keepalive = true, + .vote_scale = 1, + .num_nodes = 1, + .nodes = { &qup1_core_slave }, +}; + +static struct qcom_icc_bcm bcm_qup2 = { + .name = "QUP2", + .keepalive = true, + .vote_scale = 1, + .num_nodes = 1, + .nodes = { &qup2_core_slave }, +}; + +static struct qcom_icc_bcm bcm_sh0 = { + .name = "SH0", + .keepalive = true, + .num_nodes = 1, + .nodes = { &qns_llcc }, +}; + +static struct qcom_icc_bcm bcm_sh1 = { + .name = "SH1", + .enable_mask = BIT(0), + .num_nodes = 15, + .nodes = { &alm_gpu_tcu, &alm_sys_tcu, + &alm_ubwc_p_tcu, &chm_apps, + &qnm_gpu, &qnm_mdsp, + &qnm_mnoc_hf, &qnm_mnoc_sf, + &qnm_nsp_gemnoc, &qnm_pcie, + &qnm_snoc_sf, &qnm_ubwc_p, + &xm_gic, &qns_gem_noc_cnoc, + &qns_pcie }, +}; + +static struct qcom_icc_bcm bcm_sn0 = { + .name = "SN0", + .keepalive = true, + .num_nodes = 1, + .nodes = { &qns_gemnoc_sf }, +}; + +static struct qcom_icc_bcm bcm_sn2 = { + .name = "SN2", + .num_nodes = 1, + .nodes = { &qnm_aggre1_noc }, +}; + +static struct qcom_icc_bcm bcm_sn3 = { + .name = "SN3", + .num_nodes = 1, + .nodes = { &qnm_aggre2_noc }, +}; + +static struct qcom_icc_bcm bcm_sn4 = { + .name = "SN4", + .num_nodes = 1, + .nodes = { &qns_pcie_mem_noc }, +}; + +static struct qcom_icc_node * const aggre1_noc_nodes[] = { + [MASTER_QSPI_0] = &qhm_qspi, + [MASTER_QUP_1] = &qhm_qup1, + [MASTER_QUP_3] = &qxm_qup02, + [MASTER_SDCC_4] = &xm_sdc4, + [MASTER_UFS_MEM] = &xm_ufs_mem, + [MASTER_USB3_0] = &xm_usb3_0, + [SLAVE_A1NOC_SNOC] = &qns_a1noc_snoc, +}; + +static const struct qcom_icc_desc sm8650_aggre1_noc = { + .nodes = aggre1_noc_nodes, + .num_nodes = ARRAY_SIZE(aggre1_noc_nodes), +}; + +static struct qcom_icc_bcm * const aggre2_noc_bcms[] = { + &bcm_ce0, +}; + +static struct qcom_icc_node * const aggre2_noc_nodes[] = { + [MASTER_QDSS_BAM] = &qhm_qdss_bam, + [MASTER_QUP_2] = &qhm_qup2, + [MASTER_CRYPTO] = &qxm_crypto, + [MASTER_IPA] = &qxm_ipa, + [MASTER_SP] = &qxm_sp, + [MASTER_QDSS_ETR] = &xm_qdss_etr_0, + [MASTER_QDSS_ETR_1] = &xm_qdss_etr_1, + [MASTER_SDCC_2] = &xm_sdc2, + [SLAVE_A2NOC_SNOC] = &qns_a2noc_snoc, +}; + +static const struct qcom_icc_desc sm8650_aggre2_noc = { + .nodes = aggre2_noc_nodes, + .num_nodes = ARRAY_SIZE(aggre2_noc_nodes), + .bcms = aggre2_noc_bcms, + .num_bcms = ARRAY_SIZE(aggre2_noc_bcms), +}; + +static struct qcom_icc_bcm * const clk_virt_bcms[] = { + &bcm_qup0, + &bcm_qup1, + &bcm_qup2, +}; + +static struct qcom_icc_node * const clk_virt_nodes[] = { + [MASTER_QUP_CORE_0] = &qup0_core_master, + [MASTER_QUP_CORE_1] = &qup1_core_master, + [MASTER_QUP_CORE_2] = &qup2_core_master, + [SLAVE_QUP_CORE_0] = &qup0_core_slave, + [SLAVE_QUP_CORE_1] = &qup1_core_slave, + [SLAVE_QUP_CORE_2] = &qup2_core_slave, +}; + +static const struct qcom_icc_desc sm8650_clk_virt = { + .nodes = clk_virt_nodes, + .num_nodes = ARRAY_SIZE(clk_virt_nodes), + .bcms = clk_virt_bcms, + .num_bcms = ARRAY_SIZE(clk_virt_bcms), +}; + +static struct qcom_icc_bcm * const config_noc_bcms[] = { + &bcm_cn0, +}; + +static struct qcom_icc_node * const config_noc_nodes[] = { + [MASTER_CNOC_CFG] = &qsm_cfg, + [SLAVE_AHB2PHY_SOUTH] = &qhs_ahb2phy0, + [SLAVE_AHB2PHY_NORTH] = &qhs_ahb2phy1, + [SLAVE_CAMERA_CFG] = &qhs_camera_cfg, + [SLAVE_CLK_CTL] = &qhs_clk_ctl, + [SLAVE_RBCPR_CX_CFG] = &qhs_cpr_cx, + [SLAVE_CPR_HMX] = &qhs_cpr_hmx, + [SLAVE_RBCPR_MMCX_CFG] = &qhs_cpr_mmcx, + [SLAVE_RBCPR_MXA_CFG] = &qhs_cpr_mxa, + [SLAVE_RBCPR_MXC_CFG] = &qhs_cpr_mxc, + [SLAVE_CPR_NSPCX] = &qhs_cpr_nspcx, + [SLAVE_CRYPTO_0_CFG] = &qhs_crypto0_cfg, + [SLAVE_CX_RDPM] = 
&qhs_cx_rdpm, + [SLAVE_DISPLAY_CFG] = &qhs_display_cfg, + [SLAVE_GFX3D_CFG] = &qhs_gpuss_cfg, + [SLAVE_I2C] = &qhs_i2c, + [SLAVE_I3C_IBI0_CFG] = &qhs_i3c_ibi0_cfg, + [SLAVE_I3C_IBI1_CFG] = &qhs_i3c_ibi1_cfg, + [SLAVE_IMEM_CFG] = &qhs_imem_cfg, + [SLAVE_CNOC_MSS] = &qhs_mss_cfg, + [SLAVE_MX_2_RDPM] = &qhs_mx_2_rdpm, + [SLAVE_MX_RDPM] = &qhs_mx_rdpm, + [SLAVE_PCIE_0_CFG] = &qhs_pcie0_cfg, + [SLAVE_PCIE_1_CFG] = &qhs_pcie1_cfg, + [SLAVE_PCIE_RSCC] = &qhs_pcie_rscc, + [SLAVE_PDM] = &qhs_pdm, + [SLAVE_PRNG] = &qhs_prng, + [SLAVE_QDSS_CFG] = &qhs_qdss_cfg, + [SLAVE_QSPI_0] = &qhs_qspi, + [SLAVE_QUP_3] = &qhs_qup02, + [SLAVE_QUP_1] = &qhs_qup1, + [SLAVE_QUP_2] = &qhs_qup2, + [SLAVE_SDCC_2] = &qhs_sdc2, + [SLAVE_SDCC_4] = &qhs_sdc4, + [SLAVE_SPSS_CFG] = &qhs_spss_cfg, + [SLAVE_TCSR] = &qhs_tcsr, + [SLAVE_TLMM] = &qhs_tlmm, + [SLAVE_UFS_MEM_CFG] = &qhs_ufs_mem_cfg, + [SLAVE_USB3_0] = &qhs_usb3_0, + [SLAVE_VENUS_CFG] = &qhs_venus_cfg, + [SLAVE_VSENSE_CTRL_CFG] = &qhs_vsense_ctrl_cfg, + [SLAVE_CNOC_MNOC_CFG] = &qss_mnoc_cfg, + [SLAVE_NSP_QTB_CFG] = &qss_nsp_qtb_cfg, + [SLAVE_PCIE_ANOC_CFG] = &qss_pcie_anoc_cfg, + [SLAVE_SERVICE_CNOC_CFG] = &srvc_cnoc_cfg, + [SLAVE_QDSS_STM] = &xs_qdss_stm, + [SLAVE_TCU] = &xs_sys_tcu_cfg, +}; + +static const struct qcom_icc_desc sm8650_config_noc = { + .nodes = config_noc_nodes, + .num_nodes = ARRAY_SIZE(config_noc_nodes), + .bcms = config_noc_bcms, + .num_bcms = ARRAY_SIZE(config_noc_bcms), +}; + +static struct qcom_icc_bcm * const cnoc_main_bcms[] = { + &bcm_cn0, +}; + +static struct qcom_icc_node * const cnoc_main_nodes[] = { + [MASTER_GEM_NOC_CNOC] = &qnm_gemnoc_cnoc, + [MASTER_GEM_NOC_PCIE_SNOC] = &qnm_gemnoc_pcie, + [SLAVE_AOSS] = &qhs_aoss, + [SLAVE_IPA_CFG] = &qhs_ipa, + [SLAVE_IPC_ROUTER_CFG] = &qhs_ipc_router, + [SLAVE_TME_CFG] = &qhs_tme_cfg, + [SLAVE_APPSS] = &qss_apss, + [SLAVE_CNOC_CFG] = &qss_cfg, + [SLAVE_DDRSS_CFG] = &qss_ddrss_cfg, + [SLAVE_IMEM] = &qxs_imem, + [SLAVE_SERVICE_CNOC] = &srvc_cnoc_main, + [SLAVE_PCIE_0] = &xs_pcie_0, + [SLAVE_PCIE_1] = &xs_pcie_1, +}; + +static const struct qcom_icc_desc sm8650_cnoc_main = { + .nodes = cnoc_main_nodes, + .num_nodes = ARRAY_SIZE(cnoc_main_nodes), + .bcms = cnoc_main_bcms, + .num_bcms = ARRAY_SIZE(cnoc_main_bcms), +}; + +static struct qcom_icc_bcm * const gem_noc_bcms[] = { + &bcm_sh0, + &bcm_sh1, +}; + +static struct qcom_icc_node * const gem_noc_nodes[] = { + [MASTER_GPU_TCU] = &alm_gpu_tcu, + [MASTER_SYS_TCU] = &alm_sys_tcu, + [MASTER_UBWC_P_TCU] = &alm_ubwc_p_tcu, + [MASTER_APPSS_PROC] = &chm_apps, + [MASTER_GFX3D] = &qnm_gpu, + [MASTER_LPASS_GEM_NOC] = &qnm_lpass_gemnoc, + [MASTER_MSS_PROC] = &qnm_mdsp, + [MASTER_MNOC_HF_MEM_NOC] = &qnm_mnoc_hf, + [MASTER_MNOC_SF_MEM_NOC] = &qnm_mnoc_sf, + [MASTER_COMPUTE_NOC] = &qnm_nsp_gemnoc, + [MASTER_ANOC_PCIE_GEM_NOC] = &qnm_pcie, + [MASTER_SNOC_SF_MEM_NOC] = &qnm_snoc_sf, + [MASTER_UBWC_P] = &qnm_ubwc_p, + [MASTER_GIC] = &xm_gic, + [SLAVE_GEM_NOC_CNOC] = &qns_gem_noc_cnoc, + [SLAVE_LLCC] = &qns_llcc, + [SLAVE_MEM_NOC_PCIE_SNOC] = &qns_pcie, +}; + +static const struct qcom_icc_desc sm8650_gem_noc = { + .nodes = gem_noc_nodes, + .num_nodes = ARRAY_SIZE(gem_noc_nodes), + .bcms = gem_noc_bcms, + .num_bcms = ARRAY_SIZE(gem_noc_bcms), +}; + +static struct qcom_icc_node * const lpass_ag_noc_nodes[] = { + [MASTER_LPIAON_NOC] = &qnm_lpiaon_noc, + [SLAVE_LPASS_GEM_NOC] = &qns_lpass_ag_noc_gemnoc, +}; + +static const struct qcom_icc_desc sm8650_lpass_ag_noc = { + .nodes = lpass_ag_noc_nodes, + .num_nodes = ARRAY_SIZE(lpass_ag_noc_nodes), +}; + +static struct 
qcom_icc_bcm * const lpass_lpiaon_noc_bcms[] = { + &bcm_lp0, +}; + +static struct qcom_icc_node * const lpass_lpiaon_noc_nodes[] = { + [MASTER_LPASS_LPINOC] = &qnm_lpass_lpinoc, + [SLAVE_LPIAON_NOC_LPASS_AG_NOC] = &qns_lpass_aggnoc, +}; + +static const struct qcom_icc_desc sm8650_lpass_lpiaon_noc = { + .nodes = lpass_lpiaon_noc_nodes, + .num_nodes = ARRAY_SIZE(lpass_lpiaon_noc_nodes), + .bcms = lpass_lpiaon_noc_bcms, + .num_bcms = ARRAY_SIZE(lpass_lpiaon_noc_bcms), +}; + +static struct qcom_icc_node * const lpass_lpicx_noc_nodes[] = { + [MASTER_LPASS_PROC] = &qxm_lpinoc_dsp_axim, + [SLAVE_LPICX_NOC_LPIAON_NOC] = &qns_lpi_aon_noc, +}; + +static const struct qcom_icc_desc sm8650_lpass_lpicx_noc = { + .nodes = lpass_lpicx_noc_nodes, + .num_nodes = ARRAY_SIZE(lpass_lpicx_noc_nodes), +}; + +static struct qcom_icc_bcm * const mc_virt_bcms[] = { + &bcm_acv, + &bcm_mc0, +}; + +static struct qcom_icc_node * const mc_virt_nodes[] = { + [MASTER_LLCC] = &llcc_mc, + [SLAVE_EBI1] = &ebi, +}; + +static const struct qcom_icc_desc sm8650_mc_virt = { + .nodes = mc_virt_nodes, + .num_nodes = ARRAY_SIZE(mc_virt_nodes), + .bcms = mc_virt_bcms, + .num_bcms = ARRAY_SIZE(mc_virt_bcms), +}; + +static struct qcom_icc_bcm * const mmss_noc_bcms[] = { + &bcm_mm0, + &bcm_mm1, +}; + +static struct qcom_icc_node * const mmss_noc_nodes[] = { + [MASTER_CAMNOC_HF] = &qnm_camnoc_hf, + [MASTER_CAMNOC_ICP] = &qnm_camnoc_icp, + [MASTER_CAMNOC_SF] = &qnm_camnoc_sf, + [MASTER_MDP] = &qnm_mdp, + [MASTER_CDSP_HCP] = &qnm_vapss_hcp, + [MASTER_VIDEO] = &qnm_video, + [MASTER_VIDEO_CV_PROC] = &qnm_video_cv_cpu, + [MASTER_VIDEO_PROC] = &qnm_video_cvp, + [MASTER_VIDEO_V_PROC] = &qnm_video_v_cpu, + [MASTER_CNOC_MNOC_CFG] = &qsm_mnoc_cfg, + [SLAVE_MNOC_HF_MEM_NOC] = &qns_mem_noc_hf, + [SLAVE_MNOC_SF_MEM_NOC] = &qns_mem_noc_sf, + [SLAVE_SERVICE_MNOC] = &srvc_mnoc, +}; + +static const struct qcom_icc_desc sm8650_mmss_noc = { + .nodes = mmss_noc_nodes, + .num_nodes = ARRAY_SIZE(mmss_noc_nodes), + .bcms = mmss_noc_bcms, + .num_bcms = ARRAY_SIZE(mmss_noc_bcms), +}; + +static struct qcom_icc_bcm * const nsp_noc_bcms[] = { + &bcm_co0, +}; + +static struct qcom_icc_node * const nsp_noc_nodes[] = { + [MASTER_CDSP_PROC] = &qnm_nsp, + [SLAVE_CDSP_MEM_NOC] = &qns_nsp_gemnoc, +}; + +static const struct qcom_icc_desc sm8650_nsp_noc = { + .nodes = nsp_noc_nodes, + .num_nodes = ARRAY_SIZE(nsp_noc_nodes), + .bcms = nsp_noc_bcms, + .num_bcms = ARRAY_SIZE(nsp_noc_bcms), +}; + +static struct qcom_icc_bcm * const pcie_anoc_bcms[] = { + &bcm_sn4, +}; + +static struct qcom_icc_node * const pcie_anoc_nodes[] = { + [MASTER_PCIE_ANOC_CFG] = &qsm_pcie_anoc_cfg, + [MASTER_PCIE_0] = &xm_pcie3_0, + [MASTER_PCIE_1] = &xm_pcie3_1, + [SLAVE_ANOC_PCIE_GEM_NOC] = &qns_pcie_mem_noc, + [SLAVE_SERVICE_PCIE_ANOC] = &srvc_pcie_aggre_noc, +}; + +static const struct qcom_icc_desc sm8650_pcie_anoc = { + .nodes = pcie_anoc_nodes, + .num_nodes = ARRAY_SIZE(pcie_anoc_nodes), + .bcms = pcie_anoc_bcms, + .num_bcms = ARRAY_SIZE(pcie_anoc_bcms), +}; + +static struct qcom_icc_bcm * const system_noc_bcms[] = { + &bcm_sn0, + &bcm_sn2, + &bcm_sn3, +}; + +static struct qcom_icc_node * const system_noc_nodes[] = { + [MASTER_A1NOC_SNOC] = &qnm_aggre1_noc, + [MASTER_A2NOC_SNOC] = &qnm_aggre2_noc, + [SLAVE_SNOC_GEM_NOC_SF] = &qns_gemnoc_sf, +}; + +static const struct qcom_icc_desc sm8650_system_noc = { + .nodes = system_noc_nodes, + .num_nodes = ARRAY_SIZE(system_noc_nodes), + .bcms = system_noc_bcms, + .num_bcms = ARRAY_SIZE(system_noc_bcms), +}; + +static const struct of_device_id 
qnoc_of_match[] = { + { .compatible = "qcom,sm8650-aggre1-noc", .data = &sm8650_aggre1_noc }, + { .compatible = "qcom,sm8650-aggre2-noc", .data = &sm8650_aggre2_noc }, + { .compatible = "qcom,sm8650-clk-virt", .data = &sm8650_clk_virt }, + { .compatible = "qcom,sm8650-config-noc", .data = &sm8650_config_noc }, + { .compatible = "qcom,sm8650-cnoc-main", .data = &sm8650_cnoc_main }, + { .compatible = "qcom,sm8650-gem-noc", .data = &sm8650_gem_noc }, + { .compatible = "qcom,sm8650-lpass-ag-noc", .data = &sm8650_lpass_ag_noc }, + { .compatible = "qcom,sm8650-lpass-lpiaon-noc", .data = &sm8650_lpass_lpiaon_noc }, + { .compatible = "qcom,sm8650-lpass-lpicx-noc", .data = &sm8650_lpass_lpicx_noc }, + { .compatible = "qcom,sm8650-mc-virt", .data = &sm8650_mc_virt }, + { .compatible = "qcom,sm8650-mmss-noc", .data = &sm8650_mmss_noc }, + { .compatible = "qcom,sm8650-nsp-noc", .data = &sm8650_nsp_noc }, + { .compatible = "qcom,sm8650-pcie-anoc", .data = &sm8650_pcie_anoc }, + { .compatible = "qcom,sm8650-system-noc", .data = &sm8650_system_noc }, + { } +}; +MODULE_DEVICE_TABLE(of, qnoc_of_match); + +static struct platform_driver qnoc_driver = { + .probe = qcom_icc_rpmh_probe, + .remove_new = qcom_icc_rpmh_remove, + .driver = { + .name = "qnoc-sm8650", + .of_match_table = qnoc_of_match, + .sync_state = icc_sync_state, + }, +}; + +static int __init qnoc_driver_init(void) +{ + return platform_driver_register(&qnoc_driver); +} +core_initcall(qnoc_driver_init); + +static void __exit qnoc_driver_exit(void) +{ + platform_driver_unregister(&qnoc_driver); +} +module_exit(qnoc_driver_exit); + +MODULE_DESCRIPTION("sm8650 NoC driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/interconnect/qcom/sm8650.h b/drivers/interconnect/qcom/sm8650.h new file mode 100644 index 000000000000..de35c956fe49 --- /dev/null +++ b/drivers/interconnect/qcom/sm8650.h @@ -0,0 +1,143 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * SM8650 interconnect IDs + * + * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved. 
+ * Copyright (c) 2023, Linaro Limited + */ + +#ifndef __DRIVERS_INTERCONNECT_QCOM_SM8650_H +#define __DRIVERS_INTERCONNECT_QCOM_SM8650_H + +#define SM8650_MASTER_A1NOC_SNOC 0 +#define SM8650_MASTER_A2NOC_SNOC 1 +#define SM8650_MASTER_ANOC_PCIE_GEM_NOC 2 +#define SM8650_MASTER_APPSS_PROC 3 +#define SM8650_MASTER_CAMNOC_HF 4 +#define SM8650_MASTER_CAMNOC_ICP 5 +#define SM8650_MASTER_CAMNOC_SF 6 +#define SM8650_MASTER_CDSP_HCP 7 +#define SM8650_MASTER_CDSP_PROC 8 +#define SM8650_MASTER_CNOC_CFG 9 +#define SM8650_MASTER_CNOC_MNOC_CFG 10 +#define SM8650_MASTER_COMPUTE_NOC 11 +#define SM8650_MASTER_CRYPTO 12 +#define SM8650_MASTER_GEM_NOC_CNOC 13 +#define SM8650_MASTER_GEM_NOC_PCIE_SNOC 14 +#define SM8650_MASTER_GFX3D 15 +#define SM8650_MASTER_GIC 16 +#define SM8650_MASTER_GPU_TCU 17 +#define SM8650_MASTER_IPA 18 +#define SM8650_MASTER_LLCC 19 +#define SM8650_MASTER_LPASS_GEM_NOC 20 +#define SM8650_MASTER_LPASS_LPINOC 21 +#define SM8650_MASTER_LPASS_PROC 22 +#define SM8650_MASTER_LPIAON_NOC 23 +#define SM8650_MASTER_MDP 24 +#define SM8650_MASTER_MNOC_HF_MEM_NOC 25 +#define SM8650_MASTER_MNOC_SF_MEM_NOC 26 +#define SM8650_MASTER_MSS_PROC 27 +#define SM8650_MASTER_PCIE_0 28 +#define SM8650_MASTER_PCIE_1 29 +#define SM8650_MASTER_PCIE_ANOC_CFG 30 +#define SM8650_MASTER_QDSS_BAM 31 +#define SM8650_MASTER_QDSS_ETR 32 +#define SM8650_MASTER_QDSS_ETR_1 33 +#define SM8650_MASTER_QSPI_0 34 +#define SM8650_MASTER_QUP_1 35 +#define SM8650_MASTER_QUP_2 36 +#define SM8650_MASTER_QUP_3 37 +#define SM8650_MASTER_QUP_CORE_0 38 +#define SM8650_MASTER_QUP_CORE_1 39 +#define SM8650_MASTER_QUP_CORE_2 40 +#define SM8650_MASTER_SDCC_2 41 +#define SM8650_MASTER_SDCC_4 42 +#define SM8650_MASTER_SNOC_SF_MEM_NOC 43 +#define SM8650_MASTER_SP 44 +#define SM8650_MASTER_SYS_TCU 45 +#define SM8650_MASTER_UBWC_P 46 +#define SM8650_MASTER_UBWC_P_TCU 47 +#define SM8650_MASTER_UFS_MEM 48 +#define SM8650_MASTER_USB3_0 49 +#define SM8650_MASTER_VIDEO 50 +#define SM8650_MASTER_VIDEO_CV_PROC 51 +#define SM8650_MASTER_VIDEO_PROC 52 +#define SM8650_MASTER_VIDEO_V_PROC 53 +#define SM8650_SLAVE_A1NOC_SNOC 54 +#define SM8650_SLAVE_A2NOC_SNOC 55 +#define SM8650_SLAVE_AHB2PHY_NORTH 56 +#define SM8650_SLAVE_AHB2PHY_SOUTH 57 +#define SM8650_SLAVE_ANOC_PCIE_GEM_NOC 58 +#define SM8650_SLAVE_AOSS 59 +#define SM8650_SLAVE_APPSS 60 +#define SM8650_SLAVE_CAMERA_CFG 61 +#define SM8650_SLAVE_CDSP_MEM_NOC 62 +#define SM8650_SLAVE_CLK_CTL 63 +#define SM8650_SLAVE_CNOC_CFG 64 +#define SM8650_SLAVE_CNOC_MNOC_CFG 65 +#define SM8650_SLAVE_CNOC_MSS 66 +#define SM8650_SLAVE_CPR_HMX 67 +#define SM8650_SLAVE_CPR_NSPCX 68 +#define SM8650_SLAVE_CRYPTO_0_CFG 69 +#define SM8650_SLAVE_CX_RDPM 70 +#define SM8650_SLAVE_DDRSS_CFG 71 +#define SM8650_SLAVE_DISPLAY_CFG 72 +#define SM8650_SLAVE_EBI1 73 +#define SM8650_SLAVE_GEM_NOC_CNOC 74 +#define SM8650_SLAVE_GFX3D_CFG 75 +#define SM8650_SLAVE_I2C 76 +#define SM8650_SLAVE_I3C_IBI0_CFG 77 +#define SM8650_SLAVE_I3C_IBI1_CFG 78 +#define SM8650_SLAVE_IMEM 79 +#define SM8650_SLAVE_IMEM_CFG 80 +#define SM8650_SLAVE_IPA_CFG 81 +#define SM8650_SLAVE_IPC_ROUTER_CFG 82 +#define SM8650_SLAVE_LLCC 83 +#define SM8650_SLAVE_LPASS_GEM_NOC 84 +#define SM8650_SLAVE_LPIAON_NOC_LPASS_AG_NOC 85 +#define SM8650_SLAVE_LPICX_NOC_LPIAON_NOC 86 +#define SM8650_SLAVE_MEM_NOC_PCIE_SNOC 87 +#define SM8650_SLAVE_MNOC_HF_MEM_NOC 88 +#define SM8650_SLAVE_MNOC_SF_MEM_NOC 89 +#define SM8650_SLAVE_MX_2_RDPM 90 +#define SM8650_SLAVE_MX_RDPM 91 +#define SM8650_SLAVE_NSP_QTB_CFG 92 +#define SM8650_SLAVE_PCIE_0 93 +#define SM8650_SLAVE_PCIE_1 94 +#define 
SM8650_SLAVE_PCIE_0_CFG 95 +#define SM8650_SLAVE_PCIE_1_CFG 96 +#define SM8650_SLAVE_PCIE_ANOC_CFG 97 +#define SM8650_SLAVE_PCIE_RSCC 98 +#define SM8650_SLAVE_PDM 99 +#define SM8650_SLAVE_PRNG 100 +#define SM8650_SLAVE_QDSS_CFG 101 +#define SM8650_SLAVE_QDSS_STM 102 +#define SM8650_SLAVE_QSPI_0 103 +#define SM8650_SLAVE_QUP_1 104 +#define SM8650_SLAVE_QUP_2 105 +#define SM8650_SLAVE_QUP_3 106 +#define SM8650_SLAVE_QUP_CORE_0 107 +#define SM8650_SLAVE_QUP_CORE_1 108 +#define SM8650_SLAVE_QUP_CORE_2 109 +#define SM8650_SLAVE_RBCPR_CX_CFG 110 +#define SM8650_SLAVE_RBCPR_MMCX_CFG 111 +#define SM8650_SLAVE_RBCPR_MXA_CFG 112 +#define SM8650_SLAVE_RBCPR_MXC_CFG 113 +#define SM8650_SLAVE_SDCC_2 114 +#define SM8650_SLAVE_SDCC_4 115 +#define SM8650_SLAVE_SERVICE_CNOC 116 +#define SM8650_SLAVE_SERVICE_CNOC_CFG 117 +#define SM8650_SLAVE_SERVICE_MNOC 118 +#define SM8650_SLAVE_SERVICE_PCIE_ANOC 119 +#define SM8650_SLAVE_SNOC_GEM_NOC_SF 120 +#define SM8650_SLAVE_SPSS_CFG 121 +#define SM8650_SLAVE_TCSR 122 +#define SM8650_SLAVE_TCU 123 +#define SM8650_SLAVE_TLMM 124 +#define SM8650_SLAVE_TME_CFG 125 +#define SM8650_SLAVE_UFS_MEM_CFG 126 +#define SM8650_SLAVE_USB3_0 127 +#define SM8650_SLAVE_VENUS_CFG 128 +#define SM8650_SLAVE_VSENSE_CTRL_CFG 129 + +#endif diff --git a/drivers/interconnect/qcom/smd-rpm.c b/drivers/interconnect/qcom/smd-rpm.c index 16a145a3c914..3816bfb4e2f3 100644 --- a/drivers/interconnect/qcom/smd-rpm.c +++ b/drivers/interconnect/qcom/smd-rpm.c @@ -63,11 +63,9 @@ int qcom_icc_rpm_set_bus_rate(const struct rpm_clk_resource *clk, int ctx, u32 r } EXPORT_SYMBOL_GPL(qcom_icc_rpm_set_bus_rate); -static int qcom_icc_rpm_smd_remove(struct platform_device *pdev) +static void qcom_icc_rpm_smd_remove(struct platform_device *pdev) { icc_smd_rpm = NULL; - - return 0; } static int qcom_icc_rpm_smd_probe(struct platform_device *pdev) @@ -87,7 +85,7 @@ static struct platform_driver qcom_interconnect_rpm_smd_driver = { .name = "icc_smd_rpm", }, .probe = qcom_icc_rpm_smd_probe, - .remove = qcom_icc_rpm_smd_remove, + .remove_new = qcom_icc_rpm_smd_remove, }; module_platform_driver(qcom_interconnect_rpm_smd_driver); MODULE_AUTHOR("Georgi Djakov <georgi.djakov@linaro.org>"); diff --git a/drivers/interconnect/qcom/x1e80100.c b/drivers/interconnect/qcom/x1e80100.c new file mode 100644 index 000000000000..d19501d913b3 --- /dev/null +++ b/drivers/interconnect/qcom/x1e80100.c @@ -0,0 +1,2328 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved. 
+ * Copyright (c) 2023, Linaro Limited + * + */ + +#include <linux/device.h> +#include <linux/interconnect.h> +#include <linux/interconnect-provider.h> +#include <linux/module.h> +#include <linux/of_platform.h> +#include <dt-bindings/interconnect/qcom,x1e80100-rpmh.h> + +#include "bcm-voter.h" +#include "icc-common.h" +#include "icc-rpmh.h" +#include "x1e80100.h" + +static struct qcom_icc_node qhm_qspi = { + .name = "qhm_qspi", + .id = X1E80100_MASTER_QSPI_0, + .channels = 1, + .buswidth = 4, + .num_links = 1, + .links = { X1E80100_SLAVE_A1NOC_SNOC }, +}; + +static struct qcom_icc_node qhm_qup1 = { + .name = "qhm_qup1", + .id = X1E80100_MASTER_QUP_1, + .channels = 1, + .buswidth = 4, + .num_links = 1, + .links = { X1E80100_SLAVE_A1NOC_SNOC }, +}; + +static struct qcom_icc_node xm_sdc4 = { + .name = "xm_sdc4", + .id = X1E80100_MASTER_SDCC_4, + .channels = 1, + .buswidth = 8, + .num_links = 1, + .links = { X1E80100_SLAVE_A1NOC_SNOC }, +}; + +static struct qcom_icc_node xm_ufs_mem = { + .name = "xm_ufs_mem", + .id = X1E80100_MASTER_UFS_MEM, + .channels = 1, + .buswidth = 16, + .num_links = 1, + .links = { X1E80100_SLAVE_A1NOC_SNOC }, +}; + +static struct qcom_icc_node qhm_qup0 = { + .name = "qhm_qup0", + .id = X1E80100_MASTER_QUP_0, + .channels = 1, + .buswidth = 4, + .num_links = 1, + .links = { X1E80100_SLAVE_A2NOC_SNOC }, +}; + +static struct qcom_icc_node qhm_qup2 = { + .name = "qhm_qup2", + .id = X1E80100_MASTER_QUP_2, + .channels = 1, + .buswidth = 4, + .num_links = 1, + .links = { X1E80100_SLAVE_A2NOC_SNOC }, +}; + +static struct qcom_icc_node qxm_crypto = { + .name = "qxm_crypto", + .id = X1E80100_MASTER_CRYPTO, + .channels = 1, + .buswidth = 8, + .num_links = 1, + .links = { X1E80100_SLAVE_A2NOC_SNOC }, +}; + +static struct qcom_icc_node qxm_sp = { + .name = "qxm_sp", + .id = X1E80100_MASTER_SP, + .channels = 1, + .buswidth = 8, + .num_links = 1, + .links = { X1E80100_SLAVE_A2NOC_SNOC }, +}; + +static struct qcom_icc_node xm_qdss_etr_0 = { + .name = "xm_qdss_etr_0", + .id = X1E80100_MASTER_QDSS_ETR, + .channels = 1, + .buswidth = 8, + .num_links = 1, + .links = { X1E80100_SLAVE_A2NOC_SNOC }, +}; + +static struct qcom_icc_node xm_qdss_etr_1 = { + .name = "xm_qdss_etr_1", + .id = X1E80100_MASTER_QDSS_ETR_1, + .channels = 1, + .buswidth = 8, + .num_links = 1, + .links = { X1E80100_SLAVE_A2NOC_SNOC }, +}; + +static struct qcom_icc_node xm_sdc2 = { + .name = "xm_sdc2", + .id = X1E80100_MASTER_SDCC_2, + .channels = 1, + .buswidth = 8, + .num_links = 1, + .links = { X1E80100_SLAVE_A2NOC_SNOC }, +}; + +static struct qcom_icc_node ddr_perf_mode_master = { + .name = "ddr_perf_mode_master", + .id = X1E80100_MASTER_DDR_PERF_MODE, + .channels = 1, + .buswidth = 4, + .num_links = 1, + .links = { X1E80100_SLAVE_DDR_PERF_MODE }, +}; + +static struct qcom_icc_node qup0_core_master = { + .name = "qup0_core_master", + .id = X1E80100_MASTER_QUP_CORE_0, + .channels = 1, + .buswidth = 4, + .num_links = 1, + .links = { X1E80100_SLAVE_QUP_CORE_0 }, +}; + +static struct qcom_icc_node qup1_core_master = { + .name = "qup1_core_master", + .id = X1E80100_MASTER_QUP_CORE_1, + .channels = 1, + .buswidth = 4, + .num_links = 1, + .links = { X1E80100_SLAVE_QUP_CORE_1 }, +}; + +static struct qcom_icc_node qup2_core_master = { + .name = "qup2_core_master", + .id = X1E80100_MASTER_QUP_CORE_2, + .channels = 1, + .buswidth = 4, + .num_links = 1, + .links = { X1E80100_SLAVE_QUP_CORE_2 }, +}; + +static struct qcom_icc_node qsm_cfg = { + .name = "qsm_cfg", + .id = X1E80100_MASTER_CNOC_CFG, + .channels = 1, + .buswidth = 4, + 
.num_links = 47, + .links = { X1E80100_SLAVE_AHB2PHY_SOUTH, X1E80100_SLAVE_AHB2PHY_NORTH, + X1E80100_SLAVE_AHB2PHY_2, X1E80100_SLAVE_AV1_ENC_CFG, + X1E80100_SLAVE_CAMERA_CFG, X1E80100_SLAVE_CLK_CTL, + X1E80100_SLAVE_CRYPTO_0_CFG, X1E80100_SLAVE_DISPLAY_CFG, + X1E80100_SLAVE_GFX3D_CFG, X1E80100_SLAVE_IMEM_CFG, + X1E80100_SLAVE_IPC_ROUTER_CFG, X1E80100_SLAVE_PCIE_0_CFG, + X1E80100_SLAVE_PCIE_1_CFG, X1E80100_SLAVE_PCIE_2_CFG, + X1E80100_SLAVE_PCIE_3_CFG, X1E80100_SLAVE_PCIE_4_CFG, + X1E80100_SLAVE_PCIE_5_CFG, X1E80100_SLAVE_PCIE_6A_CFG, + X1E80100_SLAVE_PCIE_6B_CFG, X1E80100_SLAVE_PCIE_RSC_CFG, + X1E80100_SLAVE_PDM, X1E80100_SLAVE_PRNG, + X1E80100_SLAVE_QDSS_CFG, X1E80100_SLAVE_QSPI_0, + X1E80100_SLAVE_QUP_0, X1E80100_SLAVE_QUP_1, + X1E80100_SLAVE_QUP_2, X1E80100_SLAVE_SDCC_2, + X1E80100_SLAVE_SDCC_4, X1E80100_SLAVE_SMMUV3_CFG, + X1E80100_SLAVE_TCSR, X1E80100_SLAVE_TLMM, + X1E80100_SLAVE_UFS_MEM_CFG, X1E80100_SLAVE_USB2, + X1E80100_SLAVE_USB3_0, X1E80100_SLAVE_USB3_1, + X1E80100_SLAVE_USB3_2, X1E80100_SLAVE_USB3_MP, + X1E80100_SLAVE_USB4_0, X1E80100_SLAVE_USB4_1, + X1E80100_SLAVE_USB4_2, X1E80100_SLAVE_VENUS_CFG, + X1E80100_SLAVE_LPASS_QTB_CFG, X1E80100_SLAVE_CNOC_MNOC_CFG, + X1E80100_SLAVE_NSP_QTB_CFG, X1E80100_SLAVE_QDSS_STM, + X1E80100_SLAVE_TCU }, +}; + +static struct qcom_icc_node qnm_gemnoc_cnoc = { + .name = "qnm_gemnoc_cnoc", + .id = X1E80100_MASTER_GEM_NOC_CNOC, + .channels = 1, + .buswidth = 16, + .num_links = 6, + .links = { X1E80100_SLAVE_AOSS, X1E80100_SLAVE_TME_CFG, + X1E80100_SLAVE_APPSS, X1E80100_SLAVE_CNOC_CFG, + X1E80100_SLAVE_BOOT_IMEM, X1E80100_SLAVE_IMEM }, +}; + +static struct qcom_icc_node qnm_gemnoc_pcie = { + .name = "qnm_gemnoc_pcie", + .id = X1E80100_MASTER_GEM_NOC_PCIE_SNOC, + .channels = 1, + .buswidth = 32, + .num_links = 8, + .links = { X1E80100_SLAVE_PCIE_0, X1E80100_SLAVE_PCIE_1, + X1E80100_SLAVE_PCIE_2, X1E80100_SLAVE_PCIE_3, + X1E80100_SLAVE_PCIE_4, X1E80100_SLAVE_PCIE_5, + X1E80100_SLAVE_PCIE_6A, X1E80100_SLAVE_PCIE_6B }, +}; + +static struct qcom_icc_node alm_gpu_tcu = { + .name = "alm_gpu_tcu", + .id = X1E80100_MASTER_GPU_TCU, + .channels = 1, + .buswidth = 8, + .num_links = 2, + .links = { X1E80100_SLAVE_GEM_NOC_CNOC, X1E80100_SLAVE_LLCC }, +}; + +static struct qcom_icc_node alm_pcie_tcu = { + .name = "alm_pcie_tcu", + .id = X1E80100_MASTER_PCIE_TCU, + .channels = 1, + .buswidth = 8, + .num_links = 2, + .links = { X1E80100_SLAVE_GEM_NOC_CNOC, X1E80100_SLAVE_LLCC }, +}; + +static struct qcom_icc_node alm_sys_tcu = { + .name = "alm_sys_tcu", + .id = X1E80100_MASTER_SYS_TCU, + .channels = 1, + .buswidth = 8, + .num_links = 2, + .links = { X1E80100_SLAVE_GEM_NOC_CNOC, X1E80100_SLAVE_LLCC }, +}; + +static struct qcom_icc_node chm_apps = { + .name = "chm_apps", + .id = X1E80100_MASTER_APPSS_PROC, + .channels = 6, + .buswidth = 32, + .num_links = 3, + .links = { X1E80100_SLAVE_GEM_NOC_CNOC, X1E80100_SLAVE_LLCC, + X1E80100_SLAVE_MEM_NOC_PCIE_SNOC }, +}; + +static struct qcom_icc_node qnm_gpu = { + .name = "qnm_gpu", + .id = X1E80100_MASTER_GFX3D, + .channels = 4, + .buswidth = 32, + .num_links = 2, + .links = { X1E80100_SLAVE_GEM_NOC_CNOC, X1E80100_SLAVE_LLCC }, +}; + +static struct qcom_icc_node qnm_lpass = { + .name = "qnm_lpass", + .id = X1E80100_MASTER_LPASS_GEM_NOC, + .channels = 1, + .buswidth = 16, + .num_links = 3, + .links = { X1E80100_SLAVE_GEM_NOC_CNOC, X1E80100_SLAVE_LLCC, + X1E80100_SLAVE_MEM_NOC_PCIE_SNOC }, +}; + +static struct qcom_icc_node qnm_mnoc_hf = { + .name = "qnm_mnoc_hf", + .id = X1E80100_MASTER_MNOC_HF_MEM_NOC, + .channels = 2, + 
.buswidth = 32, + .num_links = 2, + .links = { X1E80100_SLAVE_GEM_NOC_CNOC, X1E80100_SLAVE_LLCC }, +}; + +static struct qcom_icc_node qnm_mnoc_sf = { + .name = "qnm_mnoc_sf", + .id = X1E80100_MASTER_MNOC_SF_MEM_NOC, + .channels = 2, + .buswidth = 32, + .num_links = 2, + .links = { X1E80100_SLAVE_GEM_NOC_CNOC, X1E80100_SLAVE_LLCC }, +}; + +static struct qcom_icc_node qnm_nsp_noc = { + .name = "qnm_nsp_noc", + .id = X1E80100_MASTER_COMPUTE_NOC, + .channels = 2, + .buswidth = 32, + .num_links = 3, + .links = { X1E80100_SLAVE_GEM_NOC_CNOC, X1E80100_SLAVE_LLCC, + X1E80100_SLAVE_MEM_NOC_PCIE_SNOC }, +}; + +static struct qcom_icc_node qnm_pcie = { + .name = "qnm_pcie", + .id = X1E80100_MASTER_ANOC_PCIE_GEM_NOC, + .channels = 1, + .buswidth = 64, + .num_links = 2, + .links = { X1E80100_SLAVE_GEM_NOC_CNOC, X1E80100_SLAVE_LLCC }, +}; + +static struct qcom_icc_node qnm_snoc_sf = { + .name = "qnm_snoc_sf", + .id = X1E80100_MASTER_SNOC_SF_MEM_NOC, + .channels = 1, + .buswidth = 64, + .num_links = 3, + .links = { X1E80100_SLAVE_GEM_NOC_CNOC, X1E80100_SLAVE_LLCC, + X1E80100_SLAVE_MEM_NOC_PCIE_SNOC }, +}; + +static struct qcom_icc_node xm_gic = { + .name = "xm_gic", + .id = X1E80100_MASTER_GIC2, + .channels = 1, + .buswidth = 8, + .num_links = 1, + .links = { X1E80100_SLAVE_LLCC }, +}; + +static struct qcom_icc_node qnm_lpiaon_noc = { + .name = "qnm_lpiaon_noc", + .id = X1E80100_MASTER_LPIAON_NOC, + .channels = 1, + .buswidth = 16, + .num_links = 1, + .links = { X1E80100_SLAVE_LPASS_GEM_NOC }, +}; + +static struct qcom_icc_node qnm_lpass_lpinoc = { + .name = "qnm_lpass_lpinoc", + .id = X1E80100_MASTER_LPASS_LPINOC, + .channels = 1, + .buswidth = 16, + .num_links = 1, + .links = { X1E80100_SLAVE_LPIAON_NOC_LPASS_AG_NOC }, +}; + +static struct qcom_icc_node qxm_lpinoc_dsp_axim = { + .name = "qxm_lpinoc_dsp_axim", + .id = X1E80100_MASTER_LPASS_PROC, + .channels = 1, + .buswidth = 16, + .num_links = 1, + .links = { X1E80100_SLAVE_LPICX_NOC_LPIAON_NOC }, +}; + +static struct qcom_icc_node llcc_mc = { + .name = "llcc_mc", + .id = X1E80100_MASTER_LLCC, + .channels = 8, + .buswidth = 4, + .num_links = 1, + .links = { X1E80100_SLAVE_EBI1 }, +}; + +static struct qcom_icc_node qnm_av1_enc = { + .name = "qnm_av1_enc", + .id = X1E80100_MASTER_AV1_ENC, + .channels = 1, + .buswidth = 32, + .num_links = 1, + .links = { X1E80100_SLAVE_MNOC_SF_MEM_NOC }, +}; + +static struct qcom_icc_node qnm_camnoc_hf = { + .name = "qnm_camnoc_hf", + .id = X1E80100_MASTER_CAMNOC_HF, + .channels = 2, + .buswidth = 32, + .num_links = 1, + .links = { X1E80100_SLAVE_MNOC_HF_MEM_NOC }, +}; + +static struct qcom_icc_node qnm_camnoc_icp = { + .name = "qnm_camnoc_icp", + .id = X1E80100_MASTER_CAMNOC_ICP, + .channels = 1, + .buswidth = 8, + .num_links = 1, + .links = { X1E80100_SLAVE_MNOC_SF_MEM_NOC }, +}; + +static struct qcom_icc_node qnm_camnoc_sf = { + .name = "qnm_camnoc_sf", + .id = X1E80100_MASTER_CAMNOC_SF, + .channels = 2, + .buswidth = 32, + .num_links = 1, + .links = { X1E80100_SLAVE_MNOC_SF_MEM_NOC }, +}; + +static struct qcom_icc_node qnm_eva = { + .name = "qnm_eva", + .id = X1E80100_MASTER_EVA, + .channels = 1, + .buswidth = 32, + .num_links = 1, + .links = { X1E80100_SLAVE_MNOC_SF_MEM_NOC }, +}; + +static struct qcom_icc_node qnm_mdp = { + .name = "qnm_mdp", + .id = X1E80100_MASTER_MDP, + .channels = 2, + .buswidth = 32, + .num_links = 1, + .links = { X1E80100_SLAVE_MNOC_HF_MEM_NOC }, +}; + +static struct qcom_icc_node qnm_video = { + .name = "qnm_video", + .id = X1E80100_MASTER_VIDEO, + .channels = 2, + .buswidth = 32, + .num_links 
= 1, + .links = { X1E80100_SLAVE_MNOC_SF_MEM_NOC }, +}; + +static struct qcom_icc_node qnm_video_cv_cpu = { + .name = "qnm_video_cv_cpu", + .id = X1E80100_MASTER_VIDEO_CV_PROC, + .channels = 1, + .buswidth = 8, + .num_links = 1, + .links = { X1E80100_SLAVE_MNOC_SF_MEM_NOC }, +}; + +static struct qcom_icc_node qnm_video_v_cpu = { + .name = "qnm_video_v_cpu", + .id = X1E80100_MASTER_VIDEO_V_PROC, + .channels = 1, + .buswidth = 8, + .num_links = 1, + .links = { X1E80100_SLAVE_MNOC_SF_MEM_NOC }, +}; + +static struct qcom_icc_node qsm_mnoc_cfg = { + .name = "qsm_mnoc_cfg", + .id = X1E80100_MASTER_CNOC_MNOC_CFG, + .channels = 1, + .buswidth = 4, + .num_links = 1, + .links = { X1E80100_SLAVE_SERVICE_MNOC }, +}; + +static struct qcom_icc_node qxm_nsp = { + .name = "qxm_nsp", + .id = X1E80100_MASTER_CDSP_PROC, + .channels = 2, + .buswidth = 32, + .num_links = 1, + .links = { X1E80100_SLAVE_CDSP_MEM_NOC }, +}; + +static struct qcom_icc_node qnm_pcie_north_gem_noc = { + .name = "qnm_pcie_north_gem_noc", + .id = X1E80100_MASTER_PCIE_NORTH, + .channels = 1, + .buswidth = 64, + .num_links = 1, + .links = { X1E80100_SLAVE_ANOC_PCIE_GEM_NOC }, +}; + +static struct qcom_icc_node qnm_pcie_south_gem_noc = { + .name = "qnm_pcie_south_gem_noc", + .id = X1E80100_MASTER_PCIE_SOUTH, + .channels = 1, + .buswidth = 64, + .num_links = 1, + .links = { X1E80100_SLAVE_ANOC_PCIE_GEM_NOC }, +}; + +static struct qcom_icc_node xm_pcie_3 = { + .name = "xm_pcie_3", + .id = X1E80100_MASTER_PCIE_3, + .channels = 1, + .buswidth = 64, + .num_links = 1, + .links = { X1E80100_SLAVE_PCIE_NORTH }, +}; + +static struct qcom_icc_node xm_pcie_4 = { + .name = "xm_pcie_4", + .id = X1E80100_MASTER_PCIE_4, + .channels = 1, + .buswidth = 8, + .num_links = 1, + .links = { X1E80100_SLAVE_PCIE_NORTH }, +}; + +static struct qcom_icc_node xm_pcie_5 = { + .name = "xm_pcie_5", + .id = X1E80100_MASTER_PCIE_5, + .channels = 1, + .buswidth = 8, + .num_links = 1, + .links = { X1E80100_SLAVE_PCIE_NORTH }, +}; + +static struct qcom_icc_node xm_pcie_0 = { + .name = "xm_pcie_0", + .id = X1E80100_MASTER_PCIE_0, + .channels = 1, + .buswidth = 16, + .num_links = 1, + .links = { X1E80100_SLAVE_PCIE_SOUTH }, +}; + +static struct qcom_icc_node xm_pcie_1 = { + .name = "xm_pcie_1", + .id = X1E80100_MASTER_PCIE_1, + .channels = 1, + .buswidth = 16, + .num_links = 1, + .links = { X1E80100_SLAVE_PCIE_SOUTH }, +}; + +static struct qcom_icc_node xm_pcie_2 = { + .name = "xm_pcie_2", + .id = X1E80100_MASTER_PCIE_2, + .channels = 1, + .buswidth = 16, + .num_links = 1, + .links = { X1E80100_SLAVE_PCIE_SOUTH }, +}; + +static struct qcom_icc_node xm_pcie_6a = { + .name = "xm_pcie_6a", + .id = X1E80100_MASTER_PCIE_6A, + .channels = 1, + .buswidth = 32, + .num_links = 1, + .links = { X1E80100_SLAVE_PCIE_SOUTH }, +}; + +static struct qcom_icc_node xm_pcie_6b = { + .name = "xm_pcie_6b", + .id = X1E80100_MASTER_PCIE_6B, + .channels = 1, + .buswidth = 16, + .num_links = 1, + .links = { X1E80100_SLAVE_PCIE_SOUTH }, +}; + +static struct qcom_icc_node qnm_aggre1_noc = { + .name = "qnm_aggre1_noc", + .id = X1E80100_MASTER_A1NOC_SNOC, + .channels = 1, + .buswidth = 16, + .num_links = 1, + .links = { X1E80100_SLAVE_SNOC_GEM_NOC_SF }, +}; + +static struct qcom_icc_node qnm_aggre2_noc = { + .name = "qnm_aggre2_noc", + .id = X1E80100_MASTER_A2NOC_SNOC, + .channels = 1, + .buswidth = 16, + .num_links = 1, + .links = { X1E80100_SLAVE_SNOC_GEM_NOC_SF }, +}; + +static struct qcom_icc_node qnm_gic = { + .name = "qnm_gic", + .id = X1E80100_MASTER_GIC1, + .channels = 1, + .buswidth = 8, + 
.num_links = 1, + .links = { X1E80100_SLAVE_SNOC_GEM_NOC_SF }, +}; + +static struct qcom_icc_node qnm_usb_anoc = { + .name = "qnm_usb_anoc", + .id = X1E80100_MASTER_USB_NOC_SNOC, + .channels = 1, + .buswidth = 64, + .num_links = 1, + .links = { X1E80100_SLAVE_SNOC_GEM_NOC_SF }, +}; + +static struct qcom_icc_node qnm_aggre_usb_north_snoc = { + .name = "qnm_aggre_usb_north_snoc", + .id = X1E80100_MASTER_AGGRE_USB_NORTH, + .channels = 1, + .buswidth = 64, + .num_links = 1, + .links = { X1E80100_SLAVE_USB_NOC_SNOC }, +}; + +static struct qcom_icc_node qnm_aggre_usb_south_snoc = { + .name = "qnm_aggre_usb_south_snoc", + .id = X1E80100_MASTER_AGGRE_USB_SOUTH, + .channels = 1, + .buswidth = 64, + .num_links = 1, + .links = { X1E80100_SLAVE_USB_NOC_SNOC }, +}; + +static struct qcom_icc_node xm_usb2_0 = { + .name = "xm_usb2_0", + .id = X1E80100_MASTER_USB2, + .channels = 1, + .buswidth = 8, + .num_links = 1, + .links = { X1E80100_SLAVE_AGGRE_USB_NORTH }, +}; + +static struct qcom_icc_node xm_usb3_mp = { + .name = "xm_usb3_mp", + .id = X1E80100_MASTER_USB3_MP, + .channels = 1, + .buswidth = 16, + .num_links = 1, + .links = { X1E80100_SLAVE_AGGRE_USB_NORTH }, +}; + +static struct qcom_icc_node xm_usb3_0 = { + .name = "xm_usb3_0", + .id = X1E80100_MASTER_USB3_0, + .channels = 1, + .buswidth = 8, + .num_links = 1, + .links = { X1E80100_SLAVE_AGGRE_USB_SOUTH }, +}; + +static struct qcom_icc_node xm_usb3_1 = { + .name = "xm_usb3_1", + .id = X1E80100_MASTER_USB3_1, + .channels = 1, + .buswidth = 8, + .num_links = 1, + .links = { X1E80100_SLAVE_AGGRE_USB_SOUTH }, +}; + +static struct qcom_icc_node xm_usb3_2 = { + .name = "xm_usb3_2", + .id = X1E80100_MASTER_USB3_2, + .channels = 1, + .buswidth = 8, + .num_links = 1, + .links = { X1E80100_SLAVE_AGGRE_USB_SOUTH }, +}; + +static struct qcom_icc_node xm_usb4_0 = { + .name = "xm_usb4_0", + .id = X1E80100_MASTER_USB4_0, + .channels = 1, + .buswidth = 16, + .num_links = 1, + .links = { X1E80100_SLAVE_AGGRE_USB_SOUTH }, +}; + +static struct qcom_icc_node xm_usb4_1 = { + .name = "xm_usb4_1", + .id = X1E80100_MASTER_USB4_1, + .channels = 1, + .buswidth = 16, + .num_links = 1, + .links = { X1E80100_SLAVE_AGGRE_USB_SOUTH }, +}; + +static struct qcom_icc_node xm_usb4_2 = { + .name = "xm_usb4_2", + .id = X1E80100_MASTER_USB4_2, + .channels = 1, + .buswidth = 16, + .num_links = 1, + .links = { X1E80100_SLAVE_AGGRE_USB_SOUTH }, +}; + +static struct qcom_icc_node qnm_mnoc_hf_disp = { + .name = "qnm_mnoc_hf_disp", + .id = X1E80100_MASTER_MNOC_HF_MEM_NOC_DISP, + .channels = 2, + .buswidth = 32, + .num_links = 1, + .links = { X1E80100_SLAVE_LLCC_DISP }, +}; + +static struct qcom_icc_node qnm_pcie_disp = { + .name = "qnm_pcie_disp", + .id = X1E80100_MASTER_ANOC_PCIE_GEM_NOC_DISP, + .channels = 1, + .buswidth = 64, + .num_links = 1, + .links = { X1E80100_SLAVE_LLCC_DISP }, +}; + +static struct qcom_icc_node llcc_mc_disp = { + .name = "llcc_mc_disp", + .id = X1E80100_MASTER_LLCC_DISP, + .channels = 8, + .buswidth = 4, + .num_links = 1, + .links = { X1E80100_SLAVE_EBI1_DISP }, +}; + +static struct qcom_icc_node qnm_mdp_disp = { + .name = "qnm_mdp_disp", + .id = X1E80100_MASTER_MDP_DISP, + .channels = 2, + .buswidth = 32, + .num_links = 1, + .links = { X1E80100_SLAVE_MNOC_HF_MEM_NOC_DISP }, +}; + +static struct qcom_icc_node qnm_pcie_pcie = { + .name = "qnm_pcie_pcie", + .id = X1E80100_MASTER_ANOC_PCIE_GEM_NOC_PCIE, + .channels = 1, + .buswidth = 64, + .num_links = 1, + .links = { X1E80100_SLAVE_LLCC_PCIE }, +}; + +static struct qcom_icc_node llcc_mc_pcie = { + .name = 
"llcc_mc_pcie", + .id = X1E80100_MASTER_LLCC_PCIE, + .channels = 8, + .buswidth = 4, + .num_links = 1, + .links = { X1E80100_SLAVE_EBI1_PCIE }, +}; + +static struct qcom_icc_node qnm_pcie_north_gem_noc_pcie = { + .name = "qnm_pcie_north_gem_noc_pcie", + .id = X1E80100_MASTER_PCIE_NORTH_PCIE, + .channels = 1, + .buswidth = 64, + .num_links = 1, + .links = { X1E80100_SLAVE_ANOC_PCIE_GEM_NOC_PCIE }, +}; + +static struct qcom_icc_node qnm_pcie_south_gem_noc_pcie = { + .name = "qnm_pcie_south_gem_noc_pcie", + .id = X1E80100_MASTER_PCIE_SOUTH_PCIE, + .channels = 1, + .buswidth = 64, + .num_links = 1, + .links = { X1E80100_SLAVE_ANOC_PCIE_GEM_NOC_PCIE }, +}; + +static struct qcom_icc_node xm_pcie_3_pcie = { + .name = "xm_pcie_3_pcie", + .id = X1E80100_MASTER_PCIE_3_PCIE, + .channels = 1, + .buswidth = 64, + .num_links = 1, + .links = { X1E80100_SLAVE_PCIE_NORTH_PCIE }, +}; + +static struct qcom_icc_node xm_pcie_4_pcie = { + .name = "xm_pcie_4_pcie", + .id = X1E80100_MASTER_PCIE_4_PCIE, + .channels = 1, + .buswidth = 8, + .num_links = 1, + .links = { X1E80100_SLAVE_PCIE_NORTH_PCIE }, +}; + +static struct qcom_icc_node xm_pcie_5_pcie = { + .name = "xm_pcie_5_pcie", + .id = X1E80100_MASTER_PCIE_5_PCIE, + .channels = 1, + .buswidth = 8, + .num_links = 1, + .links = { X1E80100_SLAVE_PCIE_NORTH_PCIE }, +}; + +static struct qcom_icc_node xm_pcie_0_pcie = { + .name = "xm_pcie_0_pcie", + .id = X1E80100_MASTER_PCIE_0_PCIE, + .channels = 1, + .buswidth = 16, + .num_links = 1, + .links = { X1E80100_SLAVE_PCIE_SOUTH_PCIE }, +}; + +static struct qcom_icc_node xm_pcie_1_pcie = { + .name = "xm_pcie_1_pcie", + .id = X1E80100_MASTER_PCIE_1_PCIE, + .channels = 1, + .buswidth = 16, + .num_links = 1, + .links = { X1E80100_SLAVE_PCIE_SOUTH_PCIE }, +}; + +static struct qcom_icc_node xm_pcie_2_pcie = { + .name = "xm_pcie_2_pcie", + .id = X1E80100_MASTER_PCIE_2_PCIE, + .channels = 1, + .buswidth = 16, + .num_links = 1, + .links = { X1E80100_SLAVE_PCIE_SOUTH_PCIE }, +}; + +static struct qcom_icc_node xm_pcie_6a_pcie = { + .name = "xm_pcie_6a_pcie", + .id = X1E80100_MASTER_PCIE_6A_PCIE, + .channels = 1, + .buswidth = 32, + .num_links = 1, + .links = { X1E80100_SLAVE_PCIE_SOUTH_PCIE }, +}; + +static struct qcom_icc_node xm_pcie_6b_pcie = { + .name = "xm_pcie_6b_pcie", + .id = X1E80100_MASTER_PCIE_6B_PCIE, + .channels = 1, + .buswidth = 16, + .num_links = 1, + .links = { X1E80100_SLAVE_PCIE_SOUTH_PCIE }, +}; + +static struct qcom_icc_node qns_a1noc_snoc = { + .name = "qns_a1noc_snoc", + .id = X1E80100_SLAVE_A1NOC_SNOC, + .channels = 1, + .buswidth = 16, + .num_links = 1, + .links = { X1E80100_MASTER_A1NOC_SNOC }, +}; + +static struct qcom_icc_node qns_a2noc_snoc = { + .name = "qns_a2noc_snoc", + .id = X1E80100_SLAVE_A2NOC_SNOC, + .channels = 1, + .buswidth = 16, + .num_links = 1, + .links = { X1E80100_MASTER_A2NOC_SNOC }, +}; + +static struct qcom_icc_node ddr_perf_mode_slave = { + .name = "ddr_perf_mode_slave", + .id = X1E80100_SLAVE_DDR_PERF_MODE, + .channels = 1, + .buswidth = 4, + .num_links = 0, +}; + +static struct qcom_icc_node qup0_core_slave = { + .name = "qup0_core_slave", + .id = X1E80100_SLAVE_QUP_CORE_0, + .channels = 1, + .buswidth = 4, + .num_links = 0, +}; + +static struct qcom_icc_node qup1_core_slave = { + .name = "qup1_core_slave", + .id = X1E80100_SLAVE_QUP_CORE_1, + .channels = 1, + .buswidth = 4, + .num_links = 0, +}; + +static struct qcom_icc_node qup2_core_slave = { + .name = "qup2_core_slave", + .id = X1E80100_SLAVE_QUP_CORE_2, + .channels = 1, + .buswidth = 4, + .num_links = 0, +}; + +static struct 
qcom_icc_node qhs_ahb2phy0 = { + .name = "qhs_ahb2phy0", + .id = X1E80100_SLAVE_AHB2PHY_SOUTH, + .channels = 1, + .buswidth = 4, + .num_links = 0, +}; + +static struct qcom_icc_node qhs_ahb2phy1 = { + .name = "qhs_ahb2phy1", + .id = X1E80100_SLAVE_AHB2PHY_NORTH, + .channels = 1, + .buswidth = 4, + .num_links = 0, +}; + +static struct qcom_icc_node qhs_ahb2phy2 = { + .name = "qhs_ahb2phy2", + .id = X1E80100_SLAVE_AHB2PHY_2, + .channels = 1, + .buswidth = 4, + .num_links = 0, +}; + +static struct qcom_icc_node qhs_av1_enc_cfg = { + .name = "qhs_av1_enc_cfg", + .id = X1E80100_SLAVE_AV1_ENC_CFG, + .channels = 1, + .buswidth = 4, + .num_links = 0, +}; + +static struct qcom_icc_node qhs_camera_cfg = { + .name = "qhs_camera_cfg", + .id = X1E80100_SLAVE_CAMERA_CFG, + .channels = 1, + .buswidth = 4, + .num_links = 0, +}; + +static struct qcom_icc_node qhs_clk_ctl = { + .name = "qhs_clk_ctl", + .id = X1E80100_SLAVE_CLK_CTL, + .channels = 1, + .buswidth = 4, + .num_links = 0, +}; + +static struct qcom_icc_node qhs_crypto0_cfg = { + .name = "qhs_crypto0_cfg", + .id = X1E80100_SLAVE_CRYPTO_0_CFG, + .channels = 1, + .buswidth = 4, + .num_links = 0, +}; + +static struct qcom_icc_node qhs_display_cfg = { + .name = "qhs_display_cfg", + .id = X1E80100_SLAVE_DISPLAY_CFG, + .channels = 1, + .buswidth = 4, + .num_links = 0, +}; + +static struct qcom_icc_node qhs_gpuss_cfg = { + .name = "qhs_gpuss_cfg", + .id = X1E80100_SLAVE_GFX3D_CFG, + .channels = 1, + .buswidth = 8, + .num_links = 0, +}; + +static struct qcom_icc_node qhs_imem_cfg = { + .name = "qhs_imem_cfg", + .id = X1E80100_SLAVE_IMEM_CFG, + .channels = 1, + .buswidth = 4, + .num_links = 0, +}; + +static struct qcom_icc_node qhs_ipc_router = { + .name = "qhs_ipc_router", + .id = X1E80100_SLAVE_IPC_ROUTER_CFG, + .channels = 1, + .buswidth = 4, + .num_links = 0, +}; + +static struct qcom_icc_node qhs_pcie0_cfg = { + .name = "qhs_pcie0_cfg", + .id = X1E80100_SLAVE_PCIE_0_CFG, + .channels = 1, + .buswidth = 4, + .num_links = 0, +}; + +static struct qcom_icc_node qhs_pcie1_cfg = { + .name = "qhs_pcie1_cfg", + .id = X1E80100_SLAVE_PCIE_1_CFG, + .channels = 1, + .buswidth = 4, + .num_links = 0, +}; + +static struct qcom_icc_node qhs_pcie2_cfg = { + .name = "qhs_pcie2_cfg", + .id = X1E80100_SLAVE_PCIE_2_CFG, + .channels = 1, + .buswidth = 4, + .num_links = 0, +}; + +static struct qcom_icc_node qhs_pcie3_cfg = { + .name = "qhs_pcie3_cfg", + .id = X1E80100_SLAVE_PCIE_3_CFG, + .channels = 1, + .buswidth = 4, + .num_links = 0, +}; + +static struct qcom_icc_node qhs_pcie4_cfg = { + .name = "qhs_pcie4_cfg", + .id = X1E80100_SLAVE_PCIE_4_CFG, + .channels = 1, + .buswidth = 4, + .num_links = 0, +}; + +static struct qcom_icc_node qhs_pcie5_cfg = { + .name = "qhs_pcie5_cfg", + .id = X1E80100_SLAVE_PCIE_5_CFG, + .channels = 1, + .buswidth = 4, + .num_links = 0, +}; + +static struct qcom_icc_node qhs_pcie6a_cfg = { + .name = "qhs_pcie6a_cfg", + .id = X1E80100_SLAVE_PCIE_6A_CFG, + .channels = 1, + .buswidth = 4, + .num_links = 0, +}; + +static struct qcom_icc_node qhs_pcie6b_cfg = { + .name = "qhs_pcie6b_cfg", + .id = X1E80100_SLAVE_PCIE_6B_CFG, + .channels = 1, + .buswidth = 4, + .num_links = 0, +}; + +static struct qcom_icc_node qhs_pcie_rsc_cfg = { + .name = "qhs_pcie_rsc_cfg", + .id = X1E80100_SLAVE_PCIE_RSC_CFG, + .channels = 1, + .buswidth = 4, + .num_links = 0, +}; + +static struct qcom_icc_node qhs_pdm = { + .name = "qhs_pdm", + .id = X1E80100_SLAVE_PDM, + .channels = 1, + .buswidth = 4, + .num_links = 0, +}; + +static struct qcom_icc_node qhs_prng = { + .name = 
"qhs_prng", + .id = X1E80100_SLAVE_PRNG, + .channels = 1, + .buswidth = 4, + .num_links = 0, +}; + +static struct qcom_icc_node qhs_qdss_cfg = { + .name = "qhs_qdss_cfg", + .id = X1E80100_SLAVE_QDSS_CFG, + .channels = 1, + .buswidth = 4, + .num_links = 0, +}; + +static struct qcom_icc_node qhs_qspi = { + .name = "qhs_qspi", + .id = X1E80100_SLAVE_QSPI_0, + .channels = 1, + .buswidth = 4, + .num_links = 0, +}; + +static struct qcom_icc_node qhs_qup0 = { + .name = "qhs_qup0", + .id = X1E80100_SLAVE_QUP_0, + .channels = 1, + .buswidth = 4, + .num_links = 0, +}; + +static struct qcom_icc_node qhs_qup1 = { + .name = "qhs_qup1", + .id = X1E80100_SLAVE_QUP_1, + .channels = 1, + .buswidth = 4, + .num_links = 0, +}; + +static struct qcom_icc_node qhs_qup2 = { + .name = "qhs_qup2", + .id = X1E80100_SLAVE_QUP_2, + .channels = 1, + .buswidth = 4, + .num_links = 0, +}; + +static struct qcom_icc_node qhs_sdc2 = { + .name = "qhs_sdc2", + .id = X1E80100_SLAVE_SDCC_2, + .channels = 1, + .buswidth = 4, + .num_links = 0, +}; + +static struct qcom_icc_node qhs_sdc4 = { + .name = "qhs_sdc4", + .id = X1E80100_SLAVE_SDCC_4, + .channels = 1, + .buswidth = 4, + .num_links = 0, +}; + +static struct qcom_icc_node qhs_smmuv3_cfg = { + .name = "qhs_smmuv3_cfg", + .id = X1E80100_SLAVE_SMMUV3_CFG, + .channels = 1, + .buswidth = 8, + .num_links = 0, +}; + +static struct qcom_icc_node qhs_tcsr = { + .name = "qhs_tcsr", + .id = X1E80100_SLAVE_TCSR, + .channels = 1, + .buswidth = 4, + .num_links = 0, +}; + +static struct qcom_icc_node qhs_tlmm = { + .name = "qhs_tlmm", + .id = X1E80100_SLAVE_TLMM, + .channels = 1, + .buswidth = 4, + .num_links = 0, +}; + +static struct qcom_icc_node qhs_ufs_mem_cfg = { + .name = "qhs_ufs_mem_cfg", + .id = X1E80100_SLAVE_UFS_MEM_CFG, + .channels = 1, + .buswidth = 4, + .num_links = 0, +}; + +static struct qcom_icc_node qhs_usb2_0_cfg = { + .name = "qhs_usb2_0_cfg", + .id = X1E80100_SLAVE_USB2, + .channels = 1, + .buswidth = 4, + .num_links = 0, +}; + +static struct qcom_icc_node qhs_usb3_0_cfg = { + .name = "qhs_usb3_0_cfg", + .id = X1E80100_SLAVE_USB3_0, + .channels = 1, + .buswidth = 4, + .num_links = 0, +}; + +static struct qcom_icc_node qhs_usb3_1_cfg = { + .name = "qhs_usb3_1_cfg", + .id = X1E80100_SLAVE_USB3_1, + .channels = 1, + .buswidth = 4, + .num_links = 0, +}; + +static struct qcom_icc_node qhs_usb3_2_cfg = { + .name = "qhs_usb3_2_cfg", + .id = X1E80100_SLAVE_USB3_2, + .channels = 1, + .buswidth = 4, + .num_links = 0, +}; + +static struct qcom_icc_node qhs_usb3_mp_cfg = { + .name = "qhs_usb3_mp_cfg", + .id = X1E80100_SLAVE_USB3_MP, + .channels = 1, + .buswidth = 4, + .num_links = 0, +}; + +static struct qcom_icc_node qhs_usb4_0_cfg = { + .name = "qhs_usb4_0_cfg", + .id = X1E80100_SLAVE_USB4_0, + .channels = 1, + .buswidth = 4, + .num_links = 0, +}; + +static struct qcom_icc_node qhs_usb4_1_cfg = { + .name = "qhs_usb4_1_cfg", + .id = X1E80100_SLAVE_USB4_1, + .channels = 1, + .buswidth = 4, + .num_links = 0, +}; + +static struct qcom_icc_node qhs_usb4_2_cfg = { + .name = "qhs_usb4_2_cfg", + .id = X1E80100_SLAVE_USB4_2, + .channels = 1, + .buswidth = 4, + .num_links = 0, +}; + +static struct qcom_icc_node qhs_venus_cfg = { + .name = "qhs_venus_cfg", + .id = X1E80100_SLAVE_VENUS_CFG, + .channels = 1, + .buswidth = 4, + .num_links = 0, +}; + +static struct qcom_icc_node qss_lpass_qtb_cfg = { + .name = "qss_lpass_qtb_cfg", + .id = X1E80100_SLAVE_LPASS_QTB_CFG, + .channels = 1, + .buswidth = 4, + .num_links = 0, +}; + +static struct qcom_icc_node qss_mnoc_cfg = { + .name = 
"qss_mnoc_cfg", + .id = X1E80100_SLAVE_CNOC_MNOC_CFG, + .channels = 1, + .buswidth = 4, + .num_links = 1, + .links = { X1E80100_MASTER_CNOC_MNOC_CFG }, +}; + +static struct qcom_icc_node qss_nsp_qtb_cfg = { + .name = "qss_nsp_qtb_cfg", + .id = X1E80100_SLAVE_NSP_QTB_CFG, + .channels = 1, + .buswidth = 4, + .num_links = 0, +}; + +static struct qcom_icc_node xs_qdss_stm = { + .name = "xs_qdss_stm", + .id = X1E80100_SLAVE_QDSS_STM, + .channels = 1, + .buswidth = 4, + .num_links = 0, +}; + +static struct qcom_icc_node xs_sys_tcu_cfg = { + .name = "xs_sys_tcu_cfg", + .id = X1E80100_SLAVE_TCU, + .channels = 1, + .buswidth = 8, + .num_links = 0, +}; + +static struct qcom_icc_node qhs_aoss = { + .name = "qhs_aoss", + .id = X1E80100_SLAVE_AOSS, + .channels = 1, + .buswidth = 4, + .num_links = 0, +}; + +static struct qcom_icc_node qhs_tme_cfg = { + .name = "qhs_tme_cfg", + .id = X1E80100_SLAVE_TME_CFG, + .channels = 1, + .buswidth = 4, + .num_links = 0, +}; + +static struct qcom_icc_node qns_apss = { + .name = "qns_apss", + .id = X1E80100_SLAVE_APPSS, + .channels = 1, + .buswidth = 8, + .num_links = 0, +}; + +static struct qcom_icc_node qss_cfg = { + .name = "qss_cfg", + .id = X1E80100_SLAVE_CNOC_CFG, + .channels = 1, + .buswidth = 4, + .num_links = 1, + .links = { X1E80100_MASTER_CNOC_CFG }, +}; + +static struct qcom_icc_node qxs_boot_imem = { + .name = "qxs_boot_imem", + .id = X1E80100_SLAVE_BOOT_IMEM, + .channels = 1, + .buswidth = 16, + .num_links = 0, +}; + +static struct qcom_icc_node qxs_imem = { + .name = "qxs_imem", + .id = X1E80100_SLAVE_IMEM, + .channels = 1, + .buswidth = 8, + .num_links = 0, +}; + +static struct qcom_icc_node xs_pcie_0 = { + .name = "xs_pcie_0", + .id = X1E80100_SLAVE_PCIE_0, + .channels = 1, + .buswidth = 16, + .num_links = 0, +}; + +static struct qcom_icc_node xs_pcie_1 = { + .name = "xs_pcie_1", + .id = X1E80100_SLAVE_PCIE_1, + .channels = 1, + .buswidth = 16, + .num_links = 0, +}; + +static struct qcom_icc_node xs_pcie_2 = { + .name = "xs_pcie_2", + .id = X1E80100_SLAVE_PCIE_2, + .channels = 1, + .buswidth = 16, + .num_links = 0, +}; + +static struct qcom_icc_node xs_pcie_3 = { + .name = "xs_pcie_3", + .id = X1E80100_SLAVE_PCIE_3, + .channels = 1, + .buswidth = 64, + .num_links = 0, +}; + +static struct qcom_icc_node xs_pcie_4 = { + .name = "xs_pcie_4", + .id = X1E80100_SLAVE_PCIE_4, + .channels = 1, + .buswidth = 8, + .num_links = 0, +}; + +static struct qcom_icc_node xs_pcie_5 = { + .name = "xs_pcie_5", + .id = X1E80100_SLAVE_PCIE_5, + .channels = 1, + .buswidth = 8, + .num_links = 0, +}; + +static struct qcom_icc_node xs_pcie_6a = { + .name = "xs_pcie_6a", + .id = X1E80100_SLAVE_PCIE_6A, + .channels = 1, + .buswidth = 32, + .num_links = 0, +}; + +static struct qcom_icc_node xs_pcie_6b = { + .name = "xs_pcie_6b", + .id = X1E80100_SLAVE_PCIE_6B, + .channels = 1, + .buswidth = 16, + .num_links = 0, +}; + +static struct qcom_icc_node qns_gem_noc_cnoc = { + .name = "qns_gem_noc_cnoc", + .id = X1E80100_SLAVE_GEM_NOC_CNOC, + .channels = 1, + .buswidth = 16, + .num_links = 1, + .links = { X1E80100_MASTER_GEM_NOC_CNOC }, +}; + +static struct qcom_icc_node qns_llcc = { + .name = "qns_llcc", + .id = X1E80100_SLAVE_LLCC, + .channels = 8, + .buswidth = 16, + .num_links = 1, + .links = { X1E80100_MASTER_LLCC }, +}; + +static struct qcom_icc_node qns_pcie = { + .name = "qns_pcie", + .id = X1E80100_SLAVE_MEM_NOC_PCIE_SNOC, + .channels = 1, + .buswidth = 32, + .num_links = 1, + .links = { X1E80100_MASTER_GEM_NOC_PCIE_SNOC }, +}; + +static struct qcom_icc_node 
qns_lpass_ag_noc_gemnoc = { + .name = "qns_lpass_ag_noc_gemnoc", + .id = X1E80100_SLAVE_LPASS_GEM_NOC, + .channels = 1, + .buswidth = 16, + .num_links = 1, + .links = { X1E80100_MASTER_LPASS_GEM_NOC }, +}; + +static struct qcom_icc_node qns_lpass_aggnoc = { + .name = "qns_lpass_aggnoc", + .id = X1E80100_SLAVE_LPIAON_NOC_LPASS_AG_NOC, + .channels = 1, + .buswidth = 16, + .num_links = 1, + .links = { X1E80100_MASTER_LPIAON_NOC }, +}; + +static struct qcom_icc_node qns_lpi_aon_noc = { + .name = "qns_lpi_aon_noc", + .id = X1E80100_SLAVE_LPICX_NOC_LPIAON_NOC, + .channels = 1, + .buswidth = 16, + .num_links = 1, + .links = { X1E80100_MASTER_LPASS_LPINOC }, +}; + +static struct qcom_icc_node ebi = { + .name = "ebi", + .id = X1E80100_SLAVE_EBI1, + .channels = 8, + .buswidth = 4, + .num_links = 0, +}; + +static struct qcom_icc_node qns_mem_noc_hf = { + .name = "qns_mem_noc_hf", + .id = X1E80100_SLAVE_MNOC_HF_MEM_NOC, + .channels = 2, + .buswidth = 32, + .num_links = 1, + .links = { X1E80100_MASTER_MNOC_HF_MEM_NOC }, +}; + +static struct qcom_icc_node qns_mem_noc_sf = { + .name = "qns_mem_noc_sf", + .id = X1E80100_SLAVE_MNOC_SF_MEM_NOC, + .channels = 2, + .buswidth = 32, + .num_links = 1, + .links = { X1E80100_MASTER_MNOC_SF_MEM_NOC }, +}; + +static struct qcom_icc_node srvc_mnoc = { + .name = "srvc_mnoc", + .id = X1E80100_SLAVE_SERVICE_MNOC, + .channels = 1, + .buswidth = 4, + .num_links = 0, +}; + +static struct qcom_icc_node qns_nsp_gemnoc = { + .name = "qns_nsp_gemnoc", + .id = X1E80100_SLAVE_CDSP_MEM_NOC, + .channels = 2, + .buswidth = 32, + .num_links = 1, + .links = { X1E80100_MASTER_COMPUTE_NOC }, +}; + +static struct qcom_icc_node qns_pcie_mem_noc = { + .name = "qns_pcie_mem_noc", + .id = X1E80100_SLAVE_ANOC_PCIE_GEM_NOC, + .channels = 1, + .buswidth = 64, + .num_links = 1, + .links = { X1E80100_MASTER_ANOC_PCIE_GEM_NOC }, +}; + +static struct qcom_icc_node qns_pcie_north_gem_noc = { + .name = "qns_pcie_north_gem_noc", + .id = X1E80100_SLAVE_PCIE_NORTH, + .channels = 1, + .buswidth = 64, + .num_links = 1, + .links = { X1E80100_MASTER_PCIE_NORTH }, +}; + +static struct qcom_icc_node qns_pcie_south_gem_noc = { + .name = "qns_pcie_south_gem_noc", + .id = X1E80100_SLAVE_PCIE_SOUTH, + .channels = 1, + .buswidth = 64, + .num_links = 1, + .links = { X1E80100_MASTER_PCIE_SOUTH }, +}; + +static struct qcom_icc_node qns_gemnoc_sf = { + .name = "qns_gemnoc_sf", + .id = X1E80100_SLAVE_SNOC_GEM_NOC_SF, + .channels = 1, + .buswidth = 64, + .num_links = 1, + .links = { X1E80100_MASTER_SNOC_SF_MEM_NOC }, +}; + +static struct qcom_icc_node qns_aggre_usb_snoc = { + .name = "qns_aggre_usb_snoc", + .id = X1E80100_SLAVE_USB_NOC_SNOC, + .channels = 1, + .buswidth = 64, + .num_links = 1, + .links = { X1E80100_MASTER_USB_NOC_SNOC }, +}; + +static struct qcom_icc_node qns_aggre_usb_north_snoc = { + .name = "qns_aggre_usb_north_snoc", + .id = X1E80100_SLAVE_AGGRE_USB_NORTH, + .channels = 1, + .buswidth = 64, + .num_links = 1, + .links = { X1E80100_MASTER_AGGRE_USB_NORTH }, +}; + +static struct qcom_icc_node qns_aggre_usb_south_snoc = { + .name = "qns_aggre_usb_south_snoc", + .id = X1E80100_SLAVE_AGGRE_USB_SOUTH, + .channels = 1, + .buswidth = 64, + .num_links = 1, + .links = { X1E80100_MASTER_AGGRE_USB_SOUTH }, +}; + +static struct qcom_icc_node qns_llcc_disp = { + .name = "qns_llcc_disp", + .id = X1E80100_SLAVE_LLCC_DISP, + .channels = 8, + .buswidth = 16, + .num_links = 1, + .links = { X1E80100_MASTER_LLCC_DISP }, +}; + +static struct qcom_icc_node ebi_disp = { + .name = "ebi_disp", + .id = 
X1E80100_SLAVE_EBI1_DISP, + .channels = 8, + .buswidth = 4, + .num_links = 0, +}; + +static struct qcom_icc_node qns_mem_noc_hf_disp = { + .name = "qns_mem_noc_hf_disp", + .id = X1E80100_SLAVE_MNOC_HF_MEM_NOC_DISP, + .channels = 2, + .buswidth = 32, + .num_links = 1, + .links = { X1E80100_MASTER_MNOC_HF_MEM_NOC_DISP }, +}; + +static struct qcom_icc_node qns_llcc_pcie = { + .name = "qns_llcc_pcie", + .id = X1E80100_SLAVE_LLCC_PCIE, + .channels = 8, + .buswidth = 16, + .num_links = 1, + .links = { X1E80100_MASTER_LLCC_PCIE }, +}; + +static struct qcom_icc_node ebi_pcie = { + .name = "ebi_pcie", + .id = X1E80100_SLAVE_EBI1_PCIE, + .channels = 8, + .buswidth = 4, + .num_links = 0, +}; + +static struct qcom_icc_node qns_pcie_mem_noc_pcie = { + .name = "qns_pcie_mem_noc_pcie", + .id = X1E80100_SLAVE_ANOC_PCIE_GEM_NOC_PCIE, + .channels = 1, + .buswidth = 64, + .num_links = 1, + .links = { X1E80100_MASTER_ANOC_PCIE_GEM_NOC_PCIE }, +}; + +static struct qcom_icc_node qns_pcie_north_gem_noc_pcie = { + .name = "qns_pcie_north_gem_noc_pcie", + .id = X1E80100_SLAVE_PCIE_NORTH_PCIE, + .channels = 1, + .buswidth = 64, + .num_links = 1, + .links = { X1E80100_MASTER_PCIE_NORTH_PCIE }, +}; + +static struct qcom_icc_node qns_pcie_south_gem_noc_pcie = { + .name = "qns_pcie_south_gem_noc_pcie", + .id = X1E80100_SLAVE_PCIE_SOUTH_PCIE, + .channels = 1, + .buswidth = 64, + .num_links = 1, + .links = { X1E80100_MASTER_PCIE_SOUTH_PCIE }, +}; + +static struct qcom_icc_bcm bcm_acv = { + .name = "ACV", + .num_nodes = 1, + .nodes = { &ebi }, +}; + +static struct qcom_icc_bcm bcm_acv_perf = { + .name = "ACV_PERF", + .num_nodes = 1, + .nodes = { &ddr_perf_mode_slave }, +}; + +static struct qcom_icc_bcm bcm_ce0 = { + .name = "CE0", + .num_nodes = 1, + .nodes = { &qxm_crypto }, +}; + +static struct qcom_icc_bcm bcm_cn0 = { + .name = "CN0", + .keepalive = true, + .num_nodes = 63, + .nodes = { &qsm_cfg, &qhs_ahb2phy0, + &qhs_ahb2phy1, &qhs_ahb2phy2, + &qhs_av1_enc_cfg, &qhs_camera_cfg, + &qhs_clk_ctl, &qhs_crypto0_cfg, + &qhs_gpuss_cfg, &qhs_imem_cfg, + &qhs_ipc_router, &qhs_pcie0_cfg, + &qhs_pcie1_cfg, &qhs_pcie2_cfg, + &qhs_pcie3_cfg, &qhs_pcie4_cfg, + &qhs_pcie5_cfg, &qhs_pcie6a_cfg, + &qhs_pcie6b_cfg, &qhs_pcie_rsc_cfg, + &qhs_pdm, &qhs_prng, + &qhs_qdss_cfg, &qhs_qspi, + &qhs_qup0, &qhs_qup1, + &qhs_qup2, &qhs_sdc2, + &qhs_sdc4, &qhs_smmuv3_cfg, + &qhs_tcsr, &qhs_tlmm, + &qhs_ufs_mem_cfg, &qhs_usb2_0_cfg, + &qhs_usb3_0_cfg, &qhs_usb3_1_cfg, + &qhs_usb3_2_cfg, &qhs_usb3_mp_cfg, + &qhs_usb4_0_cfg, &qhs_usb4_1_cfg, + &qhs_usb4_2_cfg, &qhs_venus_cfg, + &qss_lpass_qtb_cfg, &qss_mnoc_cfg, + &qss_nsp_qtb_cfg, &xs_qdss_stm, + &xs_sys_tcu_cfg, &qnm_gemnoc_cnoc, + &qnm_gemnoc_pcie, &qhs_aoss, + &qhs_tme_cfg, &qns_apss, + &qss_cfg, &qxs_boot_imem, + &qxs_imem, &xs_pcie_0, + &xs_pcie_1, &xs_pcie_2, + &xs_pcie_3, &xs_pcie_4, + &xs_pcie_5, &xs_pcie_6a, + &xs_pcie_6b }, +}; + +static struct qcom_icc_bcm bcm_cn1 = { + .name = "CN1", + .num_nodes = 1, + .nodes = { &qhs_display_cfg }, +}; + +static struct qcom_icc_bcm bcm_co0 = { + .name = "CO0", + .num_nodes = 2, + .nodes = { &qxm_nsp, &qns_nsp_gemnoc }, +}; + +static struct qcom_icc_bcm bcm_lp0 = { + .name = "LP0", + .num_nodes = 2, + .nodes = { &qnm_lpass_lpinoc, &qns_lpass_aggnoc }, +}; + +static struct qcom_icc_bcm bcm_mc0 = { + .name = "MC0", + .keepalive = true, + .num_nodes = 1, + .nodes = { &ebi }, +}; + +static struct qcom_icc_bcm bcm_mm0 = { + .name = "MM0", + .num_nodes = 1, + .nodes = { &qns_mem_noc_hf }, +}; + +static struct qcom_icc_bcm bcm_mm1 = { + .name = "MM1", + 
.num_nodes = 10, + .nodes = { &qnm_av1_enc, &qnm_camnoc_hf, + &qnm_camnoc_icp, &qnm_camnoc_sf, + &qnm_eva, &qnm_mdp, + &qnm_video, &qnm_video_cv_cpu, + &qnm_video_v_cpu, &qns_mem_noc_sf }, +}; + +static struct qcom_icc_bcm bcm_pc0 = { + .name = "PC0", + .num_nodes = 1, + .nodes = { &qns_pcie_mem_noc }, +}; + +static struct qcom_icc_bcm bcm_qup0 = { + .name = "QUP0", + .keepalive = true, + .vote_scale = 1, + .num_nodes = 1, + .nodes = { &qup0_core_slave }, +}; + +static struct qcom_icc_bcm bcm_qup1 = { + .name = "QUP1", + .keepalive = true, + .vote_scale = 1, + .num_nodes = 1, + .nodes = { &qup1_core_slave }, +}; + +static struct qcom_icc_bcm bcm_qup2 = { + .name = "QUP2", + .keepalive = true, + .vote_scale = 1, + .num_nodes = 1, + .nodes = { &qup2_core_slave }, +}; + +static struct qcom_icc_bcm bcm_sh0 = { + .name = "SH0", + .keepalive = true, + .num_nodes = 1, + .nodes = { &qns_llcc }, +}; + +static struct qcom_icc_bcm bcm_sh1 = { + .name = "SH1", + .num_nodes = 13, + .nodes = { &alm_gpu_tcu, &alm_pcie_tcu, + &alm_sys_tcu, &chm_apps, + &qnm_gpu, &qnm_lpass, + &qnm_mnoc_hf, &qnm_mnoc_sf, + &qnm_nsp_noc, &qnm_pcie, + &xm_gic, &qns_gem_noc_cnoc, + &qns_pcie }, +}; + +static struct qcom_icc_bcm bcm_sn0 = { + .name = "SN0", + .keepalive = true, + .num_nodes = 1, + .nodes = { &qns_gemnoc_sf }, +}; + +static struct qcom_icc_bcm bcm_sn2 = { + .name = "SN2", + .num_nodes = 1, + .nodes = { &qnm_aggre1_noc }, +}; + +static struct qcom_icc_bcm bcm_sn3 = { + .name = "SN3", + .num_nodes = 1, + .nodes = { &qnm_aggre2_noc }, +}; + +static struct qcom_icc_bcm bcm_sn4 = { + .name = "SN4", + .num_nodes = 1, + .nodes = { &qnm_usb_anoc }, +}; + +static struct qcom_icc_bcm bcm_acv_disp = { + .name = "ACV", + .num_nodes = 1, + .nodes = { &ebi_disp }, +}; + +static struct qcom_icc_bcm bcm_mc0_disp = { + .name = "MC0", + .num_nodes = 1, + .nodes = { &ebi_disp }, +}; + +static struct qcom_icc_bcm bcm_mm0_disp = { + .name = "MM0", + .num_nodes = 1, + .nodes = { &qns_mem_noc_hf_disp }, +}; + +static struct qcom_icc_bcm bcm_mm1_disp = { + .name = "MM1", + .num_nodes = 1, + .nodes = { &qnm_mdp_disp }, +}; + +static struct qcom_icc_bcm bcm_sh0_disp = { + .name = "SH0", + .num_nodes = 1, + .nodes = { &qns_llcc_disp }, +}; + +static struct qcom_icc_bcm bcm_sh1_disp = { + .name = "SH1", + .num_nodes = 2, + .nodes = { &qnm_mnoc_hf_disp, &qnm_pcie_disp }, +}; + +static struct qcom_icc_bcm bcm_acv_pcie = { + .name = "ACV", + .num_nodes = 1, + .nodes = { &ebi_pcie }, +}; + +static struct qcom_icc_bcm bcm_mc0_pcie = { + .name = "MC0", + .num_nodes = 1, + .nodes = { &ebi_pcie }, +}; + +static struct qcom_icc_bcm bcm_pc0_pcie = { + .name = "PC0", + .num_nodes = 1, + .nodes = { &qns_pcie_mem_noc_pcie }, +}; + +static struct qcom_icc_bcm bcm_sh0_pcie = { + .name = "SH0", + .num_nodes = 1, + .nodes = { &qns_llcc_pcie }, +}; + +static struct qcom_icc_bcm bcm_sh1_pcie = { + .name = "SH1", + .num_nodes = 1, + .nodes = { &qnm_pcie_pcie }, +}; + +static struct qcom_icc_bcm *aggre1_noc_bcms[] = { +}; + +static struct qcom_icc_node * const aggre1_noc_nodes[] = { + [MASTER_QSPI_0] = &qhm_qspi, + [MASTER_QUP_1] = &qhm_qup1, + [MASTER_SDCC_4] = &xm_sdc4, + [MASTER_UFS_MEM] = &xm_ufs_mem, + [SLAVE_A1NOC_SNOC] = &qns_a1noc_snoc, +}; + +static const struct qcom_icc_desc x1e80100_aggre1_noc = { + .nodes = aggre1_noc_nodes, + .num_nodes = ARRAY_SIZE(aggre1_noc_nodes), + .bcms = aggre1_noc_bcms, + .num_bcms = ARRAY_SIZE(aggre1_noc_bcms), +}; + +static struct qcom_icc_bcm * const aggre2_noc_bcms[] = { + &bcm_ce0, +}; + +static struct qcom_icc_node 
* const aggre2_noc_nodes[] = { + [MASTER_QUP_0] = &qhm_qup0, + [MASTER_QUP_2] = &qhm_qup2, + [MASTER_CRYPTO] = &qxm_crypto, + [MASTER_SP] = &qxm_sp, + [MASTER_QDSS_ETR] = &xm_qdss_etr_0, + [MASTER_QDSS_ETR_1] = &xm_qdss_etr_1, + [MASTER_SDCC_2] = &xm_sdc2, + [SLAVE_A2NOC_SNOC] = &qns_a2noc_snoc, +}; + +static const struct qcom_icc_desc x1e80100_aggre2_noc = { + .nodes = aggre2_noc_nodes, + .num_nodes = ARRAY_SIZE(aggre2_noc_nodes), + .bcms = aggre2_noc_bcms, + .num_bcms = ARRAY_SIZE(aggre2_noc_bcms), +}; + +static struct qcom_icc_bcm * const clk_virt_bcms[] = { + &bcm_acv_perf, + &bcm_qup0, + &bcm_qup1, + &bcm_qup2, +}; + +static struct qcom_icc_node * const clk_virt_nodes[] = { + [MASTER_DDR_PERF_MODE] = &ddr_perf_mode_master, + [MASTER_QUP_CORE_0] = &qup0_core_master, + [MASTER_QUP_CORE_1] = &qup1_core_master, + [MASTER_QUP_CORE_2] = &qup2_core_master, + [SLAVE_DDR_PERF_MODE] = &ddr_perf_mode_slave, + [SLAVE_QUP_CORE_0] = &qup0_core_slave, + [SLAVE_QUP_CORE_1] = &qup1_core_slave, + [SLAVE_QUP_CORE_2] = &qup2_core_slave, +}; + +static const struct qcom_icc_desc x1e80100_clk_virt = { + .nodes = clk_virt_nodes, + .num_nodes = ARRAY_SIZE(clk_virt_nodes), + .bcms = clk_virt_bcms, + .num_bcms = ARRAY_SIZE(clk_virt_bcms), +}; + +static struct qcom_icc_bcm * const cnoc_cfg_bcms[] = { + &bcm_cn0, + &bcm_cn1, +}; + +static struct qcom_icc_node * const cnoc_cfg_nodes[] = { + [MASTER_CNOC_CFG] = &qsm_cfg, + [SLAVE_AHB2PHY_SOUTH] = &qhs_ahb2phy0, + [SLAVE_AHB2PHY_NORTH] = &qhs_ahb2phy1, + [SLAVE_AHB2PHY_2] = &qhs_ahb2phy2, + [SLAVE_AV1_ENC_CFG] = &qhs_av1_enc_cfg, + [SLAVE_CAMERA_CFG] = &qhs_camera_cfg, + [SLAVE_CLK_CTL] = &qhs_clk_ctl, + [SLAVE_CRYPTO_0_CFG] = &qhs_crypto0_cfg, + [SLAVE_DISPLAY_CFG] = &qhs_display_cfg, + [SLAVE_GFX3D_CFG] = &qhs_gpuss_cfg, + [SLAVE_IMEM_CFG] = &qhs_imem_cfg, + [SLAVE_IPC_ROUTER_CFG] = &qhs_ipc_router, + [SLAVE_PCIE_0_CFG] = &qhs_pcie0_cfg, + [SLAVE_PCIE_1_CFG] = &qhs_pcie1_cfg, + [SLAVE_PCIE_2_CFG] = &qhs_pcie2_cfg, + [SLAVE_PCIE_3_CFG] = &qhs_pcie3_cfg, + [SLAVE_PCIE_4_CFG] = &qhs_pcie4_cfg, + [SLAVE_PCIE_5_CFG] = &qhs_pcie5_cfg, + [SLAVE_PCIE_6A_CFG] = &qhs_pcie6a_cfg, + [SLAVE_PCIE_6B_CFG] = &qhs_pcie6b_cfg, + [SLAVE_PCIE_RSC_CFG] = &qhs_pcie_rsc_cfg, + [SLAVE_PDM] = &qhs_pdm, + [SLAVE_PRNG] = &qhs_prng, + [SLAVE_QDSS_CFG] = &qhs_qdss_cfg, + [SLAVE_QSPI_0] = &qhs_qspi, + [SLAVE_QUP_0] = &qhs_qup0, + [SLAVE_QUP_1] = &qhs_qup1, + [SLAVE_QUP_2] = &qhs_qup2, + [SLAVE_SDCC_2] = &qhs_sdc2, + [SLAVE_SDCC_4] = &qhs_sdc4, + [SLAVE_SMMUV3_CFG] = &qhs_smmuv3_cfg, + [SLAVE_TCSR] = &qhs_tcsr, + [SLAVE_TLMM] = &qhs_tlmm, + [SLAVE_UFS_MEM_CFG] = &qhs_ufs_mem_cfg, + [SLAVE_USB2] = &qhs_usb2_0_cfg, + [SLAVE_USB3_0] = &qhs_usb3_0_cfg, + [SLAVE_USB3_1] = &qhs_usb3_1_cfg, + [SLAVE_USB3_2] = &qhs_usb3_2_cfg, + [SLAVE_USB3_MP] = &qhs_usb3_mp_cfg, + [SLAVE_USB4_0] = &qhs_usb4_0_cfg, + [SLAVE_USB4_1] = &qhs_usb4_1_cfg, + [SLAVE_USB4_2] = &qhs_usb4_2_cfg, + [SLAVE_VENUS_CFG] = &qhs_venus_cfg, + [SLAVE_LPASS_QTB_CFG] = &qss_lpass_qtb_cfg, + [SLAVE_CNOC_MNOC_CFG] = &qss_mnoc_cfg, + [SLAVE_NSP_QTB_CFG] = &qss_nsp_qtb_cfg, + [SLAVE_QDSS_STM] = &xs_qdss_stm, + [SLAVE_TCU] = &xs_sys_tcu_cfg, +}; + +static const struct qcom_icc_desc x1e80100_cnoc_cfg = { + .nodes = cnoc_cfg_nodes, + .num_nodes = ARRAY_SIZE(cnoc_cfg_nodes), + .bcms = cnoc_cfg_bcms, + .num_bcms = ARRAY_SIZE(cnoc_cfg_bcms), +}; + +static struct qcom_icc_bcm * const cnoc_main_bcms[] = { + &bcm_cn0, +}; + +static struct qcom_icc_node * const cnoc_main_nodes[] = { + [MASTER_GEM_NOC_CNOC] = &qnm_gemnoc_cnoc, + 
[MASTER_GEM_NOC_PCIE_SNOC] = &qnm_gemnoc_pcie, + [SLAVE_AOSS] = &qhs_aoss, + [SLAVE_TME_CFG] = &qhs_tme_cfg, + [SLAVE_APPSS] = &qns_apss, + [SLAVE_CNOC_CFG] = &qss_cfg, + [SLAVE_BOOT_IMEM] = &qxs_boot_imem, + [SLAVE_IMEM] = &qxs_imem, + [SLAVE_PCIE_0] = &xs_pcie_0, + [SLAVE_PCIE_1] = &xs_pcie_1, + [SLAVE_PCIE_2] = &xs_pcie_2, + [SLAVE_PCIE_3] = &xs_pcie_3, + [SLAVE_PCIE_4] = &xs_pcie_4, + [SLAVE_PCIE_5] = &xs_pcie_5, + [SLAVE_PCIE_6A] = &xs_pcie_6a, + [SLAVE_PCIE_6B] = &xs_pcie_6b, +}; + +static const struct qcom_icc_desc x1e80100_cnoc_main = { + .nodes = cnoc_main_nodes, + .num_nodes = ARRAY_SIZE(cnoc_main_nodes), + .bcms = cnoc_main_bcms, + .num_bcms = ARRAY_SIZE(cnoc_main_bcms), +}; + +static struct qcom_icc_bcm * const gem_noc_bcms[] = { + &bcm_sh0, + &bcm_sh1, + &bcm_sh0_disp, + &bcm_sh1_disp, + &bcm_sh0_pcie, + &bcm_sh1_pcie, +}; + +static struct qcom_icc_node * const gem_noc_nodes[] = { + [MASTER_GPU_TCU] = &alm_gpu_tcu, + [MASTER_PCIE_TCU] = &alm_pcie_tcu, + [MASTER_SYS_TCU] = &alm_sys_tcu, + [MASTER_APPSS_PROC] = &chm_apps, + [MASTER_GFX3D] = &qnm_gpu, + [MASTER_LPASS_GEM_NOC] = &qnm_lpass, + [MASTER_MNOC_HF_MEM_NOC] = &qnm_mnoc_hf, + [MASTER_MNOC_SF_MEM_NOC] = &qnm_mnoc_sf, + [MASTER_COMPUTE_NOC] = &qnm_nsp_noc, + [MASTER_ANOC_PCIE_GEM_NOC] = &qnm_pcie, + [MASTER_SNOC_SF_MEM_NOC] = &qnm_snoc_sf, + [MASTER_GIC2] = &xm_gic, + [SLAVE_GEM_NOC_CNOC] = &qns_gem_noc_cnoc, + [SLAVE_LLCC] = &qns_llcc, + [SLAVE_MEM_NOC_PCIE_SNOC] = &qns_pcie, + [MASTER_MNOC_HF_MEM_NOC_DISP] = &qnm_mnoc_hf_disp, + [MASTER_ANOC_PCIE_GEM_NOC_DISP] = &qnm_pcie_disp, + [SLAVE_LLCC_DISP] = &qns_llcc_disp, + [MASTER_ANOC_PCIE_GEM_NOC_PCIE] = &qnm_pcie_pcie, + [SLAVE_LLCC_PCIE] = &qns_llcc_pcie, +}; + +static const struct qcom_icc_desc x1e80100_gem_noc = { + .nodes = gem_noc_nodes, + .num_nodes = ARRAY_SIZE(gem_noc_nodes), + .bcms = gem_noc_bcms, + .num_bcms = ARRAY_SIZE(gem_noc_bcms), +}; + +static struct qcom_icc_bcm *lpass_ag_noc_bcms[] = { +}; + +static struct qcom_icc_node * const lpass_ag_noc_nodes[] = { + [MASTER_LPIAON_NOC] = &qnm_lpiaon_noc, + [SLAVE_LPASS_GEM_NOC] = &qns_lpass_ag_noc_gemnoc, +}; + +static const struct qcom_icc_desc x1e80100_lpass_ag_noc = { + .nodes = lpass_ag_noc_nodes, + .num_nodes = ARRAY_SIZE(lpass_ag_noc_nodes), + .bcms = lpass_ag_noc_bcms, + .num_bcms = ARRAY_SIZE(lpass_ag_noc_bcms), +}; + +static struct qcom_icc_bcm * const lpass_lpiaon_noc_bcms[] = { + &bcm_lp0, +}; + +static struct qcom_icc_node * const lpass_lpiaon_noc_nodes[] = { + [MASTER_LPASS_LPINOC] = &qnm_lpass_lpinoc, + [SLAVE_LPIAON_NOC_LPASS_AG_NOC] = &qns_lpass_aggnoc, +}; + +static const struct qcom_icc_desc x1e80100_lpass_lpiaon_noc = { + .nodes = lpass_lpiaon_noc_nodes, + .num_nodes = ARRAY_SIZE(lpass_lpiaon_noc_nodes), + .bcms = lpass_lpiaon_noc_bcms, + .num_bcms = ARRAY_SIZE(lpass_lpiaon_noc_bcms), +}; + +static struct qcom_icc_bcm * const lpass_lpicx_noc_bcms[] = { +}; + +static struct qcom_icc_node * const lpass_lpicx_noc_nodes[] = { + [MASTER_LPASS_PROC] = &qxm_lpinoc_dsp_axim, + [SLAVE_LPICX_NOC_LPIAON_NOC] = &qns_lpi_aon_noc, +}; + +static const struct qcom_icc_desc x1e80100_lpass_lpicx_noc = { + .nodes = lpass_lpicx_noc_nodes, + .num_nodes = ARRAY_SIZE(lpass_lpicx_noc_nodes), + .bcms = lpass_lpicx_noc_bcms, + .num_bcms = ARRAY_SIZE(lpass_lpicx_noc_bcms), +}; + +static struct qcom_icc_bcm * const mc_virt_bcms[] = { + &bcm_acv, + &bcm_mc0, + &bcm_acv_disp, + &bcm_mc0_disp, + &bcm_acv_pcie, + &bcm_mc0_pcie, +}; + +static struct qcom_icc_node * const mc_virt_nodes[] = { + [MASTER_LLCC] = &llcc_mc, + 
[SLAVE_EBI1] = &ebi, + [MASTER_LLCC_DISP] = &llcc_mc_disp, + [SLAVE_EBI1_DISP] = &ebi_disp, + [MASTER_LLCC_PCIE] = &llcc_mc_pcie, + [SLAVE_EBI1_PCIE] = &ebi_pcie, +}; + +static const struct qcom_icc_desc x1e80100_mc_virt = { + .nodes = mc_virt_nodes, + .num_nodes = ARRAY_SIZE(mc_virt_nodes), + .bcms = mc_virt_bcms, + .num_bcms = ARRAY_SIZE(mc_virt_bcms), +}; + +static struct qcom_icc_bcm * const mmss_noc_bcms[] = { + &bcm_mm0, + &bcm_mm1, + &bcm_mm0_disp, + &bcm_mm1_disp, +}; + +static struct qcom_icc_node * const mmss_noc_nodes[] = { + [MASTER_AV1_ENC] = &qnm_av1_enc, + [MASTER_CAMNOC_HF] = &qnm_camnoc_hf, + [MASTER_CAMNOC_ICP] = &qnm_camnoc_icp, + [MASTER_CAMNOC_SF] = &qnm_camnoc_sf, + [MASTER_EVA] = &qnm_eva, + [MASTER_MDP] = &qnm_mdp, + [MASTER_VIDEO] = &qnm_video, + [MASTER_VIDEO_CV_PROC] = &qnm_video_cv_cpu, + [MASTER_VIDEO_V_PROC] = &qnm_video_v_cpu, + [MASTER_CNOC_MNOC_CFG] = &qsm_mnoc_cfg, + [SLAVE_MNOC_HF_MEM_NOC] = &qns_mem_noc_hf, + [SLAVE_MNOC_SF_MEM_NOC] = &qns_mem_noc_sf, + [SLAVE_SERVICE_MNOC] = &srvc_mnoc, + [MASTER_MDP_DISP] = &qnm_mdp_disp, + [SLAVE_MNOC_HF_MEM_NOC_DISP] = &qns_mem_noc_hf_disp, +}; + +static const struct qcom_icc_desc x1e80100_mmss_noc = { + .nodes = mmss_noc_nodes, + .num_nodes = ARRAY_SIZE(mmss_noc_nodes), + .bcms = mmss_noc_bcms, + .num_bcms = ARRAY_SIZE(mmss_noc_bcms), +}; + +static struct qcom_icc_bcm * const nsp_noc_bcms[] = { + &bcm_co0, +}; + +static struct qcom_icc_node * const nsp_noc_nodes[] = { + [MASTER_CDSP_PROC] = &qxm_nsp, + [SLAVE_CDSP_MEM_NOC] = &qns_nsp_gemnoc, +}; + +static const struct qcom_icc_desc x1e80100_nsp_noc = { + .nodes = nsp_noc_nodes, + .num_nodes = ARRAY_SIZE(nsp_noc_nodes), + .bcms = nsp_noc_bcms, + .num_bcms = ARRAY_SIZE(nsp_noc_bcms), +}; + +static struct qcom_icc_bcm * const pcie_center_anoc_bcms[] = { + &bcm_pc0, + &bcm_pc0_pcie, +}; + +static struct qcom_icc_node * const pcie_center_anoc_nodes[] = { + [MASTER_PCIE_NORTH] = &qnm_pcie_north_gem_noc, + [MASTER_PCIE_SOUTH] = &qnm_pcie_south_gem_noc, + [SLAVE_ANOC_PCIE_GEM_NOC] = &qns_pcie_mem_noc, + [MASTER_PCIE_NORTH_PCIE] = &qnm_pcie_north_gem_noc_pcie, + [MASTER_PCIE_SOUTH_PCIE] = &qnm_pcie_south_gem_noc_pcie, + [SLAVE_ANOC_PCIE_GEM_NOC_PCIE] = &qns_pcie_mem_noc_pcie, +}; + +static const struct qcom_icc_desc x1e80100_pcie_center_anoc = { + .nodes = pcie_center_anoc_nodes, + .num_nodes = ARRAY_SIZE(pcie_center_anoc_nodes), + .bcms = pcie_center_anoc_bcms, + .num_bcms = ARRAY_SIZE(pcie_center_anoc_bcms), +}; + +static struct qcom_icc_bcm * const pcie_north_anoc_bcms[] = { +}; + +static struct qcom_icc_node * const pcie_north_anoc_nodes[] = { + [MASTER_PCIE_3] = &xm_pcie_3, + [MASTER_PCIE_4] = &xm_pcie_4, + [MASTER_PCIE_5] = &xm_pcie_5, + [SLAVE_PCIE_NORTH] = &qns_pcie_north_gem_noc, + [MASTER_PCIE_3_PCIE] = &xm_pcie_3_pcie, + [MASTER_PCIE_4_PCIE] = &xm_pcie_4_pcie, + [MASTER_PCIE_5_PCIE] = &xm_pcie_5_pcie, + [SLAVE_PCIE_NORTH_PCIE] = &qns_pcie_north_gem_noc_pcie, +}; + +static const struct qcom_icc_desc x1e80100_pcie_north_anoc = { + .nodes = pcie_north_anoc_nodes, + .num_nodes = ARRAY_SIZE(pcie_north_anoc_nodes), + .bcms = pcie_north_anoc_bcms, + .num_bcms = ARRAY_SIZE(pcie_north_anoc_bcms), +}; + +static struct qcom_icc_bcm *pcie_south_anoc_bcms[] = { +}; + +static struct qcom_icc_node * const pcie_south_anoc_nodes[] = { + [MASTER_PCIE_0] = &xm_pcie_0, + [MASTER_PCIE_1] = &xm_pcie_1, + [MASTER_PCIE_2] = &xm_pcie_2, + [MASTER_PCIE_6A] = &xm_pcie_6a, + [MASTER_PCIE_6B] = &xm_pcie_6b, + [SLAVE_PCIE_SOUTH] = &qns_pcie_south_gem_noc, + [MASTER_PCIE_0_PCIE] = 
&xm_pcie_0_pcie, + [MASTER_PCIE_1_PCIE] = &xm_pcie_1_pcie, + [MASTER_PCIE_2_PCIE] = &xm_pcie_2_pcie, + [MASTER_PCIE_6A_PCIE] = &xm_pcie_6a_pcie, + [MASTER_PCIE_6B_PCIE] = &xm_pcie_6b_pcie, + [SLAVE_PCIE_SOUTH_PCIE] = &qns_pcie_south_gem_noc_pcie, +}; + +static const struct qcom_icc_desc x1e80100_pcie_south_anoc = { + .nodes = pcie_south_anoc_nodes, + .num_nodes = ARRAY_SIZE(pcie_south_anoc_nodes), + .bcms = pcie_south_anoc_bcms, + .num_bcms = ARRAY_SIZE(pcie_south_anoc_bcms), +}; + +static struct qcom_icc_bcm *system_noc_bcms[] = { + &bcm_sn0, + &bcm_sn2, + &bcm_sn3, + &bcm_sn4, +}; + +static struct qcom_icc_node * const system_noc_nodes[] = { + [MASTER_A1NOC_SNOC] = &qnm_aggre1_noc, + [MASTER_A2NOC_SNOC] = &qnm_aggre2_noc, + [MASTER_GIC1] = &qnm_gic, + [MASTER_USB_NOC_SNOC] = &qnm_usb_anoc, + [SLAVE_SNOC_GEM_NOC_SF] = &qns_gemnoc_sf, +}; + +static const struct qcom_icc_desc x1e80100_system_noc = { + .nodes = system_noc_nodes, + .num_nodes = ARRAY_SIZE(system_noc_nodes), + .bcms = system_noc_bcms, + .num_bcms = ARRAY_SIZE(system_noc_bcms), +}; + +static struct qcom_icc_bcm * const usb_center_anoc_bcms[] = { +}; + +static struct qcom_icc_node * const usb_center_anoc_nodes[] = { + [MASTER_AGGRE_USB_NORTH] = &qnm_aggre_usb_north_snoc, + [MASTER_AGGRE_USB_SOUTH] = &qnm_aggre_usb_south_snoc, + [SLAVE_USB_NOC_SNOC] = &qns_aggre_usb_snoc, +}; + +static const struct qcom_icc_desc x1e80100_usb_center_anoc = { + .nodes = usb_center_anoc_nodes, + .num_nodes = ARRAY_SIZE(usb_center_anoc_nodes), + .bcms = usb_center_anoc_bcms, + .num_bcms = ARRAY_SIZE(usb_center_anoc_bcms), +}; + +static struct qcom_icc_bcm *usb_north_anoc_bcms[] = { +}; + +static struct qcom_icc_node * const usb_north_anoc_nodes[] = { + [MASTER_USB2] = &xm_usb2_0, + [MASTER_USB3_MP] = &xm_usb3_mp, + [SLAVE_AGGRE_USB_NORTH] = &qns_aggre_usb_north_snoc, +}; + +static const struct qcom_icc_desc x1e80100_usb_north_anoc = { + .nodes = usb_north_anoc_nodes, + .num_nodes = ARRAY_SIZE(usb_north_anoc_nodes), + .bcms = usb_north_anoc_bcms, + .num_bcms = ARRAY_SIZE(usb_north_anoc_bcms), +}; + +static struct qcom_icc_bcm *usb_south_anoc_bcms[] = { +}; + +static struct qcom_icc_node * const usb_south_anoc_nodes[] = { + [MASTER_USB3_0] = &xm_usb3_0, + [MASTER_USB3_1] = &xm_usb3_1, + [MASTER_USB3_2] = &xm_usb3_2, + [MASTER_USB4_0] = &xm_usb4_0, + [MASTER_USB4_1] = &xm_usb4_1, + [MASTER_USB4_2] = &xm_usb4_2, + [SLAVE_AGGRE_USB_SOUTH] = &qns_aggre_usb_south_snoc, +}; + +static const struct qcom_icc_desc x1e80100_usb_south_anoc = { + .nodes = usb_south_anoc_nodes, + .num_nodes = ARRAY_SIZE(usb_south_anoc_nodes), + .bcms = usb_south_anoc_bcms, + .num_bcms = ARRAY_SIZE(usb_south_anoc_bcms), +}; + +static const struct of_device_id qnoc_of_match[] = { + { .compatible = "qcom,x1e80100-aggre1-noc", .data = &x1e80100_aggre1_noc}, + { .compatible = "qcom,x1e80100-aggre2-noc", .data = &x1e80100_aggre2_noc}, + { .compatible = "qcom,x1e80100-clk-virt", .data = &x1e80100_clk_virt}, + { .compatible = "qcom,x1e80100-cnoc-cfg", .data = &x1e80100_cnoc_cfg}, + { .compatible = "qcom,x1e80100-cnoc-main", .data = &x1e80100_cnoc_main}, + { .compatible = "qcom,x1e80100-gem-noc", .data = &x1e80100_gem_noc}, + { .compatible = "qcom,x1e80100-lpass-ag-noc", .data = &x1e80100_lpass_ag_noc}, + { .compatible = "qcom,x1e80100-lpass-lpiaon-noc", .data = &x1e80100_lpass_lpiaon_noc}, + { .compatible = "qcom,x1e80100-lpass-lpicx-noc", .data = &x1e80100_lpass_lpicx_noc}, + { .compatible = "qcom,x1e80100-mc-virt", .data = &x1e80100_mc_virt}, + { .compatible = "qcom,x1e80100-mmss-noc", 
.data = &x1e80100_mmss_noc}, + { .compatible = "qcom,x1e80100-nsp-noc", .data = &x1e80100_nsp_noc}, + { .compatible = "qcom,x1e80100-pcie-center-anoc", .data = &x1e80100_pcie_center_anoc}, + { .compatible = "qcom,x1e80100-pcie-north-anoc", .data = &x1e80100_pcie_north_anoc}, + { .compatible = "qcom,x1e80100-pcie-south-anoc", .data = &x1e80100_pcie_south_anoc}, + { .compatible = "qcom,x1e80100-system-noc", .data = &x1e80100_system_noc}, + { .compatible = "qcom,x1e80100-usb-center-anoc", .data = &x1e80100_usb_center_anoc}, + { .compatible = "qcom,x1e80100-usb-north-anoc", .data = &x1e80100_usb_north_anoc}, + { .compatible = "qcom,x1e80100-usb-south-anoc", .data = &x1e80100_usb_south_anoc}, + { } +}; +MODULE_DEVICE_TABLE(of, qnoc_of_match); + +static struct platform_driver qnoc_driver = { + .probe = qcom_icc_rpmh_probe, + .remove_new = qcom_icc_rpmh_remove, + .driver = { + .name = "qnoc-x1e80100", + .of_match_table = qnoc_of_match, + .sync_state = icc_sync_state, + }, +}; + +static int __init qnoc_driver_init(void) +{ + return platform_driver_register(&qnoc_driver); +} +core_initcall(qnoc_driver_init); + +static void __exit qnoc_driver_exit(void) +{ + platform_driver_unregister(&qnoc_driver); +} +module_exit(qnoc_driver_exit); + +MODULE_DESCRIPTION("x1e80100 NoC driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/interconnect/qcom/x1e80100.h b/drivers/interconnect/qcom/x1e80100.h new file mode 100644 index 000000000000..2e14264f4c2b --- /dev/null +++ b/drivers/interconnect/qcom/x1e80100.h @@ -0,0 +1,192 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * X1E80100 interconnect IDs + * + * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved. + * Copyright (c) 2023, Linaro Limited + */ + +#ifndef __DRIVERS_INTERCONNECT_QCOM_X1E80100_H +#define __DRIVERS_INTERCONNECT_QCOM_X1E80100_H + +#define X1E80100_MASTER_A1NOC_SNOC 0 +#define X1E80100_MASTER_A2NOC_SNOC 1 +#define X1E80100_MASTER_ANOC_PCIE_GEM_NOC 2 +#define X1E80100_MASTER_ANOC_PCIE_GEM_NOC_DISP 3 +#define X1E80100_MASTER_APPSS_PROC 4 +#define X1E80100_MASTER_CAMNOC_HF 5 +#define X1E80100_MASTER_CAMNOC_ICP 6 +#define X1E80100_MASTER_CAMNOC_SF 7 +#define X1E80100_MASTER_CDSP_PROC 8 +#define X1E80100_MASTER_CNOC_CFG 9 +#define X1E80100_MASTER_CNOC_MNOC_CFG 10 +#define X1E80100_MASTER_COMPUTE_NOC 11 +#define X1E80100_MASTER_CRYPTO 12 +#define X1E80100_MASTER_GEM_NOC_CNOC 13 +#define X1E80100_MASTER_GEM_NOC_PCIE_SNOC 14 +#define X1E80100_MASTER_GFX3D 15 +#define X1E80100_MASTER_GPU_TCU 16 +#define X1E80100_MASTER_IPA 17 +#define X1E80100_MASTER_LLCC 18 +#define X1E80100_MASTER_LLCC_DISP 19 +#define X1E80100_MASTER_LPASS_GEM_NOC 20 +#define X1E80100_MASTER_LPASS_LPINOC 21 +#define X1E80100_MASTER_LPASS_PROC 22 +#define X1E80100_MASTER_LPIAON_NOC 23 +#define X1E80100_MASTER_MDP 24 +#define X1E80100_MASTER_MDP_DISP 25 +#define X1E80100_MASTER_MNOC_HF_MEM_NOC 26 +#define X1E80100_MASTER_MNOC_HF_MEM_NOC_DISP 27 +#define X1E80100_MASTER_MNOC_SF_MEM_NOC 28 +#define X1E80100_MASTER_PCIE_0 29 +#define X1E80100_MASTER_PCIE_1 30 +#define X1E80100_MASTER_QDSS_ETR 31 +#define X1E80100_MASTER_QDSS_ETR_1 32 +#define X1E80100_MASTER_QSPI_0 33 +#define X1E80100_MASTER_QUP_0 34 +#define X1E80100_MASTER_QUP_1 35 +#define X1E80100_MASTER_QUP_2 36 +#define X1E80100_MASTER_QUP_CORE_0 37 +#define X1E80100_MASTER_QUP_CORE_1 38 +#define X1E80100_MASTER_SDCC_2 39 +#define X1E80100_MASTER_SDCC_4 40 +#define X1E80100_MASTER_SNOC_SF_MEM_NOC 41 +#define X1E80100_MASTER_SP 42 +#define X1E80100_MASTER_SYS_TCU 43 +#define X1E80100_MASTER_UFS_MEM 44 
+#define X1E80100_MASTER_USB3_0 45 +#define X1E80100_MASTER_VIDEO 46 +#define X1E80100_MASTER_VIDEO_CV_PROC 47 +#define X1E80100_MASTER_VIDEO_V_PROC 48 +#define X1E80100_SLAVE_A1NOC_SNOC 49 +#define X1E80100_SLAVE_A2NOC_SNOC 50 +#define X1E80100_SLAVE_AHB2PHY_NORTH 51 +#define X1E80100_SLAVE_AHB2PHY_SOUTH 52 +#define X1E80100_SLAVE_ANOC_PCIE_GEM_NOC 53 +#define X1E80100_SLAVE_AOSS 54 +#define X1E80100_SLAVE_APPSS 55 +#define X1E80100_SLAVE_BOOT_IMEM 56 +#define X1E80100_SLAVE_CAMERA_CFG 57 +#define X1E80100_SLAVE_CDSP_MEM_NOC 58 +#define X1E80100_SLAVE_CLK_CTL 59 +#define X1E80100_SLAVE_CNOC_CFG 60 +#define X1E80100_SLAVE_CNOC_MNOC_CFG 61 +#define X1E80100_SLAVE_CRYPTO_0_CFG 62 +#define X1E80100_SLAVE_DISPLAY_CFG 63 +#define X1E80100_SLAVE_EBI1 64 +#define X1E80100_SLAVE_EBI1_DISP 65 +#define X1E80100_SLAVE_GEM_NOC_CNOC 66 +#define X1E80100_SLAVE_GFX3D_CFG 67 +#define X1E80100_SLAVE_IMEM 68 +#define X1E80100_SLAVE_IMEM_CFG 69 +#define X1E80100_SLAVE_IPC_ROUTER_CFG 70 +#define X1E80100_SLAVE_LLCC 71 +#define X1E80100_SLAVE_LLCC_DISP 72 +#define X1E80100_SLAVE_LPASS_GEM_NOC 73 +#define X1E80100_SLAVE_LPASS_QTB_CFG 74 +#define X1E80100_SLAVE_LPIAON_NOC_LPASS_AG_NOC 75 +#define X1E80100_SLAVE_LPICX_NOC_LPIAON_NOC 76 +#define X1E80100_SLAVE_MEM_NOC_PCIE_SNOC 77 +#define X1E80100_SLAVE_MNOC_HF_MEM_NOC 78 +#define X1E80100_SLAVE_MNOC_HF_MEM_NOC_DISP 79 +#define X1E80100_SLAVE_MNOC_SF_MEM_NOC 80 +#define X1E80100_SLAVE_NSP_QTB_CFG 81 +#define X1E80100_SLAVE_PCIE_0 82 +#define X1E80100_SLAVE_PCIE_0_CFG 83 +#define X1E80100_SLAVE_PCIE_1 84 +#define X1E80100_SLAVE_PCIE_1_CFG 85 +#define X1E80100_SLAVE_PDM 86 +#define X1E80100_SLAVE_PRNG 87 +#define X1E80100_SLAVE_QDSS_CFG 88 +#define X1E80100_SLAVE_QDSS_STM 89 +#define X1E80100_SLAVE_QSPI_0 90 +#define X1E80100_SLAVE_QUP_1 91 +#define X1E80100_SLAVE_QUP_2 92 +#define X1E80100_SLAVE_QUP_CORE_0 93 +#define X1E80100_SLAVE_QUP_CORE_1 94 +#define X1E80100_SLAVE_QUP_CORE_2 95 +#define X1E80100_SLAVE_SDCC_2 96 +#define X1E80100_SLAVE_SDCC_4 97 +#define X1E80100_SLAVE_SERVICE_MNOC 98 +#define X1E80100_SLAVE_SNOC_GEM_NOC_SF 99 +#define X1E80100_SLAVE_TCSR 100 +#define X1E80100_SLAVE_TCU 101 +#define X1E80100_SLAVE_TLMM 102 +#define X1E80100_SLAVE_TME_CFG 103 +#define X1E80100_SLAVE_UFS_MEM_CFG 104 +#define X1E80100_SLAVE_USB3_0 105 +#define X1E80100_SLAVE_VENUS_CFG 106 +#define X1E80100_MASTER_DDR_PERF_MODE 107 +#define X1E80100_MASTER_QUP_CORE_2 108 +#define X1E80100_MASTER_PCIE_TCU 109 +#define X1E80100_MASTER_GIC2 110 +#define X1E80100_MASTER_AV1_ENC 111 +#define X1E80100_MASTER_EVA 112 +#define X1E80100_MASTER_PCIE_NORTH 113 +#define X1E80100_MASTER_PCIE_SOUTH 114 +#define X1E80100_MASTER_PCIE_3 115 +#define X1E80100_MASTER_PCIE_4 116 +#define X1E80100_MASTER_PCIE_5 117 +#define X1E80100_MASTER_PCIE_2 118 +#define X1E80100_MASTER_PCIE_6A 119 +#define X1E80100_MASTER_PCIE_6B 120 +#define X1E80100_MASTER_GIC1 121 +#define X1E80100_MASTER_USB_NOC_SNOC 122 +#define X1E80100_MASTER_AGGRE_USB_NORTH 123 +#define X1E80100_MASTER_AGGRE_USB_SOUTH 124 +#define X1E80100_MASTER_USB2 125 +#define X1E80100_MASTER_USB3_MP 126 +#define X1E80100_MASTER_USB3_1 127 +#define X1E80100_MASTER_USB3_2 128 +#define X1E80100_MASTER_USB4_0 129 +#define X1E80100_MASTER_USB4_1 130 +#define X1E80100_MASTER_USB4_2 131 +#define X1E80100_MASTER_ANOC_PCIE_GEM_NOC_PCIE 132 +#define X1E80100_MASTER_LLCC_PCIE 133 +#define X1E80100_MASTER_PCIE_NORTH_PCIE 134 +#define X1E80100_MASTER_PCIE_SOUTH_PCIE 135 +#define X1E80100_MASTER_PCIE_3_PCIE 136 +#define X1E80100_MASTER_PCIE_4_PCIE 137 +#define 
X1E80100_MASTER_PCIE_5_PCIE 138 +#define X1E80100_MASTER_PCIE_0_PCIE 139 +#define X1E80100_MASTER_PCIE_1_PCIE 140 +#define X1E80100_MASTER_PCIE_2_PCIE 141 +#define X1E80100_MASTER_PCIE_6A_PCIE 142 +#define X1E80100_MASTER_PCIE_6B_PCIE 143 +#define X1E80100_SLAVE_AHB2PHY_2 144 +#define X1E80100_SLAVE_AV1_ENC_CFG 145 +#define X1E80100_SLAVE_PCIE_2_CFG 146 +#define X1E80100_SLAVE_PCIE_3_CFG 147 +#define X1E80100_SLAVE_PCIE_4_CFG 148 +#define X1E80100_SLAVE_PCIE_5_CFG 149 +#define X1E80100_SLAVE_PCIE_6A_CFG 150 +#define X1E80100_SLAVE_PCIE_6B_CFG 151 +#define X1E80100_SLAVE_PCIE_RSC_CFG 152 +#define X1E80100_SLAVE_QUP_0 153 +#define X1E80100_SLAVE_SMMUV3_CFG 154 +#define X1E80100_SLAVE_USB2 155 +#define X1E80100_SLAVE_USB3_1 156 +#define X1E80100_SLAVE_USB3_2 157 +#define X1E80100_SLAVE_USB3_MP 158 +#define X1E80100_SLAVE_USB4_0 159 +#define X1E80100_SLAVE_USB4_1 160 +#define X1E80100_SLAVE_USB4_2 161 +#define X1E80100_SLAVE_PCIE_2 162 +#define X1E80100_SLAVE_PCIE_3 163 +#define X1E80100_SLAVE_PCIE_4 164 +#define X1E80100_SLAVE_PCIE_5 165 +#define X1E80100_SLAVE_PCIE_6A 166 +#define X1E80100_SLAVE_PCIE_6B 167 +#define X1E80100_SLAVE_DDR_PERF_MODE 168 +#define X1E80100_SLAVE_PCIE_NORTH 169 +#define X1E80100_SLAVE_PCIE_SOUTH 170 +#define X1E80100_SLAVE_USB_NOC_SNOC 171 +#define X1E80100_SLAVE_AGGRE_USB_NORTH 172 +#define X1E80100_SLAVE_AGGRE_USB_SOUTH 173 +#define X1E80100_SLAVE_LLCC_PCIE 174 +#define X1E80100_SLAVE_EBI1_PCIE 175 +#define X1E80100_SLAVE_ANOC_PCIE_GEM_NOC_PCIE 176 +#define X1E80100_SLAVE_PCIE_NORTH_PCIE 177 +#define X1E80100_SLAVE_PCIE_SOUTH_PCIE 178 + +#endif diff --git a/drivers/interconnect/samsung/exynos.c b/drivers/interconnect/samsung/exynos.c index ebf09bbf725b..1ba14cb45d5a 100644 --- a/drivers/interconnect/samsung/exynos.c +++ b/drivers/interconnect/samsung/exynos.c @@ -93,14 +93,12 @@ static struct icc_node *exynos_generic_icc_xlate(struct of_phandle_args *spec, return priv->node; } -static int exynos_generic_icc_remove(struct platform_device *pdev) +static void exynos_generic_icc_remove(struct platform_device *pdev) { struct exynos_icc_priv *priv = platform_get_drvdata(pdev); icc_provider_deregister(&priv->provider); icc_nodes_remove(&priv->provider); - - return 0; } static int exynos_generic_icc_probe(struct platform_device *pdev) @@ -182,7 +180,7 @@ static struct platform_driver exynos_generic_icc_driver = { .sync_state = icc_sync_state, }, .probe = exynos_generic_icc_probe, - .remove = exynos_generic_icc_remove, + .remove_new = exynos_generic_icc_remove, }; module_platform_driver(exynos_generic_icc_driver); diff --git a/drivers/ipack/ipack.c b/drivers/ipack/ipack.c index cc1ecfd49928..b1471ba016a5 100644 --- a/drivers/ipack/ipack.c +++ b/drivers/ipack/ipack.c @@ -207,7 +207,7 @@ struct ipack_bus_device *ipack_bus_register(struct device *parent, int slots, if (!bus) return NULL; - bus_nr = ida_simple_get(&ipack_ida, 0, 0, GFP_KERNEL); + bus_nr = ida_alloc(&ipack_ida, GFP_KERNEL); if (bus_nr < 0) { kfree(bus); return NULL; @@ -237,7 +237,7 @@ int ipack_bus_unregister(struct ipack_bus_device *bus) { bus_for_each_dev(&ipack_bus_type, NULL, bus, ipack_unregister_bus_member); - ida_simple_remove(&ipack_ida, bus->bus_nr); + ida_free(&ipack_ida, bus->bus_nr); kfree(bus); return 0; } diff --git a/drivers/mcb/mcb-core.c b/drivers/mcb/mcb-core.c index ba4530459de8..61994da7bad0 100644 --- a/drivers/mcb/mcb-core.c +++ b/drivers/mcb/mcb-core.c @@ -263,6 +263,7 @@ static void mcb_free_bus(struct device *dev) /** * mcb_alloc_bus() - Allocate a new @mcb_bus + * @carrier: generic 
&struct device for the carrier device * * Allocate a new @mcb_bus. */ @@ -327,7 +328,7 @@ void mcb_release_bus(struct mcb_bus *bus) EXPORT_SYMBOL_NS_GPL(mcb_release_bus, MCB); /** - * mcb_bus_put() - Increment refcnt + * mcb_bus_get() - Increment refcnt * @bus: The @mcb_bus * * Get a @mcb_bus' ref @@ -455,7 +456,7 @@ EXPORT_SYMBOL_NS_GPL(mcb_request_mem, MCB); /** * mcb_release_mem() - Release memory requested by device - * @dev: The @mcb_device that requested the memory + * @mem: The memory resource to be released * * Release memory that was prior requested via @mcb_request_mem(). */ diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig index f37c4b8380ae..4fb291f0bf7c 100644 --- a/drivers/misc/Kconfig +++ b/drivers/misc/Kconfig @@ -562,6 +562,18 @@ config TPS6594_PFSM This driver can also be built as a module. If so, the module will be called tps6594-pfsm. +config NSM + tristate "Nitro (Enclaves) Security Module support" + depends on VIRTIO + select HW_RANDOM + help + This driver provides support for the Nitro Security Module + in AWS EC2 Nitro-based Enclaves. The driver exposes a /dev/nsm + device that user space can use to communicate with the hypervisor. + + To compile this driver as a module, choose M here. + The module will be called nsm. + source "drivers/misc/c2port/Kconfig" source "drivers/misc/eeprom/Kconfig" source "drivers/misc/cb710/Kconfig" diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile index f2a4d1ff65d4..ea6ea5bbbc9c 100644 --- a/drivers/misc/Makefile +++ b/drivers/misc/Makefile @@ -67,3 +67,4 @@ obj-$(CONFIG_TMR_MANAGER) += xilinx_tmr_manager.o obj-$(CONFIG_TMR_INJECT) += xilinx_tmr_inject.o obj-$(CONFIG_TPS6594_ESM) += tps6594-esm.o obj-$(CONFIG_TPS6594_PFSM) += tps6594-pfsm.o +obj-$(CONFIG_NSM) += nsm.o diff --git a/drivers/misc/cardreader/Makefile b/drivers/misc/cardreader/Makefile index 895128475d83..1e1bca6b0b22 100644 --- a/drivers/misc/cardreader/Makefile +++ b/drivers/misc/cardreader/Makefile @@ -1,5 +1,5 @@ # SPDX-License-Identifier: GPL-2.0-only obj-$(CONFIG_MISC_ALCOR_PCI) += alcor_pci.o obj-$(CONFIG_MISC_RTSX_PCI) += rtsx_pci.o -rtsx_pci-objs := rtsx_pcr.o rts5209.o rts5229.o rtl8411.o rts5227.o rts5249.o rts5260.o rts5261.o rts5228.o +rtsx_pci-objs := rtsx_pcr.o rts5209.o rts5229.o rtl8411.o rts5227.o rts5249.o rts5260.o rts5261.o rts5228.o rts5264.o obj-$(CONFIG_MISC_RTSX_USB) += rtsx_usb.o diff --git a/drivers/misc/cardreader/rts5264.c b/drivers/misc/cardreader/rts5264.c new file mode 100644 index 000000000000..8be4ed7d9d47 --- /dev/null +++ b/drivers/misc/cardreader/rts5264.c @@ -0,0 +1,886 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* Driver for Realtek PCI-Express card reader + * + * Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved. 
+ * + * Author: + * Ricky Wu <ricky_wu@realtek.com> + */ + +#include <linux/module.h> +#include <linux/delay.h> +#include <linux/rtsx_pci.h> + +#include "rts5264.h" +#include "rtsx_pcr.h" + +static u8 rts5264_get_ic_version(struct rtsx_pcr *pcr) +{ + u8 val; + + rtsx_pci_read_register(pcr, DUMMY_REG_RESET_0, &val); + return val & 0x0F; +} + +static void rts5264_fill_driving(struct rtsx_pcr *pcr, u8 voltage) +{ + u8 driving_3v3[4][3] = { + {0x88, 0x88, 0x88}, + {0x77, 0x77, 0x77}, + {0x99, 0x99, 0x99}, + {0x66, 0x66, 0x66}, + }; + u8 driving_1v8[4][3] = { + {0x99, 0x99, 0x99}, + {0x77, 0x77, 0x77}, + {0xBB, 0xBB, 0xBB}, + {0x65, 0x65, 0x65}, + }; + u8 (*driving)[3], drive_sel; + + if (voltage == OUTPUT_3V3) { + driving = driving_3v3; + drive_sel = pcr->sd30_drive_sel_3v3; + } else { + driving = driving_1v8; + drive_sel = pcr->sd30_drive_sel_1v8; + } + + rtsx_pci_write_register(pcr, SD30_CLK_DRIVE_SEL, + 0xFF, driving[drive_sel][0]); + rtsx_pci_write_register(pcr, SD30_CMD_DRIVE_SEL, + 0xFF, driving[drive_sel][1]); + rtsx_pci_write_register(pcr, SD30_DAT_DRIVE_SEL, + 0xFF, driving[drive_sel][2]); +} + +static void rts5264_force_power_down(struct rtsx_pcr *pcr, u8 pm_state, bool runtime) +{ + /* Set relink_time to 0 */ + rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE + 1, MASK_8_BIT_DEF, 0); + rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE + 2, MASK_8_BIT_DEF, 0); + rtsx_pci_write_register(pcr, AUTOLOAD_CFG_BASE + 3, + RELINK_TIME_MASK, 0); + + if (pm_state == HOST_ENTER_S3) + rtsx_pci_write_register(pcr, pcr->reg_pm_ctrl3, + D3_DELINK_MODE_EN, D3_DELINK_MODE_EN); + + if (!runtime) { + rtsx_pci_write_register(pcr, RTS5264_AUTOLOAD_CFG1, + CD_RESUME_EN_MASK, 0); + rtsx_pci_write_register(pcr, pcr->reg_pm_ctrl3, 0x01, 0x00); + rtsx_pci_write_register(pcr, RTS5264_REG_PME_FORCE_CTL, + FORCE_PM_CONTROL | FORCE_PM_VALUE, FORCE_PM_CONTROL); + } else { + rtsx_pci_write_register(pcr, RTS5264_REG_PME_FORCE_CTL, + FORCE_PM_CONTROL | FORCE_PM_VALUE, 0); + rtsx_pci_write_register(pcr, pcr->reg_pm_ctrl3, 0x01, 0x01); + rtsx_pci_write_register(pcr, pcr->reg_pm_ctrl3, + D3_DELINK_MODE_EN, 0); + rtsx_pci_write_register(pcr, RTS5264_FW_CTL, + RTS5264_INFORM_RTD3_COLD, RTS5264_INFORM_RTD3_COLD); + rtsx_pci_write_register(pcr, RTS5264_AUTOLOAD_CFG4, + RTS5264_FORCE_PRSNT_LOW, RTS5264_FORCE_PRSNT_LOW); + } + + rtsx_pci_write_register(pcr, RTS5264_REG_FPDCTL, + SSC_POWER_DOWN, SSC_POWER_DOWN); +} + +static int rts5264_enable_auto_blink(struct rtsx_pcr *pcr) +{ + return rtsx_pci_write_register(pcr, OLT_LED_CTL, + LED_SHINE_MASK, LED_SHINE_EN); +} + +static int rts5264_disable_auto_blink(struct rtsx_pcr *pcr) +{ + return rtsx_pci_write_register(pcr, OLT_LED_CTL, + LED_SHINE_MASK, LED_SHINE_DISABLE); +} + +static int rts5264_turn_on_led(struct rtsx_pcr *pcr) +{ + return rtsx_pci_write_register(pcr, GPIO_CTL, + 0x02, 0x02); +} + +static int rts5264_turn_off_led(struct rtsx_pcr *pcr) +{ + return rtsx_pci_write_register(pcr, GPIO_CTL, + 0x02, 0x00); +} + +/* SD Pull Control Enable: + * SD_DAT[3:0] ==> pull up + * SD_CD ==> pull up + * SD_WP ==> pull up + * SD_CMD ==> pull up + * SD_CLK ==> pull down + */ +static const u32 rts5264_sd_pull_ctl_enable_tbl[] = { + RTSX_REG_PAIR(CARD_PULL_CTL2, 0xAA), + RTSX_REG_PAIR(CARD_PULL_CTL3, 0xE9), + 0, +}; + +/* SD Pull Control Disable: + * SD_DAT[3:0] ==> pull down + * SD_CD ==> pull up + * SD_WP ==> pull down + * SD_CMD ==> pull down + * SD_CLK ==> pull down + */ +static const u32 rts5264_sd_pull_ctl_disable_tbl[] = { + RTSX_REG_PAIR(CARD_PULL_CTL2, 0x55), + 
RTSX_REG_PAIR(CARD_PULL_CTL3, 0xD5), + 0, +}; + +static int rts5264_sd_set_sample_push_timing_sd30(struct rtsx_pcr *pcr) +{ + rtsx_pci_write_register(pcr, SD_CFG1, SD_MODE_SELECT_MASK + | SD_ASYNC_FIFO_NOT_RST, SD_30_MODE | SD_ASYNC_FIFO_NOT_RST); + rtsx_pci_write_register(pcr, CLK_CTL, CLK_LOW_FREQ, CLK_LOW_FREQ); + rtsx_pci_write_register(pcr, CARD_CLK_SOURCE, 0xFF, + CRC_VAR_CLK0 | SD30_FIX_CLK | SAMPLE_VAR_CLK1); + rtsx_pci_write_register(pcr, CLK_CTL, CLK_LOW_FREQ, 0); + + return 0; +} + +static int rts5264_card_power_on(struct rtsx_pcr *pcr, int card) +{ + struct rtsx_cr_option *option = &pcr->option; + + if (option->ocp_en) + rtsx_pci_enable_ocp(pcr); + + rtsx_pci_write_register(pcr, REG_CRC_DUMMY_0, + CFG_SD_POW_AUTO_PD, CFG_SD_POW_AUTO_PD); + + rtsx_pci_write_register(pcr, RTS5264_LDO1_CFG1, + RTS5264_LDO1_TUNE_MASK, RTS5264_LDO1_33); + rtsx_pci_write_register(pcr, RTS5264_LDO1233318_POW_CTL, + RTS5264_LDO1_POWERON, RTS5264_LDO1_POWERON); + rtsx_pci_write_register(pcr, RTS5264_LDO1233318_POW_CTL, + RTS5264_LDO3318_POWERON, RTS5264_LDO3318_POWERON); + + msleep(20); + + rtsx_pci_write_register(pcr, CARD_OE, SD_OUTPUT_EN, SD_OUTPUT_EN); + + /* Initialize SD_CFG1 register */ + rtsx_pci_write_register(pcr, SD_CFG1, 0xFF, + SD_CLK_DIVIDE_128 | SD_20_MODE | SD_BUS_WIDTH_1BIT); + rtsx_pci_write_register(pcr, SD_SAMPLE_POINT_CTL, + 0xFF, SD20_RX_POS_EDGE); + rtsx_pci_write_register(pcr, SD_PUSH_POINT_CTL, 0xFF, 0); + rtsx_pci_write_register(pcr, CARD_STOP, SD_STOP | SD_CLR_ERR, + SD_STOP | SD_CLR_ERR); + + /* Reset SD_CFG3 register */ + rtsx_pci_write_register(pcr, SD_CFG3, SD30_CLK_END_EN, 0); + rtsx_pci_write_register(pcr, REG_SD_STOP_SDCLK_CFG, + SD30_CLK_STOP_CFG_EN | SD30_CLK_STOP_CFG1 | + SD30_CLK_STOP_CFG0, 0); + + if (pcr->extra_caps & EXTRA_CAPS_SD_SDR50 || + pcr->extra_caps & EXTRA_CAPS_SD_SDR104) + rts5264_sd_set_sample_push_timing_sd30(pcr); + + return 0; +} + +static int rts5264_switch_output_voltage(struct rtsx_pcr *pcr, u8 voltage) +{ + rtsx_pci_write_register(pcr, RTS5264_CARD_PWR_CTL, + RTS5264_PUPDC, RTS5264_PUPDC); + + switch (voltage) { + case OUTPUT_3V3: + rtsx_pci_write_register(pcr, RTS5264_LDO1233318_POW_CTL, + RTS5264_TUNE_REF_LDO3318, RTS5264_TUNE_REF_LDO3318); + rtsx_pci_write_register(pcr, RTS5264_DV3318_CFG, + RTS5264_DV3318_TUNE_MASK, RTS5264_DV3318_33); + rtsx_pci_write_register(pcr, SD_PAD_CTL, + SD_IO_USING_1V8, 0); + break; + case OUTPUT_1V8: + rtsx_pci_write_register(pcr, RTS5264_LDO1233318_POW_CTL, + RTS5264_TUNE_REF_LDO3318, RTS5264_TUNE_REF_LDO3318_DFT); + rtsx_pci_write_register(pcr, RTS5264_DV3318_CFG, + RTS5264_DV3318_TUNE_MASK, RTS5264_DV3318_18); + rtsx_pci_write_register(pcr, SD_PAD_CTL, + SD_IO_USING_1V8, SD_IO_USING_1V8); + break; + default: + return -EINVAL; + } + + /* set pad drive */ + rts5264_fill_driving(pcr, voltage); + + return 0; +} + +static void rts5264_stop_cmd(struct rtsx_pcr *pcr) +{ + rtsx_pci_writel(pcr, RTSX_HCBCTLR, STOP_CMD); + rtsx_pci_writel(pcr, RTSX_HDBCTLR, STOP_DMA); + rtsx_pci_write_register(pcr, DMACTL, DMA_RST, DMA_RST); + rtsx_pci_write_register(pcr, RBCTL, RB_FLUSH, RB_FLUSH); +} + +static void rts5264_card_before_power_off(struct rtsx_pcr *pcr) +{ + rts5264_stop_cmd(pcr); + rts5264_switch_output_voltage(pcr, OUTPUT_3V3); +} + +static int rts5264_card_power_off(struct rtsx_pcr *pcr, int card) +{ + int err = 0; + + rts5264_card_before_power_off(pcr); + err = rtsx_pci_write_register(pcr, RTS5264_LDO1233318_POW_CTL, + RTS5264_LDO_POWERON_MASK, 0); + + rtsx_pci_write_register(pcr, REG_CRC_DUMMY_0, + CFG_SD_POW_AUTO_PD, 
0); + if (pcr->option.ocp_en) + rtsx_pci_disable_ocp(pcr); + + return err; +} + +static void rts5264_enable_ocp(struct rtsx_pcr *pcr) +{ + u8 mask = 0; + u8 val = 0; + + rtsx_pci_write_register(pcr, RTS5264_LDO1_CFG0, + RTS5264_LDO1_OCP_EN | RTS5264_LDO1_OCP_LMT_EN, + RTS5264_LDO1_OCP_EN | RTS5264_LDO1_OCP_LMT_EN); + rtsx_pci_write_register(pcr, RTS5264_LDO2_CFG0, + RTS5264_LDO2_OCP_EN | RTS5264_LDO2_OCP_LMT_EN, + RTS5264_LDO2_OCP_EN | RTS5264_LDO2_OCP_LMT_EN); + rtsx_pci_write_register(pcr, RTS5264_LDO3_CFG0, + RTS5264_LDO3_OCP_EN | RTS5264_LDO3_OCP_LMT_EN, + RTS5264_LDO3_OCP_EN | RTS5264_LDO3_OCP_LMT_EN); + rtsx_pci_write_register(pcr, RTS5264_OVP_DET, + RTS5264_POW_VDET, RTS5264_POW_VDET); + + mask = SD_OCP_INT_EN | SD_DETECT_EN; + mask |= SDVIO_OCP_INT_EN | SDVIO_DETECT_EN; + val = mask; + rtsx_pci_write_register(pcr, REG_OCPCTL, mask, val); + + mask = SD_VDD3_OCP_INT_EN | SD_VDD3_DETECT_EN; + val = mask; + rtsx_pci_write_register(pcr, RTS5264_OCP_VDD3_CTL, mask, val); + + mask = RTS5264_OVP_INT_EN | RTS5264_OVP_DETECT_EN; + val = mask; + rtsx_pci_write_register(pcr, RTS5264_OVP_CTL, mask, val); +} + +static void rts5264_disable_ocp(struct rtsx_pcr *pcr) +{ + u8 mask = 0; + + mask = SD_OCP_INT_EN | SD_DETECT_EN; + mask |= SDVIO_OCP_INT_EN | SDVIO_DETECT_EN; + rtsx_pci_write_register(pcr, REG_OCPCTL, mask, 0); + + mask = SD_VDD3_OCP_INT_EN | SD_VDD3_DETECT_EN; + rtsx_pci_write_register(pcr, RTS5264_OCP_VDD3_CTL, mask, 0); + + mask = RTS5264_OVP_INT_EN | RTS5264_OVP_DETECT_EN; + rtsx_pci_write_register(pcr, RTS5264_OVP_CTL, mask, 0); + + rtsx_pci_write_register(pcr, RTS5264_LDO1_CFG0, + RTS5264_LDO1_OCP_EN | RTS5264_LDO1_OCP_LMT_EN, 0); + rtsx_pci_write_register(pcr, RTS5264_LDO2_CFG0, + RTS5264_LDO2_OCP_EN | RTS5264_LDO2_OCP_LMT_EN, 0); + rtsx_pci_write_register(pcr, RTS5264_LDO3_CFG0, + RTS5264_LDO3_OCP_EN | RTS5264_LDO3_OCP_LMT_EN, 0); + rtsx_pci_write_register(pcr, RTS5264_OVP_DET, RTS5264_POW_VDET, 0); +} + +static void rts5264_init_ocp(struct rtsx_pcr *pcr) +{ + struct rtsx_cr_option *option = &pcr->option; + + if (option->ocp_en) { + u8 mask, val; + + rtsx_pci_write_register(pcr, RTS5264_LDO1_CFG0, + RTS5264_LDO1_OCP_THD_MASK, option->sd_800mA_ocp_thd); + rtsx_pci_write_register(pcr, RTS5264_LDO1_CFG0, + RTS5264_LDO1_OCP_LMT_THD_MASK, + RTS5264_LDO1_LMT_THD_2000); + + rtsx_pci_write_register(pcr, RTS5264_LDO2_CFG0, + RTS5264_LDO2_OCP_THD_MASK, RTS5264_LDO2_OCP_THD_950); + rtsx_pci_write_register(pcr, RTS5264_LDO2_CFG0, + RTS5264_LDO2_OCP_LMT_THD_MASK, + RTS5264_LDO2_LMT_THD_2000); + + rtsx_pci_write_register(pcr, RTS5264_LDO3_CFG0, + RTS5264_LDO3_OCP_THD_MASK, RTS5264_LDO3_OCP_THD_710); + rtsx_pci_write_register(pcr, RTS5264_LDO3_CFG0, + RTS5264_LDO3_OCP_LMT_THD_MASK, + RTS5264_LDO3_LMT_THD_1500); + + rtsx_pci_write_register(pcr, RTS5264_OVP_DET, + RTS5264_TUNE_VROV_MASK, RTS5264_TUNE_VROV_1V6); + + mask = SD_OCP_GLITCH_MASK | SDVIO_OCP_GLITCH_MASK; + val = pcr->hw_param.ocp_glitch; + rtsx_pci_write_register(pcr, REG_OCPGLITCH, mask, val); + + } else { + rtsx_pci_write_register(pcr, RTS5264_LDO1_CFG0, + RTS5264_LDO1_OCP_EN | RTS5264_LDO1_OCP_LMT_EN, 0); + rtsx_pci_write_register(pcr, RTS5264_LDO2_CFG0, + RTS5264_LDO2_OCP_EN | RTS5264_LDO2_OCP_LMT_EN, 0); + rtsx_pci_write_register(pcr, RTS5264_LDO3_CFG0, + RTS5264_LDO3_OCP_EN | RTS5264_LDO3_OCP_LMT_EN, 0); + rtsx_pci_write_register(pcr, RTS5264_OVP_DET, + RTS5264_POW_VDET, 0); + } +} + +static int rts5264_get_ocpstat2(struct rtsx_pcr *pcr, u8 *val) +{ + return rtsx_pci_read_register(pcr, RTS5264_OCP_VDD3_STS, val); +} + +static int 
rts5264_get_ovpstat(struct rtsx_pcr *pcr, u8 *val) +{ + return rtsx_pci_read_register(pcr, RTS5264_OVP_STS, val); +} + +static void rts5264_clear_ocpstat(struct rtsx_pcr *pcr) +{ + u8 mask = 0; + u8 val = 0; + + mask = SD_OCP_INT_CLR | SD_OC_CLR; + mask |= SDVIO_OCP_INT_CLR | SDVIO_OC_CLR; + val = mask; + rtsx_pci_write_register(pcr, REG_OCPCTL, mask, val); + rtsx_pci_write_register(pcr, RTS5264_OCP_VDD3_CTL, + SD_VDD3_OCP_INT_CLR | SD_VDD3_OC_CLR, + SD_VDD3_OCP_INT_CLR | SD_VDD3_OC_CLR); + rtsx_pci_write_register(pcr, RTS5264_OVP_CTL, + RTS5264_OVP_INT_CLR | RTS5264_OVP_CLR, + RTS5264_OVP_INT_CLR | RTS5264_OVP_CLR); + + udelay(1000); + + rtsx_pci_write_register(pcr, REG_OCPCTL, mask, 0); + rtsx_pci_write_register(pcr, RTS5264_OCP_VDD3_CTL, + SD_VDD3_OCP_INT_CLR | SD_VDD3_OC_CLR, 0); + rtsx_pci_write_register(pcr, RTS5264_OVP_CTL, + RTS5264_OVP_INT_CLR | RTS5264_OVP_CLR, 0); +} + +static void rts5264_process_ocp(struct rtsx_pcr *pcr) +{ + if (!pcr->option.ocp_en) + return; + + rtsx_pci_get_ocpstat(pcr, &pcr->ocp_stat); + rts5264_get_ocpstat2(pcr, &pcr->ocp_stat2); + rts5264_get_ovpstat(pcr, &pcr->ovp_stat); + + if ((pcr->ocp_stat & (SD_OC_NOW | SD_OC_EVER | SDVIO_OC_NOW | SDVIO_OC_EVER)) || + (pcr->ocp_stat2 & (SD_VDD3_OC_NOW | SD_VDD3_OC_EVER)) || + (pcr->ovp_stat & (RTS5264_OVP_NOW | RTS5264_OVP_EVER))) { + rts5264_clear_ocpstat(pcr); + rts5264_card_power_off(pcr, RTSX_SD_CARD); + rtsx_pci_write_register(pcr, CARD_OE, SD_OUTPUT_EN, 0); + pcr->ocp_stat = 0; + pcr->ocp_stat2 = 0; + pcr->ovp_stat = 0; + } +} + +static void rts5264_init_from_hw(struct rtsx_pcr *pcr) +{ + struct pci_dev *pdev = pcr->pci; + u32 lval1, lval2, i; + u16 setting_reg1, setting_reg2; + u8 valid, efuse_valid, tmp; + + rtsx_pci_write_register(pcr, RTS5264_REG_PME_FORCE_CTL, + REG_EFUSE_POR | REG_EFUSE_POWER_MASK, + REG_EFUSE_POR | REG_EFUSE_POWERON); + udelay(1); + rtsx_pci_write_register(pcr, RTS5264_EFUSE_ADDR, + RTS5264_EFUSE_ADDR_MASK, 0x00); + rtsx_pci_write_register(pcr, RTS5264_EFUSE_CTL, + RTS5264_EFUSE_ENABLE | RTS5264_EFUSE_MODE_MASK, + RTS5264_EFUSE_ENABLE); + + /* Wait transfer end */ + for (i = 0; i < MAX_RW_REG_CNT; i++) { + rtsx_pci_read_register(pcr, RTS5264_EFUSE_CTL, &tmp); + if ((tmp & 0x80) == 0) + break; + } + rtsx_pci_read_register(pcr, RTS5264_EFUSE_READ_DATA, &tmp); + efuse_valid = ((tmp & 0x0C) >> 2); + pcr_dbg(pcr, "Load efuse valid: 0x%x\n", efuse_valid); + + pci_read_config_dword(pdev, PCR_SETTING_REG2, &lval2); + pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", PCR_SETTING_REG2, lval2); + /* 0x816 */ + valid = (u8)((lval2 >> 16) & 0x03); + + rtsx_pci_write_register(pcr, RTS5264_REG_PME_FORCE_CTL, + REG_EFUSE_POR, 0); + pcr_dbg(pcr, "Disable efuse por!\n"); + + if (efuse_valid == 2 || efuse_valid == 3) { + if (valid == 3) { + /* Bypass efuse */ + setting_reg1 = PCR_SETTING_REG1; + setting_reg2 = PCR_SETTING_REG2; + } else { + /* Use efuse data */ + setting_reg1 = PCR_SETTING_REG4; + setting_reg2 = PCR_SETTING_REG5; + } + } else if (efuse_valid == 0) { + // default + setting_reg1 = PCR_SETTING_REG1; + setting_reg2 = PCR_SETTING_REG2; + } else { + return; + } + + pci_read_config_dword(pdev, setting_reg2, &lval2); + pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", setting_reg2, lval2); + + if (!rts5264_vendor_setting_valid(lval2)) { + pcr_dbg(pcr, "skip fetch vendor setting\n"); + return; + } + + pcr->rtd3_en = rts5264_reg_to_rtd3(lval2); + + if (rts5264_reg_check_reverse_socket(lval2)) + pcr->flags |= PCR_REVERSE_SOCKET; + + pci_read_config_dword(pdev, setting_reg1, &lval1); + pcr_dbg(pcr, "Cfg 0x%x: 0x%x\n", setting_reg1, 
lval1); + + pcr->aspm_en = rts5264_reg_to_aspm(lval1); + pcr->sd30_drive_sel_1v8 = rts5264_reg_to_sd30_drive_sel_1v8(lval1); + pcr->sd30_drive_sel_3v3 = rts5264_reg_to_sd30_drive_sel_3v3(lval1); + + if (setting_reg1 == PCR_SETTING_REG1) { + /* store setting */ + rtsx_pci_write_register(pcr, 0xFF0C, 0xFF, (u8)(lval1 & 0xFF)); + rtsx_pci_write_register(pcr, 0xFF0D, 0xFF, (u8)((lval1 >> 8) & 0xFF)); + rtsx_pci_write_register(pcr, 0xFF0E, 0xFF, (u8)((lval1 >> 16) & 0xFF)); + rtsx_pci_write_register(pcr, 0xFF0F, 0xFF, (u8)((lval1 >> 24) & 0xFF)); + rtsx_pci_write_register(pcr, 0xFF10, 0xFF, (u8)(lval2 & 0xFF)); + rtsx_pci_write_register(pcr, 0xFF11, 0xFF, (u8)((lval2 >> 8) & 0xFF)); + rtsx_pci_write_register(pcr, 0xFF12, 0xFF, (u8)((lval2 >> 16) & 0xFF)); + + pci_write_config_dword(pdev, PCR_SETTING_REG4, lval1); + lval2 = lval2 & 0x00FFFFFF; + pci_write_config_dword(pdev, PCR_SETTING_REG5, lval2); + } +} + +static void rts5264_init_from_cfg(struct rtsx_pcr *pcr) +{ + struct rtsx_cr_option *option = &pcr->option; + + if (rtsx_check_dev_flag(pcr, ASPM_L1_1_EN | ASPM_L1_2_EN + | PM_L1_1_EN | PM_L1_2_EN)) + rtsx_pci_disable_oobs_polling(pcr); + else + rtsx_pci_enable_oobs_polling(pcr); + + rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, 0xFF, 0); + + if (option->ltr_en) { + if (option->ltr_enabled) + rtsx_set_ltr_latency(pcr, option->ltr_active_latency); + } +} + +static int rts5264_extra_init_hw(struct rtsx_pcr *pcr) +{ + struct rtsx_cr_option *option = &pcr->option; + + rtsx_pci_write_register(pcr, RTS5264_AUTOLOAD_CFG1, + CD_RESUME_EN_MASK, CD_RESUME_EN_MASK); + rtsx_pci_write_register(pcr, REG_VREF, PWD_SUSPND_EN, PWD_SUSPND_EN); + + rts5264_init_from_cfg(pcr); + rts5264_init_from_hw(pcr); + + /* power off efuse */ + rtsx_pci_write_register(pcr, RTS5264_REG_PME_FORCE_CTL, + REG_EFUSE_POWER_MASK, REG_EFUSE_POWEROFF); + rtsx_pci_write_register(pcr, RTS5264_AUTOLOAD_CFG2, + RTS5264_CHIP_RST_N_SEL, 0); + rtsx_pci_write_register(pcr, RTS5264_REG_LDO12_CFG, + RTS5264_LDO12_SR_MASK, RTS5264_LDO12_SR_0_0_MS); + rtsx_pci_write_register(pcr, CDGW, 0xFF, 0x01); + rtsx_pci_write_register(pcr, RTS5264_CKMUX_MBIAS_PWR, + RTS5264_POW_CKMUX, RTS5264_POW_CKMUX); + rtsx_pci_write_register(pcr, RTS5264_CMD_OE_START_EARLY, + RTS5264_CMD_OE_EARLY_EN | RTS5264_CMD_OE_EARLY_CYCLE_MASK, + RTS5264_CMD_OE_EARLY_EN); + rtsx_pci_write_register(pcr, RTS5264_DAT_OE_START_EARLY, + RTS5264_DAT_OE_EARLY_EN | RTS5264_DAT_OE_EARLY_CYCLE_MASK, + RTS5264_DAT_OE_EARLY_EN); + rtsx_pci_write_register(pcr, SSC_DIV_N_0, 0xFF, 0x5D); + + rtsx_pci_write_register(pcr, RTS5264_PWR_CUT, + RTS5264_CFG_MEM_PD, RTS5264_CFG_MEM_PD); + rtsx_pci_write_register(pcr, L1SUB_CONFIG1, + AUX_CLK_ACTIVE_SEL_MASK, MAC_CKSW_DONE); + rtsx_pci_write_register(pcr, L1SUB_CONFIG3, 0xFF, 0); + rtsx_pci_write_register(pcr, RTS5264_AUTOLOAD_CFG4, + RTS5264_AUX_CLK_16M_EN, 0); + + /* Release PRSNT# */ + rtsx_pci_write_register(pcr, RTS5264_AUTOLOAD_CFG4, + RTS5264_FORCE_PRSNT_LOW, 0); + rtsx_pci_write_register(pcr, PCLK_CTL, + PCLK_MODE_SEL, PCLK_MODE_SEL); + + /* LED shine disabled, set initial shine cycle period */ + rtsx_pci_write_register(pcr, OLT_LED_CTL, 0x0F, 0x02); + + /* Configure driving */ + rts5264_fill_driving(pcr, OUTPUT_3V3); + + if (pcr->flags & PCR_REVERSE_SOCKET) + rtsx_pci_write_register(pcr, PETXCFG, 0x30, 0x30); + else + rtsx_pci_write_register(pcr, PETXCFG, 0x30, 0x00); + + /* + * If u_force_clkreq_0 is enabled, CLKREQ# PIN will be forced + * to drive low, and we forcibly request clock. 
+ */ + if (option->force_clkreq_0) + rtsx_pci_write_register(pcr, PETXCFG, + FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_LOW); + else + rtsx_pci_write_register(pcr, PETXCFG, + FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_HIGH); + + rtsx_pci_write_register(pcr, PWD_SUSPEND_EN, 0xFF, 0xFF); + rtsx_pci_write_register(pcr, RBCTL, U_AUTO_DMA_EN_MASK, 0); + rtsx_pci_write_register(pcr, RTS5264_AUTOLOAD_CFG4, + RTS5264_F_HIGH_RC_MASK, RTS5264_F_HIGH_RC_400K); + + if (pcr->rtd3_en) { + rtsx_pci_write_register(pcr, pcr->reg_pm_ctrl3, 0x01, 0x00); + rtsx_pci_write_register(pcr, RTS5264_REG_PME_FORCE_CTL, + FORCE_PM_CONTROL | FORCE_PM_VALUE, 0); + } else { + rtsx_pci_write_register(pcr, pcr->reg_pm_ctrl3, 0x01, 0x00); + rtsx_pci_write_register(pcr, RTS5264_REG_PME_FORCE_CTL, + FORCE_PM_CONTROL | FORCE_PM_VALUE, FORCE_PM_CONTROL); + } + rtsx_pci_write_register(pcr, pcr->reg_pm_ctrl3, D3_DELINK_MODE_EN, 0x00); + + /* Clear Enter RTD3_cold Information*/ + rtsx_pci_write_register(pcr, RTS5264_FW_CTL, + RTS5264_INFORM_RTD3_COLD, 0); + + return 0; +} + +static void rts5264_enable_aspm(struct rtsx_pcr *pcr, bool enable) +{ + u8 val = FORCE_ASPM_CTL0 | FORCE_ASPM_CTL1; + u8 mask = FORCE_ASPM_VAL_MASK | FORCE_ASPM_CTL0 | FORCE_ASPM_CTL1; + + if (pcr->aspm_enabled == enable) + return; + + val |= (pcr->aspm_en & 0x02); + rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, mask, val); + pcie_capability_clear_and_set_word(pcr->pci, PCI_EXP_LNKCTL, + PCI_EXP_LNKCTL_ASPMC, pcr->aspm_en); + pcr->aspm_enabled = enable; +} + +static void rts5264_disable_aspm(struct rtsx_pcr *pcr, bool enable) +{ + u8 val = FORCE_ASPM_CTL0 | FORCE_ASPM_CTL1; + u8 mask = FORCE_ASPM_VAL_MASK | FORCE_ASPM_CTL0 | FORCE_ASPM_CTL1; + + if (pcr->aspm_enabled == enable) + return; + + pcie_capability_clear_and_set_word(pcr->pci, PCI_EXP_LNKCTL, + PCI_EXP_LNKCTL_ASPMC, 0); + rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, mask, val); + rtsx_pci_write_register(pcr, SD_CFG1, SD_ASYNC_FIFO_NOT_RST, 0); + udelay(10); + pcr->aspm_enabled = enable; +} + +static void rts5264_set_aspm(struct rtsx_pcr *pcr, bool enable) +{ + if (enable) + rts5264_enable_aspm(pcr, true); + else + rts5264_disable_aspm(pcr, false); +} + +static void rts5264_set_l1off_cfg_sub_d0(struct rtsx_pcr *pcr, int active) +{ + struct rtsx_cr_option *option = &(pcr->option); + + u32 interrupt = rtsx_pci_readl(pcr, RTSX_BIPR); + int card_exist = (interrupt & SD_EXIST); + int aspm_L1_1, aspm_L1_2; + u8 val = 0; + + aspm_L1_1 = rtsx_check_dev_flag(pcr, ASPM_L1_1_EN); + aspm_L1_2 = rtsx_check_dev_flag(pcr, ASPM_L1_2_EN); + + if (active) { + /* Run, latency: 60us */ + if (aspm_L1_1) + val = option->ltr_l1off_snooze_sspwrgate; + } else { + /* L1off, latency: 300us */ + if (aspm_L1_2) + val = option->ltr_l1off_sspwrgate; + } + + if (aspm_L1_1 || aspm_L1_2) { + if (rtsx_check_dev_flag(pcr, + LTR_L1SS_PWR_GATE_CHECK_CARD_EN)) { + if (card_exist) + val &= ~L1OFF_MBIAS2_EN_5250; + else + val |= L1OFF_MBIAS2_EN_5250; + } + } + rtsx_set_l1off_sub(pcr, val); +} + +static const struct pcr_ops rts5264_pcr_ops = { + .turn_on_led = rts5264_turn_on_led, + .turn_off_led = rts5264_turn_off_led, + .extra_init_hw = rts5264_extra_init_hw, + .enable_auto_blink = rts5264_enable_auto_blink, + .disable_auto_blink = rts5264_disable_auto_blink, + .card_power_on = rts5264_card_power_on, + .card_power_off = rts5264_card_power_off, + .switch_output_voltage = rts5264_switch_output_voltage, + .force_power_down = rts5264_force_power_down, + .stop_cmd = rts5264_stop_cmd, + .set_aspm = rts5264_set_aspm, + .set_l1off_cfg_sub_d0 = 
rts5264_set_l1off_cfg_sub_d0, + .enable_ocp = rts5264_enable_ocp, + .disable_ocp = rts5264_disable_ocp, + .init_ocp = rts5264_init_ocp, + .process_ocp = rts5264_process_ocp, + .clear_ocpstat = rts5264_clear_ocpstat, +}; + +static inline u8 double_ssc_depth(u8 depth) +{ + return ((depth > 1) ? (depth - 1) : depth); +} + +int rts5264_pci_switch_clock(struct rtsx_pcr *pcr, unsigned int card_clock, + u8 ssc_depth, bool initial_mode, bool double_clk, bool vpclk) +{ + int err, clk; + u16 n; + u8 clk_divider, mcu_cnt, div; + static const u8 depth[] = { + [RTSX_SSC_DEPTH_4M] = RTS5264_SSC_DEPTH_4M, + [RTSX_SSC_DEPTH_2M] = RTS5264_SSC_DEPTH_2M, + [RTSX_SSC_DEPTH_1M] = RTS5264_SSC_DEPTH_1M, + [RTSX_SSC_DEPTH_500K] = RTS5264_SSC_DEPTH_512K, + }; + + if (initial_mode) { + /* We use 250k(around) here, in initial stage */ + clk_divider = SD_CLK_DIVIDE_128; + card_clock = 30000000; + } else { + clk_divider = SD_CLK_DIVIDE_0; + } + err = rtsx_pci_write_register(pcr, SD_CFG1, + SD_CLK_DIVIDE_MASK, clk_divider); + if (err < 0) + return err; + + card_clock /= 1000000; + pcr_dbg(pcr, "Switch card clock to %dMHz\n", card_clock); + + clk = card_clock; + if (!initial_mode && double_clk) + clk = card_clock * 2; + pcr_dbg(pcr, "Internal SSC clock: %dMHz (cur_clock = %d)\n", + clk, pcr->cur_clock); + + if (clk == pcr->cur_clock) + return 0; + + if (pcr->ops->conv_clk_and_div_n) + n = pcr->ops->conv_clk_and_div_n(clk, CLK_TO_DIV_N); + else + n = clk - 4; + if ((clk <= 4) || (n > 396)) + return -EINVAL; + + mcu_cnt = 125/clk + 3; + if (mcu_cnt > 15) + mcu_cnt = 15; + + div = CLK_DIV_1; + while ((n < MIN_DIV_N_PCR - 4) && (div < CLK_DIV_8)) { + if (pcr->ops->conv_clk_and_div_n) { + int dbl_clk = pcr->ops->conv_clk_and_div_n(n, + DIV_N_TO_CLK) * 2; + n = pcr->ops->conv_clk_and_div_n(dbl_clk, + CLK_TO_DIV_N); + } else { + n = (n + 4) * 2 - 4; + } + div++; + } + + n = (n / 2) - 1; + pcr_dbg(pcr, "n = %d, div = %d\n", n, div); + + ssc_depth = depth[ssc_depth]; + if (double_clk) + ssc_depth = double_ssc_depth(ssc_depth); + + if (ssc_depth) { + if (div == CLK_DIV_2) { + if (ssc_depth > 1) + ssc_depth -= 1; + else + ssc_depth = RTS5264_SSC_DEPTH_8M; + } else if (div == CLK_DIV_4) { + if (ssc_depth > 2) + ssc_depth -= 2; + else + ssc_depth = RTS5264_SSC_DEPTH_8M; + } else if (div == CLK_DIV_8) { + if (ssc_depth > 3) + ssc_depth -= 3; + else + ssc_depth = RTS5264_SSC_DEPTH_8M; + } + } else { + ssc_depth = 0; + } + pcr_dbg(pcr, "ssc_depth = %d\n", ssc_depth); + + rtsx_pci_init_cmd(pcr); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_CTL, + CHANGE_CLK, CHANGE_CLK); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_DIV, + 0xFF, (div << 4) | mcu_cnt); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL1, SSC_RSTB, 0); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL2, + SSC_DEPTH_MASK, ssc_depth); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_DIV_N_0, 0xFF, n); + + if (is_version(pcr, 0x5264, IC_VER_A)) { + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL1, SSC_RSTB, 0); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, RTS5264_CARD_CLK_SRC2, + RTS5264_REG_BIG_KVCO_A, 0); + } else { + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL1, SSC_RSTB, SSC_RSTB); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, RTS5264_SYS_DUMMY_1, + RTS5264_REG_BIG_KVCO, 0); + } + + if (vpclk) { + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_VPCLK0_CTL, + PHASE_NOT_RESET, 0); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_VPCLK1_CTL, + PHASE_NOT_RESET, 0); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_VPCLK0_CTL, + PHASE_NOT_RESET, PHASE_NOT_RESET); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_VPCLK1_CTL, + 
PHASE_NOT_RESET, PHASE_NOT_RESET); + } + + err = rtsx_pci_send_cmd(pcr, 2000); + if (err < 0) + return err; + + /* Wait SSC clock stable */ + udelay(SSC_CLOCK_STABLE_WAIT); + err = rtsx_pci_write_register(pcr, CLK_CTL, CHANGE_CLK, 0); + if (err < 0) + return err; + + pcr->cur_clock = clk; + return 0; +} + +void rts5264_init_params(struct rtsx_pcr *pcr) +{ + struct rtsx_cr_option *option = &pcr->option; + struct rtsx_hw_param *hw_param = &pcr->hw_param; + u8 val; + + pcr->extra_caps = EXTRA_CAPS_SD_SDR50 | EXTRA_CAPS_SD_SDR104; + pcr->extra_caps |= EXTRA_CAPS_NO_MMC; + rtsx_pci_read_register(pcr, RTS5264_FW_STATUS, &val); + if (!(val & RTS5264_EXPRESS_LINK_FAIL_MASK)) + pcr->extra_caps |= EXTRA_CAPS_SD_EXPRESS; + pcr->num_slots = 1; + pcr->ops = &rts5264_pcr_ops; + + pcr->flags = 0; + pcr->card_drive_sel = RTSX_CARD_DRIVE_DEFAULT; + pcr->sd30_drive_sel_1v8 = 0x00; + pcr->sd30_drive_sel_3v3 = 0x00; + pcr->aspm_en = ASPM_L1_EN; + pcr->aspm_mode = ASPM_MODE_REG; + pcr->tx_initial_phase = SET_CLOCK_PHASE(24, 24, 11); + pcr->rx_initial_phase = SET_CLOCK_PHASE(24, 6, 5); + + pcr->ic_version = rts5264_get_ic_version(pcr); + pcr->sd_pull_ctl_enable_tbl = rts5264_sd_pull_ctl_enable_tbl; + pcr->sd_pull_ctl_disable_tbl = rts5264_sd_pull_ctl_disable_tbl; + + pcr->reg_pm_ctrl3 = RTS5264_AUTOLOAD_CFG3; + + option->dev_flags = (LTR_L1SS_PWR_GATE_CHECK_CARD_EN + | LTR_L1SS_PWR_GATE_EN); + option->ltr_en = true; + + /* init latency of active, idle, L1OFF to 60us, 300us, 3ms */ + option->ltr_active_latency = LTR_ACTIVE_LATENCY_DEF; + option->ltr_idle_latency = LTR_IDLE_LATENCY_DEF; + option->ltr_l1off_latency = LTR_L1OFF_LATENCY_DEF; + option->l1_snooze_delay = L1_SNOOZE_DELAY_DEF; + option->ltr_l1off_sspwrgate = 0x7F; + option->ltr_l1off_snooze_sspwrgate = 0x78; + + option->ocp_en = 1; + hw_param->interrupt_en |= (SD_OC_INT_EN | SD_OVP_INT_EN); + hw_param->ocp_glitch = SD_OCP_GLITCH_800U | SDVIO_OCP_GLITCH_800U; + option->sd_800mA_ocp_thd = RTS5264_LDO1_OCP_THD_1150; +} diff --git a/drivers/misc/cardreader/rts5264.h b/drivers/misc/cardreader/rts5264.h new file mode 100644 index 000000000000..e3cbbf2fe1a4 --- /dev/null +++ b/drivers/misc/cardreader/rts5264.h @@ -0,0 +1,278 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* Driver for Realtek PCI-Express card reader + * + * Copyright(c) 2018-2019 Realtek Semiconductor Corp. All rights reserved. 
+ * + * Author: + * Ricky Wu <ricky_wu@realtek.com> + */ +#ifndef RTS5264_H +#define RTS5264_H + +/*New add*/ +#define rts5264_vendor_setting_valid(reg) ((reg) & 0x010000) +#define rts5264_reg_to_aspm(reg) \ + (((~(reg) >> 28) & 0x02) | (((reg) >> 28) & 0x01)) +#define rts5264_reg_check_reverse_socket(reg) ((reg) & 0x04) +#define rts5264_reg_to_sd30_drive_sel_1v8(reg) (((reg) >> 22) & 0x03) +#define rts5264_reg_to_sd30_drive_sel_3v3(reg) (((reg) >> 16) & 0x03) +#define rts5264_reg_to_rtd3(reg) ((reg) & 0x08) + +#define RTS5264_AUTOLOAD_CFG0 0xFF7B +#define RTS5264_AUTOLOAD_CFG1 0xFF7C +#define RTS5264_AUTOLOAD_CFG3 0xFF7E +#define RTS5264_AUTOLOAD_CFG4 0xFF7F +#define RTS5264_FORCE_PRSNT_LOW (1 << 6) +#define RTS5264_AUX_CLK_16M_EN (1 << 5) +#define RTS5264_F_HIGH_RC_MASK (1 << 4) +#define RTS5264_F_HIGH_RC_1_6M (1 << 4) +#define RTS5264_F_HIGH_RC_400K (0 << 4) + +/* SSC_CTL2 0xFC12 */ +#define RTS5264_SSC_DEPTH_MASK 0x07 +#define RTS5264_SSC_DEPTH_DISALBE 0x00 +#define RTS5264_SSC_DEPTH_8M 0x01 +#define RTS5264_SSC_DEPTH_4M 0x02 +#define RTS5264_SSC_DEPTH_2M 0x03 +#define RTS5264_SSC_DEPTH_1M 0x04 +#define RTS5264_SSC_DEPTH_512K 0x05 +#define RTS5264_SSC_DEPTH_256K 0x06 +#define RTS5264_SSC_DEPTH_128K 0x07 + +#define RTS5264_CARD_CLK_SRC2 0xFC2F +#define RTS5264_REG_BIG_KVCO_A 0x20 + +/* efuse control register*/ +#define RTS5264_EFUSE_CTL 0xFC30 +#define RTS5264_EFUSE_ENABLE 0x80 +/* EFUSE_MODE: 0=READ 1=PROGRAM */ +#define RTS5264_EFUSE_MODE_MASK 0x40 +#define RTS5264_EFUSE_PROGRAM 0x40 + +#define RTS5264_EFUSE_ADDR 0xFC31 +#define RTS5264_EFUSE_ADDR_MASK 0x3F + +#define RTS5264_EFUSE_WRITE_DATA 0xFC32 +#define RTS5264_EFUSE_READ_DATA 0xFC34 + +#define RTS5264_SYS_DUMMY_1 0xFC35 +#define RTS5264_REG_BIG_KVCO 0x04 + +/* DMACTL 0xFE2C */ +#define RTS5264_DMA_PACK_SIZE_MASK 0x70 + +#define RTS5264_FW_CFG1 0xFF55 +#define RTS5264_SYS_CLK_SEL_MCU_CLK (0x01<<7) +#define RTS5264_CRC_CLK_SEL_MCU_CLK (0x01<<6) +#define RTS5264_FAKE_MCU_CLOCK_GATING (0x01<<5) +#define RTS5264_MCU_BUS_SEL_MASK (0x01<<4) + +/* FW status register */ +#define RTS5264_FW_STATUS 0xFF56 +#define RTS5264_EXPRESS_LINK_FAIL_MASK (0x01<<7) + +/* FW control register */ +#define RTS5264_FW_CTL 0xFF5F +#define RTS5264_INFORM_RTD3_COLD (0x01<<5) + +#define RTS5264_REG_FPDCTL 0xFF60 + +#define RTS5264_REG_LDO12_CFG 0xFF6E +#define RTS5264_LDO12_SR_MASK (0x03<<6) +#define RTS5264_LDO12_SR_1_0_MS (0x03<<6) +#define RTS5264_LDO12_SR_0_5_MS (0x02<<6) +#define RTS5264_LDO12_SR_0_2_5_MS (0x01<<6) +#define RTS5264_LDO12_SR_0_0_MS (0x00<<6) +#define RTS5264_LDO12_VO_TUNE_MASK (0x07<<1) +#define RTS5264_LDO12_115 (0x03<<1) +#define RTS5264_LDO12_120 (0x04<<1) +#define RTS5264_LDO12_125 (0x05<<1) +#define RTS5264_LDO12_130 (0x06<<1) +#define RTS5264_LDO12_135 (0x07<<1) + +/* LDO control register */ +#define RTS5264_CARD_PWR_CTL 0xFD50 +#define RTS5264_SD_CLK_ISO (0x01<<7) +#define RTS5264_PAD_SD_DAT_FW_CTRL (0x01<<6) +#define RTS5264_PUPDC (0x01<<5) +#define RTS5264_SD_CMD_ISO (0x01<<4) + +#define RTS5264_OCP_VDD3_CTL 0xFD89 +#define SD_VDD3_DETECT_EN 0x08 +#define SD_VDD3_OCP_INT_EN 0x04 +#define SD_VDD3_OCP_INT_CLR 0x02 +#define SD_VDD3_OC_CLR 0x01 + +#define RTS5264_OCP_VDD3_STS 0xFD8A +#define SD_VDD3_OCP_DETECT 0x08 +#define SD_VDD3_OC_NOW 0x04 +#define SD_VDD3_OC_EVER 0x02 + +#define RTS5264_OVP_CTL 0xFD8D +#define RTS5264_OVP_TIME_MASK 0xF0 +#define RTS5264_OVP_TIME_DFT 0x50 +#define RTS5264_OVP_DETECT_EN 0x08 +#define RTS5264_OVP_INT_EN 0x04 +#define RTS5264_OVP_INT_CLR 0x02 +#define RTS5264_OVP_CLR 0x01 + +#define RTS5264_OVP_STS 
0xFD8E +#define RTS5264_OVP_GLTCH_TIME_MASK 0xF0 +#define RTS5264_OVP_GLTCH_TIME_DFT 0x50 +#define RTS5264_VOVER_DET 0x08 +#define RTS5264_OVP_NOW 0x04 +#define RTS5264_OVP_EVER 0x02 + +#define RTS5264_CMD_OE_START_EARLY 0xFDCB +#define RTS5264_CMD_OE_EARLY_LEAVE 0x08 +#define RTS5264_CMD_OE_EARLY_CYCLE_MASK 0x06 +#define RTS5264_CMD_OE_EARLY_4CYCLE 0x06 +#define RTS5264_CMD_OE_EARLY_3CYCLE 0x04 +#define RTS5264_CMD_OE_EARLY_2CYCLE 0x02 +#define RTS5264_CMD_OE_EARLY_1CYCLE 0x00 +#define RTS5264_CMD_OE_EARLY_EN 0x01 + +#define RTS5264_DAT_OE_START_EARLY 0xFDCC +#define RTS5264_DAT_OE_EARLY_LEAVE 0x08 +#define RTS5264_DAT_OE_EARLY_CYCLE_MASK 0x06 +#define RTS5264_DAT_OE_EARLY_4CYCLE 0x06 +#define RTS5264_DAT_OE_EARLY_3CYCLE 0x04 +#define RTS5264_DAT_OE_EARLY_2CYCLE 0x02 +#define RTS5264_DAT_OE_EARLY_1CYCLE 0x00 +#define RTS5264_DAT_OE_EARLY_EN 0x01 + +#define RTS5264_LDO1233318_POW_CTL 0xFF70 +#define RTS5264_TUNE_REF_LDO3318 (0x03<<6) +#define RTS5264_TUNE_REF_LDO3318_DFT (0x02<<6) +#define RTS5264_LDO3318_POWERON (0x01<<3) +#define RTS5264_LDO3_POWERON (0x01<<2) +#define RTS5264_LDO2_POWERON (0x01<<1) +#define RTS5264_LDO1_POWERON (0x01<<0) +#define RTS5264_LDO_POWERON_MASK (0x0F<<0) + +#define RTS5264_DV3318_CFG 0xFF71 +#define RTS5264_DV3318_TUNE_MASK (0x07<<4) +#define RTS5264_DV3318_18 (0x02<<4) +#define RTS5264_DV3318_19 (0x04<<4) +#define RTS5264_DV3318_33 (0x07<<4) + +#define RTS5264_LDO1_CFG0 0xFF72 +#define RTS5264_LDO1_OCP_THD_MASK (0x07 << 5) +#define RTS5264_LDO1_OCP_EN (0x01 << 4) +#define RTS5264_LDO1_OCP_LMT_THD_MASK (0x03 << 2) +#define RTS5264_LDO1_OCP_LMT_EN (0x01 << 1) + +#define RTS5264_LDO1_OCP_THD_850 (0x00<<5) +#define RTS5264_LDO1_OCP_THD_950 (0x01<<5) +#define RTS5264_LDO1_OCP_THD_1050 (0x02<<5) +#define RTS5264_LDO1_OCP_THD_1100 (0x03<<5) +#define RTS5264_LDO1_OCP_THD_1150 (0x04<<5) +#define RTS5264_LDO1_OCP_THD_1200 (0x05<<5) +#define RTS5264_LDO1_OCP_THD_1300 (0x06<<5) +#define RTS5264_LDO1_OCP_THD_1350 (0x07<<5) + +#define RTS5264_LDO1_LMT_THD_1700 (0x00<<2) +#define RTS5264_LDO1_LMT_THD_1800 (0x01<<2) +#define RTS5264_LDO1_LMT_THD_1900 (0x02<<2) +#define RTS5264_LDO1_LMT_THD_2000 (0x03<<2) + +#define RTS5264_LDO1_CFG1 0xFF73 +#define RTS5264_LDO1_TUNE_MASK (0x07<<1) +#define RTS5264_LDO1_18 (0x05<<1) +#define RTS5264_LDO1_33 (0x07<<1) +#define RTS5264_LDO1_PWD_MASK (0x01<<0) + +#define RTS5264_LDO2_CFG0 0xFF74 +#define RTS5264_LDO2_OCP_THD_MASK (0x07<<5) +#define RTS5264_LDO2_OCP_EN (0x01<<4) +#define RTS5264_LDO2_OCP_LMT_THD_MASK (0x03<<2) +#define RTS5264_LDO2_OCP_LMT_EN (0x01<<1) + +#define RTS5264_LDO2_OCP_THD_750 (0x00<<5) +#define RTS5264_LDO2_OCP_THD_850 (0x01<<5) +#define RTS5264_LDO2_OCP_THD_900 (0x02<<5) +#define RTS5264_LDO2_OCP_THD_950 (0x03<<5) +#define RTS5264_LDO2_OCP_THD_1050 (0x04<<5) +#define RTS5264_LDO2_OCP_THD_1100 (0x05<<5) +#define RTS5264_LDO2_OCP_THD_1150 (0x06<<5) +#define RTS5264_LDO2_OCP_THD_1200 (0x07<<5) + +#define RTS5264_LDO2_LMT_THD_1700 (0x00<<2) +#define RTS5264_LDO2_LMT_THD_1800 (0x01<<2) +#define RTS5264_LDO2_LMT_THD_1900 (0x02<<2) +#define RTS5264_LDO2_LMT_THD_2000 (0x03<<2) + +#define RTS5264_LDO2_CFG1 0xFF75 +#define RTS5264_LDO2_TUNE_MASK (0x07<<1) +#define RTS5264_LDO2_18 (0x02<<1) +#define RTS5264_LDO2_185 (0x03<<1) +#define RTS5264_LDO2_19 (0x04<<1) +#define RTS5264_LDO2_195 (0x05<<1) +#define RTS5264_LDO2_33 (0x07<<1) +#define RTS5264_LDO2_PWD_MASK (0x01<<0) + +#define RTS5264_LDO3_CFG0 0xFF76 +#define RTS5264_LDO3_OCP_THD_MASK (0x07<<5) +#define RTS5264_LDO3_OCP_EN (0x01<<4) +#define RTS5264_LDO3_OCP_LMT_THD_MASK 
(0x03<<2) +#define RTS5264_LDO3_OCP_LMT_EN (0x01<<1) + +#define RTS5264_LDO3_OCP_THD_610 (0x00<<5) +#define RTS5264_LDO3_OCP_THD_630 (0x01<<5) +#define RTS5264_LDO3_OCP_THD_670 (0x02<<5) +#define RTS5264_LDO3_OCP_THD_710 (0x03<<5) +#define RTS5264_LDO3_OCP_THD_750 (0x04<<5) +#define RTS5264_LDO3_OCP_THD_770 (0x05<<5) +#define RTS5264_LDO3_OCP_THD_810 (0x06<<5) +#define RTS5264_LDO3_OCP_THD_850 (0x07<<5) + +#define RTS5264_LDO3_LMT_THD_1200 (0x00<<2) +#define RTS5264_LDO3_LMT_THD_1300 (0x01<<2) +#define RTS5264_LDO3_LMT_THD_1400 (0x02<<2) +#define RTS5264_LDO3_LMT_THD_1500 (0x03<<2) + +#define RTS5264_LDO3_CFG1 0xFF77 +#define RTS5264_LDO3_TUNE_MASK (0x07<<1) +#define RTS5264_LDO3_12 (0x02<<1) +#define RTS5264_LDO3_125 (0x03<<1) +#define RTS5264_LDO3_13 (0x04<<1) +#define RTS5264_LDO3_135 (0x05<<1) +#define RTS5264_LDO3_33 (0x07<<1) +#define RTS5264_LDO3_PWD_MASK (0x01<<0) + +#define RTS5264_REG_PME_FORCE_CTL 0xFF78 +#define FORCE_PM_CONTROL 0x20 +#define FORCE_PM_VALUE 0x10 +#define REG_EFUSE_BYPASS 0x08 +#define REG_EFUSE_POR 0x04 +#define REG_EFUSE_POWER_MASK 0x03 +#define REG_EFUSE_POWERON 0x03 +#define REG_EFUSE_POWEROFF 0x00 + +#define RTS5264_PWR_CUT 0xFF81 +#define RTS5264_CFG_MEM_PD 0xF0 + +#define RTS5264_OVP_DET 0xFF8A +#define RTS5264_POW_VDET 0x04 +#define RTS5264_TUNE_VROV_MASK 0x03 +#define RTS5264_TUNE_VROV_2V 0x03 +#define RTS5264_TUNE_VROV_1V8 0x02 +#define RTS5264_TUNE_VROV_1V6 0x01 +#define RTS5264_TUNE_VROV_1V4 0x00 + +#define RTS5264_CKMUX_MBIAS_PWR 0xFF8B +#define RTS5264_NON_XTAL_SEL 0x80 +#define RTS5264_POW_CKMUX 0x40 +#define RTS5264_LVD_MASK 0x04 +#define RTS5264_POW_PSW_MASK 0x03 +#define RTS5264_POW_PSW_DFT 0x03 + +/* Single LUN, support SD/SD EXPRESS */ +#define DEFAULT_SINGLE 0 +#define SD_LUN 1 +#define SD_EXPRESS_LUN 2 + +int rts5264_pci_switch_clock(struct rtsx_pcr *pcr, unsigned int card_clock, + u8 ssc_depth, bool initial_mode, bool double_clk, bool vpclk); + +#endif /* RTS5264_H */ diff --git a/drivers/misc/cardreader/rtsx_pcr.c b/drivers/misc/cardreader/rtsx_pcr.c index a30751ad3733..1a64364700eb 100644 --- a/drivers/misc/cardreader/rtsx_pcr.c +++ b/drivers/misc/cardreader/rtsx_pcr.c @@ -26,6 +26,7 @@ #include "rtsx_pcr.h" #include "rts5261.h" #include "rts5228.h" +#include "rts5264.h" static bool msi_en = true; module_param(msi_en, bool, S_IRUGO | S_IWUSR); @@ -54,6 +55,7 @@ static const struct pci_device_id rtsx_pci_ids[] = { { PCI_DEVICE(0x10EC, 0x5260), PCI_CLASS_OTHERS << 16, 0xFF0000 }, { PCI_DEVICE(0x10EC, 0x5261), PCI_CLASS_OTHERS << 16, 0xFF0000 }, { PCI_DEVICE(0x10EC, 0x5228), PCI_CLASS_OTHERS << 16, 0xFF0000 }, + { PCI_DEVICE(0x10EC, 0x5264), PCI_CLASS_OTHERS << 16, 0xFF0000 }, { 0, } }; @@ -714,6 +716,9 @@ int rtsx_pci_switch_clock(struct rtsx_pcr *pcr, unsigned int card_clock, if (PCI_PID(pcr) == PID_5228) return rts5228_pci_switch_clock(pcr, card_clock, ssc_depth, initial_mode, double_clk, vpclk); + if (PCI_PID(pcr) == PID_5264) + return rts5264_pci_switch_clock(pcr, card_clock, + ssc_depth, initial_mode, double_clk, vpclk); if (initial_mode) { /* We use 250k(around) here, in initial stage */ @@ -987,7 +992,8 @@ static irqreturn_t rtsx_pci_isr(int irq, void *dev_id) int_reg &= (pcr->bier | 0x7FFFFF); - if (int_reg & SD_OC_INT) + if ((int_reg & SD_OC_INT) || + ((int_reg & SD_OVP_INT) && (PCI_PID(pcr) == PID_5264))) rtsx_pci_process_ocp_interrupt(pcr); if (int_reg & SD_INT) { @@ -1159,7 +1165,9 @@ void rtsx_pci_enable_oobs_polling(struct rtsx_pcr *pcr) { u16 val; - if ((PCI_PID(pcr) != PID_525A) && (PCI_PID(pcr) != PID_5260)) { + if 
((PCI_PID(pcr) != PID_525A) && + (PCI_PID(pcr) != PID_5260) && + (PCI_PID(pcr) != PID_5264)) { rtsx_pci_read_phy_register(pcr, 0x01, &val); val |= 1<<9; rtsx_pci_write_phy_register(pcr, 0x01, val); @@ -1175,7 +1183,9 @@ void rtsx_pci_disable_oobs_polling(struct rtsx_pcr *pcr) { u16 val; - if ((PCI_PID(pcr) != PID_525A) && (PCI_PID(pcr) != PID_5260)) { + if ((PCI_PID(pcr) != PID_525A) && + (PCI_PID(pcr) != PID_5260) && + (PCI_PID(pcr) != PID_5264)) { rtsx_pci_read_phy_register(pcr, 0x01, &val); val &= ~(1<<9); rtsx_pci_write_phy_register(pcr, 0x01, val); @@ -1226,7 +1236,7 @@ static int rtsx_pci_init_hw(struct rtsx_pcr *pcr) rtsx_pci_enable_bus_int(pcr); /* Power on SSC */ - if (PCI_PID(pcr) == PID_5261) { + if ((PCI_PID(pcr) == PID_5261) || (PCI_PID(pcr) == PID_5264)) { /* Gating real mcu clock */ err = rtsx_pci_write_register(pcr, RTS5261_FW_CFG1, RTS5261_MCU_CLOCK_GATING, 0); @@ -1270,6 +1280,11 @@ static int rtsx_pci_init_hw(struct rtsx_pcr *pcr) else if (PCI_PID(pcr) == PID_5228) rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL2, 0xFF, RTS5228_SSC_DEPTH_2M); + else if (is_version(pcr, 0x5264, IC_VER_A)) + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL1, SSC_RSTB, 0); + else if (PCI_PID(pcr) == PID_5264) + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL2, 0xFF, + RTS5264_SSC_DEPTH_2M); else rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SSC_CTL2, 0xFF, 0x12); @@ -1305,6 +1320,7 @@ static int rtsx_pci_init_hw(struct rtsx_pcr *pcr) case PID_5260: case PID_5261: case PID_5228: + case PID_5264: rtsx_pci_write_register(pcr, PM_CLK_FORCE_CTL, 1, 1); break; default: @@ -1404,6 +1420,10 @@ static int rtsx_pci_init_chip(struct rtsx_pcr *pcr) case 0x5228: rts5228_init_params(pcr); break; + + case 0x5264: + rts5264_init_params(pcr); + break; } pcr_dbg(pcr, "PID: 0x%04x, IC version: 0x%02x\n", @@ -1544,7 +1564,7 @@ static int rtsx_pci_probe(struct pci_dev *pcidev, pcr->pci = pcidev; dev_set_drvdata(&pcidev->dev, handle); - if (CHK_PCI_PID(pcr, 0x525A)) + if ((CHK_PCI_PID(pcr, 0x525A)) || (CHK_PCI_PID(pcr, 0x5264))) bar = 1; len = pci_resource_len(pcidev, bar); base = pci_resource_start(pcidev, bar); diff --git a/drivers/misc/cardreader/rtsx_pcr.h b/drivers/misc/cardreader/rtsx_pcr.h index 37d1f316ae17..9215d66de00c 100644 --- a/drivers/misc/cardreader/rtsx_pcr.h +++ b/drivers/misc/cardreader/rtsx_pcr.h @@ -74,6 +74,7 @@ void rtl8411b_init_params(struct rtsx_pcr *pcr); void rts5260_init_params(struct rtsx_pcr *pcr); void rts5261_init_params(struct rtsx_pcr *pcr); void rts5228_init_params(struct rtsx_pcr *pcr); +void rts5264_init_params(struct rtsx_pcr *pcr); static inline u8 map_sd_drive(int idx) { diff --git a/drivers/misc/eeprom/at24.c b/drivers/misc/eeprom/at24.c index f61a80597a22..a5dcd7a13468 100644 --- a/drivers/misc/eeprom/at24.c +++ b/drivers/misc/eeprom/at24.c @@ -439,12 +439,9 @@ static int at24_read(void *priv, unsigned int off, void *val, size_t count) if (off + count > at24->byte_len) return -EINVAL; - ret = pm_runtime_get_sync(dev); - if (ret < 0) { - pm_runtime_put_noidle(dev); + ret = pm_runtime_resume_and_get(dev); + if (ret) return ret; - } - /* * Read data from chip, protecting against concurrent updates * from this host, but not from other I2C masters. 
@@ -486,12 +483,9 @@ static int at24_write(void *priv, unsigned int off, void *val, size_t count) if (off + count > at24->byte_len) return -EINVAL; - ret = pm_runtime_get_sync(dev); - if (ret < 0) { - pm_runtime_put_noidle(dev); + ret = pm_runtime_resume_and_get(dev); + if (ret) return ret; - } - /* * Write data to chip, protecting against concurrent updates * from this host, but not from other I2C masters. @@ -563,6 +557,31 @@ static unsigned int at24_get_offset_adj(u8 flags, unsigned int byte_len) } } +static void at24_probe_temp_sensor(struct i2c_client *client) +{ + struct at24_data *at24 = i2c_get_clientdata(client); + struct i2c_board_info info = { .type = "jc42" }; + int ret; + u8 val; + + /* + * Byte 2 has value 11 for DDR3, earlier versions don't + * support the thermal sensor present flag + */ + ret = at24_read(at24, 2, &val, 1); + if (ret || val != 11) + return; + + /* Byte 32, bit 7 is set if temp sensor is present */ + ret = at24_read(at24, 32, &val, 1); + if (ret || !(val & BIT(7))) + return; + + info.addr = 0x18 | (client->addr & 7); + + i2c_new_client_device(client->adapter, &info); +} + static int at24_probe(struct i2c_client *client) { struct regmap_config regmap_config = { }; @@ -762,6 +781,10 @@ static int at24_probe(struct i2c_client *client) } } + /* If this a SPD EEPROM, probe for DDR3 thermal sensor */ + if (cdata == &at24_data_spd) + at24_probe_temp_sensor(client); + pm_runtime_idle(dev); if (writable) diff --git a/drivers/misc/eeprom/ee1004.c b/drivers/misc/eeprom/ee1004.c index a1acd77130f2..21feebc3044c 100644 --- a/drivers/misc/eeprom/ee1004.c +++ b/drivers/misc/eeprom/ee1004.c @@ -31,6 +31,7 @@ * over performance. */ +#define EE1004_MAX_BUSSES 8 #define EE1004_ADDR_SET_PAGE 0x36 #define EE1004_NUM_PAGES 2 #define EE1004_PAGE_SIZE 256 @@ -42,9 +43,13 @@ * from page selection to end of read. 
*/ static DEFINE_MUTEX(ee1004_bus_lock); -static struct i2c_client *ee1004_set_page[EE1004_NUM_PAGES]; -static unsigned int ee1004_dev_count; -static int ee1004_current_page; + +static struct ee1004_bus_data { + struct i2c_adapter *adap; + struct i2c_client *set_page[EE1004_NUM_PAGES]; + unsigned int dev_count; + int current_page; +} ee1004_bus_data[EE1004_MAX_BUSSES]; static const struct i2c_device_id ee1004_ids[] = { { "ee1004", 0 }, @@ -54,11 +59,29 @@ MODULE_DEVICE_TABLE(i2c, ee1004_ids); /*-------------------------------------------------------------------------*/ -static int ee1004_get_current_page(void) +static struct ee1004_bus_data *ee1004_get_bus_data(struct i2c_adapter *adap) +{ + int i; + + for (i = 0; i < EE1004_MAX_BUSSES; i++) + if (ee1004_bus_data[i].adap == adap) + return ee1004_bus_data + i; + + /* If not existent yet, create new entry */ + for (i = 0; i < EE1004_MAX_BUSSES; i++) + if (!ee1004_bus_data[i].adap) { + ee1004_bus_data[i].adap = adap; + return ee1004_bus_data + i; + } + + return NULL; +} + +static int ee1004_get_current_page(struct ee1004_bus_data *bd) { int err; - err = i2c_smbus_read_byte(ee1004_set_page[0]); + err = i2c_smbus_read_byte(bd->set_page[0]); if (err == -ENXIO) { /* Nack means page 1 is selected */ return 1; @@ -72,28 +95,29 @@ static int ee1004_get_current_page(void) return 0; } -static int ee1004_set_current_page(struct device *dev, int page) +static int ee1004_set_current_page(struct i2c_client *client, int page) { + struct ee1004_bus_data *bd = i2c_get_clientdata(client); int ret; - if (page == ee1004_current_page) + if (page == bd->current_page) return 0; /* Data is ignored */ - ret = i2c_smbus_write_byte(ee1004_set_page[page], 0x00); + ret = i2c_smbus_write_byte(bd->set_page[page], 0x00); /* * Don't give up just yet. Some memory modules will select the page * but not ack the command. Check which page is selected now. 
*/ - if (ret == -ENXIO && ee1004_get_current_page() == page) + if (ret == -ENXIO && ee1004_get_current_page(bd) == page) ret = 0; if (ret < 0) { - dev_err(dev, "Failed to select page %d (%d)\n", page, ret); + dev_err(&client->dev, "Failed to select page %d (%d)\n", page, ret); return ret; } - dev_dbg(dev, "Selected page %d\n", page); - ee1004_current_page = page; + dev_dbg(&client->dev, "Selected page %d\n", page); + bd->current_page = page; return 0; } @@ -106,7 +130,7 @@ static ssize_t ee1004_eeprom_read(struct i2c_client *client, char *buf, page = offset >> EE1004_PAGE_SHIFT; offset &= (1 << EE1004_PAGE_SHIFT) - 1; - status = ee1004_set_current_page(&client->dev, page); + status = ee1004_set_current_page(client, page); if (status) return status; @@ -158,17 +182,34 @@ static struct bin_attribute *ee1004_attrs[] = { BIN_ATTRIBUTE_GROUPS(ee1004); -static void ee1004_cleanup(int idx) +static void ee1004_probe_temp_sensor(struct i2c_client *client) { - if (--ee1004_dev_count == 0) - while (--idx >= 0) { - i2c_unregister_device(ee1004_set_page[idx]); - ee1004_set_page[idx] = NULL; - } + struct i2c_board_info info = { .type = "jc42" }; + u8 byte14; + int ret; + + /* byte 14, bit 7 is set if temp sensor is present */ + ret = ee1004_eeprom_read(client, &byte14, 14, 1); + if (ret != 1 || !(byte14 & BIT(7))) + return; + + info.addr = 0x18 | (client->addr & 7); + + i2c_new_client_device(client->adapter, &info); +} + +static void ee1004_cleanup(int idx, struct ee1004_bus_data *bd) +{ + if (--bd->dev_count == 0) { + while (--idx >= 0) + i2c_unregister_device(bd->set_page[idx]); + memset(bd, 0, sizeof(struct ee1004_bus_data)); + } } static int ee1004_probe(struct i2c_client *client) { + struct ee1004_bus_data *bd; int err, cnr = 0; /* Make sure we can operate on this adapter */ @@ -178,9 +219,19 @@ static int ee1004_probe(struct i2c_client *client) I2C_FUNC_SMBUS_BYTE | I2C_FUNC_SMBUS_READ_BYTE_DATA)) return -EPFNOSUPPORT; - /* Use 2 dummy devices for page select command */ mutex_lock(&ee1004_bus_lock); - if (++ee1004_dev_count == 1) { + + bd = ee1004_get_bus_data(client->adapter); + if (!bd) { + mutex_unlock(&ee1004_bus_lock); + return dev_err_probe(&client->dev, -ENOSPC, + "Only %d busses supported", EE1004_MAX_BUSSES); + } + + i2c_set_clientdata(client, bd); + + if (++bd->dev_count == 1) { + /* Use 2 dummy devices for page select command */ for (cnr = 0; cnr < EE1004_NUM_PAGES; cnr++) { struct i2c_client *cl; @@ -189,21 +240,19 @@ static int ee1004_probe(struct i2c_client *client) err = PTR_ERR(cl); goto err_clients; } - ee1004_set_page[cnr] = cl; + bd->set_page[cnr] = cl; } /* Remember current page to avoid unneeded page select */ - err = ee1004_get_current_page(); + err = ee1004_get_current_page(bd); if (err < 0) goto err_clients; dev_dbg(&client->dev, "Currently selected page: %d\n", err); - ee1004_current_page = err; - } else if (client->adapter != ee1004_set_page[0]->adapter) { - dev_err(&client->dev, - "Driver only supports devices on a single I2C bus\n"); - err = -EOPNOTSUPP; - goto err_clients; + bd->current_page = err; } + + ee1004_probe_temp_sensor(client); + mutex_unlock(&ee1004_bus_lock); dev_info(&client->dev, @@ -213,7 +262,7 @@ static int ee1004_probe(struct i2c_client *client) return 0; err_clients: - ee1004_cleanup(cnr); + ee1004_cleanup(cnr, bd); mutex_unlock(&ee1004_bus_lock); return err; @@ -221,9 +270,11 @@ static int ee1004_probe(struct i2c_client *client) static void ee1004_remove(struct i2c_client *client) { + struct ee1004_bus_data *bd = i2c_get_clientdata(client); + /* 
Remove page select clients if this is the last device */ mutex_lock(&ee1004_bus_lock); - ee1004_cleanup(EE1004_NUM_PAGES); + ee1004_cleanup(EE1004_NUM_PAGES, bd); mutex_unlock(&ee1004_bus_lock); } diff --git a/drivers/misc/lis3lv02d/lis3lv02d_i2c.c b/drivers/misc/lis3lv02d/lis3lv02d_i2c.c index 3882e97e96a7..c6eb27d46cb0 100644 --- a/drivers/misc/lis3lv02d/lis3lv02d_i2c.c +++ b/drivers/misc/lis3lv02d/lis3lv02d_i2c.c @@ -150,6 +150,7 @@ static int lis3lv02d_i2c_probe(struct i2c_client *client) lis3_dev.init = lis3_i2c_init; lis3_dev.read = lis3_i2c_read; lis3_dev.write = lis3_i2c_write; + lis3_dev.reg_ctrl = lis3_reg_ctrl; lis3_dev.irq = client->irq; lis3_dev.ac = lis3lv02d_axis_map; lis3_dev.pm_dev = &client->dev; diff --git a/drivers/misc/mei/Kconfig b/drivers/misc/mei/Kconfig index 37db142de413..67d9391f1855 100644 --- a/drivers/misc/mei/Kconfig +++ b/drivers/misc/mei/Kconfig @@ -3,6 +3,7 @@ config INTEL_MEI tristate "Intel Management Engine Interface" depends on X86 && PCI + default GENERIC_CPU || MCORE2 || MATOM || X86_GENERIC help The Intel Management Engine (Intel ME) provides Manageability, Security and Media services for system containing Intel chipsets. @@ -11,10 +12,11 @@ config INTEL_MEI For more information see <https://software.intel.com/en-us/manageability/> +if INTEL_MEI + config INTEL_MEI_ME tristate "ME Enabled Intel Chipsets" - select INTEL_MEI - depends on X86 && PCI + default y help MEI support for ME Enabled Intel chipsets. @@ -38,8 +40,6 @@ config INTEL_MEI_ME config INTEL_MEI_TXE tristate "Intel Trusted Execution Environment with ME Interface" - select INTEL_MEI - depends on X86 && PCI help MEI Support for Trusted Execution Environment device on Intel SoCs @@ -48,9 +48,7 @@ config INTEL_MEI_TXE config INTEL_MEI_GSC tristate "Intel MEI GSC embedded device" - depends on INTEL_MEI depends on INTEL_MEI_ME - depends on X86 && PCI depends on DRM_I915 help Intel auxiliary driver for GSC devices embedded in Intel graphics devices. @@ -60,6 +58,31 @@ config INTEL_MEI_GSC tasks such as graphics card firmware update and security tasks. +config INTEL_MEI_VSC_HW + tristate "Intel visual sensing controller device transport driver" + depends on ACPI && SPI + depends on GPIOLIB || COMPILE_TEST + help + Intel SPI transport driver between host and Intel visual sensing + controller (IVSC) device. + + This driver can also be built as a module. If so, the module + will be called mei-vsc-hw. + +config INTEL_MEI_VSC + tristate "Intel visual sensing controller device with ME interface" + depends on INTEL_MEI_VSC_HW + help + Intel MEI over SPI driver for Intel visual sensing controller + (IVSC) device embedded in IA platform. It supports camera sharing + between IVSC for context sensing and IPU for typical media usage. + Selecting this config enables the transport layer for the IVSC device. + + This driver can also be built as a module. If so, the module + will be called mei-vsc. 
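As a rough sketch only (not part of the patch, and assuming the ACPI, SPI and GPIOLIB prerequisites above are already satisfied), the new IVSC drivers under the restructured "if INTEL_MEI" block would typically be enabled with a .config fragment along these lines:

	CONFIG_INTEL_MEI=m
	CONFIG_INTEL_MEI_VSC_HW=m
	CONFIG_INTEL_MEI_VSC=m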
+ source "drivers/misc/mei/hdcp/Kconfig" source "drivers/misc/mei/pxp/Kconfig" source "drivers/misc/mei/gsc_proxy/Kconfig" + +endif diff --git a/drivers/misc/mei/Makefile b/drivers/misc/mei/Makefile index 14aee253ae48..6f9fdbf1a495 100644 --- a/drivers/misc/mei/Makefile +++ b/drivers/misc/mei/Makefile @@ -31,3 +31,10 @@ CFLAGS_mei-trace.o = -I$(src) obj-$(CONFIG_INTEL_MEI_HDCP) += hdcp/ obj-$(CONFIG_INTEL_MEI_PXP) += pxp/ obj-$(CONFIG_INTEL_MEI_GSC_PROXY) += gsc_proxy/ + +obj-$(CONFIG_INTEL_MEI_VSC_HW) += mei-vsc-hw.o +mei-vsc-hw-y := vsc-tp.o +mei-vsc-hw-y += vsc-fw-loader.o + +obj-$(CONFIG_INTEL_MEI_VSC) += mei-vsc.o +mei-vsc-y := platform-vsc.o diff --git a/drivers/misc/mei/gsc_proxy/Kconfig b/drivers/misc/mei/gsc_proxy/Kconfig index 5f68d9f3d691..ac78b9d1eccd 100644 --- a/drivers/misc/mei/gsc_proxy/Kconfig +++ b/drivers/misc/mei/gsc_proxy/Kconfig @@ -3,7 +3,7 @@ # config INTEL_MEI_GSC_PROXY tristate "Intel GSC Proxy services of ME Interface" - select INTEL_MEI_ME + depends on INTEL_MEI_ME depends on DRM_I915 help MEI Support for GSC Proxy Services on Intel platforms. diff --git a/drivers/misc/mei/hdcp/Kconfig b/drivers/misc/mei/hdcp/Kconfig index 54e1c9526909..9be312ec798d 100644 --- a/drivers/misc/mei/hdcp/Kconfig +++ b/drivers/misc/mei/hdcp/Kconfig @@ -3,7 +3,7 @@ # config INTEL_MEI_HDCP tristate "Intel HDCP2.2 services of ME Interface" - select INTEL_MEI_ME + depends on INTEL_MEI_ME depends on DRM_I915 help MEI Support for HDCP2.2 Services on Intel platforms. diff --git a/drivers/misc/mei/platform-vsc.c b/drivers/misc/mei/platform-vsc.c new file mode 100644 index 000000000000..8d303c6c0000 --- /dev/null +++ b/drivers/misc/mei/platform-vsc.c @@ -0,0 +1,450 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2023, Intel Corporation. 
+ * Intel Visual Sensing Controller Interface Linux driver + */ + +#include <linux/align.h> +#include <linux/cache.h> +#include <linux/cleanup.h> +#include <linux/iopoll.h> +#include <linux/list.h> +#include <linux/mei.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/overflow.h> +#include <linux/platform_device.h> +#include <linux/pm_runtime.h> +#include <linux/timekeeping.h> +#include <linux/types.h> + +#include <asm-generic/bug.h> +#include <asm-generic/unaligned.h> + +#include "mei_dev.h" +#include "vsc-tp.h" + +#define MEI_VSC_DRV_NAME "intel_vsc" + +#define MEI_VSC_MAX_MSG_SIZE 512 + +#define MEI_VSC_POLL_DELAY_US (50 * USEC_PER_MSEC) +#define MEI_VSC_POLL_TIMEOUT_US (200 * USEC_PER_MSEC) + +#define mei_dev_to_vsc_hw(dev) ((struct mei_vsc_hw *)((dev)->hw)) + +struct mei_vsc_host_timestamp { + u64 realtime; + u64 boottime; +}; + +struct mei_vsc_hw { + struct vsc_tp *tp; + + bool fw_ready; + bool host_ready; + + atomic_t write_lock_cnt; + + u32 rx_len; + u32 rx_hdr; + + /* buffer for tx */ + char tx_buf[MEI_VSC_MAX_MSG_SIZE + sizeof(struct mei_msg_hdr)] ____cacheline_aligned; + /* buffer for rx */ + char rx_buf[MEI_VSC_MAX_MSG_SIZE + sizeof(struct mei_msg_hdr)] ____cacheline_aligned; +}; + +static int mei_vsc_read_helper(struct mei_vsc_hw *hw, u8 *buf, + u32 max_len) +{ + struct mei_vsc_host_timestamp ts = { + .realtime = ktime_to_ns(ktime_get_real()), + .boottime = ktime_to_ns(ktime_get_boottime()), + }; + + return vsc_tp_xfer(hw->tp, VSC_TP_CMD_READ, &ts, sizeof(ts), + buf, max_len); +} + +static int mei_vsc_write_helper(struct mei_vsc_hw *hw, u8 *buf, u32 len) +{ + u8 status; + + return vsc_tp_xfer(hw->tp, VSC_TP_CMD_WRITE, buf, len, &status, + sizeof(status)); +} + +static int mei_vsc_fw_status(struct mei_device *mei_dev, + struct mei_fw_status *fw_status) +{ + if (!fw_status) + return -EINVAL; + + fw_status->count = 0; + + return 0; +} + +static inline enum mei_pg_state mei_vsc_pg_state(struct mei_device *mei_dev) +{ + return MEI_PG_OFF; +} + +static void mei_vsc_intr_enable(struct mei_device *mei_dev) +{ + struct mei_vsc_hw *hw = mei_dev_to_vsc_hw(mei_dev); + + vsc_tp_intr_enable(hw->tp); +} + +static void mei_vsc_intr_disable(struct mei_device *mei_dev) +{ + struct mei_vsc_hw *hw = mei_dev_to_vsc_hw(mei_dev); + + vsc_tp_intr_disable(hw->tp); +} + +/* mei framework requires this ops */ +static void mei_vsc_intr_clear(struct mei_device *mei_dev) +{ +} + +/* wait for pending irq handler */ +static void mei_vsc_synchronize_irq(struct mei_device *mei_dev) +{ + struct mei_vsc_hw *hw = mei_dev_to_vsc_hw(mei_dev); + + vsc_tp_intr_synchronize(hw->tp); +} + +static int mei_vsc_hw_config(struct mei_device *mei_dev) +{ + return 0; +} + +static bool mei_vsc_host_is_ready(struct mei_device *mei_dev) +{ + struct mei_vsc_hw *hw = mei_dev_to_vsc_hw(mei_dev); + + return hw->host_ready; +} + +static bool mei_vsc_hw_is_ready(struct mei_device *mei_dev) +{ + struct mei_vsc_hw *hw = mei_dev_to_vsc_hw(mei_dev); + + return hw->fw_ready; +} + +static int mei_vsc_hw_start(struct mei_device *mei_dev) +{ + struct mei_vsc_hw *hw = mei_dev_to_vsc_hw(mei_dev); + int ret, rlen; + u8 buf; + + hw->host_ready = true; + + vsc_tp_intr_enable(hw->tp); + + ret = read_poll_timeout(mei_vsc_read_helper, rlen, + rlen >= 0, MEI_VSC_POLL_DELAY_US, + MEI_VSC_POLL_TIMEOUT_US, true, + hw, &buf, sizeof(buf)); + if (ret) { + dev_err(mei_dev->dev, "wait fw ready failed: %d\n", ret); + return ret; + } + + hw->fw_ready = true; + + return 0; +} + +static bool mei_vsc_hbuf_is_ready(struct mei_device *mei_dev) 
+{ + struct mei_vsc_hw *hw = mei_dev_to_vsc_hw(mei_dev); + + return atomic_read(&hw->write_lock_cnt) == 0; +} + +static int mei_vsc_hbuf_empty_slots(struct mei_device *mei_dev) +{ + return MEI_VSC_MAX_MSG_SIZE / MEI_SLOT_SIZE; +} + +static u32 mei_vsc_hbuf_depth(const struct mei_device *mei_dev) +{ + return MEI_VSC_MAX_MSG_SIZE / MEI_SLOT_SIZE; +} + +static int mei_vsc_write(struct mei_device *mei_dev, + const void *hdr, size_t hdr_len, + const void *data, size_t data_len) +{ + struct mei_vsc_hw *hw = mei_dev_to_vsc_hw(mei_dev); + char *buf = hw->tx_buf; + int ret; + + if (WARN_ON(!hdr || !IS_ALIGNED(hdr_len, 4))) + return -EINVAL; + + if (!data || data_len > MEI_VSC_MAX_MSG_SIZE) + return -EINVAL; + + atomic_inc(&hw->write_lock_cnt); + + memcpy(buf, hdr, hdr_len); + memcpy(buf + hdr_len, data, data_len); + + ret = mei_vsc_write_helper(hw, buf, hdr_len + data_len); + + atomic_dec_if_positive(&hw->write_lock_cnt); + + return ret < 0 ? ret : 0; +} + +static inline u32 mei_vsc_read(const struct mei_device *mei_dev) +{ + struct mei_vsc_hw *hw = mei_dev_to_vsc_hw(mei_dev); + int ret; + + ret = mei_vsc_read_helper(hw, hw->rx_buf, sizeof(hw->rx_buf)); + if (ret < 0 || ret < sizeof(u32)) + return 0; + hw->rx_len = ret; + + hw->rx_hdr = get_unaligned_le32(hw->rx_buf); + + return hw->rx_hdr; +} + +static int mei_vsc_count_full_read_slots(struct mei_device *mei_dev) +{ + return MEI_VSC_MAX_MSG_SIZE / MEI_SLOT_SIZE; +} + +static int mei_vsc_read_slots(struct mei_device *mei_dev, unsigned char *buf, + unsigned long len) +{ + struct mei_vsc_hw *hw = mei_dev_to_vsc_hw(mei_dev); + struct mei_msg_hdr *hdr; + + hdr = (struct mei_msg_hdr *)&hw->rx_hdr; + if (len != hdr->length || hdr->length + sizeof(*hdr) != hw->rx_len) + return -EINVAL; + + memcpy(buf, hw->rx_buf + sizeof(*hdr), len); + + return 0; +} + +static bool mei_vsc_pg_in_transition(struct mei_device *mei_dev) +{ + return mei_dev->pg_event >= MEI_PG_EVENT_WAIT && + mei_dev->pg_event <= MEI_PG_EVENT_INTR_WAIT; +} + +static bool mei_vsc_pg_is_enabled(struct mei_device *mei_dev) +{ + return false; +} + +static int mei_vsc_hw_reset(struct mei_device *mei_dev, bool intr_enable) +{ + struct mei_vsc_hw *hw = mei_dev_to_vsc_hw(mei_dev); + + vsc_tp_reset(hw->tp); + + vsc_tp_intr_disable(hw->tp); + + return vsc_tp_init(hw->tp, mei_dev->dev); +} + +static const struct mei_hw_ops mei_vsc_hw_ops = { + .fw_status = mei_vsc_fw_status, + .pg_state = mei_vsc_pg_state, + + .host_is_ready = mei_vsc_host_is_ready, + .hw_is_ready = mei_vsc_hw_is_ready, + .hw_reset = mei_vsc_hw_reset, + .hw_config = mei_vsc_hw_config, + .hw_start = mei_vsc_hw_start, + + .pg_in_transition = mei_vsc_pg_in_transition, + .pg_is_enabled = mei_vsc_pg_is_enabled, + + .intr_clear = mei_vsc_intr_clear, + .intr_enable = mei_vsc_intr_enable, + .intr_disable = mei_vsc_intr_disable, + .synchronize_irq = mei_vsc_synchronize_irq, + + .hbuf_free_slots = mei_vsc_hbuf_empty_slots, + .hbuf_is_ready = mei_vsc_hbuf_is_ready, + .hbuf_depth = mei_vsc_hbuf_depth, + .write = mei_vsc_write, + + .rdbuf_full_slots = mei_vsc_count_full_read_slots, + .read_hdr = mei_vsc_read, + .read = mei_vsc_read_slots, +}; + +static void mei_vsc_event_cb(void *context) +{ + struct mei_device *mei_dev = context; + struct mei_vsc_hw *hw = mei_dev_to_vsc_hw(mei_dev); + struct list_head cmpl_list; + s32 slots; + int ret; + + if (mei_dev->dev_state == MEI_DEV_RESETTING || + mei_dev->dev_state == MEI_DEV_INITIALIZING) + return; + + INIT_LIST_HEAD(&cmpl_list); + + guard(mutex)(&mei_dev->device_lock); + + while (vsc_tp_need_read(hw->tp)) 
{ + /* check slots available for reading */ + slots = mei_count_full_read_slots(mei_dev); + + ret = mei_irq_read_handler(mei_dev, &cmpl_list, &slots); + if (ret) { + if (ret != -ENODATA) { + if (mei_dev->dev_state != MEI_DEV_RESETTING && + mei_dev->dev_state != MEI_DEV_POWER_DOWN) + schedule_work(&mei_dev->reset_work); + } + + return; + } + } + + mei_dev->hbuf_is_ready = mei_hbuf_is_ready(mei_dev); + ret = mei_irq_write_handler(mei_dev, &cmpl_list); + if (ret) + dev_err(mei_dev->dev, "dispatch write request failed: %d\n", ret); + + mei_dev->hbuf_is_ready = mei_hbuf_is_ready(mei_dev); + mei_irq_compl_handler(mei_dev, &cmpl_list); +} + +static int mei_vsc_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct mei_device *mei_dev; + struct mei_vsc_hw *hw; + struct vsc_tp *tp; + int ret; + + tp = *(struct vsc_tp **)dev_get_platdata(dev); + if (!tp) + return dev_err_probe(dev, -ENODEV, "no platform data\n"); + + mei_dev = devm_kzalloc(dev, size_add(sizeof(*mei_dev), sizeof(*hw)), + GFP_KERNEL); + if (!mei_dev) + return -ENOMEM; + + mei_device_init(mei_dev, dev, false, &mei_vsc_hw_ops); + mei_dev->fw_f_fw_ver_supported = 0; + mei_dev->kind = "ivsc"; + + hw = mei_dev_to_vsc_hw(mei_dev); + atomic_set(&hw->write_lock_cnt, 0); + hw->tp = tp; + + platform_set_drvdata(pdev, mei_dev); + + vsc_tp_register_event_cb(tp, mei_vsc_event_cb, mei_dev); + + ret = mei_start(mei_dev); + if (ret) { + dev_err_probe(dev, ret, "init hw failed\n"); + goto err_cancel; + } + + ret = mei_register(mei_dev, dev); + if (ret) + goto err_stop; + + pm_runtime_enable(mei_dev->dev); + + return 0; + +err_stop: + mei_stop(mei_dev); + +err_cancel: + mei_cancel_work(mei_dev); + + mei_disable_interrupts(mei_dev); + + return ret; +} + +static int mei_vsc_remove(struct platform_device *pdev) +{ + struct mei_device *mei_dev = platform_get_drvdata(pdev); + + pm_runtime_disable(mei_dev->dev); + + mei_stop(mei_dev); + + mei_disable_interrupts(mei_dev); + + mei_deregister(mei_dev); + + return 0; +} + +static int mei_vsc_suspend(struct device *dev) +{ + struct mei_device *mei_dev = dev_get_drvdata(dev); + + mei_stop(mei_dev); + + return 0; +} + +static int mei_vsc_resume(struct device *dev) +{ + struct mei_device *mei_dev = dev_get_drvdata(dev); + int ret; + + ret = mei_restart(mei_dev); + if (ret) + return ret; + + /* start timer if stopped in suspend */ + schedule_delayed_work(&mei_dev->timer_work, HZ); + + return 0; +} + +static DEFINE_SIMPLE_DEV_PM_OPS(mei_vsc_pm_ops, mei_vsc_suspend, mei_vsc_resume); + +static const struct platform_device_id mei_vsc_id_table[] = { + { MEI_VSC_DRV_NAME }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(platform, mei_vsc_id_table); + +static struct platform_driver mei_vsc_drv = { + .probe = mei_vsc_probe, + .remove = mei_vsc_remove, + .id_table = mei_vsc_id_table, + .driver = { + .name = MEI_VSC_DRV_NAME, + .pm = &mei_vsc_pm_ops, + .probe_type = PROBE_PREFER_ASYNCHRONOUS, + }, +}; +module_platform_driver(mei_vsc_drv); + +MODULE_AUTHOR("Wentong Wu <wentong.wu@intel.com>"); +MODULE_AUTHOR("Zhifeng Wang <zhifeng.wang@intel.com>"); +MODULE_DESCRIPTION("Intel Visual Sensing Controller Interface"); +MODULE_LICENSE("GPL"); +MODULE_IMPORT_NS(VSC_TP); diff --git a/drivers/misc/mei/pxp/Kconfig b/drivers/misc/mei/pxp/Kconfig index 4029b96afc04..e9219b61cd92 100644 --- a/drivers/misc/mei/pxp/Kconfig +++ b/drivers/misc/mei/pxp/Kconfig @@ -1,10 +1,9 @@ - # SPDX-License-Identifier: GPL-2.0 # Copyright (c) 2020, Intel Corporation. All rights reserved. 
# config INTEL_MEI_PXP tristate "Intel PXP services of ME Interface" - select INTEL_MEI_ME + depends on INTEL_MEI_ME depends on DRM_I915 help MEI Support for PXP Services on Intel platforms. diff --git a/drivers/misc/mei/vsc-fw-loader.c b/drivers/misc/mei/vsc-fw-loader.c new file mode 100644 index 000000000000..ffa4ccd96a10 --- /dev/null +++ b/drivers/misc/mei/vsc-fw-loader.c @@ -0,0 +1,770 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2023, Intel Corporation. + * Intel Visual Sensing Controller Transport Layer Linux driver + */ + +#include <linux/acpi.h> +#include <linux/align.h> +#include <linux/bitfield.h> +#include <linux/bits.h> +#include <linux/cleanup.h> +#include <linux/firmware.h> +#include <linux/sizes.h> +#include <linux/slab.h> +#include <linux/string_helpers.h> +#include <linux/types.h> + +#include <asm-generic/unaligned.h> + +#include "vsc-tp.h" + +#define VSC_MAGIC_NUM 0x49505343 /* IPSC */ +#define VSC_MAGIC_FW 0x49574653 /* IWFS */ +#define VSC_MAGIC_FILE 0x46564353 /* FVCS */ + +#define VSC_ADDR_BASE 0xE0030000 +#define VSC_EFUSE_ADDR (VSC_ADDR_BASE + 0x038) +#define VSC_STRAP_ADDR (VSC_ADDR_BASE + 0x100) + +#define VSC_MAINSTEPPING_VERSION_MASK GENMASK(7, 4) +#define VSC_MAINSTEPPING_VERSION_A 0 + +#define VSC_SUBSTEPPING_VERSION_MASK GENMASK(3, 0) +#define VSC_SUBSTEPPING_VERSION_0 0 +#define VSC_SUBSTEPPING_VERSION_1 2 + +#define VSC_BOOT_IMG_OPTION_MASK GENMASK(15, 0) + +#define VSC_SKU_CFG_LOCATION 0x5001A000 +#define VSC_SKU_MAX_SIZE 4100u + +#define VSC_ACE_IMG_CNT 2 +#define VSC_CSI_IMG_CNT 4 +#define VSC_IMG_CNT_MAX 6 + +#define VSC_ROM_PKG_SIZE 256u +#define VSC_FW_PKG_SIZE 512u + +#define VSC_IMAGE_DIR "intel/vsc/" + +#define VSC_CSI_IMAGE_NAME VSC_IMAGE_DIR "ivsc_fw.bin" +#define VSC_ACE_IMAGE_NAME_FMT VSC_IMAGE_DIR "ivsc_pkg_%s_0.bin" +#define VSC_CFG_IMAGE_NAME_FMT VSC_IMAGE_DIR "ivsc_skucfg_%s_0_1.bin" + +#define VSC_IMAGE_PATH_MAX_LEN 64 + +#define VSC_SENSOR_NAME_MAX_LEN 16 + +/* command id */ +enum { + VSC_CMD_QUERY = 0, + VSC_CMD_DL_SET = 1, + VSC_CMD_DL_START = 2, + VSC_CMD_DL_CONT = 3, + VSC_CMD_DUMP_MEM = 4, + VSC_CMD_GET_CONT = 8, + VSC_CMD_CAM_BOOT = 10, +}; + +/* command ack token */ +enum { + VSC_TOKEN_BOOTLOADER_REQ = 1, + VSC_TOKEN_DUMP_RESP = 4, + VSC_TOKEN_ERROR = 7, +}; + +/* image type */ +enum { + VSC_IMG_BOOTLOADER_TYPE = 1, + VSC_IMG_CSI_EM7D_TYPE, + VSC_IMG_CSI_SEM_TYPE, + VSC_IMG_CSI_RUNTIME_TYPE, + VSC_IMG_ACE_VISION_TYPE, + VSC_IMG_ACE_CFG_TYPE, + VSC_IMG_SKU_CFG_TYPE, +}; + +/* image fragments */ +enum { + VSC_IMG_BOOTLOADER_FRAG, + VSC_IMG_CSI_SEM_FRAG, + VSC_IMG_CSI_RUNTIME_FRAG, + VSC_IMG_ACE_VISION_FRAG, + VSC_IMG_ACE_CFG_FRAG, + VSC_IMG_CSI_EM7D_FRAG, + VSC_IMG_SKU_CFG_FRAG, + VSC_IMG_FRAG_MAX +}; + +struct vsc_rom_cmd { + __le32 magic; + __u8 cmd_id; + union { + /* download start */ + struct { + __u8 img_type; + __le16 option; + __le32 img_len; + __le32 img_loc; + __le32 crc; + DECLARE_FLEX_ARRAY(__u8, res); + } __packed dl_start; + /* download set */ + struct { + __u8 option; + __le16 img_cnt; + DECLARE_FLEX_ARRAY(__le32, payload); + } __packed dl_set; + /* download continue */ + struct { + __u8 end_flag; + __le16 len; + /* 8 is the offset of payload */ + __u8 payload[VSC_ROM_PKG_SIZE - 8]; + } __packed dl_cont; + /* dump memory */ + struct { + __u8 res; + __le16 len; + __le32 addr; + DECLARE_FLEX_ARRAY(__u8, payload); + } __packed dump_mem; + /* 5 is the offset of padding */ + __u8 padding[VSC_ROM_PKG_SIZE - 5]; + } data; +}; + +struct vsc_rom_cmd_ack { + __le32 magic; + __u8 token; + __u8 type; + __u8 
res[2]; + __u8 payload[]; +}; + +struct vsc_fw_cmd { + __le32 magic; + __u8 cmd_id; + union { + struct { + __le16 option; + __u8 img_type; + __le32 img_len; + __le32 img_loc; + __le32 crc; + DECLARE_FLEX_ARRAY(__u8, res); + } __packed dl_start; + struct { + __le16 option; + __u8 img_cnt; + DECLARE_FLEX_ARRAY(__le32, payload); + } __packed dl_set; + struct { + __le32 addr; + __u8 len; + DECLARE_FLEX_ARRAY(__u8, payload); + } __packed dump_mem; + struct { + __u8 resv[3]; + __le32 crc; + DECLARE_FLEX_ARRAY(__u8, payload); + } __packed boot; + /* 5 is the offset of padding */ + __u8 padding[VSC_FW_PKG_SIZE - 5]; + } data; +}; + +struct vsc_img { + __le32 magic; + __le32 option; + __le32 image_count; + __le32 image_location[VSC_IMG_CNT_MAX]; +}; + +struct vsc_fw_sign { + __le32 magic; + __le32 image_size; + __u8 image[]; +}; + +struct vsc_image_code_data { + /* fragment index */ + u8 frag_index; + /* image type */ + u8 image_type; +}; + +struct vsc_img_frag { + u8 type; + u32 location; + const u8 *data; + u32 size; +}; + +/** + * struct vsc_fw_loader - represent vsc firmware loader + * @dev: device used to request fimware + * @tp: transport layer used with the firmware loader + * @csi: CSI image + * @ace: ACE image + * @cfg: config image + * @tx_buf: tx buffer + * @rx_buf: rx buffer + * @option: command option + * @count: total image count + * @sensor_name: camera sensor name + * @frags: image fragments + */ +struct vsc_fw_loader { + struct device *dev; + struct vsc_tp *tp; + + const struct firmware *csi; + const struct firmware *ace; + const struct firmware *cfg; + + void *tx_buf; + void *rx_buf; + + u16 option; + u16 count; + + char sensor_name[VSC_SENSOR_NAME_MAX_LEN]; + + struct vsc_img_frag frags[VSC_IMG_FRAG_MAX]; +}; + +static inline u32 vsc_sum_crc(void *data, size_t size) +{ + u32 crc = 0; + size_t i; + + for (i = 0; i < size; i++) + crc += *((u8 *)data + i); + + return crc; +} + +/* get sensor name to construct image name */ +static int vsc_get_sensor_name(struct vsc_fw_loader *fw_loader, + struct device *dev) +{ + struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER }; + union acpi_object obj = { + .type = ACPI_TYPE_INTEGER, + .integer.value = 1, + }; + struct acpi_object_list arg_list = { + .count = 1, + .pointer = &obj, + }; + union acpi_object *ret_obj; + acpi_handle handle; + acpi_status status; + int ret = 0; + + handle = ACPI_HANDLE(dev); + if (!handle) + return -EINVAL; + + status = acpi_evaluate_object(handle, "SID", &arg_list, &buffer); + if (ACPI_FAILURE(status)) { + dev_err(dev, "can't evaluate SID method: %d\n", status); + return -ENODEV; + } + + ret_obj = buffer.pointer; + if (!ret_obj) { + dev_err(dev, "can't locate ACPI buffer\n"); + return -ENODEV; + } + + if (ret_obj->type != ACPI_TYPE_STRING) { + dev_err(dev, "found non-string entry\n"); + ret = -ENODEV; + goto out_free_buff; + } + + /* string length excludes trailing NUL */ + if (ret_obj->string.length >= sizeof(fw_loader->sensor_name)) { + dev_err(dev, "sensor name buffer too small\n"); + ret = -EINVAL; + goto out_free_buff; + } + + memcpy(fw_loader->sensor_name, ret_obj->string.pointer, + ret_obj->string.length); + + string_lower(fw_loader->sensor_name, fw_loader->sensor_name); + +out_free_buff: + ACPI_FREE(buffer.pointer); + + return ret; +} + +static int vsc_identify_silicon(struct vsc_fw_loader *fw_loader) +{ + struct vsc_rom_cmd_ack *ack = fw_loader->rx_buf; + struct vsc_rom_cmd *cmd = fw_loader->tx_buf; + u8 version, sub_version; + int ret; + + /* identify stepping information */ + cmd->magic = 
cpu_to_le32(VSC_MAGIC_NUM); + cmd->cmd_id = VSC_CMD_DUMP_MEM; + cmd->data.dump_mem.addr = cpu_to_le32(VSC_EFUSE_ADDR); + cmd->data.dump_mem.len = cpu_to_le16(sizeof(__le32)); + ret = vsc_tp_rom_xfer(fw_loader->tp, cmd, ack, VSC_ROM_PKG_SIZE); + if (ret) + return ret; + if (ack->token == VSC_TOKEN_ERROR) + return -EINVAL; + + cmd->magic = cpu_to_le32(VSC_MAGIC_NUM); + cmd->cmd_id = VSC_CMD_GET_CONT; + ret = vsc_tp_rom_xfer(fw_loader->tp, cmd, ack, VSC_ROM_PKG_SIZE); + if (ret) + return ret; + if (ack->token != VSC_TOKEN_DUMP_RESP) + return -EINVAL; + + version = FIELD_GET(VSC_MAINSTEPPING_VERSION_MASK, ack->payload[0]); + sub_version = FIELD_GET(VSC_SUBSTEPPING_VERSION_MASK, ack->payload[0]); + + if (version != VSC_MAINSTEPPING_VERSION_A) + return -EINVAL; + + if (sub_version != VSC_SUBSTEPPING_VERSION_0 && + sub_version != VSC_SUBSTEPPING_VERSION_1) + return -EINVAL; + + dev_info(fw_loader->dev, "silicon stepping version is %u:%u\n", + version, sub_version); + + /* identify strap information */ + cmd->magic = cpu_to_le32(VSC_MAGIC_NUM); + cmd->cmd_id = VSC_CMD_DUMP_MEM; + cmd->data.dump_mem.addr = cpu_to_le32(VSC_STRAP_ADDR); + cmd->data.dump_mem.len = cpu_to_le16(sizeof(__le32)); + ret = vsc_tp_rom_xfer(fw_loader->tp, cmd, ack, VSC_ROM_PKG_SIZE); + if (ret) + return ret; + if (ack->token == VSC_TOKEN_ERROR) + return -EINVAL; + + cmd->magic = cpu_to_le32(VSC_MAGIC_NUM); + cmd->cmd_id = VSC_CMD_GET_CONT; + ret = vsc_tp_rom_xfer(fw_loader->tp, cmd, ack, VSC_ROM_PKG_SIZE); + if (ret) + return ret; + if (ack->token != VSC_TOKEN_DUMP_RESP) + return -EINVAL; + + return 0; +} + +static int vsc_identify_csi_image(struct vsc_fw_loader *fw_loader) +{ + const struct firmware *image; + struct vsc_fw_sign *sign; + struct vsc_img *img; + unsigned int i; + int ret; + + ret = request_firmware(&image, VSC_CSI_IMAGE_NAME, fw_loader->dev); + if (ret) + return ret; + + img = (struct vsc_img *)image->data; + if (!img) { + ret = -ENOENT; + goto err_release_image; + } + + if (le32_to_cpu(img->magic) != VSC_MAGIC_FILE) { + ret = -EINVAL; + goto err_release_image; + } + + if (le32_to_cpu(img->image_count) != VSC_CSI_IMG_CNT) { + ret = -EINVAL; + goto err_release_image; + } + fw_loader->count += le32_to_cpu(img->image_count) - 1; + + fw_loader->option = + FIELD_GET(VSC_BOOT_IMG_OPTION_MASK, le32_to_cpu(img->option)); + + sign = (struct vsc_fw_sign *) + (img->image_location + le32_to_cpu(img->image_count)); + + for (i = 0; i < VSC_CSI_IMG_CNT; i++) { + /* mapping from CSI image index to image code data */ + static const struct vsc_image_code_data csi_image_map[] = { + { VSC_IMG_BOOTLOADER_FRAG, VSC_IMG_BOOTLOADER_TYPE }, + { VSC_IMG_CSI_SEM_FRAG, VSC_IMG_CSI_SEM_TYPE }, + { VSC_IMG_CSI_RUNTIME_FRAG, VSC_IMG_CSI_RUNTIME_TYPE }, + { VSC_IMG_CSI_EM7D_FRAG, VSC_IMG_CSI_EM7D_TYPE }, + }; + struct vsc_img_frag *frag; + + if ((u8 *)sign + sizeof(*sign) > image->data + image->size) { + ret = -EINVAL; + goto err_release_image; + } + + if (le32_to_cpu(sign->magic) != VSC_MAGIC_FW) { + ret = -EINVAL; + goto err_release_image; + } + + if (!le32_to_cpu(img->image_location[i])) { + ret = -EINVAL; + goto err_release_image; + } + + frag = &fw_loader->frags[csi_image_map[i].frag_index]; + + frag->data = sign->image; + frag->size = le32_to_cpu(sign->image_size); + frag->location = le32_to_cpu(img->image_location[i]); + frag->type = csi_image_map[i].image_type; + + sign = (struct vsc_fw_sign *) + (sign->image + le32_to_cpu(sign->image_size)); + } + + fw_loader->csi = image; + + return 0; + +err_release_image: + 
release_firmware(image); + + return ret; +} + +static int vsc_identify_ace_image(struct vsc_fw_loader *fw_loader) +{ + char path[VSC_IMAGE_PATH_MAX_LEN]; + const struct firmware *image; + struct vsc_fw_sign *sign; + struct vsc_img *img; + unsigned int i; + int ret; + + snprintf(path, sizeof(path), VSC_ACE_IMAGE_NAME_FMT, + fw_loader->sensor_name); + + ret = request_firmware(&image, path, fw_loader->dev); + if (ret) + return ret; + + img = (struct vsc_img *)image->data; + if (!img) { + ret = -ENOENT; + goto err_release_image; + } + + if (le32_to_cpu(img->magic) != VSC_MAGIC_FILE) { + ret = -EINVAL; + goto err_release_image; + } + + if (le32_to_cpu(img->image_count) != VSC_ACE_IMG_CNT) { + ret = -EINVAL; + goto err_release_image; + } + fw_loader->count += le32_to_cpu(img->image_count); + + sign = (struct vsc_fw_sign *) + (img->image_location + le32_to_cpu(img->image_count)); + + for (i = 0; i < VSC_ACE_IMG_CNT; i++) { + /* mapping from ACE image index to image code data */ + static const struct vsc_image_code_data ace_image_map[] = { + { VSC_IMG_ACE_VISION_FRAG, VSC_IMG_ACE_VISION_TYPE }, + { VSC_IMG_ACE_CFG_FRAG, VSC_IMG_ACE_CFG_TYPE }, + }; + struct vsc_img_frag *frag, *last_frag; + u8 frag_index; + + if ((u8 *)sign + sizeof(*sign) > image->data + image->size) { + ret = -EINVAL; + goto err_release_image; + } + + if (le32_to_cpu(sign->magic) != VSC_MAGIC_FW) { + ret = -EINVAL; + goto err_release_image; + } + + frag_index = ace_image_map[i].frag_index; + frag = &fw_loader->frags[frag_index]; + + frag->data = sign->image; + frag->size = le32_to_cpu(sign->image_size); + frag->location = le32_to_cpu(img->image_location[i]); + frag->type = ace_image_map[i].image_type; + + if (!frag->location) { + last_frag = &fw_loader->frags[frag_index - 1]; + frag->location = + ALIGN(last_frag->location + last_frag->size, SZ_4K); + } + + sign = (struct vsc_fw_sign *) + (sign->image + le32_to_cpu(sign->image_size)); + } + + fw_loader->ace = image; + + return 0; + +err_release_image: + release_firmware(image); + + return ret; +} + +static int vsc_identify_cfg_image(struct vsc_fw_loader *fw_loader) +{ + struct vsc_img_frag *frag = &fw_loader->frags[VSC_IMG_SKU_CFG_FRAG]; + char path[VSC_IMAGE_PATH_MAX_LEN]; + const struct firmware *image; + u32 size; + int ret; + + snprintf(path, sizeof(path), VSC_CFG_IMAGE_NAME_FMT, + fw_loader->sensor_name); + + ret = request_firmware(&image, path, fw_loader->dev); + if (ret) + return ret; + + /* identify image size */ + if (image->size <= sizeof(u32) || image->size > VSC_SKU_MAX_SIZE) { + ret = -EINVAL; + goto err_release_image; + } + + size = le32_to_cpu(*((__le32 *)image->data)) + sizeof(u32); + if (image->size != size) { + ret = -EINVAL; + goto err_release_image; + } + + frag->data = image->data; + frag->size = image->size; + frag->type = VSC_IMG_SKU_CFG_TYPE; + frag->location = VSC_SKU_CFG_LOCATION; + + fw_loader->cfg = image; + + return 0; + +err_release_image: + release_firmware(image); + + return ret; +} + +static int vsc_download_bootloader(struct vsc_fw_loader *fw_loader) +{ + struct vsc_img_frag *frag = &fw_loader->frags[VSC_IMG_BOOTLOADER_FRAG]; + struct vsc_rom_cmd_ack *ack = fw_loader->rx_buf; + struct vsc_rom_cmd *cmd = fw_loader->tx_buf; + u32 len, c_len; + size_t remain; + const u8 *p; + int ret; + + cmd->magic = cpu_to_le32(VSC_MAGIC_NUM); + cmd->cmd_id = VSC_CMD_QUERY; + ret = vsc_tp_rom_xfer(fw_loader->tp, cmd, ack, VSC_ROM_PKG_SIZE); + if (ret) + return ret; + if (ack->token != VSC_TOKEN_DUMP_RESP && + ack->token != VSC_TOKEN_BOOTLOADER_REQ) + return 
-EINVAL; + + cmd->magic = cpu_to_le32(VSC_MAGIC_NUM); + cmd->cmd_id = VSC_CMD_DL_START; + cmd->data.dl_start.option = cpu_to_le16(fw_loader->option); + cmd->data.dl_start.img_type = frag->type; + cmd->data.dl_start.img_len = cpu_to_le32(frag->size); + cmd->data.dl_start.img_loc = cpu_to_le32(frag->location); + + c_len = offsetof(struct vsc_rom_cmd, data.dl_start.crc); + cmd->data.dl_start.crc = cpu_to_le32(vsc_sum_crc(cmd, c_len)); + + ret = vsc_tp_rom_xfer(fw_loader->tp, cmd, NULL, VSC_ROM_PKG_SIZE); + if (ret) + return ret; + + p = frag->data; + remain = frag->size; + + /* download image data */ + while (remain > 0) { + len = min(remain, sizeof(cmd->data.dl_cont.payload)); + + cmd->magic = cpu_to_le32(VSC_MAGIC_NUM); + cmd->cmd_id = VSC_CMD_DL_CONT; + cmd->data.dl_cont.len = cpu_to_le16(len); + cmd->data.dl_cont.end_flag = remain == len; + memcpy(cmd->data.dl_cont.payload, p, len); + + ret = vsc_tp_rom_xfer(fw_loader->tp, cmd, NULL, VSC_ROM_PKG_SIZE); + if (ret) + return ret; + + p += len; + remain -= len; + } + + return 0; +} + +static int vsc_download_firmware(struct vsc_fw_loader *fw_loader) +{ + struct vsc_fw_cmd *cmd = fw_loader->tx_buf; + unsigned int i, index = 0; + u32 c_len; + int ret; + + cmd->magic = cpu_to_le32(VSC_MAGIC_NUM); + cmd->cmd_id = VSC_CMD_DL_SET; + cmd->data.dl_set.img_cnt = cpu_to_le16(fw_loader->count); + put_unaligned_le16(fw_loader->option, &cmd->data.dl_set.option); + + for (i = VSC_IMG_CSI_SEM_FRAG; i <= VSC_IMG_CSI_EM7D_FRAG; i++) { + struct vsc_img_frag *frag = &fw_loader->frags[i]; + + cmd->data.dl_set.payload[index++] = cpu_to_le32(frag->location); + cmd->data.dl_set.payload[index++] = cpu_to_le32(frag->size); + } + + c_len = offsetof(struct vsc_fw_cmd, data.dl_set.payload[index]); + cmd->data.dl_set.payload[index] = cpu_to_le32(vsc_sum_crc(cmd, c_len)); + + ret = vsc_tp_rom_xfer(fw_loader->tp, cmd, NULL, VSC_FW_PKG_SIZE); + if (ret) + return ret; + + for (i = VSC_IMG_CSI_SEM_FRAG; i < VSC_IMG_FRAG_MAX; i++) { + struct vsc_img_frag *frag = &fw_loader->frags[i]; + const u8 *p; + u32 remain; + + cmd->magic = cpu_to_le32(VSC_MAGIC_NUM); + cmd->cmd_id = VSC_CMD_DL_START; + cmd->data.dl_start.img_type = frag->type; + cmd->data.dl_start.img_len = cpu_to_le32(frag->size); + cmd->data.dl_start.img_loc = cpu_to_le32(frag->location); + put_unaligned_le16(fw_loader->option, &cmd->data.dl_start.option); + + c_len = offsetof(struct vsc_fw_cmd, data.dl_start.crc); + cmd->data.dl_start.crc = cpu_to_le32(vsc_sum_crc(cmd, c_len)); + + ret = vsc_tp_rom_xfer(fw_loader->tp, cmd, NULL, VSC_FW_PKG_SIZE); + if (ret) + return ret; + + p = frag->data; + remain = frag->size; + + /* download image data */ + while (remain > 0) { + u32 len = min(remain, VSC_FW_PKG_SIZE); + + memcpy(fw_loader->tx_buf, p, len); + memset(fw_loader->tx_buf + len, 0, VSC_FW_PKG_SIZE - len); + + ret = vsc_tp_rom_xfer(fw_loader->tp, fw_loader->tx_buf, + NULL, VSC_FW_PKG_SIZE); + if (ret) + break; + + p += len; + remain -= len; + } + } + + cmd->magic = cpu_to_le32(VSC_MAGIC_NUM); + cmd->cmd_id = VSC_CMD_CAM_BOOT; + + c_len = offsetof(struct vsc_fw_cmd, data.dl_start.crc); + cmd->data.boot.crc = cpu_to_le32(vsc_sum_crc(cmd, c_len)); + + return vsc_tp_rom_xfer(fw_loader->tp, cmd, NULL, VSC_FW_PKG_SIZE); +} + +/** + * vsc_tp_init - init vsc_tp + * @tp: vsc_tp device handle + * @dev: device node for mei vsc device + * Return: 0 in case of success, negative value in case of error + */ +int vsc_tp_init(struct vsc_tp *tp, struct device *dev) +{ + struct vsc_fw_loader *fw_loader __free(kfree) = NULL; + void *tx_buf 
__free(kfree) = NULL; + void *rx_buf __free(kfree) = NULL; + int ret; + + fw_loader = kzalloc(sizeof(*fw_loader), GFP_KERNEL); + if (!fw_loader) + return -ENOMEM; + + tx_buf = kzalloc(VSC_FW_PKG_SIZE, GFP_KERNEL); + if (!tx_buf) + return -ENOMEM; + + rx_buf = kzalloc(VSC_FW_PKG_SIZE, GFP_KERNEL); + if (!rx_buf) + return -ENOMEM; + + fw_loader->tx_buf = tx_buf; + fw_loader->rx_buf = rx_buf; + + fw_loader->tp = tp; + fw_loader->dev = dev; + + ret = vsc_get_sensor_name(fw_loader, dev); + if (ret) + return ret; + + ret = vsc_identify_silicon(fw_loader); + if (ret) + return ret; + + ret = vsc_identify_csi_image(fw_loader); + if (ret) + return ret; + + ret = vsc_identify_ace_image(fw_loader); + if (ret) + goto err_release_csi; + + ret = vsc_identify_cfg_image(fw_loader); + if (ret) + goto err_release_ace; + + ret = vsc_download_bootloader(fw_loader); + if (!ret) + ret = vsc_download_firmware(fw_loader); + + release_firmware(fw_loader->cfg); + +err_release_ace: + release_firmware(fw_loader->ace); + +err_release_csi: + release_firmware(fw_loader->csi); + + return ret; +} +EXPORT_SYMBOL_NS_GPL(vsc_tp_init, VSC_TP); diff --git a/drivers/misc/mei/vsc-tp.c b/drivers/misc/mei/vsc-tp.c new file mode 100644 index 000000000000..6f4a4be6ccb5 --- /dev/null +++ b/drivers/misc/mei/vsc-tp.c @@ -0,0 +1,555 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2023, Intel Corporation. + * Intel Visual Sensing Controller Transport Layer Linux driver + */ + +#include <linux/acpi.h> +#include <linux/cleanup.h> +#include <linux/crc32.h> +#include <linux/delay.h> +#include <linux/device.h> +#include <linux/interrupt.h> +#include <linux/iopoll.h> +#include <linux/irq.h> +#include <linux/irqreturn.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/platform_device.h> +#include <linux/spi/spi.h> +#include <linux/types.h> + +#include "vsc-tp.h" + +#define VSC_TP_RESET_PIN_TOGGLE_INTERVAL_MS 20 +#define VSC_TP_ROM_BOOTUP_DELAY_MS 10 +#define VSC_TP_ROM_XFER_POLL_TIMEOUT_US (500 * USEC_PER_MSEC) +#define VSC_TP_ROM_XFER_POLL_DELAY_US (20 * USEC_PER_MSEC) +#define VSC_TP_WAIT_FW_ASSERTED_TIMEOUT (2 * HZ) +#define VSC_TP_MAX_XFER_COUNT 5 + +#define VSC_TP_PACKET_SYNC 0x31 +#define VSC_TP_CRC_SIZE sizeof(u32) +#define VSC_TP_MAX_MSG_SIZE 2048 +/* SPI xfer timeout size */ +#define VSC_TP_XFER_TIMEOUT_BYTES 700 +#define VSC_TP_PACKET_PADDING_SIZE 1 +#define VSC_TP_PACKET_SIZE(pkt) \ + (sizeof(struct vsc_tp_packet) + le16_to_cpu((pkt)->len) + VSC_TP_CRC_SIZE) +#define VSC_TP_MAX_PACKET_SIZE \ + (sizeof(struct vsc_tp_packet) + VSC_TP_MAX_MSG_SIZE + VSC_TP_CRC_SIZE) +#define VSC_TP_MAX_XFER_SIZE \ + (VSC_TP_MAX_PACKET_SIZE + VSC_TP_XFER_TIMEOUT_BYTES) +#define VSC_TP_NEXT_XFER_LEN(len, offset) \ + (len + sizeof(struct vsc_tp_packet) + VSC_TP_CRC_SIZE - offset + VSC_TP_PACKET_PADDING_SIZE) + +struct vsc_tp_packet { + __u8 sync; + __u8 cmd; + __le16 len; + __le32 seq; + __u8 buf[] __counted_by(len); +}; + +struct vsc_tp { + /* do the actual data transfer */ + struct spi_device *spi; + + /* bind with mei framework */ + struct platform_device *pdev; + + struct gpio_desc *wakeuphost; + struct gpio_desc *resetfw; + struct gpio_desc *wakeupfw; + + /* command sequence number */ + u32 seq; + + /* command buffer */ + void *tx_buf; + void *rx_buf; + + atomic_t assert_cnt; + wait_queue_head_t xfer_wait; + + vsc_tp_event_cb_t event_notify; + void *event_notify_context; + + /* used to protect command download */ + struct mutex mutex; +}; + +/* GPIO resources */ +static const struct acpi_gpio_params 
wakeuphost_gpio = { 0, 0, false }; +static const struct acpi_gpio_params wakeuphostint_gpio = { 1, 0, false }; +static const struct acpi_gpio_params resetfw_gpio = { 2, 0, false }; +static const struct acpi_gpio_params wakeupfw = { 3, 0, false }; + +static const struct acpi_gpio_mapping vsc_tp_acpi_gpios[] = { + { "wakeuphost-gpios", &wakeuphost_gpio, 1 }, + { "wakeuphostint-gpios", &wakeuphostint_gpio, 1 }, + { "resetfw-gpios", &resetfw_gpio, 1 }, + { "wakeupfw-gpios", &wakeupfw, 1 }, + {} +}; + +/* wakeup firmware and wait for response */ +static int vsc_tp_wakeup_request(struct vsc_tp *tp) +{ + int ret; + + gpiod_set_value_cansleep(tp->wakeupfw, 0); + + ret = wait_event_timeout(tp->xfer_wait, + atomic_read(&tp->assert_cnt) && + gpiod_get_value_cansleep(tp->wakeuphost), + VSC_TP_WAIT_FW_ASSERTED_TIMEOUT); + if (!ret) + return -ETIMEDOUT; + + return 0; +} + +static void vsc_tp_wakeup_release(struct vsc_tp *tp) +{ + atomic_dec_if_positive(&tp->assert_cnt); + + gpiod_set_value_cansleep(tp->wakeupfw, 1); +} + +static int vsc_tp_dev_xfer(struct vsc_tp *tp, void *obuf, void *ibuf, size_t len) +{ + struct spi_message msg = { 0 }; + struct spi_transfer xfer = { + .tx_buf = obuf, + .rx_buf = ibuf, + .len = len, + }; + + spi_message_init_with_transfers(&msg, &xfer, 1); + + return spi_sync_locked(tp->spi, &msg); +} + +static int vsc_tp_xfer_helper(struct vsc_tp *tp, struct vsc_tp_packet *pkt, + void *ibuf, u16 ilen) +{ + int ret, offset = 0, cpy_len, src_len, dst_len = sizeof(struct vsc_tp_packet); + int next_xfer_len = VSC_TP_PACKET_SIZE(pkt) + VSC_TP_XFER_TIMEOUT_BYTES; + u8 *src, *crc_src, *rx_buf = tp->rx_buf; + int count_down = VSC_TP_MAX_XFER_COUNT; + u32 recv_crc = 0, crc = ~0; + struct vsc_tp_packet ack; + u8 *dst = (u8 *)&ack; + bool synced = false; + + do { + ret = vsc_tp_dev_xfer(tp, pkt, rx_buf, next_xfer_len); + if (ret) + return ret; + memset(pkt, 0, VSC_TP_MAX_XFER_SIZE); + + if (synced) { + src = rx_buf; + src_len = next_xfer_len; + } else { + src = memchr(rx_buf, VSC_TP_PACKET_SYNC, next_xfer_len); + if (!src) + continue; + synced = true; + src_len = next_xfer_len - (src - rx_buf); + } + + /* traverse received data */ + while (src_len > 0) { + cpy_len = min(src_len, dst_len); + memcpy(dst, src, cpy_len); + crc_src = src; + src += cpy_len; + src_len -= cpy_len; + dst += cpy_len; + dst_len -= cpy_len; + + if (offset < sizeof(ack)) { + offset += cpy_len; + crc = crc32(crc, crc_src, cpy_len); + + if (!src_len) + continue; + + if (le16_to_cpu(ack.len)) { + dst = ibuf; + dst_len = min(ilen, le16_to_cpu(ack.len)); + } else { + dst = (u8 *)&recv_crc; + dst_len = sizeof(recv_crc); + } + } else if (offset < sizeof(ack) + le16_to_cpu(ack.len)) { + offset += cpy_len; + crc = crc32(crc, crc_src, cpy_len); + + if (src_len) { + int remain = sizeof(ack) + le16_to_cpu(ack.len) - offset; + + cpy_len = min(src_len, remain); + offset += cpy_len; + crc = crc32(crc, src, cpy_len); + src += cpy_len; + src_len -= cpy_len; + if (src_len) { + dst = (u8 *)&recv_crc; + dst_len = sizeof(recv_crc); + continue; + } + } + next_xfer_len = VSC_TP_NEXT_XFER_LEN(le16_to_cpu(ack.len), offset); + } else if (offset < sizeof(ack) + le16_to_cpu(ack.len) + VSC_TP_CRC_SIZE) { + offset += cpy_len; + + if (src_len) { + /* terminate the traverse */ + next_xfer_len = 0; + break; + } + next_xfer_len = VSC_TP_NEXT_XFER_LEN(le16_to_cpu(ack.len), offset); + } + } + } while (next_xfer_len > 0 && --count_down); + + if (next_xfer_len > 0) + return -EAGAIN; + + if (~recv_crc != crc || le32_to_cpu(ack.seq) != tp->seq) { + 
dev_err(&tp->spi->dev, "recv crc or seq error\n"); + return -EINVAL; + } + + if (ack.cmd == VSC_TP_CMD_ACK || ack.cmd == VSC_TP_CMD_NACK || + ack.cmd == VSC_TP_CMD_BUSY) { + dev_err(&tp->spi->dev, "recv cmd ack error\n"); + return -EAGAIN; + } + + return min(le16_to_cpu(ack.len), ilen); +} + +/** + * vsc_tp_xfer - transfer data to firmware + * @tp: vsc_tp device handle + * @cmd: the command to be sent to the device + * @obuf: the tx buffer to be sent to the device + * @olen: the length of tx buffer + * @ibuf: the rx buffer to receive from the device + * @ilen: the length of rx buffer + * Return: the length of received data in case of success, + * otherwise negative value + */ +int vsc_tp_xfer(struct vsc_tp *tp, u8 cmd, const void *obuf, size_t olen, + void *ibuf, size_t ilen) +{ + struct vsc_tp_packet *pkt = tp->tx_buf; + u32 crc; + int ret; + + if (!obuf || !ibuf || olen > VSC_TP_MAX_MSG_SIZE) + return -EINVAL; + + guard(mutex)(&tp->mutex); + + pkt->sync = VSC_TP_PACKET_SYNC; + pkt->cmd = cmd; + pkt->len = cpu_to_le16(olen); + pkt->seq = cpu_to_le32(++tp->seq); + memcpy(pkt->buf, obuf, olen); + + crc = ~crc32(~0, (u8 *)pkt, sizeof(pkt) + olen); + memcpy(pkt->buf + olen, &crc, sizeof(crc)); + + ret = vsc_tp_wakeup_request(tp); + if (unlikely(ret)) + dev_err(&tp->spi->dev, "wakeup firmware failed ret: %d\n", ret); + else + ret = vsc_tp_xfer_helper(tp, pkt, ibuf, ilen); + + vsc_tp_wakeup_release(tp); + + return ret; +} +EXPORT_SYMBOL_NS_GPL(vsc_tp_xfer, VSC_TP); + +/** + * vsc_tp_rom_xfer - transfer data to rom code + * @tp: vsc_tp device handle + * @obuf: the data buffer to be sent to the device + * @ibuf: the buffer to receive data from the device + * @len: the length of tx buffer and rx buffer + * Return: 0 in case of success, negative value in case of error + */ +int vsc_tp_rom_xfer(struct vsc_tp *tp, const void *obuf, void *ibuf, size_t len) +{ + size_t words = len / sizeof(__be32); + int ret; + + if (len % sizeof(__be32) || len > VSC_TP_MAX_MSG_SIZE) + return -EINVAL; + + guard(mutex)(&tp->mutex); + + /* rom xfer is big endian */ + cpu_to_be32_array(tp->tx_buf, obuf, words); + + ret = read_poll_timeout(gpiod_get_value_cansleep, ret, + !ret, VSC_TP_ROM_XFER_POLL_DELAY_US, + VSC_TP_ROM_XFER_POLL_TIMEOUT_US, false, + tp->wakeuphost); + if (ret) { + dev_err(&tp->spi->dev, "wait rom failed ret: %d\n", ret); + return ret; + } + + ret = vsc_tp_dev_xfer(tp, tp->tx_buf, tp->rx_buf, len); + if (ret) + return ret; + + if (ibuf) + cpu_to_be32_array(ibuf, tp->rx_buf, words); + + return ret; +} + +/** + * vsc_tp_reset - reset vsc transport layer + * @tp: vsc_tp device handle + */ +void vsc_tp_reset(struct vsc_tp *tp) +{ + disable_irq(tp->spi->irq); + + /* toggle reset pin */ + gpiod_set_value_cansleep(tp->resetfw, 0); + msleep(VSC_TP_RESET_PIN_TOGGLE_INTERVAL_MS); + gpiod_set_value_cansleep(tp->resetfw, 1); + + /* wait for ROM */ + msleep(VSC_TP_ROM_BOOTUP_DELAY_MS); + + /* + * Set default host wakeup pin to non-active + * to avoid unexpected host irq interrupt. 
+ */ + gpiod_set_value_cansleep(tp->wakeupfw, 1); + + atomic_set(&tp->assert_cnt, 0); + + enable_irq(tp->spi->irq); +} +EXPORT_SYMBOL_NS_GPL(vsc_tp_reset, VSC_TP); + +/** + * vsc_tp_need_read - check if device has data to sent + * @tp: vsc_tp device handle + * Return: true if device has data to sent, otherwise false + */ +bool vsc_tp_need_read(struct vsc_tp *tp) +{ + if (!atomic_read(&tp->assert_cnt)) + return false; + if (!gpiod_get_value_cansleep(tp->wakeuphost)) + return false; + if (!gpiod_get_value_cansleep(tp->wakeupfw)) + return false; + + return true; +} +EXPORT_SYMBOL_NS_GPL(vsc_tp_need_read, VSC_TP); + +/** + * vsc_tp_register_event_cb - register a callback function to receive event + * @tp: vsc_tp device handle + * @event_cb: callback function + * @context: execution context of event callback + * Return: 0 in case of success, negative value in case of error + */ +int vsc_tp_register_event_cb(struct vsc_tp *tp, vsc_tp_event_cb_t event_cb, + void *context) +{ + tp->event_notify = event_cb; + tp->event_notify_context = context; + + return 0; +} +EXPORT_SYMBOL_NS_GPL(vsc_tp_register_event_cb, VSC_TP); + +/** + * vsc_tp_intr_synchronize - synchronize vsc_tp interrupt + * @tp: vsc_tp device handle + */ +void vsc_tp_intr_synchronize(struct vsc_tp *tp) +{ + synchronize_irq(tp->spi->irq); +} +EXPORT_SYMBOL_NS_GPL(vsc_tp_intr_synchronize, VSC_TP); + +/** + * vsc_tp_intr_enable - enable vsc_tp interrupt + * @tp: vsc_tp device handle + */ +void vsc_tp_intr_enable(struct vsc_tp *tp) +{ + enable_irq(tp->spi->irq); +} +EXPORT_SYMBOL_NS_GPL(vsc_tp_intr_enable, VSC_TP); + +/** + * vsc_tp_intr_disable - disable vsc_tp interrupt + * @tp: vsc_tp device handle + */ +void vsc_tp_intr_disable(struct vsc_tp *tp) +{ + disable_irq(tp->spi->irq); +} +EXPORT_SYMBOL_NS_GPL(vsc_tp_intr_disable, VSC_TP); + +static irqreturn_t vsc_tp_isr(int irq, void *data) +{ + struct vsc_tp *tp = data; + + atomic_inc(&tp->assert_cnt); + + wake_up(&tp->xfer_wait); + + return IRQ_WAKE_THREAD; +} + +static irqreturn_t vsc_tp_thread_isr(int irq, void *data) +{ + struct vsc_tp *tp = data; + + if (tp->event_notify) + tp->event_notify(tp->event_notify_context); + + return IRQ_HANDLED; +} + +static int vsc_tp_match_any(struct acpi_device *adev, void *data) +{ + struct acpi_device **__adev = data; + + *__adev = adev; + + return 1; +} + +static int vsc_tp_probe(struct spi_device *spi) +{ + struct platform_device_info pinfo = { 0 }; + struct device *dev = &spi->dev; + struct platform_device *pdev; + struct acpi_device *adev; + struct vsc_tp *tp; + int ret; + + tp = devm_kzalloc(dev, sizeof(*tp), GFP_KERNEL); + if (!tp) + return -ENOMEM; + + tp->tx_buf = devm_kzalloc(dev, VSC_TP_MAX_XFER_SIZE, GFP_KERNEL); + if (!tp->tx_buf) + return -ENOMEM; + + tp->rx_buf = devm_kzalloc(dev, VSC_TP_MAX_XFER_SIZE, GFP_KERNEL); + if (!tp->rx_buf) + return -ENOMEM; + + ret = devm_acpi_dev_add_driver_gpios(dev, vsc_tp_acpi_gpios); + if (ret) + return ret; + + tp->wakeuphost = devm_gpiod_get(dev, "wakeuphost", GPIOD_IN); + if (IS_ERR(tp->wakeuphost)) + return PTR_ERR(tp->wakeuphost); + + tp->resetfw = devm_gpiod_get(dev, "resetfw", GPIOD_OUT_HIGH); + if (IS_ERR(tp->resetfw)) + return PTR_ERR(tp->resetfw); + + tp->wakeupfw = devm_gpiod_get(dev, "wakeupfw", GPIOD_OUT_HIGH); + if (IS_ERR(tp->wakeupfw)) + return PTR_ERR(tp->wakeupfw); + + atomic_set(&tp->assert_cnt, 0); + init_waitqueue_head(&tp->xfer_wait); + tp->spi = spi; + + irq_set_status_flags(spi->irq, IRQ_DISABLE_UNLAZY); + ret = devm_request_threaded_irq(dev, spi->irq, vsc_tp_isr, + 
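A hedged sketch of how a consumer of this transport might hook the callback registered via vsc_tp_register_event_cb() and use vsc_tp_need_read() to decide when to pull data; the consumer structure and work handler are hypothetical, only the vsc_tp_* calls come from this layer.

#include <linux/workqueue.h>

#include "vsc-tp.h"

struct vsc_consumer_sketch {
	struct vsc_tp *tp;
	struct work_struct rx_work;	/* does the actual vsc_tp_xfer() read */
};

/* Runs from the transport's threaded IRQ via tp->event_notify. */
static void vsc_consumer_event_sketch(void *context)
{
	struct vsc_consumer_sketch *c = context;

	/* Only schedule a read when the firmware is really signalling data. */
	if (vsc_tp_need_read(c->tp))
		schedule_work(&c->rx_work);
}

static int vsc_consumer_attach_sketch(struct vsc_consumer_sketch *c)
{
	return vsc_tp_register_event_cb(c->tp, vsc_consumer_event_sketch, c);
}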
vsc_tp_thread_isr, + IRQF_TRIGGER_FALLING | IRQF_ONESHOT, + dev_name(dev), tp); + if (ret) + return ret; + + mutex_init(&tp->mutex); + + /* only one child acpi device */ + ret = acpi_dev_for_each_child(ACPI_COMPANION(dev), + vsc_tp_match_any, &adev); + if (!ret) { + ret = -ENODEV; + goto err_destroy_lock; + } + pinfo.fwnode = acpi_fwnode_handle(adev); + + pinfo.name = "intel_vsc"; + pinfo.data = &tp; + pinfo.size_data = sizeof(tp); + pinfo.id = PLATFORM_DEVID_NONE; + + pdev = platform_device_register_full(&pinfo); + if (IS_ERR(pdev)) { + ret = PTR_ERR(pdev); + goto err_destroy_lock; + } + + tp->pdev = pdev; + spi_set_drvdata(spi, tp); + + return 0; + +err_destroy_lock: + mutex_destroy(&tp->mutex); + + return ret; +} + +static void vsc_tp_remove(struct spi_device *spi) +{ + struct vsc_tp *tp = spi_get_drvdata(spi); + + platform_device_unregister(tp->pdev); + + mutex_destroy(&tp->mutex); +} + +static const struct acpi_device_id vsc_tp_acpi_ids[] = { + { "INTC1009" }, /* Raptor Lake */ + { "INTC1058" }, /* Tiger Lake */ + { "INTC1094" }, /* Alder Lake */ + {} +}; +MODULE_DEVICE_TABLE(acpi, vsc_tp_acpi_ids); + +static struct spi_driver vsc_tp_driver = { + .probe = vsc_tp_probe, + .remove = vsc_tp_remove, + .driver = { + .name = "vsc-tp", + .acpi_match_table = vsc_tp_acpi_ids, + }, +}; +module_spi_driver(vsc_tp_driver); + +MODULE_AUTHOR("Wentong Wu <wentong.wu@intel.com>"); +MODULE_AUTHOR("Zhifeng Wang <zhifeng.wang@intel.com>"); +MODULE_DESCRIPTION("Intel Visual Sensing Controller Transport Layer"); +MODULE_LICENSE("GPL"); diff --git a/drivers/misc/mei/vsc-tp.h b/drivers/misc/mei/vsc-tp.h new file mode 100644 index 000000000000..f9513ddc3e40 --- /dev/null +++ b/drivers/misc/mei/vsc-tp.h @@ -0,0 +1,50 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2023, Intel Corporation. + * Intel Visual Sensing Controller Transport Layer Linux driver + */ + +#ifndef _VSC_TP_H_ +#define _VSC_TP_H_ + +#include <linux/types.h> + +#define VSC_TP_CMD_WRITE 0x01 +#define VSC_TP_CMD_READ 0x02 + +#define VSC_TP_CMD_ACK 0x10 +#define VSC_TP_CMD_NACK 0x11 +#define VSC_TP_CMD_BUSY 0x12 + +struct vsc_tp; + +/** + * typedef vsc_event_cb_t - event callback function signature + * @context: the execution context of who registered this callback + * + * The callback function is called in interrupt context and the data + * payload is only valid during the call. If the user needs access + * the data payload later, it must copy the payload. + */ +typedef void (*vsc_tp_event_cb_t)(void *context); + +int vsc_tp_rom_xfer(struct vsc_tp *tp, const void *obuf, void *ibuf, + size_t len); + +int vsc_tp_xfer(struct vsc_tp *tp, u8 cmd, const void *obuf, size_t olen, + void *ibuf, size_t ilen); + +int vsc_tp_register_event_cb(struct vsc_tp *tp, vsc_tp_event_cb_t event_cb, + void *context); + +void vsc_tp_intr_enable(struct vsc_tp *tp); +void vsc_tp_intr_disable(struct vsc_tp *tp); +void vsc_tp_intr_synchronize(struct vsc_tp *tp); + +void vsc_tp_reset(struct vsc_tp *tp); + +bool vsc_tp_need_read(struct vsc_tp *tp); + +int vsc_tp_init(struct vsc_tp *tp, struct device *dev); + +#endif diff --git a/drivers/misc/nsm.c b/drivers/misc/nsm.c new file mode 100644 index 000000000000..0eaa3b4484bd --- /dev/null +++ b/drivers/misc/nsm.c @@ -0,0 +1,506 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Amazon Nitro Secure Module driver. + * + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + * + * The Nitro Secure Module implements commands via CBOR over virtio. 
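Before the CBOR helpers below, a minimal worked sketch of how a CBOR initial byte encodes a length, which is the decoding cbor_object_get_array() performs: the low five "additional information" bits either hold the length directly (0..23) or select a 1/2/4-byte big-endian length field. Only the forms this driver relies on are shown; the function name is illustrative.

#include <linux/errno.h>
#include <linux/types.h>
#include <asm/unaligned.h>

/* e.g. 0x69 = text(9): the nine bytes "GetRandom" follow the initial byte. */
static s64 cbor_len_sketch(const u8 *p, size_t avail, size_t *hdr_len)
{
	u8 ai = p[0] & 0x1F;		/* "additional information" bits */

	if (ai <= 23) {			/* length stored in the initial byte */
		*hdr_len = 1;
		return ai;
	}
	if (ai == 24 && avail >= 2) {	/* one extra length byte */
		*hdr_len = 2;
		return p[1];
	}
	if (ai == 25 && avail >= 3) {	/* 16-bit big-endian length */
		*hdr_len = 3;
		return get_unaligned_be16(&p[1]);
	}
	if (ai == 26 && avail >= 5) {	/* 32-bit big-endian length */
		*hdr_len = 5;
		return get_unaligned_be32(&p[1]);
	}
	return -EINVAL;			/* 64-bit and malformed forms omitted */
}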
+ * This driver exposes a raw message ioctls on /dev/nsm that user + * space can use to issue these commands. + */ + +#include <linux/file.h> +#include <linux/fs.h> +#include <linux/interrupt.h> +#include <linux/hw_random.h> +#include <linux/miscdevice.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/slab.h> +#include <linux/string.h> +#include <linux/uaccess.h> +#include <linux/uio.h> +#include <linux/virtio_config.h> +#include <linux/virtio_ids.h> +#include <linux/virtio.h> +#include <linux/wait.h> +#include <uapi/linux/nsm.h> + +/* Timeout for NSM virtqueue respose in milliseconds. */ +#define NSM_DEFAULT_TIMEOUT_MSECS (120000) /* 2 minutes */ + +/* Maximum length input data */ +struct nsm_data_req { + u32 len; + u8 data[NSM_REQUEST_MAX_SIZE]; +}; + +/* Maximum length output data */ +struct nsm_data_resp { + u32 len; + u8 data[NSM_RESPONSE_MAX_SIZE]; +}; + +/* Full NSM request/response message */ +struct nsm_msg { + struct nsm_data_req req; + struct nsm_data_resp resp; +}; + +struct nsm { + struct virtio_device *vdev; + struct virtqueue *vq; + struct mutex lock; + struct completion cmd_done; + struct miscdevice misc; + struct hwrng hwrng; + struct work_struct misc_init; + struct nsm_msg msg; +}; + +/* NSM device ID */ +static const struct virtio_device_id id_table[] = { + { VIRTIO_ID_NITRO_SEC_MOD, VIRTIO_DEV_ANY_ID }, + { 0 }, +}; + +static struct nsm *file_to_nsm(struct file *file) +{ + return container_of(file->private_data, struct nsm, misc); +} + +static struct nsm *hwrng_to_nsm(struct hwrng *rng) +{ + return container_of(rng, struct nsm, hwrng); +} + +#define CBOR_TYPE_MASK 0xE0 +#define CBOR_TYPE_MAP 0xA0 +#define CBOR_TYPE_TEXT 0x60 +#define CBOR_TYPE_ARRAY 0x40 +#define CBOR_HEADER_SIZE_SHORT 1 + +#define CBOR_SHORT_SIZE_MAX_VALUE 23 +#define CBOR_LONG_SIZE_U8 24 +#define CBOR_LONG_SIZE_U16 25 +#define CBOR_LONG_SIZE_U32 26 +#define CBOR_LONG_SIZE_U64 27 + +static bool cbor_object_is_array(const u8 *cbor_object, size_t cbor_object_size) +{ + if (cbor_object_size == 0 || cbor_object == NULL) + return false; + + return (cbor_object[0] & CBOR_TYPE_MASK) == CBOR_TYPE_ARRAY; +} + +static int cbor_object_get_array(u8 *cbor_object, size_t cbor_object_size, u8 **cbor_array) +{ + u8 cbor_short_size; + void *array_len_p; + u64 array_len; + u64 array_offset; + + if (!cbor_object_is_array(cbor_object, cbor_object_size)) + return -EFAULT; + + cbor_short_size = (cbor_object[0] & 0x1F); + + /* Decoding byte array length */ + array_offset = CBOR_HEADER_SIZE_SHORT; + if (cbor_short_size >= CBOR_LONG_SIZE_U8) + array_offset += BIT(cbor_short_size - CBOR_LONG_SIZE_U8); + + if (cbor_object_size < array_offset) + return -EFAULT; + + array_len_p = &cbor_object[1]; + + switch (cbor_short_size) { + case CBOR_SHORT_SIZE_MAX_VALUE: /* short encoding */ + array_len = cbor_short_size; + break; + case CBOR_LONG_SIZE_U8: + array_len = *(u8 *)array_len_p; + break; + case CBOR_LONG_SIZE_U16: + array_len = be16_to_cpup((__be16 *)array_len_p); + break; + case CBOR_LONG_SIZE_U32: + array_len = be32_to_cpup((__be32 *)array_len_p); + break; + case CBOR_LONG_SIZE_U64: + array_len = be64_to_cpup((__be64 *)array_len_p); + break; + } + + if (cbor_object_size < array_offset) + return -EFAULT; + + if (cbor_object_size - array_offset < array_len) + return -EFAULT; + + if (array_len > INT_MAX) + return -EFAULT; + + *cbor_array = cbor_object + array_offset; + return array_len; +} + +/* Copy the request of a raw message to kernel space */ +static int fill_req_raw(struct nsm *nsm, struct nsm_data_req 
*req, + struct nsm_raw *raw) +{ + /* Verify the user input size. */ + if (raw->request.len > sizeof(req->data)) + return -EMSGSIZE; + + /* Copy the request payload */ + if (copy_from_user(req->data, u64_to_user_ptr(raw->request.addr), + raw->request.len)) + return -EFAULT; + + req->len = raw->request.len; + + return 0; +} + +/* Copy the response of a raw message back to user-space */ +static int parse_resp_raw(struct nsm *nsm, struct nsm_data_resp *resp, + struct nsm_raw *raw) +{ + /* Truncate any message that does not fit. */ + raw->response.len = min_t(u64, raw->response.len, resp->len); + + /* Copy the response content to user space */ + if (copy_to_user(u64_to_user_ptr(raw->response.addr), + resp->data, raw->response.len)) + return -EFAULT; + + return 0; +} + +/* Virtqueue interrupt handler */ +static void nsm_vq_callback(struct virtqueue *vq) +{ + struct nsm *nsm = vq->vdev->priv; + + complete(&nsm->cmd_done); +} + +/* Forward a message to the NSM device and wait for the response from it */ +static int nsm_sendrecv_msg_locked(struct nsm *nsm) +{ + struct device *dev = &nsm->vdev->dev; + struct scatterlist sg_in, sg_out; + struct nsm_msg *msg = &nsm->msg; + struct virtqueue *vq = nsm->vq; + unsigned int len; + void *queue_buf; + bool kicked; + int rc; + + /* Initialize scatter-gather lists with request and response buffers. */ + sg_init_one(&sg_out, msg->req.data, msg->req.len); + sg_init_one(&sg_in, msg->resp.data, sizeof(msg->resp.data)); + + init_completion(&nsm->cmd_done); + /* Add the request buffer (read by the device). */ + rc = virtqueue_add_outbuf(vq, &sg_out, 1, msg->req.data, GFP_KERNEL); + if (rc) + return rc; + + /* Add the response buffer (written by the device). */ + rc = virtqueue_add_inbuf(vq, &sg_in, 1, msg->resp.data, GFP_KERNEL); + if (rc) + goto cleanup; + + kicked = virtqueue_kick(vq); + if (!kicked) { + /* Cannot kick the virtqueue. */ + rc = -EIO; + goto cleanup; + } + + /* If the kick succeeded, wait for the device's response. */ + if (!wait_for_completion_io_timeout(&nsm->cmd_done, + msecs_to_jiffies(NSM_DEFAULT_TIMEOUT_MSECS))) { + rc = -ETIMEDOUT; + goto cleanup; + } + + queue_buf = virtqueue_get_buf(vq, &len); + if (!queue_buf || (queue_buf != msg->req.data)) { + dev_err(dev, "wrong request buffer."); + rc = -ENODATA; + goto cleanup; + } + + queue_buf = virtqueue_get_buf(vq, &len); + if (!queue_buf || (queue_buf != msg->resp.data)) { + dev_err(dev, "wrong response buffer."); + rc = -ENODATA; + goto cleanup; + } + + msg->resp.len = len; + + rc = 0; + +cleanup: + if (rc) { + /* Clean the virtqueue. 
*/ + while (virtqueue_get_buf(vq, &len) != NULL) + ; + } + + return rc; +} + +static int fill_req_get_random(struct nsm *nsm, struct nsm_data_req *req) +{ + /* + * 69 # text(9) + * 47657452616E646F6D # "GetRandom" + */ + const u8 request[] = { CBOR_TYPE_TEXT + strlen("GetRandom"), + 'G', 'e', 't', 'R', 'a', 'n', 'd', 'o', 'm' }; + + memcpy(req->data, request, sizeof(request)); + req->len = sizeof(request); + + return 0; +} + +static int parse_resp_get_random(struct nsm *nsm, struct nsm_data_resp *resp, + void *out, size_t max) +{ + /* + * A1 # map(1) + * 69 # text(9) - Name of field + * 47657452616E646F6D # "GetRandom" + * A1 # map(1) - The field itself + * 66 # text(6) + * 72616E646F6D # "random" + * # The rest of the response is random data + */ + const u8 response[] = { CBOR_TYPE_MAP + 1, + CBOR_TYPE_TEXT + strlen("GetRandom"), + 'G', 'e', 't', 'R', 'a', 'n', 'd', 'o', 'm', + CBOR_TYPE_MAP + 1, + CBOR_TYPE_TEXT + strlen("random"), + 'r', 'a', 'n', 'd', 'o', 'm' }; + struct device *dev = &nsm->vdev->dev; + u8 *rand_data = NULL; + u8 *resp_ptr = resp->data; + u64 resp_len = resp->len; + int rc; + + if ((resp->len < sizeof(response) + 1) || + (memcmp(resp_ptr, response, sizeof(response)) != 0)) { + dev_err(dev, "Invalid response for GetRandom"); + return -EFAULT; + } + + resp_ptr += sizeof(response); + resp_len -= sizeof(response); + + rc = cbor_object_get_array(resp_ptr, resp_len, &rand_data); + if (rc < 0) { + dev_err(dev, "GetRandom: Invalid CBOR encoding\n"); + return rc; + } + + rc = min_t(size_t, rc, max); + memcpy(out, rand_data, rc); + + return rc; +} + +/* + * HwRNG implementation + */ +static int nsm_rng_read(struct hwrng *rng, void *data, size_t max, bool wait) +{ + struct nsm *nsm = hwrng_to_nsm(rng); + struct device *dev = &nsm->vdev->dev; + int rc = 0; + + /* NSM always needs to wait for a response */ + if (!wait) + return 0; + + mutex_lock(&nsm->lock); + + rc = fill_req_get_random(nsm, &nsm->msg.req); + if (rc != 0) + goto out; + + rc = nsm_sendrecv_msg_locked(nsm); + if (rc != 0) + goto out; + + rc = parse_resp_get_random(nsm, &nsm->msg.resp, data, max); + if (rc < 0) + goto out; + + dev_dbg(dev, "RNG: returning rand bytes = %d", rc); +out: + mutex_unlock(&nsm->lock); + return rc; +} + +static long nsm_dev_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +{ + void __user *argp = u64_to_user_ptr((u64)arg); + struct nsm *nsm = file_to_nsm(file); + struct nsm_raw raw; + int r = 0; + + if (cmd != NSM_IOCTL_RAW) + return -EINVAL; + + if (_IOC_SIZE(cmd) != sizeof(raw)) + return -EINVAL; + + /* Copy user argument struct to kernel argument struct */ + r = -EFAULT; + if (copy_from_user(&raw, argp, _IOC_SIZE(cmd))) + goto out; + + mutex_lock(&nsm->lock); + + /* Convert kernel argument struct to device request */ + r = fill_req_raw(nsm, &nsm->msg.req, &raw); + if (r) + goto out; + + /* Send message to NSM and read reply */ + r = nsm_sendrecv_msg_locked(nsm); + if (r) + goto out; + + /* Parse device response into kernel argument struct */ + r = parse_resp_raw(nsm, &nsm->msg.resp, &raw); + if (r) + goto out; + + /* Copy kernel argument struct back to user argument struct */ + r = -EFAULT; + if (copy_to_user(argp, &raw, sizeof(raw))) + goto out; + + r = 0; + +out: + mutex_unlock(&nsm->lock); + return r; +} + +static int nsm_device_init_vq(struct virtio_device *vdev) +{ + struct virtqueue *vq = virtio_find_single_vq(vdev, + nsm_vq_callback, "nsm.vq.0"); + struct nsm *nsm = vdev->priv; + + if (IS_ERR(vq)) + return PTR_ERR(vq); + + nsm->vq = vq; + + return 0; +} + +static 
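A hedged userspace sketch of driving the raw interface handled by nsm_dev_ioctl() above: fill a struct nsm_raw with user pointers and lengths, issue NSM_IOCTL_RAW on /dev/nsm, and read back the possibly truncated response length. Field names follow the kernel code above; consult <linux/nsm.h> for the authoritative UAPI layout.

#include <fcntl.h>
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/nsm.h>

/* Returns the response length on success, -1 on error. */
static int nsm_raw_roundtrip_sketch(const void *req, size_t req_len,
				    void *resp, size_t resp_len)
{
	struct nsm_raw raw;
	int fd, ret;

	fd = open("/dev/nsm", O_RDWR);
	if (fd < 0)
		return -1;

	memset(&raw, 0, sizeof(raw));
	raw.request.addr = (uintptr_t)req;	/* CBOR-encoded request */
	raw.request.len = req_len;
	raw.response.addr = (uintptr_t)resp;	/* buffer for the CBOR reply */
	raw.response.len = resp_len;

	ret = ioctl(fd, NSM_IOCTL_RAW, &raw);
	close(fd);

	return ret < 0 ? -1 : (int)raw.response.len;
}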
const struct file_operations nsm_dev_fops = { + .unlocked_ioctl = nsm_dev_ioctl, + .compat_ioctl = compat_ptr_ioctl, +}; + +/* Handler for probing the NSM device */ +static int nsm_device_probe(struct virtio_device *vdev) +{ + struct device *dev = &vdev->dev; + struct nsm *nsm; + int rc; + + nsm = devm_kzalloc(&vdev->dev, sizeof(*nsm), GFP_KERNEL); + if (!nsm) + return -ENOMEM; + + vdev->priv = nsm; + nsm->vdev = vdev; + + rc = nsm_device_init_vq(vdev); + if (rc) { + dev_err(dev, "queue failed to initialize: %d.\n", rc); + goto err_init_vq; + } + + mutex_init(&nsm->lock); + + /* Register as hwrng provider */ + nsm->hwrng = (struct hwrng) { + .read = nsm_rng_read, + .name = "nsm-hwrng", + .quality = 1000, + }; + + rc = hwrng_register(&nsm->hwrng); + if (rc) { + dev_err(dev, "RNG initialization error: %d.\n", rc); + goto err_hwrng; + } + + /* Register /dev/nsm device node */ + nsm->misc = (struct miscdevice) { + .minor = MISC_DYNAMIC_MINOR, + .name = "nsm", + .fops = &nsm_dev_fops, + .mode = 0666, + }; + + rc = misc_register(&nsm->misc); + if (rc) { + dev_err(dev, "misc device registration error: %d.\n", rc); + goto err_misc; + } + + return 0; + +err_misc: + hwrng_unregister(&nsm->hwrng); +err_hwrng: + vdev->config->del_vqs(vdev); +err_init_vq: + return rc; +} + +/* Handler for removing the NSM device */ +static void nsm_device_remove(struct virtio_device *vdev) +{ + struct nsm *nsm = vdev->priv; + + hwrng_unregister(&nsm->hwrng); + + vdev->config->del_vqs(vdev); + misc_deregister(&nsm->misc); +} + +/* NSM device configuration structure */ +static struct virtio_driver virtio_nsm_driver = { + .feature_table = 0, + .feature_table_size = 0, + .feature_table_legacy = 0, + .feature_table_size_legacy = 0, + .driver.name = KBUILD_MODNAME, + .driver.owner = THIS_MODULE, + .id_table = id_table, + .probe = nsm_device_probe, + .remove = nsm_device_remove, +}; + +module_virtio_driver(virtio_nsm_driver); +MODULE_DEVICE_TABLE(virtio, id_table); +MODULE_DESCRIPTION("Virtio NSM driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/misc/pvpanic/pvpanic-mmio.c b/drivers/misc/pvpanic/pvpanic-mmio.c index 9715798acce3..f3f2113a54a7 100644 --- a/drivers/misc/pvpanic/pvpanic-mmio.c +++ b/drivers/misc/pvpanic/pvpanic-mmio.c @@ -7,16 +7,15 @@ * Copyright (C) 2021 Oracle. */ +#include <linux/device.h> +#include <linux/err.h> #include <linux/io.h> -#include <linux/kernel.h> +#include <linux/ioport.h> #include <linux/kexec.h> #include <linux/mod_devicetable.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/types.h> -#include <linux/slab.h> - -#include <uapi/misc/pvpanic.h> #include "pvpanic.h" diff --git a/drivers/misc/pvpanic/pvpanic-pci.c b/drivers/misc/pvpanic/pvpanic-pci.c index 689af4c28c2a..9ad20e82785b 100644 --- a/drivers/misc/pvpanic/pvpanic-pci.c +++ b/drivers/misc/pvpanic/pvpanic-pci.c @@ -5,17 +5,13 @@ * Copyright (C) 2021 Oracle. 
*/ -#include <linux/kernel.h> +#include <linux/errno.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/types.h> -#include <linux/slab.h> - -#include <uapi/misc/pvpanic.h> #include "pvpanic.h" -#define PCI_VENDOR_ID_REDHAT 0x1b36 #define PCI_DEVICE_ID_REDHAT_PVPANIC 0x0011 MODULE_AUTHOR("Mihai Carabas <mihai.carabas@oracle.com>"); diff --git a/drivers/misc/pvpanic/pvpanic.c b/drivers/misc/pvpanic/pvpanic.c index 305b367e0ce3..df3457ce1cb1 100644 --- a/drivers/misc/pvpanic/pvpanic.c +++ b/drivers/misc/pvpanic/pvpanic.c @@ -8,16 +8,20 @@ */ #include <linux/device.h> +#include <linux/errno.h> +#include <linux/gfp_types.h> #include <linux/io.h> -#include <linux/kernel.h> #include <linux/kexec.h> +#include <linux/kstrtox.h> +#include <linux/limits.h> +#include <linux/list.h> #include <linux/mod_devicetable.h> #include <linux/module.h> -#include <linux/platform_device.h> #include <linux/panic_notifier.h> +#include <linux/platform_device.h> +#include <linux/spinlock.h> +#include <linux/sysfs.h> #include <linux/types.h> -#include <linux/cdev.h> -#include <linux/list.h> #include <uapi/misc/pvpanic.h> diff --git a/drivers/misc/pvpanic/pvpanic.h b/drivers/misc/pvpanic/pvpanic.h index 46ffb10438ad..a42fa760eed5 100644 --- a/drivers/misc/pvpanic/pvpanic.h +++ b/drivers/misc/pvpanic/pvpanic.h @@ -8,6 +8,11 @@ #ifndef PVPANIC_H_ #define PVPANIC_H_ +#include <linux/compiler_types.h> + +struct attribute_group; +struct device; + int devm_pvpanic_probe(struct device *dev, void __iomem *base); extern const struct attribute_group *pvpanic_dev_groups[]; diff --git a/drivers/misc/vmw_vmci/vmci_handle_array.c b/drivers/misc/vmw_vmci/vmci_handle_array.c index de7fee7ead1b..681b3500125a 100644 --- a/drivers/misc/vmw_vmci/vmci_handle_array.c +++ b/drivers/misc/vmw_vmci/vmci_handle_array.c @@ -8,12 +8,6 @@ #include <linux/slab.h> #include "vmci_handle_array.h" -static size_t handle_arr_calc_size(u32 capacity) -{ - return VMCI_HANDLE_ARRAY_HEADER_SIZE + - capacity * sizeof(struct vmci_handle); -} - struct vmci_handle_arr *vmci_handle_arr_create(u32 capacity, u32 max_capacity) { struct vmci_handle_arr *array; @@ -25,7 +19,7 @@ struct vmci_handle_arr *vmci_handle_arr_create(u32 capacity, u32 max_capacity) capacity = min((u32)VMCI_HANDLE_ARRAY_DEFAULT_CAPACITY, max_capacity); - array = kmalloc(handle_arr_calc_size(capacity), GFP_ATOMIC); + array = kmalloc(struct_size(array, entries, capacity), GFP_ATOMIC); if (!array) return NULL; @@ -51,8 +45,8 @@ int vmci_handle_arr_append_entry(struct vmci_handle_arr **array_ptr, struct vmci_handle_arr *new_array; u32 capacity_bump = min(array->max_capacity - array->capacity, array->capacity); - size_t new_size = handle_arr_calc_size(array->capacity + - capacity_bump); + size_t new_size = struct_size(array, entries, + size_add(array->capacity, capacity_bump)); if (array->size >= array->max_capacity) return VMCI_ERROR_NO_MEM; diff --git a/drivers/misc/vmw_vmci/vmci_handle_array.h b/drivers/misc/vmw_vmci/vmci_handle_array.h index b0e6b1956014..27a38b97e8a8 100644 --- a/drivers/misc/vmw_vmci/vmci_handle_array.h +++ b/drivers/misc/vmw_vmci/vmci_handle_array.h @@ -20,14 +20,8 @@ struct vmci_handle_arr { struct vmci_handle entries[] __counted_by(capacity); }; -#define VMCI_HANDLE_ARRAY_HEADER_SIZE \ - offsetof(struct vmci_handle_arr, entries) /* Select a default capacity that results in a 64 byte sized array */ #define VMCI_HANDLE_ARRAY_DEFAULT_CAPACITY 6 -/* Make sure that the max array size can be expressed by a u32 */ -#define VMCI_HANDLE_ARRAY_MAX_CAPACITY \ - ((U32_MAX 
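The vmci_handle_array conversion above replaces open-coded size math with struct_size(); a hedged, generic sketch of that allocation pattern for a flexible-array struct annotated with __counted_by() (the names here are invented for illustration):

#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/types.h>

struct handle_arr_sketch {
	u32 capacity;
	u32 size;
	u32 entries[] __counted_by(capacity);
};

static struct handle_arr_sketch *handle_arr_alloc_sketch(u32 capacity)
{
	struct handle_arr_sketch *arr;

	/* struct_size() folds header + capacity * sizeof(entry) with overflow checks. */
	arr = kzalloc(struct_size(arr, entries, capacity), GFP_KERNEL);
	if (!arr)
		return NULL;

	arr->capacity = capacity;
	return arr;
}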
- VMCI_HANDLE_ARRAY_HEADER_SIZE - 1) / \ - sizeof(struct vmci_handle)) struct vmci_handle_arr *vmci_handle_arr_create(u32 capacity, u32 max_capacity); void vmci_handle_arr_destroy(struct vmci_handle_arr *array); diff --git a/drivers/mmc/host/rtsx_pci_sdmmc.c b/drivers/mmc/host/rtsx_pci_sdmmc.c index 87d78432a1e0..7dfe7c4e0077 100644 --- a/drivers/mmc/host/rtsx_pci_sdmmc.c +++ b/drivers/mmc/host/rtsx_pci_sdmmc.c @@ -7,6 +7,7 @@ * Wei WANG <wei_wang@realsil.com.cn> */ +#include <linux/pci.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/highmem.h> @@ -947,7 +948,7 @@ static int sd_power_on(struct realtek_pci_sdmmc *host, unsigned char power_mode) /* send at least 74 clocks */ rtsx_pci_write_register(pcr, SD_BUS_STAT, SD_CLK_TOGGLE_EN, SD_CLK_TOGGLE_EN); - if (PCI_PID(pcr) == PID_5261) { + if ((PCI_PID(pcr) == PID_5261) || (PCI_PID(pcr) == PID_5264)) { /* * If test mode is set switch to SD Express mandatorily, * this is only for factory testing. @@ -1364,6 +1365,14 @@ static int sdmmc_init_sd_express(struct mmc_host *mmc, struct mmc_ios *ios) struct realtek_pci_sdmmc *host = mmc_priv(mmc); struct rtsx_pcr *pcr = host->pcr; + if (PCI_PID(pcr) == PID_5264) { + pcie_capability_clear_and_set_word(pcr->pci, PCI_EXP_LNKCTL2, + PCI_EXP_LNKCTL2_TLS, PCI_EXP_LNKCTL2_TLS_2_5GT); + pci_write_config_byte(pcr->pci, 0x80e, 0x02); + pcie_capability_clear_and_set_word(pcr->pci, PCI_EXP_LNKCTL2, + PCI_EXP_LNKCTL2_TLS, PCI_EXP_LNKCTL2_TLS_5_0GT); + } + /* Set relink_time for changing to PCIe card */ relink_time = 0x8FFF; @@ -1379,6 +1388,12 @@ static int sdmmc_init_sd_express(struct mmc_host *mmc, struct mmc_ios *ios) if (pcr->ops->disable_auto_blink) pcr->ops->disable_auto_blink(pcr); + if (PCI_PID(pcr) == PID_5264) { + rtsx_pci_write_register(pcr, RTS5264_AUTOLOAD_CFG2, + RTS5264_CHIP_RST_N_SEL, RTS5264_CHIP_RST_N_SEL); + rtsx_pci_write_register(pcr, GPIO_CTL, 0x02, 0x00); + } + /* For PCIe/NVMe mode can't enter delink issue */ pcr->hw_param.interrupt_en &= ~(SD_INT_EN); rtsx_pci_writel(pcr, RTSX_BIER, pcr->hw_param.interrupt_en); diff --git a/drivers/mux/mmio.c b/drivers/mux/mmio.c index fd1d121a584b..30a952c34365 100644 --- a/drivers/mux/mmio.c +++ b/drivers/mux/mmio.c @@ -44,15 +44,20 @@ static int mux_mmio_probe(struct platform_device *pdev) int ret; int i; - if (of_device_is_compatible(np, "mmio-mux")) + if (of_device_is_compatible(np, "mmio-mux")) { regmap = syscon_node_to_regmap(np->parent); - else - regmap = dev_get_regmap(dev->parent, NULL) ?: ERR_PTR(-ENODEV); - if (IS_ERR(regmap)) { - ret = PTR_ERR(regmap); - dev_err(dev, "failed to get regmap: %d\n", ret); - return ret; + } else { + regmap = device_node_to_regmap(np); + /* Fallback to checking the parent node on "real" errors. */ + if (IS_ERR(regmap) && regmap != ERR_PTR(-EPROBE_DEFER)) { + regmap = dev_get_regmap(dev->parent, NULL); + if (!regmap) + regmap = ERR_PTR(-ENODEV); + } } + if (IS_ERR(regmap)) + return dev_err_probe(dev, PTR_ERR(regmap), + "failed to get regmap\n"); ret = of_property_count_u32_elems(np, "mux-reg-masks"); if (ret == 0 || ret % 2) diff --git a/drivers/nvmem/Kconfig b/drivers/nvmem/Kconfig index 5bc9c4874fe3..283134498fbc 100644 --- a/drivers/nvmem/Kconfig +++ b/drivers/nvmem/Kconfig @@ -1,6 +1,7 @@ # SPDX-License-Identifier: GPL-2.0-only menuconfig NVMEM bool "NVMEM Support" + imply NVMEM_LAYOUTS help Support for NVMEM(Non Volatile Memory) devices like EEPROM, EFUSES... 
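The mux-mmio hunk above switches to the dev_err_probe() idiom; a small, hedged sketch of that pattern with an invented helper name, showing how probe deferral is recorded without log noise while real errors are printed once:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/regmap.h>

static int mux_get_regmap_sketch(struct device *dev, struct regmap **out)
{
	struct regmap *regmap = dev_get_regmap(dev->parent, NULL);

	if (!regmap)
		regmap = ERR_PTR(-ENODEV);
	if (IS_ERR(regmap))
		/* -EPROBE_DEFER is stored for devices_deferred, others are logged. */
		return dev_err_probe(dev, PTR_ERR(regmap), "failed to get regmap\n");

	*out = regmap;
	return 0;
}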
diff --git a/drivers/nvmem/Makefile b/drivers/nvmem/Makefile index 423baf089515..cdd01fbf1313 100644 --- a/drivers/nvmem/Makefile +++ b/drivers/nvmem/Makefile @@ -5,6 +5,8 @@ obj-$(CONFIG_NVMEM) += nvmem_core.o nvmem_core-y := core.o +obj-$(CONFIG_NVMEM_LAYOUTS) += nvmem_layouts.o +nvmem_layouts-y := layouts.o obj-y += layouts/ # Devices diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c index 608b352a7d91..980123fb4dde 100644 --- a/drivers/nvmem/core.c +++ b/drivers/nvmem/core.c @@ -19,29 +19,7 @@ #include <linux/of.h> #include <linux/slab.h> -struct nvmem_device { - struct module *owner; - struct device dev; - int stride; - int word_size; - int id; - struct kref refcnt; - size_t size; - bool read_only; - bool root_only; - int flags; - enum nvmem_type type; - struct bin_attribute eeprom; - struct device *base_dev; - struct list_head cells; - const struct nvmem_keepout *keepout; - unsigned int nkeepout; - nvmem_reg_read_t reg_read; - nvmem_reg_write_t reg_write; - struct gpio_desc *wp_gpio; - struct nvmem_layout *layout; - void *priv; -}; +#include "internals.h" #define to_nvmem_device(d) container_of(d, struct nvmem_device, dev) @@ -77,9 +55,6 @@ static LIST_HEAD(nvmem_lookup_list); static BLOCKING_NOTIFIER_HEAD(nvmem_notifier); -static DEFINE_SPINLOCK(nvmem_layout_lock); -static LIST_HEAD(nvmem_layouts); - static int __nvmem_reg_read(struct nvmem_device *nvmem, unsigned int offset, void *val, size_t bytes) { @@ -324,6 +299,43 @@ static umode_t nvmem_bin_attr_is_visible(struct kobject *kobj, return nvmem_bin_attr_get_umode(nvmem); } +static struct nvmem_cell *nvmem_create_cell(struct nvmem_cell_entry *entry, + const char *id, int index); + +static ssize_t nvmem_cell_attr_read(struct file *filp, struct kobject *kobj, + struct bin_attribute *attr, char *buf, + loff_t pos, size_t count) +{ + struct nvmem_cell_entry *entry; + struct nvmem_cell *cell = NULL; + size_t cell_sz, read_len; + void *content; + + entry = attr->private; + cell = nvmem_create_cell(entry, entry->name, 0); + if (IS_ERR(cell)) + return PTR_ERR(cell); + + if (!cell) + return -EINVAL; + + content = nvmem_cell_read(cell, &cell_sz); + if (IS_ERR(content)) { + read_len = PTR_ERR(content); + goto destroy_cell; + } + + read_len = min_t(unsigned int, cell_sz - pos, count); + memcpy(buf, content + pos, read_len); + kfree(content); + +destroy_cell: + kfree_const(cell->id); + kfree(cell); + + return read_len; +} + /* default read/write permissions */ static struct bin_attribute bin_attr_rw_nvmem = { .attr = { @@ -345,11 +357,21 @@ static const struct attribute_group nvmem_bin_group = { .is_bin_visible = nvmem_bin_attr_is_visible, }; +/* Cell attributes will be dynamically allocated */ +static struct attribute_group nvmem_cells_group = { + .name = "cells", +}; + static const struct attribute_group *nvmem_dev_groups[] = { &nvmem_bin_group, NULL, }; +static const struct attribute_group *nvmem_cells_groups[] = { + &nvmem_cells_group, + NULL, +}; + static struct bin_attribute bin_attr_nvmem_eeprom_compat = { .attr = { .name = "eeprom", @@ -405,6 +427,68 @@ static void nvmem_sysfs_remove_compat(struct nvmem_device *nvmem, device_remove_bin_file(nvmem->base_dev, &nvmem->eeprom); } +static int nvmem_populate_sysfs_cells(struct nvmem_device *nvmem) +{ + struct bin_attribute **cells_attrs, *attrs; + struct nvmem_cell_entry *entry; + unsigned int ncells = 0, i = 0; + int ret = 0; + + mutex_lock(&nvmem_mutex); + + if (list_empty(&nvmem->cells) || nvmem->sysfs_cells_populated) { + nvmem_cells_group.bin_attrs = NULL; + goto unlock_mutex; + 
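The new cells group above exposes each NVMEM cell as a binary sysfs attribute named "<cell>@<offset>"; a hedged userspace sketch of dumping one such file follows, with the path left as an assumption since the provider directory depends on the device.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

/* e.g. path = "/sys/bus/nvmem/devices/<provider>/cells/mac-address@16" */
static int dump_nvmem_cell_sketch(const char *path)
{
	unsigned char buf[256];
	ssize_t i, n;
	int fd;

	fd = open(path, O_RDONLY);
	if (fd < 0)
		return -1;

	n = read(fd, buf, sizeof(buf));
	close(fd);
	if (n < 0)
		return -1;

	for (i = 0; i < n; i++)
		printf("%02x", buf[i]);
	printf("\n");

	return 0;
}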
} + + /* Allocate an array of attributes with a sentinel */ + ncells = list_count_nodes(&nvmem->cells); + cells_attrs = devm_kcalloc(&nvmem->dev, ncells + 1, + sizeof(struct bin_attribute *), GFP_KERNEL); + if (!cells_attrs) { + ret = -ENOMEM; + goto unlock_mutex; + } + + attrs = devm_kcalloc(&nvmem->dev, ncells, sizeof(struct bin_attribute), GFP_KERNEL); + if (!attrs) { + ret = -ENOMEM; + goto unlock_mutex; + } + + /* Initialize each attribute to take the name and size of the cell */ + list_for_each_entry(entry, &nvmem->cells, node) { + sysfs_bin_attr_init(&attrs[i]); + attrs[i].attr.name = devm_kasprintf(&nvmem->dev, GFP_KERNEL, + "%s@%x", entry->name, + entry->offset); + attrs[i].attr.mode = 0444; + attrs[i].size = entry->bytes; + attrs[i].read = &nvmem_cell_attr_read; + attrs[i].private = entry; + if (!attrs[i].attr.name) { + ret = -ENOMEM; + goto unlock_mutex; + } + + cells_attrs[i] = &attrs[i]; + i++; + } + + nvmem_cells_group.bin_attrs = cells_attrs; + + ret = devm_device_add_groups(&nvmem->dev, nvmem_cells_groups); + if (ret) + goto unlock_mutex; + + nvmem->sysfs_cells_populated = true; + +unlock_mutex: + mutex_unlock(&nvmem_mutex); + + return ret; +} + #else /* CONFIG_NVMEM_SYSFS */ static int nvmem_sysfs_setup_compat(struct nvmem_device *nvmem, @@ -697,7 +781,6 @@ static int nvmem_validate_keepouts(struct nvmem_device *nvmem) static int nvmem_add_cells_from_dt(struct nvmem_device *nvmem, struct device_node *np) { - struct nvmem_layout *layout = nvmem->layout; struct device *dev = &nvmem->dev; struct device_node *child; const __be32 *addr; @@ -727,8 +810,8 @@ static int nvmem_add_cells_from_dt(struct nvmem_device *nvmem, struct device_nod info.np = of_node_get(child); - if (layout && layout->fixup_cell_info) - layout->fixup_cell_info(nvmem, layout, &info); + if (nvmem->fixup_dt_cell_info) + nvmem->fixup_dt_cell_info(nvmem, &info); ret = nvmem_add_one_cell(nvmem, &info); kfree(info.name); @@ -763,117 +846,35 @@ static int nvmem_add_cells_from_fixed_layout(struct nvmem_device *nvmem) return err; } -int __nvmem_layout_register(struct nvmem_layout *layout, struct module *owner) -{ - layout->owner = owner; - - spin_lock(&nvmem_layout_lock); - list_add(&layout->node, &nvmem_layouts); - spin_unlock(&nvmem_layout_lock); - - blocking_notifier_call_chain(&nvmem_notifier, NVMEM_LAYOUT_ADD, layout); - - return 0; -} -EXPORT_SYMBOL_GPL(__nvmem_layout_register); - -void nvmem_layout_unregister(struct nvmem_layout *layout) -{ - blocking_notifier_call_chain(&nvmem_notifier, NVMEM_LAYOUT_REMOVE, layout); - - spin_lock(&nvmem_layout_lock); - list_del(&layout->node); - spin_unlock(&nvmem_layout_lock); -} -EXPORT_SYMBOL_GPL(nvmem_layout_unregister); - -static struct nvmem_layout *nvmem_layout_get(struct nvmem_device *nvmem) +int nvmem_layout_register(struct nvmem_layout *layout) { - struct device_node *layout_np; - struct nvmem_layout *l, *layout = ERR_PTR(-EPROBE_DEFER); - - layout_np = of_nvmem_layout_get_container(nvmem); - if (!layout_np) - return NULL; - - /* Fixed layouts don't have a matching driver */ - if (of_device_is_compatible(layout_np, "fixed-layout")) { - of_node_put(layout_np); - return NULL; - } - - /* - * In case the nvmem device was built-in while the layout was built as a - * module, we shall manually request the layout driver loading otherwise - * we'll never have any match. 
- */ - of_request_module(layout_np); - - spin_lock(&nvmem_layout_lock); - - list_for_each_entry(l, &nvmem_layouts, node) { - if (of_match_node(l->of_match_table, layout_np)) { - if (try_module_get(l->owner)) - layout = l; - - break; - } - } - - spin_unlock(&nvmem_layout_lock); - of_node_put(layout_np); - - return layout; -} + int ret; -static void nvmem_layout_put(struct nvmem_layout *layout) -{ - if (layout) - module_put(layout->owner); -} + if (!layout->add_cells) + return -EINVAL; -static int nvmem_add_cells_from_layout(struct nvmem_device *nvmem) -{ - struct nvmem_layout *layout = nvmem->layout; - int ret; + /* Populate the cells */ + ret = layout->add_cells(layout); + if (ret) + return ret; - if (layout && layout->add_cells) { - ret = layout->add_cells(&nvmem->dev, nvmem, layout); - if (ret) - return ret; +#ifdef CONFIG_NVMEM_SYSFS + ret = nvmem_populate_sysfs_cells(layout->nvmem); + if (ret) { + nvmem_device_remove_all_cells(layout->nvmem); + return ret; } +#endif return 0; } +EXPORT_SYMBOL_GPL(nvmem_layout_register); -#if IS_ENABLED(CONFIG_OF) -/** - * of_nvmem_layout_get_container() - Get OF node to layout container. - * - * @nvmem: nvmem device. - * - * Return: a node pointer with refcount incremented or NULL if no - * container exists. Use of_node_put() on it when done. - */ -struct device_node *of_nvmem_layout_get_container(struct nvmem_device *nvmem) -{ - return of_get_child_by_name(nvmem->dev.of_node, "nvmem-layout"); -} -EXPORT_SYMBOL_GPL(of_nvmem_layout_get_container); -#endif - -const void *nvmem_layout_get_match_data(struct nvmem_device *nvmem, - struct nvmem_layout *layout) +void nvmem_layout_unregister(struct nvmem_layout *layout) { - struct device_node __maybe_unused *layout_np; - const struct of_device_id *match; - - layout_np = of_nvmem_layout_get_container(nvmem); - match = of_match_node(layout->of_match_table, layout_np); - - return match ? match->data : NULL; + /* Keep the API even with an empty stub in case we need it later */ } -EXPORT_SYMBOL_GPL(nvmem_layout_get_match_data); +EXPORT_SYMBOL_GPL(nvmem_layout_unregister); /** * nvmem_register() - Register a nvmem device for given nvmem_config. @@ -925,6 +926,7 @@ struct nvmem_device *nvmem_register(const struct nvmem_config *config) kref_init(&nvmem->refcnt); INIT_LIST_HEAD(&nvmem->cells); + nvmem->fixup_dt_cell_info = config->fixup_dt_cell_info; nvmem->owner = config->owner; if (!nvmem->owner && config->dev->driver) @@ -980,19 +982,6 @@ struct nvmem_device *nvmem_register(const struct nvmem_config *config) goto err_put_device; } - /* - * If the driver supplied a layout by config->layout, the module - * pointer will be NULL and nvmem_layout_put() will be a noop. 
- */ - nvmem->layout = config->layout ?: nvmem_layout_get(nvmem); - if (IS_ERR(nvmem->layout)) { - rval = PTR_ERR(nvmem->layout); - nvmem->layout = NULL; - - if (rval == -EPROBE_DEFER) - goto err_teardown_compat; - } - if (config->cells) { rval = nvmem_add_cells(nvmem, config->cells, config->ncells); if (rval) @@ -1013,24 +1002,34 @@ struct nvmem_device *nvmem_register(const struct nvmem_config *config) if (rval) goto err_remove_cells; - rval = nvmem_add_cells_from_layout(nvmem); - if (rval) - goto err_remove_cells; - dev_dbg(&nvmem->dev, "Registering nvmem device %s\n", config->name); rval = device_add(&nvmem->dev); if (rval) goto err_remove_cells; + rval = nvmem_populate_layout(nvmem); + if (rval) + goto err_remove_dev; + +#ifdef CONFIG_NVMEM_SYSFS + rval = nvmem_populate_sysfs_cells(nvmem); + if (rval) + goto err_destroy_layout; +#endif + blocking_notifier_call_chain(&nvmem_notifier, NVMEM_ADD, nvmem); return nvmem; +#ifdef CONFIG_NVMEM_SYSFS +err_destroy_layout: + nvmem_destroy_layout(nvmem); +#endif +err_remove_dev: + device_del(&nvmem->dev); err_remove_cells: nvmem_device_remove_all_cells(nvmem); - nvmem_layout_put(nvmem->layout); -err_teardown_compat: if (config->compat) nvmem_sysfs_remove_compat(nvmem, config); err_put_device: @@ -1052,7 +1051,7 @@ static void nvmem_device_release(struct kref *kref) device_remove_bin_file(nvmem->base_dev, &nvmem->eeprom); nvmem_device_remove_all_cells(nvmem); - nvmem_layout_put(nvmem->layout); + nvmem_destroy_layout(nvmem); device_unregister(&nvmem->dev); } @@ -1354,6 +1353,12 @@ nvmem_cell_get_from_lookup(struct device *dev, const char *con_id) return cell; } +static void nvmem_layout_module_put(struct nvmem_device *nvmem) +{ + if (nvmem->layout && nvmem->layout->dev.driver) + module_put(nvmem->layout->dev.driver->owner); +} + #if IS_ENABLED(CONFIG_OF) static struct nvmem_cell_entry * nvmem_find_cell_entry_by_node(struct nvmem_device *nvmem, struct device_node *np) @@ -1372,6 +1377,18 @@ nvmem_find_cell_entry_by_node(struct nvmem_device *nvmem, struct device_node *np return cell; } +static int nvmem_layout_module_get_optional(struct nvmem_device *nvmem) +{ + if (!nvmem->layout) + return 0; + + if (!nvmem->layout->dev.driver || + !try_module_get(nvmem->layout->dev.driver->owner)) + return -EPROBE_DEFER; + + return 0; +} + /** * of_nvmem_cell_get() - Get a nvmem cell from given device node and cell id * @@ -1434,16 +1451,29 @@ struct nvmem_cell *of_nvmem_cell_get(struct device_node *np, const char *id) return ERR_CAST(nvmem); } + ret = nvmem_layout_module_get_optional(nvmem); + if (ret) { + of_node_put(cell_np); + __nvmem_device_put(nvmem); + return ERR_PTR(ret); + } + cell_entry = nvmem_find_cell_entry_by_node(nvmem, cell_np); of_node_put(cell_np); if (!cell_entry) { __nvmem_device_put(nvmem); - return ERR_PTR(-ENOENT); + nvmem_layout_module_put(nvmem); + if (nvmem->layout) + return ERR_PTR(-EPROBE_DEFER); + else + return ERR_PTR(-ENOENT); } cell = nvmem_create_cell(cell_entry, id, cell_index); - if (IS_ERR(cell)) + if (IS_ERR(cell)) { __nvmem_device_put(nvmem); + nvmem_layout_module_put(nvmem); + } return cell; } @@ -1557,6 +1587,7 @@ void nvmem_cell_put(struct nvmem_cell *cell) kfree(cell); __nvmem_device_put(nvmem); + nvmem_layout_module_put(nvmem); } EXPORT_SYMBOL_GPL(nvmem_cell_put); @@ -2132,13 +2163,37 @@ const char *nvmem_dev_name(struct nvmem_device *nvmem) } EXPORT_SYMBOL_GPL(nvmem_dev_name); +/** + * nvmem_dev_size() - Get the size of a given nvmem device. + * + * @nvmem: nvmem device. + * + * Return: size of the nvmem device. 
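For context on the consumer-side calls being adjusted here, a hedged sketch of the usual cell read sequence; the cell name "calibration" and the caller are illustrative. With the layout rework, a lookup may now return -EPROBE_DEFER until the matching layout driver has bound.

#include <linux/device.h>
#include <linux/err.h>
#include <linux/nvmem-consumer.h>
#include <linux/slab.h>

static int read_cal_cell_sketch(struct device *dev)
{
	struct nvmem_cell *cell;
	size_t len;
	void *val;

	cell = nvmem_cell_get(dev, "calibration");
	if (IS_ERR(cell))
		return PTR_ERR(cell);		/* may be -EPROBE_DEFER, see above */

	val = nvmem_cell_read(cell, &len);	/* kmalloc'ed copy of the cell */
	nvmem_cell_put(cell);
	if (IS_ERR(val))
		return PTR_ERR(val);

	dev_info(dev, "calibration cell is %zu bytes\n", len);
	kfree(val);

	return 0;
}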
+ */ +size_t nvmem_dev_size(struct nvmem_device *nvmem) +{ + return nvmem->size; +} +EXPORT_SYMBOL_GPL(nvmem_dev_size); + static int __init nvmem_init(void) { - return bus_register(&nvmem_bus_type); + int ret; + + ret = bus_register(&nvmem_bus_type); + if (ret) + return ret; + + ret = nvmem_layout_bus_register(); + if (ret) + bus_unregister(&nvmem_bus_type); + + return ret; } static void __exit nvmem_exit(void) { + nvmem_layout_bus_unregister(); bus_unregister(&nvmem_bus_type); } diff --git a/drivers/nvmem/imx-ocotp.c b/drivers/nvmem/imx-ocotp.c index f1e202efaa49..79dd4fda0329 100644 --- a/drivers/nvmem/imx-ocotp.c +++ b/drivers/nvmem/imx-ocotp.c @@ -583,17 +583,12 @@ static const struct of_device_id imx_ocotp_dt_ids[] = { }; MODULE_DEVICE_TABLE(of, imx_ocotp_dt_ids); -static void imx_ocotp_fixup_cell_info(struct nvmem_device *nvmem, - struct nvmem_layout *layout, - struct nvmem_cell_info *cell) +static void imx_ocotp_fixup_dt_cell_info(struct nvmem_device *nvmem, + struct nvmem_cell_info *cell) { cell->read_post_process = imx_ocotp_cell_pp; } -static struct nvmem_layout imx_ocotp_layout = { - .fixup_cell_info = imx_ocotp_fixup_cell_info, -}; - static int imx_ocotp_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; @@ -619,7 +614,7 @@ static int imx_ocotp_probe(struct platform_device *pdev) imx_ocotp_nvmem_config.size = 4 * priv->params->nregs; imx_ocotp_nvmem_config.dev = dev; imx_ocotp_nvmem_config.priv = priv; - imx_ocotp_nvmem_config.layout = &imx_ocotp_layout; + imx_ocotp_nvmem_config.fixup_dt_cell_info = &imx_ocotp_fixup_dt_cell_info; priv->config = &imx_ocotp_nvmem_config; diff --git a/drivers/nvmem/internals.h b/drivers/nvmem/internals.h new file mode 100644 index 000000000000..18fed57270e5 --- /dev/null +++ b/drivers/nvmem/internals.h @@ -0,0 +1,59 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef _LINUX_NVMEM_INTERNALS_H +#define _LINUX_NVMEM_INTERNALS_H + +#include <linux/device.h> +#include <linux/nvmem-consumer.h> +#include <linux/nvmem-provider.h> + +struct nvmem_device { + struct module *owner; + struct device dev; + struct list_head node; + int stride; + int word_size; + int id; + struct kref refcnt; + size_t size; + bool read_only; + bool root_only; + int flags; + enum nvmem_type type; + struct bin_attribute eeprom; + struct device *base_dev; + struct list_head cells; + void (*fixup_dt_cell_info)(struct nvmem_device *nvmem, + struct nvmem_cell_info *cell); + const struct nvmem_keepout *keepout; + unsigned int nkeepout; + nvmem_reg_read_t reg_read; + nvmem_reg_write_t reg_write; + struct gpio_desc *wp_gpio; + struct nvmem_layout *layout; + void *priv; + bool sysfs_cells_populated; +}; + +#if IS_ENABLED(CONFIG_OF) +int nvmem_layout_bus_register(void); +void nvmem_layout_bus_unregister(void); +int nvmem_populate_layout(struct nvmem_device *nvmem); +void nvmem_destroy_layout(struct nvmem_device *nvmem); +#else /* CONFIG_OF */ +static inline int nvmem_layout_bus_register(void) +{ + return 0; +} + +static inline void nvmem_layout_bus_unregister(void) {} + +static inline int nvmem_populate_layout(struct nvmem_device *nvmem) +{ + return 0; +} + +static inline void nvmem_destroy_layout(struct nvmem_device *nvmem) { } +#endif /* CONFIG_OF */ + +#endif /* ifndef _LINUX_NVMEM_INTERNALS_H */ diff --git a/drivers/nvmem/layouts.c b/drivers/nvmem/layouts.c new file mode 100644 index 000000000000..6a6aa58369ff --- /dev/null +++ b/drivers/nvmem/layouts.c @@ -0,0 +1,201 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * NVMEM layout bus handling + * + * Copyright (C) 
2023 Bootlin + * Author: Miquel Raynal <miquel.raynal@bootlin.com + */ + +#include <linux/device.h> +#include <linux/dma-mapping.h> +#include <linux/nvmem-consumer.h> +#include <linux/nvmem-provider.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/of_irq.h> + +#include "internals.h" + +#define to_nvmem_layout_driver(drv) \ + (container_of((drv), struct nvmem_layout_driver, driver)) +#define to_nvmem_layout_device(_dev) \ + container_of((_dev), struct nvmem_layout, dev) + +static int nvmem_layout_bus_match(struct device *dev, struct device_driver *drv) +{ + return of_driver_match_device(dev, drv); +} + +static int nvmem_layout_bus_probe(struct device *dev) +{ + struct nvmem_layout_driver *drv = to_nvmem_layout_driver(dev->driver); + struct nvmem_layout *layout = to_nvmem_layout_device(dev); + + if (!drv->probe || !drv->remove) + return -EINVAL; + + return drv->probe(layout); +} + +static void nvmem_layout_bus_remove(struct device *dev) +{ + struct nvmem_layout_driver *drv = to_nvmem_layout_driver(dev->driver); + struct nvmem_layout *layout = to_nvmem_layout_device(dev); + + return drv->remove(layout); +} + +static struct bus_type nvmem_layout_bus_type = { + .name = "nvmem-layout", + .match = nvmem_layout_bus_match, + .probe = nvmem_layout_bus_probe, + .remove = nvmem_layout_bus_remove, +}; + +int nvmem_layout_driver_register(struct nvmem_layout_driver *drv) +{ + drv->driver.bus = &nvmem_layout_bus_type; + + return driver_register(&drv->driver); +} +EXPORT_SYMBOL_GPL(nvmem_layout_driver_register); + +void nvmem_layout_driver_unregister(struct nvmem_layout_driver *drv) +{ + driver_unregister(&drv->driver); +} +EXPORT_SYMBOL_GPL(nvmem_layout_driver_unregister); + +static void nvmem_layout_release_device(struct device *dev) +{ + struct nvmem_layout *layout = to_nvmem_layout_device(dev); + + of_node_put(layout->dev.of_node); + kfree(layout); +} + +static int nvmem_layout_create_device(struct nvmem_device *nvmem, + struct device_node *np) +{ + struct nvmem_layout *layout; + struct device *dev; + int ret; + + layout = kzalloc(sizeof(*layout), GFP_KERNEL); + if (!layout) + return -ENOMEM; + + /* Create a bidirectional link */ + layout->nvmem = nvmem; + nvmem->layout = layout; + + /* Device model registration */ + dev = &layout->dev; + device_initialize(dev); + dev->parent = &nvmem->dev; + dev->bus = &nvmem_layout_bus_type; + dev->release = nvmem_layout_release_device; + dev->coherent_dma_mask = DMA_BIT_MASK(32); + dev->dma_mask = &dev->coherent_dma_mask; + device_set_node(dev, of_fwnode_handle(of_node_get(np))); + of_device_make_bus_id(dev); + of_msi_configure(dev, dev->of_node); + + ret = device_add(dev); + if (ret) { + put_device(dev); + return ret; + } + + return 0; +} + +static const struct of_device_id of_nvmem_layout_skip_table[] = { + { .compatible = "fixed-layout", }, + {} +}; + +static int nvmem_layout_bus_populate(struct nvmem_device *nvmem, + struct device_node *layout_dn) +{ + int ret; + + /* Make sure it has a compatible property */ + if (!of_get_property(layout_dn, "compatible", NULL)) { + pr_debug("%s() - skipping %pOF, no compatible prop\n", + __func__, layout_dn); + return 0; + } + + /* Fixed layouts are parsed manually somewhere else for now */ + if (of_match_node(of_nvmem_layout_skip_table, layout_dn)) { + pr_debug("%s() - skipping %pOF node\n", __func__, layout_dn); + return 0; + } + + if (of_node_check_flag(layout_dn, OF_POPULATED_BUS)) { + pr_debug("%s() - skipping %pOF, already populated\n", + __func__, layout_dn); + + return 0; + } + + /* NVMEM layout 
buses expect only a single device representing the layout */ + ret = nvmem_layout_create_device(nvmem, layout_dn); + if (ret) + return ret; + + of_node_set_flag(layout_dn, OF_POPULATED_BUS); + + return 0; +} + +struct device_node *of_nvmem_layout_get_container(struct nvmem_device *nvmem) +{ + return of_get_child_by_name(nvmem->dev.of_node, "nvmem-layout"); +} +EXPORT_SYMBOL_GPL(of_nvmem_layout_get_container); + +/* + * Returns the number of devices populated, 0 if the operation was not relevant + * for this nvmem device, an error code otherwise. + */ +int nvmem_populate_layout(struct nvmem_device *nvmem) +{ + struct device_node *layout_dn; + int ret; + + layout_dn = of_nvmem_layout_get_container(nvmem); + if (!layout_dn) + return 0; + + /* Populate the layout device */ + device_links_supplier_sync_state_pause(); + ret = nvmem_layout_bus_populate(nvmem, layout_dn); + device_links_supplier_sync_state_resume(); + + of_node_put(layout_dn); + return ret; +} + +void nvmem_destroy_layout(struct nvmem_device *nvmem) +{ + struct device *dev; + + if (!nvmem->layout) + return; + + dev = &nvmem->layout->dev; + of_node_clear_flag(dev->of_node, OF_POPULATED_BUS); + device_unregister(dev); +} + +int nvmem_layout_bus_register(void) +{ + return bus_register(&nvmem_layout_bus_type); +} + +void nvmem_layout_bus_unregister(void) +{ + bus_unregister(&nvmem_layout_bus_type); +} diff --git a/drivers/nvmem/layouts/Kconfig b/drivers/nvmem/layouts/Kconfig index 7ff1ee1c1f05..9c6e672fc350 100644 --- a/drivers/nvmem/layouts/Kconfig +++ b/drivers/nvmem/layouts/Kconfig @@ -1,5 +1,11 @@ # SPDX-License-Identifier: GPL-2.0 +config NVMEM_LAYOUTS + bool + depends on OF + +if NVMEM_LAYOUTS + menu "Layout Types" config NVMEM_LAYOUT_SL28_VPD @@ -21,3 +27,5 @@ config NVMEM_LAYOUT_ONIE_TLV If unsure, say N. 
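A hedged skeleton of a layout driver under this new nvmem-layout bus, mirroring the onie-tlv and sl28vpd conversions further below; the compatible string, names, and empty parser are placeholders.

#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/nvmem-provider.h>
#include <linux/of.h>

static int example_layout_add_cells(struct nvmem_layout *layout)
{
	/* Parse layout->nvmem content and call nvmem_add_one_cell() per cell. */
	return 0;
}

static int example_layout_probe(struct nvmem_layout *layout)
{
	layout->add_cells = example_layout_add_cells;

	return nvmem_layout_register(layout);
}

static void example_layout_remove(struct nvmem_layout *layout)
{
	nvmem_layout_unregister(layout);
}

static const struct of_device_id example_layout_of_match_table[] = {
	{ .compatible = "vendor,example-layout" },
	{}
};
MODULE_DEVICE_TABLE(of, example_layout_of_match_table);

static struct nvmem_layout_driver example_layout_driver = {
	.driver = {
		.owner = THIS_MODULE,
		.name = "example-nvmem-layout",
		.of_match_table = example_layout_of_match_table,
	},
	.probe = example_layout_probe,
	.remove = example_layout_remove,
};
module_nvmem_layout_driver(example_layout_driver);

MODULE_DESCRIPTION("Example NVMEM layout driver (sketch)");
MODULE_LICENSE("GPL");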
endmenu + +endif diff --git a/drivers/nvmem/layouts/onie-tlv.c b/drivers/nvmem/layouts/onie-tlv.c index 59fc87ccfcff..9d2ad5f2dc10 100644 --- a/drivers/nvmem/layouts/onie-tlv.c +++ b/drivers/nvmem/layouts/onie-tlv.c @@ -182,9 +182,10 @@ static bool onie_tlv_crc_is_valid(struct device *dev, size_t table_len, u8 *tabl return true; } -static int onie_tlv_parse_table(struct device *dev, struct nvmem_device *nvmem, - struct nvmem_layout *layout) +static int onie_tlv_parse_table(struct nvmem_layout *layout) { + struct nvmem_device *nvmem = layout->nvmem; + struct device *dev = &layout->dev; struct onie_tlv_hdr hdr; size_t table_len, data_len, hdr_len; u8 *table, *data; @@ -226,16 +227,32 @@ static int onie_tlv_parse_table(struct device *dev, struct nvmem_device *nvmem, return 0; } +static int onie_tlv_probe(struct nvmem_layout *layout) +{ + layout->add_cells = onie_tlv_parse_table; + + return nvmem_layout_register(layout); +} + +static void onie_tlv_remove(struct nvmem_layout *layout) +{ + nvmem_layout_unregister(layout); +} + static const struct of_device_id onie_tlv_of_match_table[] = { { .compatible = "onie,tlv-layout", }, {}, }; MODULE_DEVICE_TABLE(of, onie_tlv_of_match_table); -static struct nvmem_layout onie_tlv_layout = { - .name = "ONIE tlv layout", - .of_match_table = onie_tlv_of_match_table, - .add_cells = onie_tlv_parse_table, +static struct nvmem_layout_driver onie_tlv_layout = { + .driver = { + .owner = THIS_MODULE, + .name = "onie-tlv-layout", + .of_match_table = onie_tlv_of_match_table, + }, + .probe = onie_tlv_probe, + .remove = onie_tlv_remove, }; module_nvmem_layout_driver(onie_tlv_layout); diff --git a/drivers/nvmem/layouts/sl28vpd.c b/drivers/nvmem/layouts/sl28vpd.c index 05671371f631..53fa50f17dca 100644 --- a/drivers/nvmem/layouts/sl28vpd.c +++ b/drivers/nvmem/layouts/sl28vpd.c @@ -80,9 +80,10 @@ static int sl28vpd_v1_check_crc(struct device *dev, struct nvmem_device *nvmem) return 0; } -static int sl28vpd_add_cells(struct device *dev, struct nvmem_device *nvmem, - struct nvmem_layout *layout) +static int sl28vpd_add_cells(struct nvmem_layout *layout) { + struct nvmem_device *nvmem = layout->nvmem; + struct device *dev = &layout->dev; const struct nvmem_cell_info *pinfo; struct nvmem_cell_info info = {0}; struct device_node *layout_np; @@ -135,16 +136,32 @@ static int sl28vpd_add_cells(struct device *dev, struct nvmem_device *nvmem, return 0; } +static int sl28vpd_probe(struct nvmem_layout *layout) +{ + layout->add_cells = sl28vpd_add_cells; + + return nvmem_layout_register(layout); +} + +static void sl28vpd_remove(struct nvmem_layout *layout) +{ + nvmem_layout_unregister(layout); +} + static const struct of_device_id sl28vpd_of_match_table[] = { { .compatible = "kontron,sl28-vpd" }, {}, }; MODULE_DEVICE_TABLE(of, sl28vpd_of_match_table); -static struct nvmem_layout sl28vpd_layout = { - .name = "sl28-vpd", - .of_match_table = sl28vpd_of_match_table, - .add_cells = sl28vpd_add_cells, +static struct nvmem_layout_driver sl28vpd_layout = { + .driver = { + .owner = THIS_MODULE, + .name = "kontron-sl28vpd-layout", + .of_match_table = sl28vpd_of_match_table, + }, + .probe = sl28vpd_probe, + .remove = sl28vpd_remove, }; module_nvmem_layout_driver(sl28vpd_layout); diff --git a/drivers/nvmem/mtk-efuse.c b/drivers/nvmem/mtk-efuse.c index 87c94686cfd2..84f05b40a411 100644 --- a/drivers/nvmem/mtk-efuse.c +++ b/drivers/nvmem/mtk-efuse.c @@ -45,9 +45,8 @@ static int mtk_efuse_gpu_speedbin_pp(void *context, const char *id, int index, return 0; } -static void 
mtk_efuse_fixup_cell_info(struct nvmem_device *nvmem, - struct nvmem_layout *layout, - struct nvmem_cell_info *cell) +static void mtk_efuse_fixup_dt_cell_info(struct nvmem_device *nvmem, + struct nvmem_cell_info *cell) { size_t sz = strlen(cell->name); @@ -61,10 +60,6 @@ static void mtk_efuse_fixup_cell_info(struct nvmem_device *nvmem, cell->read_post_process = mtk_efuse_gpu_speedbin_pp; } -static struct nvmem_layout mtk_efuse_layout = { - .fixup_cell_info = mtk_efuse_fixup_cell_info, -}; - static int mtk_efuse_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; @@ -91,7 +86,7 @@ static int mtk_efuse_probe(struct platform_device *pdev) econfig.priv = priv; econfig.dev = dev; if (pdata->uses_post_processing) - econfig.layout = &mtk_efuse_layout; + econfig.fixup_dt_cell_info = &mtk_efuse_fixup_dt_cell_info; nvmem = devm_nvmem_register(dev, &econfig); return PTR_ERR_OR_ZERO(nvmem); diff --git a/drivers/nvmem/stm32-romem.c b/drivers/nvmem/stm32-romem.c index 8a553b1799a8..82879b1c9eb9 100644 --- a/drivers/nvmem/stm32-romem.c +++ b/drivers/nvmem/stm32-romem.c @@ -269,6 +269,19 @@ static const struct stm32_romem_cfg stm32mp13_bsec_cfg = { .ta = true, }; +/* + * STM32MP25 BSEC OTP: 3 regions of 32-bits data words + * lower OTP (OTP0 to OTP127), bitwise (1-bit) programmable + * mid OTP (OTP128 to OTP255), bulk (32-bit) programmable + * upper OTP (OTP256 to OTP383), bulk (32-bit) programmable + * but no access to HWKEY and ECIES key: limited at OTP367 + */ +static const struct stm32_romem_cfg stm32mp25_bsec_cfg = { + .size = 368 * 4, + .lower = 127, + .ta = true, +}; + static const struct of_device_id stm32_romem_of_match[] __maybe_unused = { { .compatible = "st,stm32f4-otp", }, { .compatible = "st,stm32mp15-bsec", @@ -276,6 +289,9 @@ static const struct of_device_id stm32_romem_of_match[] __maybe_unused = { }, { .compatible = "st,stm32mp13-bsec", .data = (void *)&stm32mp13_bsec_cfg, + }, { + .compatible = "st,stm32mp25-bsec", + .data = (void *)&stm32mp25_bsec_cfg, }, { /* sentinel */ }, }; diff --git a/drivers/nvmem/u-boot-env.c b/drivers/nvmem/u-boot-env.c index c4ae94af4af7..befbab156cda 100644 --- a/drivers/nvmem/u-boot-env.c +++ b/drivers/nvmem/u-boot-env.c @@ -23,13 +23,10 @@ enum u_boot_env_format { struct u_boot_env { struct device *dev; + struct nvmem_device *nvmem; enum u_boot_env_format format; struct mtd_info *mtd; - - /* Cells */ - struct nvmem_cell_info *cells; - int ncells; }; struct u_boot_env_image_single { @@ -94,70 +91,71 @@ static int u_boot_env_read_post_process_ethaddr(void *context, const char *id, i static int u_boot_env_add_cells(struct u_boot_env *priv, uint8_t *buf, size_t data_offset, size_t data_len) { + struct nvmem_device *nvmem = priv->nvmem; struct device *dev = priv->dev; char *data = buf + data_offset; char *var, *value, *eq; - int idx; - - priv->ncells = 0; - for (var = data; var < data + data_len && *var; var += strlen(var) + 1) - priv->ncells++; - - priv->cells = devm_kcalloc(dev, priv->ncells, sizeof(*priv->cells), GFP_KERNEL); - if (!priv->cells) - return -ENOMEM; - for (var = data, idx = 0; + for (var = data; var < data + data_len && *var; - var = value + strlen(value) + 1, idx++) { + var = value + strlen(value) + 1) { + struct nvmem_cell_info info = {}; + eq = strchr(var, '='); if (!eq) break; *eq = '\0'; value = eq + 1; - priv->cells[idx].name = devm_kstrdup(dev, var, GFP_KERNEL); - if (!priv->cells[idx].name) + info.name = devm_kstrdup(dev, var, GFP_KERNEL); + if (!info.name) return -ENOMEM; - priv->cells[idx].offset = data_offset + 
value - data; - priv->cells[idx].bytes = strlen(value); - priv->cells[idx].np = of_get_child_by_name(dev->of_node, priv->cells[idx].name); + info.offset = data_offset + value - data; + info.bytes = strlen(value); + info.np = of_get_child_by_name(dev->of_node, info.name); if (!strcmp(var, "ethaddr")) { - priv->cells[idx].raw_len = strlen(value); - priv->cells[idx].bytes = ETH_ALEN; - priv->cells[idx].read_post_process = u_boot_env_read_post_process_ethaddr; + info.raw_len = strlen(value); + info.bytes = ETH_ALEN; + info.read_post_process = u_boot_env_read_post_process_ethaddr; } - } - if (WARN_ON(idx != priv->ncells)) - priv->ncells = idx; + nvmem_add_one_cell(nvmem, &info); + } return 0; } static int u_boot_env_parse(struct u_boot_env *priv) { + struct nvmem_device *nvmem = priv->nvmem; struct device *dev = priv->dev; size_t crc32_data_offset; size_t crc32_data_len; size_t crc32_offset; + __le32 *crc32_addr; size_t data_offset; size_t data_len; + size_t dev_size; uint32_t crc32; uint32_t calc; - size_t bytes; uint8_t *buf; + int bytes; int err; - buf = kcalloc(1, priv->mtd->size, GFP_KERNEL); + dev_size = nvmem_dev_size(nvmem); + + buf = kzalloc(dev_size, GFP_KERNEL); if (!buf) { err = -ENOMEM; goto err_out; } - err = mtd_read(priv->mtd, 0, priv->mtd->size, &bytes, buf); - if ((err && !mtd_is_bitflip(err)) || bytes != priv->mtd->size) { - dev_err(dev, "Failed to read from mtd: %d\n", err); + bytes = nvmem_device_read(nvmem, 0, dev_size, buf); + if (bytes < 0) { + err = bytes; + goto err_kfree; + } else if (bytes != dev_size) { + err = -EIO; goto err_kfree; } @@ -178,9 +176,10 @@ static int u_boot_env_parse(struct u_boot_env *priv) data_offset = offsetof(struct u_boot_env_image_broadcom, data); break; } - crc32 = le32_to_cpu(*(__le32 *)(buf + crc32_offset)); - crc32_data_len = priv->mtd->size - crc32_data_offset; - data_len = priv->mtd->size - data_offset; + crc32_addr = (__le32 *)(buf + crc32_offset); + crc32 = le32_to_cpu(*crc32_addr); + crc32_data_len = dev_size - crc32_data_offset; + data_len = dev_size - data_offset; calc = crc32(~0, buf + crc32_data_offset, crc32_data_len) ^ ~0L; if (calc != crc32) { @@ -189,10 +188,8 @@ static int u_boot_env_parse(struct u_boot_env *priv) goto err_kfree; } - buf[priv->mtd->size - 1] = '\0'; + buf[dev_size - 1] = '\0'; err = u_boot_env_add_cells(priv, buf, data_offset, data_len); - if (err) - dev_err(dev, "Failed to add cells: %d\n", err); err_kfree: kfree(buf); @@ -209,7 +206,6 @@ static int u_boot_env_probe(struct platform_device *pdev) struct device *dev = &pdev->dev; struct device_node *np = dev->of_node; struct u_boot_env *priv; - int err; priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); if (!priv) @@ -224,17 +220,15 @@ static int u_boot_env_probe(struct platform_device *pdev) return PTR_ERR(priv->mtd); } - err = u_boot_env_parse(priv); - if (err) - return err; - config.dev = dev; - config.cells = priv->cells; - config.ncells = priv->ncells; config.priv = priv; config.size = priv->mtd->size; - return PTR_ERR_OR_ZERO(devm_nvmem_register(dev, &config)); + priv->nvmem = devm_nvmem_register(dev, &config); + if (IS_ERR(priv->nvmem)) + return PTR_ERR(priv->nvmem); + + return u_boot_env_parse(priv); } static const struct of_device_id u_boot_env_of_match_table[] = { diff --git a/drivers/of/device.c b/drivers/of/device.c index 1ca42ad9dd15..6e9572c4af83 100644 --- a/drivers/of/device.c +++ b/drivers/of/device.c @@ -304,3 +304,44 @@ int of_device_uevent_modalias(const struct device *dev, struct kobj_uevent_env * return 0; } 
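For clarity on the loop reworked above, a hedged standalone sketch of walking a U-Boot environment block, which is a sequence of NUL-terminated "name=value" strings ending with an empty string; the callback type is illustrative.

#include <linux/string.h>
#include <linux/types.h>

static void uboot_env_walk_sketch(char *data, size_t data_len,
				  void (*cb)(const char *name, const char *value,
					     void *priv),
				  void *priv)
{
	char *var, *value, *eq;

	for (var = data;
	     var < data + data_len && *var;
	     var = value + strlen(value) + 1) {
		eq = strchr(var, '=');
		if (!eq)
			break;
		*eq = '\0';		/* split "name=value" in place */
		value = eq + 1;
		cb(var, value, priv);
	}
}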
EXPORT_SYMBOL_GPL(of_device_uevent_modalias); + +/** + * of_device_make_bus_id - Use the device node data to assign a unique name + * @dev: pointer to device structure that is linked to a device tree node + * + * This routine will first try using the translated bus address to + * derive a unique name. If it cannot, then it will prepend names from + * parent nodes until a unique name can be derived. + */ +void of_device_make_bus_id(struct device *dev) +{ + struct device_node *node = dev->of_node; + const __be32 *reg; + u64 addr; + u32 mask; + + /* Construct the name, using parent nodes if necessary to ensure uniqueness */ + while (node->parent) { + /* + * If the address can be translated, then that is as much + * uniqueness as we need. Make it the first component and return + */ + reg = of_get_property(node, "reg", NULL); + if (reg && (addr = of_translate_address(node, reg)) != OF_BAD_ADDR) { + if (!of_property_read_u32(node, "mask", &mask)) + dev_set_name(dev, dev_name(dev) ? "%llx.%x.%pOFn:%s" : "%llx.%x.%pOFn", + addr, ffs(mask) - 1, node, dev_name(dev)); + + else + dev_set_name(dev, dev_name(dev) ? "%llx.%pOFn:%s" : "%llx.%pOFn", + addr, node, dev_name(dev)); + return; + } + + /* format arguments only used if dev_name() resolves to NULL */ + dev_set_name(dev, dev_name(dev) ? "%s:%s" : "%s", + kbasename(node->full_name), dev_name(dev)); + node = node->parent; + } +} +EXPORT_SYMBOL_GPL(of_device_make_bus_id); diff --git a/drivers/of/platform.c b/drivers/of/platform.c index af0e1fb2ab68..b7708a06dc78 100644 --- a/drivers/of/platform.c +++ b/drivers/of/platform.c @@ -99,46 +99,6 @@ static const struct of_device_id of_skipped_node_table[] = { */ /** - * of_device_make_bus_id - Use the device node data to assign a unique name - * @dev: pointer to device structure that is linked to a device tree node - * - * This routine will first try using the translated bus address to - * derive a unique name. If it cannot, then it will prepend names from - * parent nodes until a unique name can be derived. - */ -static void of_device_make_bus_id(struct device *dev) -{ - struct device_node *node = dev->of_node; - const __be32 *reg; - u64 addr; - u32 mask; - - /* Construct the name, using parent nodes if necessary to ensure uniqueness */ - while (node->parent) { - /* - * If the address can be translated, then that is as much - * uniqueness as we need. Make it the first component and return - */ - reg = of_get_property(node, "reg", NULL); - if (reg && (addr = of_translate_address(node, reg)) != OF_BAD_ADDR) { - if (!of_property_read_u32(node, "mask", &mask)) - dev_set_name(dev, dev_name(dev) ? "%llx.%x.%pOFn:%s" : "%llx.%x.%pOFn", - addr, ffs(mask) - 1, node, dev_name(dev)); - - else - dev_set_name(dev, dev_name(dev) ? "%llx.%pOFn:%s" : "%llx.%pOFn", - addr, node, dev_name(dev)); - return; - } - - /* format arguments only used if dev_name() resolves to NULL */ - dev_set_name(dev, dev_name(dev) ? "%s:%s" : "%s", - kbasename(node->full_name), dev_name(dev)); - node = node->parent; - } -} - -/** * of_device_alloc - Allocate and initialize an of_device * @np: device node to assign to device * @bus_id: Name to assign to the device. May be null to use default name. 
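/*
 * Editorial sketch, not part of the patch: a bus driver using the newly
 * exported of_device_make_bus_id() while creating a child device for a DT
 * node. The helper and its names are assumptions; the resulting device name
 * ("<unit-address>.<node-name>", with parent-prefixed fallbacks) follows the
 * function shown above, assuming its declaration is reachable via
 * <linux/of_device.h>.
 */
#include <linux/device.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/slab.h>

static void example_child_release(struct device *dev)
{
	of_node_put(dev->of_node);
	kfree(dev);
}

static int example_register_child(struct device *parent, struct device_node *np)
{
	struct device *dev;
	int ret;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;

	device_initialize(dev);
	dev->parent = parent;
	dev->release = example_child_release;
	dev->of_node = of_node_get(np);

	/* For a node such as serial@12340000 this typically yields "12340000.serial". */
	of_device_make_bus_id(dev);

	ret = device_add(dev);
	if (ret)
		put_device(dev);

	return ret;
}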
diff --git a/drivers/parport/parport_serial.c b/drivers/parport/parport_serial.c index 9f5d784cd95d..3644997a8342 100644 --- a/drivers/parport/parport_serial.c +++ b/drivers/parport/parport_serial.c @@ -65,6 +65,10 @@ enum parport_pc_pci_cards { sunix_5069a, sunix_5079a, sunix_5099a, + brainboxes_uc257, + brainboxes_is300, + brainboxes_uc414, + brainboxes_px263, }; /* each element directly indexed from enum list, above */ @@ -158,6 +162,10 @@ static struct parport_pc_pci cards[] = { /* sunix_5069a */ { 1, { { 1, 2 }, } }, /* sunix_5079a */ { 1, { { 1, 2 }, } }, /* sunix_5099a */ { 1, { { 1, 2 }, } }, + /* brainboxes_uc257 */ { 1, { { 3, -1 }, } }, + /* brainboxes_is300 */ { 1, { { 3, -1 }, } }, + /* brainboxes_uc414 */ { 1, { { 3, -1 }, } }, + /* brainboxes_px263 */ { 1, { { 3, -1 }, } }, }; static struct pci_device_id parport_serial_pci_tbl[] = { @@ -277,6 +285,38 @@ static struct pci_device_id parport_serial_pci_tbl[] = { { PCI_VENDOR_ID_SUNIX, PCI_DEVICE_ID_SUNIX_1999, PCI_VENDOR_ID_SUNIX, 0x0104, 0, 0, sunix_5099a }, + /* Brainboxes UC-203 */ + { PCI_VENDOR_ID_INTASHIELD, 0x0bc1, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, brainboxes_uc257 }, + { PCI_VENDOR_ID_INTASHIELD, 0x0bc2, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, brainboxes_uc257 }, + + /* Brainboxes UC-257 */ + { PCI_VENDOR_ID_INTASHIELD, 0x0861, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, brainboxes_uc257 }, + { PCI_VENDOR_ID_INTASHIELD, 0x0862, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, brainboxes_uc257 }, + { PCI_VENDOR_ID_INTASHIELD, 0x0863, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, brainboxes_uc257 }, + + /* Brainboxes UC-414 */ + { PCI_VENDOR_ID_INTASHIELD, 0x0e61, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, brainboxes_uc414 }, + + /* Brainboxes UC-475 */ + { PCI_VENDOR_ID_INTASHIELD, 0x0981, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, brainboxes_uc257 }, + { PCI_VENDOR_ID_INTASHIELD, 0x0982, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, brainboxes_uc257 }, + + /* Brainboxes IS-300/IS-500 */ + { PCI_VENDOR_ID_INTASHIELD, 0x0da0, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, brainboxes_is300 }, + + /* Brainboxes PX-263/PX-295 */ + { PCI_VENDOR_ID_INTASHIELD, 0x402c, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, brainboxes_px263 }, + { 0, } /* terminate list */ }; MODULE_DEVICE_TABLE(pci,parport_serial_pci_tbl); @@ -542,6 +582,30 @@ static struct pciserial_board pci_parport_serial_boards[] = { .base_baud = 921600, .uart_offset = 0x8, }, + [brainboxes_uc257] = { + .flags = FL_BASE2, + .num_ports = 2, + .base_baud = 115200, + .uart_offset = 8, + }, + [brainboxes_is300] = { + .flags = FL_BASE2, + .num_ports = 1, + .base_baud = 115200, + .uart_offset = 8, + }, + [brainboxes_uc414] = { + .flags = FL_BASE2, + .num_ports = 4, + .base_baud = 115200, + .uart_offset = 8, + }, + [brainboxes_px263] = { + .flags = FL_BASE2, + .num_ports = 4, + .base_baud = 921600, + .uart_offset = 8, + }, }; struct parport_serial_private { diff --git a/drivers/parport/share.c b/drivers/parport/share.c index e21831d93305..49c74ded8a53 100644 --- a/drivers/parport/share.c +++ b/drivers/parport/share.c @@ -611,7 +611,7 @@ static void free_pardevice(struct device *dev) { struct pardevice *par_dev = to_pardevice(dev); - kfree(par_dev->name); + kfree_const(par_dev->name); kfree(par_dev); } @@ -682,8 +682,8 @@ parport_register_dev_model(struct parport *port, const char *name, const struct pardev_cb *par_dev_cb, int id) { struct pardevice *par_dev; + const char *devname; int ret; - char *devname; if (port->physport->flags & PARPORT_FLAG_EXCL) { /* An exclusive device is registered. 
*/ @@ -726,7 +726,7 @@ parport_register_dev_model(struct parport *port, const char *name, if (!par_dev->state) goto err_put_par_dev; - devname = kstrdup(name, GFP_KERNEL); + devname = kstrdup_const(name, GFP_KERNEL); if (!devname) goto err_free_par_dev; @@ -804,7 +804,7 @@ parport_register_dev_model(struct parport *port, const char *name, return par_dev; err_free_devname: - kfree(devname); + kfree_const(devname); err_free_par_dev: kfree(par_dev->state); err_put_par_dev: diff --git a/drivers/pci/endpoint/functions/pci-epf-mhi.c b/drivers/pci/endpoint/functions/pci-epf-mhi.c index e0000cef2940..1c3e4ea76bd2 100644 --- a/drivers/pci/endpoint/functions/pci-epf-mhi.c +++ b/drivers/pci/endpoint/functions/pci-epf-mhi.c @@ -21,6 +21,15 @@ /* Platform specific flags */ #define MHI_EPF_USE_DMA BIT(0) +struct pci_epf_mhi_dma_transfer { + struct pci_epf_mhi *epf_mhi; + struct mhi_ep_buf_info buf_info; + struct list_head node; + dma_addr_t paddr; + enum dma_data_direction dir; + size_t size; +}; + struct pci_epf_mhi_ep_info { const struct mhi_ep_cntrl_config *config; struct pci_epf_header *epf_header; @@ -124,6 +133,10 @@ struct pci_epf_mhi { resource_size_t mmio_phys; struct dma_chan *dma_chan_tx; struct dma_chan *dma_chan_rx; + struct workqueue_struct *dma_wq; + struct work_struct dma_work; + struct list_head dma_list; + spinlock_t list_lock; u32 mmio_size; int irq; }; @@ -209,59 +222,65 @@ static void pci_epf_mhi_raise_irq(struct mhi_ep_cntrl *mhi_cntrl, u32 vector) vector + 1); } -static int pci_epf_mhi_iatu_read(struct mhi_ep_cntrl *mhi_cntrl, u64 from, - void *to, size_t size) +static int pci_epf_mhi_iatu_read(struct mhi_ep_cntrl *mhi_cntrl, + struct mhi_ep_buf_info *buf_info) { struct pci_epf_mhi *epf_mhi = to_epf_mhi(mhi_cntrl); - size_t offset = get_align_offset(epf_mhi, from); + size_t offset = get_align_offset(epf_mhi, buf_info->host_addr); void __iomem *tre_buf; phys_addr_t tre_phys; int ret; mutex_lock(&epf_mhi->lock); - ret = __pci_epf_mhi_alloc_map(mhi_cntrl, from, &tre_phys, &tre_buf, - offset, size); + ret = __pci_epf_mhi_alloc_map(mhi_cntrl, buf_info->host_addr, &tre_phys, + &tre_buf, offset, buf_info->size); if (ret) { mutex_unlock(&epf_mhi->lock); return ret; } - memcpy_fromio(to, tre_buf, size); + memcpy_fromio(buf_info->dev_addr, tre_buf, buf_info->size); - __pci_epf_mhi_unmap_free(mhi_cntrl, from, tre_phys, tre_buf, offset, - size); + __pci_epf_mhi_unmap_free(mhi_cntrl, buf_info->host_addr, tre_phys, + tre_buf, offset, buf_info->size); mutex_unlock(&epf_mhi->lock); + if (buf_info->cb) + buf_info->cb(buf_info); + return 0; } static int pci_epf_mhi_iatu_write(struct mhi_ep_cntrl *mhi_cntrl, - void *from, u64 to, size_t size) + struct mhi_ep_buf_info *buf_info) { struct pci_epf_mhi *epf_mhi = to_epf_mhi(mhi_cntrl); - size_t offset = get_align_offset(epf_mhi, to); + size_t offset = get_align_offset(epf_mhi, buf_info->host_addr); void __iomem *tre_buf; phys_addr_t tre_phys; int ret; mutex_lock(&epf_mhi->lock); - ret = __pci_epf_mhi_alloc_map(mhi_cntrl, to, &tre_phys, &tre_buf, - offset, size); + ret = __pci_epf_mhi_alloc_map(mhi_cntrl, buf_info->host_addr, &tre_phys, + &tre_buf, offset, buf_info->size); if (ret) { mutex_unlock(&epf_mhi->lock); return ret; } - memcpy_toio(tre_buf, from, size); + memcpy_toio(tre_buf, buf_info->dev_addr, buf_info->size); - __pci_epf_mhi_unmap_free(mhi_cntrl, to, tre_phys, tre_buf, offset, - size); + __pci_epf_mhi_unmap_free(mhi_cntrl, buf_info->host_addr, tre_phys, + tre_buf, offset, buf_info->size); mutex_unlock(&epf_mhi->lock); + if (buf_info->cb) + 
buf_info->cb(buf_info); + return 0; } @@ -270,8 +289,8 @@ static void pci_epf_mhi_dma_callback(void *param) complete(param); } -static int pci_epf_mhi_edma_read(struct mhi_ep_cntrl *mhi_cntrl, u64 from, - void *to, size_t size) +static int pci_epf_mhi_edma_read(struct mhi_ep_cntrl *mhi_cntrl, + struct mhi_ep_buf_info *buf_info) { struct pci_epf_mhi *epf_mhi = to_epf_mhi(mhi_cntrl); struct device *dma_dev = epf_mhi->epf->epc->dev.parent; @@ -284,13 +303,13 @@ static int pci_epf_mhi_edma_read(struct mhi_ep_cntrl *mhi_cntrl, u64 from, dma_addr_t dst_addr; int ret; - if (size < SZ_4K) - return pci_epf_mhi_iatu_read(mhi_cntrl, from, to, size); + if (buf_info->size < SZ_4K) + return pci_epf_mhi_iatu_read(mhi_cntrl, buf_info); mutex_lock(&epf_mhi->lock); config.direction = DMA_DEV_TO_MEM; - config.src_addr = from; + config.src_addr = buf_info->host_addr; ret = dmaengine_slave_config(chan, &config); if (ret) { @@ -298,14 +317,16 @@ static int pci_epf_mhi_edma_read(struct mhi_ep_cntrl *mhi_cntrl, u64 from, goto err_unlock; } - dst_addr = dma_map_single(dma_dev, to, size, DMA_FROM_DEVICE); + dst_addr = dma_map_single(dma_dev, buf_info->dev_addr, buf_info->size, + DMA_FROM_DEVICE); ret = dma_mapping_error(dma_dev, dst_addr); if (ret) { dev_err(dev, "Failed to map remote memory\n"); goto err_unlock; } - desc = dmaengine_prep_slave_single(chan, dst_addr, size, DMA_DEV_TO_MEM, + desc = dmaengine_prep_slave_single(chan, dst_addr, buf_info->size, + DMA_DEV_TO_MEM, DMA_CTRL_ACK | DMA_PREP_INTERRUPT); if (!desc) { dev_err(dev, "Failed to prepare DMA\n"); @@ -332,15 +353,15 @@ static int pci_epf_mhi_edma_read(struct mhi_ep_cntrl *mhi_cntrl, u64 from, } err_unmap: - dma_unmap_single(dma_dev, dst_addr, size, DMA_FROM_DEVICE); + dma_unmap_single(dma_dev, dst_addr, buf_info->size, DMA_FROM_DEVICE); err_unlock: mutex_unlock(&epf_mhi->lock); return ret; } -static int pci_epf_mhi_edma_write(struct mhi_ep_cntrl *mhi_cntrl, void *from, - u64 to, size_t size) +static int pci_epf_mhi_edma_write(struct mhi_ep_cntrl *mhi_cntrl, + struct mhi_ep_buf_info *buf_info) { struct pci_epf_mhi *epf_mhi = to_epf_mhi(mhi_cntrl); struct device *dma_dev = epf_mhi->epf->epc->dev.parent; @@ -353,13 +374,13 @@ static int pci_epf_mhi_edma_write(struct mhi_ep_cntrl *mhi_cntrl, void *from, dma_addr_t src_addr; int ret; - if (size < SZ_4K) - return pci_epf_mhi_iatu_write(mhi_cntrl, from, to, size); + if (buf_info->size < SZ_4K) + return pci_epf_mhi_iatu_write(mhi_cntrl, buf_info); mutex_lock(&epf_mhi->lock); config.direction = DMA_MEM_TO_DEV; - config.dst_addr = to; + config.dst_addr = buf_info->host_addr; ret = dmaengine_slave_config(chan, &config); if (ret) { @@ -367,14 +388,16 @@ static int pci_epf_mhi_edma_write(struct mhi_ep_cntrl *mhi_cntrl, void *from, goto err_unlock; } - src_addr = dma_map_single(dma_dev, from, size, DMA_TO_DEVICE); + src_addr = dma_map_single(dma_dev, buf_info->dev_addr, buf_info->size, + DMA_TO_DEVICE); ret = dma_mapping_error(dma_dev, src_addr); if (ret) { dev_err(dev, "Failed to map remote memory\n"); goto err_unlock; } - desc = dmaengine_prep_slave_single(chan, src_addr, size, DMA_MEM_TO_DEV, + desc = dmaengine_prep_slave_single(chan, src_addr, buf_info->size, + DMA_MEM_TO_DEV, DMA_CTRL_ACK | DMA_PREP_INTERRUPT); if (!desc) { dev_err(dev, "Failed to prepare DMA\n"); @@ -401,7 +424,199 @@ static int pci_epf_mhi_edma_write(struct mhi_ep_cntrl *mhi_cntrl, void *from, } err_unmap: - dma_unmap_single(dma_dev, src_addr, size, DMA_FROM_DEVICE); + dma_unmap_single(dma_dev, src_addr, buf_info->size, DMA_TO_DEVICE); 
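/*
 * Editorial sketch, not part of the patch: one way the MHI endpoint core
 * could drive the asynchronous write path added further down in this file.
 * The mhi_ep_buf_info fields mirror the usage visible in these hunks;
 * treating cb_buf as a caller cookie and the helper itself are assumptions.
 */
#include <linux/completion.h>
#include <linux/mhi_ep.h>

static void example_write_done(struct mhi_ep_buf_info *buf_info)
{
	/* Runs from pci_epf_mhi_dma_worker() once the buffer has been unmapped. */
	complete(buf_info->cb_buf);
}

static int example_write_sync_over_async(struct mhi_ep_cntrl *mhi_cntrl,
					 void *buf, u64 host_addr, size_t size)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct mhi_ep_buf_info buf_info = {
		.dev_addr = buf,
		.host_addr = host_addr,
		.size = size,
		.cb = example_write_done,
		.cb_buf = &done,
	};
	int ret;

	/*
	 * The transfer descriptor is copied before write_async() returns, so a
	 * stack-allocated buf_info is safe here.
	 */
	ret = mhi_cntrl->write_async(mhi_cntrl, &buf_info);
	if (ret)
		return ret;

	wait_for_completion(&done);

	return 0;
}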
+err_unlock: + mutex_unlock(&epf_mhi->lock); + + return ret; +} + +static void pci_epf_mhi_dma_worker(struct work_struct *work) +{ + struct pci_epf_mhi *epf_mhi = container_of(work, struct pci_epf_mhi, dma_work); + struct device *dma_dev = epf_mhi->epf->epc->dev.parent; + struct pci_epf_mhi_dma_transfer *itr, *tmp; + struct mhi_ep_buf_info *buf_info; + unsigned long flags; + LIST_HEAD(head); + + spin_lock_irqsave(&epf_mhi->list_lock, flags); + list_splice_tail_init(&epf_mhi->dma_list, &head); + spin_unlock_irqrestore(&epf_mhi->list_lock, flags); + + list_for_each_entry_safe(itr, tmp, &head, node) { + list_del(&itr->node); + dma_unmap_single(dma_dev, itr->paddr, itr->size, itr->dir); + buf_info = &itr->buf_info; + buf_info->cb(buf_info); + kfree(itr); + } +} + +static void pci_epf_mhi_dma_async_callback(void *param) +{ + struct pci_epf_mhi_dma_transfer *transfer = param; + struct pci_epf_mhi *epf_mhi = transfer->epf_mhi; + + spin_lock(&epf_mhi->list_lock); + list_add_tail(&transfer->node, &epf_mhi->dma_list); + spin_unlock(&epf_mhi->list_lock); + + queue_work(epf_mhi->dma_wq, &epf_mhi->dma_work); +} + +static int pci_epf_mhi_edma_read_async(struct mhi_ep_cntrl *mhi_cntrl, + struct mhi_ep_buf_info *buf_info) +{ + struct pci_epf_mhi *epf_mhi = to_epf_mhi(mhi_cntrl); + struct device *dma_dev = epf_mhi->epf->epc->dev.parent; + struct pci_epf_mhi_dma_transfer *transfer = NULL; + struct dma_chan *chan = epf_mhi->dma_chan_rx; + struct device *dev = &epf_mhi->epf->dev; + DECLARE_COMPLETION_ONSTACK(complete); + struct dma_async_tx_descriptor *desc; + struct dma_slave_config config = {}; + dma_cookie_t cookie; + dma_addr_t dst_addr; + int ret; + + mutex_lock(&epf_mhi->lock); + + config.direction = DMA_DEV_TO_MEM; + config.src_addr = buf_info->host_addr; + + ret = dmaengine_slave_config(chan, &config); + if (ret) { + dev_err(dev, "Failed to configure DMA channel\n"); + goto err_unlock; + } + + dst_addr = dma_map_single(dma_dev, buf_info->dev_addr, buf_info->size, + DMA_FROM_DEVICE); + ret = dma_mapping_error(dma_dev, dst_addr); + if (ret) { + dev_err(dev, "Failed to map remote memory\n"); + goto err_unlock; + } + + desc = dmaengine_prep_slave_single(chan, dst_addr, buf_info->size, + DMA_DEV_TO_MEM, + DMA_CTRL_ACK | DMA_PREP_INTERRUPT); + if (!desc) { + dev_err(dev, "Failed to prepare DMA\n"); + ret = -EIO; + goto err_unmap; + } + + transfer = kzalloc(sizeof(*transfer), GFP_KERNEL); + if (!transfer) { + ret = -ENOMEM; + goto err_unmap; + } + + transfer->epf_mhi = epf_mhi; + transfer->paddr = dst_addr; + transfer->size = buf_info->size; + transfer->dir = DMA_FROM_DEVICE; + memcpy(&transfer->buf_info, buf_info, sizeof(*buf_info)); + + desc->callback = pci_epf_mhi_dma_async_callback; + desc->callback_param = transfer; + + cookie = dmaengine_submit(desc); + ret = dma_submit_error(cookie); + if (ret) { + dev_err(dev, "Failed to do DMA submit\n"); + goto err_free_transfer; + } + + dma_async_issue_pending(chan); + + goto err_unlock; + +err_free_transfer: + kfree(transfer); +err_unmap: + dma_unmap_single(dma_dev, dst_addr, buf_info->size, DMA_FROM_DEVICE); +err_unlock: + mutex_unlock(&epf_mhi->lock); + + return ret; +} + +static int pci_epf_mhi_edma_write_async(struct mhi_ep_cntrl *mhi_cntrl, + struct mhi_ep_buf_info *buf_info) +{ + struct pci_epf_mhi *epf_mhi = to_epf_mhi(mhi_cntrl); + struct device *dma_dev = epf_mhi->epf->epc->dev.parent; + struct pci_epf_mhi_dma_transfer *transfer = NULL; + struct dma_chan *chan = epf_mhi->dma_chan_tx; + struct device *dev = &epf_mhi->epf->dev; + 
DECLARE_COMPLETION_ONSTACK(complete); + struct dma_async_tx_descriptor *desc; + struct dma_slave_config config = {}; + dma_cookie_t cookie; + dma_addr_t src_addr; + int ret; + + mutex_lock(&epf_mhi->lock); + + config.direction = DMA_MEM_TO_DEV; + config.dst_addr = buf_info->host_addr; + + ret = dmaengine_slave_config(chan, &config); + if (ret) { + dev_err(dev, "Failed to configure DMA channel\n"); + goto err_unlock; + } + + src_addr = dma_map_single(dma_dev, buf_info->dev_addr, buf_info->size, + DMA_TO_DEVICE); + ret = dma_mapping_error(dma_dev, src_addr); + if (ret) { + dev_err(dev, "Failed to map remote memory\n"); + goto err_unlock; + } + + desc = dmaengine_prep_slave_single(chan, src_addr, buf_info->size, + DMA_MEM_TO_DEV, + DMA_CTRL_ACK | DMA_PREP_INTERRUPT); + if (!desc) { + dev_err(dev, "Failed to prepare DMA\n"); + ret = -EIO; + goto err_unmap; + } + + transfer = kzalloc(sizeof(*transfer), GFP_KERNEL); + if (!transfer) { + ret = -ENOMEM; + goto err_unmap; + } + + transfer->epf_mhi = epf_mhi; + transfer->paddr = src_addr; + transfer->size = buf_info->size; + transfer->dir = DMA_TO_DEVICE; + memcpy(&transfer->buf_info, buf_info, sizeof(*buf_info)); + + desc->callback = pci_epf_mhi_dma_async_callback; + desc->callback_param = transfer; + + cookie = dmaengine_submit(desc); + ret = dma_submit_error(cookie); + if (ret) { + dev_err(dev, "Failed to do DMA submit\n"); + goto err_free_transfer; + } + + dma_async_issue_pending(chan); + + goto err_unlock; + +err_free_transfer: + kfree(transfer); +err_unmap: + dma_unmap_single(dma_dev, src_addr, buf_info->size, DMA_TO_DEVICE); err_unlock: mutex_unlock(&epf_mhi->lock); @@ -431,6 +646,7 @@ static int pci_epf_mhi_dma_init(struct pci_epf_mhi *epf_mhi) struct device *dev = &epf_mhi->epf->dev; struct epf_dma_filter filter; dma_cap_mask_t mask; + int ret; dma_cap_zero(mask); dma_cap_set(DMA_SLAVE, mask); @@ -449,16 +665,35 @@ static int pci_epf_mhi_dma_init(struct pci_epf_mhi *epf_mhi) &filter); if (IS_ERR_OR_NULL(epf_mhi->dma_chan_rx)) { dev_err(dev, "Failed to request rx channel\n"); - dma_release_channel(epf_mhi->dma_chan_tx); - epf_mhi->dma_chan_tx = NULL; - return -ENODEV; + ret = -ENODEV; + goto err_release_tx; + } + + epf_mhi->dma_wq = alloc_workqueue("pci_epf_mhi_dma_wq", 0, 0); + if (!epf_mhi->dma_wq) { + ret = -ENOMEM; + goto err_release_rx; } + INIT_LIST_HEAD(&epf_mhi->dma_list); + INIT_WORK(&epf_mhi->dma_work, pci_epf_mhi_dma_worker); + spin_lock_init(&epf_mhi->list_lock); + return 0; + +err_release_rx: + dma_release_channel(epf_mhi->dma_chan_rx); + epf_mhi->dma_chan_rx = NULL; +err_release_tx: + dma_release_channel(epf_mhi->dma_chan_tx); + epf_mhi->dma_chan_tx = NULL; + + return ret; } static void pci_epf_mhi_dma_deinit(struct pci_epf_mhi *epf_mhi) { + destroy_workqueue(epf_mhi->dma_wq); dma_release_channel(epf_mhi->dma_chan_tx); dma_release_channel(epf_mhi->dma_chan_rx); epf_mhi->dma_chan_tx = NULL; @@ -531,12 +766,13 @@ static int pci_epf_mhi_link_up(struct pci_epf *epf) mhi_cntrl->raise_irq = pci_epf_mhi_raise_irq; mhi_cntrl->alloc_map = pci_epf_mhi_alloc_map; mhi_cntrl->unmap_free = pci_epf_mhi_unmap_free; + mhi_cntrl->read_sync = mhi_cntrl->read_async = pci_epf_mhi_iatu_read; + mhi_cntrl->write_sync = mhi_cntrl->write_async = pci_epf_mhi_iatu_write; if (info->flags & MHI_EPF_USE_DMA) { - mhi_cntrl->read_from_host = pci_epf_mhi_edma_read; - mhi_cntrl->write_to_host = pci_epf_mhi_edma_write; - } else { - mhi_cntrl->read_from_host = pci_epf_mhi_iatu_read; - mhi_cntrl->write_to_host = pci_epf_mhi_iatu_write; + mhi_cntrl->read_sync = 
pci_epf_mhi_edma_read; + mhi_cntrl->write_sync = pci_epf_mhi_edma_write; + mhi_cntrl->read_async = pci_epf_mhi_edma_read_async; + mhi_cntrl->write_async = pci_epf_mhi_edma_write_async; } /* Register the MHI EP controller */ diff --git a/drivers/pcmcia/bcm63xx_pcmcia.c b/drivers/pcmcia/bcm63xx_pcmcia.c index dd3c26099048..a5414441834a 100644 --- a/drivers/pcmcia/bcm63xx_pcmcia.c +++ b/drivers/pcmcia/bcm63xx_pcmcia.c @@ -437,7 +437,7 @@ err: return ret; } -static int bcm63xx_drv_pcmcia_remove(struct platform_device *pdev) +static void bcm63xx_drv_pcmcia_remove(struct platform_device *pdev) { struct bcm63xx_pcmcia_socket *skt; struct resource *res; @@ -449,12 +449,11 @@ static int bcm63xx_drv_pcmcia_remove(struct platform_device *pdev) res = skt->reg_res; release_mem_region(res->start, resource_size(res)); kfree(skt); - return 0; } struct platform_driver bcm63xx_pcmcia_driver = { .probe = bcm63xx_drv_pcmcia_probe, - .remove = bcm63xx_drv_pcmcia_remove, + .remove_new = bcm63xx_drv_pcmcia_remove, .driver = { .name = "bcm63xx_pcmcia", .owner = THIS_MODULE, diff --git a/drivers/pcmcia/db1xxx_ss.c b/drivers/pcmcia/db1xxx_ss.c index 87a33ecc2cf1..509713b9a502 100644 --- a/drivers/pcmcia/db1xxx_ss.c +++ b/drivers/pcmcia/db1xxx_ss.c @@ -577,7 +577,7 @@ out0: return ret; } -static int db1x_pcmcia_socket_remove(struct platform_device *pdev) +static void db1x_pcmcia_socket_remove(struct platform_device *pdev) { struct db1x_pcmcia_sock *sock = platform_get_drvdata(pdev); @@ -585,8 +585,6 @@ static int db1x_pcmcia_socket_remove(struct platform_device *pdev) pcmcia_unregister_socket(&sock->socket); iounmap((void *)(sock->virt_io + (u32)mips_io_port_base)); kfree(sock); - - return 0; } static struct platform_driver db1x_pcmcia_socket_driver = { @@ -594,7 +592,7 @@ static struct platform_driver db1x_pcmcia_socket_driver = { .name = "db1xxx_pcmcia", }, .probe = db1x_pcmcia_socket_probe, - .remove = db1x_pcmcia_socket_remove, + .remove_new = db1x_pcmcia_socket_remove, }; module_platform_driver(db1x_pcmcia_socket_driver); diff --git a/drivers/pcmcia/electra_cf.c b/drivers/pcmcia/electra_cf.c index efc27bc15152..5ae826e54811 100644 --- a/drivers/pcmcia/electra_cf.c +++ b/drivers/pcmcia/electra_cf.c @@ -307,7 +307,7 @@ out_free_cf: } -static int electra_cf_remove(struct platform_device *ofdev) +static void electra_cf_remove(struct platform_device *ofdev) { struct device *device = &ofdev->dev; struct electra_cf_socket *cf; @@ -326,8 +326,6 @@ static int electra_cf_remove(struct platform_device *ofdev) release_region(cf->io_base, cf->io_size); kfree(cf); - - return 0; } static const struct of_device_id electra_cf_match[] = { @@ -344,7 +342,7 @@ static struct platform_driver electra_cf_driver = { .of_match_table = electra_cf_match, }, .probe = electra_cf_probe, - .remove = electra_cf_remove, + .remove_new = electra_cf_remove, }; module_platform_driver(electra_cf_driver); diff --git a/drivers/pcmcia/omap_cf.c b/drivers/pcmcia/omap_cf.c index e613818dc0bc..80137c7afe0d 100644 --- a/drivers/pcmcia/omap_cf.c +++ b/drivers/pcmcia/omap_cf.c @@ -290,7 +290,7 @@ fail0: return status; } -static int __exit omap_cf_remove(struct platform_device *pdev) +static void __exit omap_cf_remove(struct platform_device *pdev) { struct omap_cf_socket *cf = platform_get_drvdata(pdev); @@ -300,14 +300,13 @@ static int __exit omap_cf_remove(struct platform_device *pdev) release_mem_region(cf->phys_cf, SZ_8K); free_irq(cf->irq, cf); kfree(cf); - return 0; } static struct platform_driver omap_cf_driver = { .driver = { .name = driver_name, }, - 
.remove = __exit_p(omap_cf_remove), + .remove_new = __exit_p(omap_cf_remove), }; static int __init omap_cf_init(void) diff --git a/drivers/pcmcia/pxa2xx_base.c b/drivers/pcmcia/pxa2xx_base.c index 5254028354f4..457fb81b497a 100644 --- a/drivers/pcmcia/pxa2xx_base.c +++ b/drivers/pcmcia/pxa2xx_base.c @@ -313,15 +313,13 @@ err0: return ret; } -static int pxa2xx_drv_pcmcia_remove(struct platform_device *dev) +static void pxa2xx_drv_pcmcia_remove(struct platform_device *dev) { struct skt_dev_info *sinfo = platform_get_drvdata(dev); int i; for (i = 0; i < sinfo->nskt; i++) soc_pcmcia_remove_one(&sinfo->skt[i]); - - return 0; } static int pxa2xx_drv_pcmcia_resume(struct device *dev) @@ -338,7 +336,7 @@ static const struct dev_pm_ops pxa2xx_drv_pcmcia_pm_ops = { static struct platform_driver pxa2xx_pcmcia_driver = { .probe = pxa2xx_drv_pcmcia_probe, - .remove = pxa2xx_drv_pcmcia_remove, + .remove_new = pxa2xx_drv_pcmcia_remove, .driver = { .name = "pxa2xx-pcmcia", .pm = &pxa2xx_drv_pcmcia_pm_ops, diff --git a/drivers/pcmcia/sa1100_generic.c b/drivers/pcmcia/sa1100_generic.c index 89d4ba58c891..ccb219c38761 100644 --- a/drivers/pcmcia/sa1100_generic.c +++ b/drivers/pcmcia/sa1100_generic.c @@ -158,20 +158,18 @@ static int sa11x0_drv_pcmcia_probe(struct platform_device *pdev) return sa11xx_drv_pcmcia_add_one(skt); } -static int sa11x0_drv_pcmcia_remove(struct platform_device *dev) +static void sa11x0_drv_pcmcia_remove(struct platform_device *dev) { struct soc_pcmcia_socket *skt; if (dev->id == -1) { sa11x0_drv_pcmcia_legacy_remove(dev); - return 0; + return; } skt = platform_get_drvdata(dev); soc_pcmcia_remove_one(skt); - - return 0; } static struct platform_driver sa11x0_pcmcia_driver = { @@ -179,7 +177,7 @@ static struct platform_driver sa11x0_pcmcia_driver = { .name = "sa11x0-pcmcia", }, .probe = sa11x0_drv_pcmcia_probe, - .remove = sa11x0_drv_pcmcia_remove, + .remove_new = sa11x0_drv_pcmcia_remove, }; /* sa11x0_pcmcia_init() diff --git a/drivers/pcmcia/xxs1500_ss.c b/drivers/pcmcia/xxs1500_ss.c index b11c7abb1dc0..2a93fbbd128d 100644 --- a/drivers/pcmcia/xxs1500_ss.c +++ b/drivers/pcmcia/xxs1500_ss.c @@ -301,7 +301,7 @@ out0: return ret; } -static int xxs1500_pcmcia_remove(struct platform_device *pdev) +static void xxs1500_pcmcia_remove(struct platform_device *pdev) { struct xxs1500_pcmcia_sock *sock = platform_get_drvdata(pdev); @@ -309,8 +309,6 @@ static int xxs1500_pcmcia_remove(struct platform_device *pdev) free_irq(gpio_to_irq(GPIO_CDA), sock); iounmap((void *)(sock->virt_io + (u32)mips_io_port_base)); kfree(sock); - - return 0; } static struct platform_driver xxs1500_pcmcia_socket_driver = { @@ -318,7 +316,7 @@ static struct platform_driver xxs1500_pcmcia_socket_driver = { .name = "xxs1500_pcmcia", }, .probe = xxs1500_pcmcia_probe, - .remove = xxs1500_pcmcia_remove, + .remove_new = xxs1500_pcmcia_remove, }; module_platform_driver(xxs1500_pcmcia_socket_driver); diff --git a/drivers/platform/goldfish/goldfish_pipe.c b/drivers/platform/goldfish/goldfish_pipe.c index 7737d56191d7..061aa9647c19 100644 --- a/drivers/platform/goldfish/goldfish_pipe.c +++ b/drivers/platform/goldfish/goldfish_pipe.c @@ -915,12 +915,11 @@ static int goldfish_pipe_probe(struct platform_device *pdev) return goldfish_pipe_device_init(pdev, dev); } -static int goldfish_pipe_remove(struct platform_device *pdev) +static void goldfish_pipe_remove(struct platform_device *pdev) { struct goldfish_pipe_dev *dev = platform_get_drvdata(pdev); goldfish_pipe_device_deinit(pdev, dev); - return 0; } static const struct 
acpi_device_id goldfish_pipe_acpi_match[] = { @@ -937,7 +936,7 @@ MODULE_DEVICE_TABLE(of, goldfish_pipe_of_match); static struct platform_driver goldfish_pipe_driver = { .probe = goldfish_pipe_probe, - .remove = goldfish_pipe_remove, + .remove_new = goldfish_pipe_remove, .driver = { .name = "goldfish_pipe", .of_match_table = goldfish_pipe_of_match, diff --git a/drivers/platform/surface/aggregator/bus.c b/drivers/platform/surface/aggregator/bus.c index 42ccd7f1c9b9..118caa651bec 100644 --- a/drivers/platform/surface/aggregator/bus.c +++ b/drivers/platform/surface/aggregator/bus.c @@ -35,6 +35,8 @@ static struct attribute *ssam_device_attrs[] = { }; ATTRIBUTE_GROUPS(ssam_device); +static const struct bus_type ssam_bus_type; + static int ssam_device_uevent(const struct device *dev, struct kobj_uevent_env *env) { const struct ssam_device *sdev = to_ssam_device(dev); @@ -329,13 +331,12 @@ static void ssam_bus_remove(struct device *dev) sdrv->remove(to_ssam_device(dev)); } -struct bus_type ssam_bus_type = { +static const struct bus_type ssam_bus_type = { .name = "surface_aggregator", .match = ssam_bus_match, .probe = ssam_bus_probe, .remove = ssam_bus_remove, }; -EXPORT_SYMBOL_GPL(ssam_bus_type); /** * __ssam_device_driver_register() - Register a SSAM client device driver. diff --git a/drivers/sh/maple/maple.c b/drivers/sh/maple/maple.c index e05473c5c267..16018009a5a6 100644 --- a/drivers/sh/maple/maple.c +++ b/drivers/sh/maple/maple.c @@ -59,6 +59,7 @@ struct maple_device_specify { static bool checked[MAPLE_PORTS]; static bool empty[MAPLE_PORTS]; static struct maple_device *baseunits[MAPLE_PORTS]; +static const struct bus_type maple_bus_type; /** * maple_driver_register - register a maple driver @@ -773,11 +774,10 @@ static struct maple_driver maple_unsupported_device = { /* * maple_bus_type - core maple bus structure */ -struct bus_type maple_bus_type = { +static const struct bus_type maple_bus_type = { .name = "maple", .match = maple_match_bus_driver, }; -EXPORT_SYMBOL_GPL(maple_bus_type); static struct device maple_bus = { .init_name = "maple", diff --git a/drivers/soc/xilinx/xlnx_event_manager.c b/drivers/soc/xilinx/xlnx_event_manager.c index 042553abe1bf..253299e4214d 100644 --- a/drivers/soc/xilinx/xlnx_event_manager.c +++ b/drivers/soc/xilinx/xlnx_event_manager.c @@ -77,11 +77,26 @@ struct registered_event_data { static bool xlnx_is_error_event(const u32 node_id) { - if (node_id == EVENT_ERROR_PMC_ERR1 || - node_id == EVENT_ERROR_PMC_ERR2 || - node_id == EVENT_ERROR_PSM_ERR1 || - node_id == EVENT_ERROR_PSM_ERR2) - return true; + u32 pm_family_code, pm_sub_family_code; + + zynqmp_pm_get_family_info(&pm_family_code, &pm_sub_family_code); + + if (pm_sub_family_code == VERSAL_SUB_FAMILY_CODE) { + if (node_id == VERSAL_EVENT_ERROR_PMC_ERR1 || + node_id == VERSAL_EVENT_ERROR_PMC_ERR2 || + node_id == VERSAL_EVENT_ERROR_PSM_ERR1 || + node_id == VERSAL_EVENT_ERROR_PSM_ERR2) + return true; + } else { + if (node_id == VERSAL_NET_EVENT_ERROR_PMC_ERR1 || + node_id == VERSAL_NET_EVENT_ERROR_PMC_ERR2 || + node_id == VERSAL_NET_EVENT_ERROR_PMC_ERR3 || + node_id == VERSAL_NET_EVENT_ERROR_PSM_ERR1 || + node_id == VERSAL_NET_EVENT_ERROR_PSM_ERR2 || + node_id == VERSAL_NET_EVENT_ERROR_PSM_ERR3 || + node_id == VERSAL_NET_EVENT_ERROR_PSM_ERR4) + return true; + } return false; } @@ -483,7 +498,7 @@ static void xlnx_call_notify_cb_handler(const u32 *payload) static void xlnx_get_event_callback_data(u32 *buf) { - zynqmp_pm_invoke_fn(GET_CALLBACK_DATA, 0, 0, 0, 0, buf); + zynqmp_pm_invoke_fn(GET_CALLBACK_DATA, 
buf, 0); } static irqreturn_t xlnx_event_handler(int irq, void *dev_id) @@ -656,7 +671,11 @@ static int xlnx_event_manager_probe(struct platform_device *pdev) ret = zynqmp_pm_register_sgi(sgi_num, 0); if (ret) { - dev_err(&pdev->dev, "SGI %d Registration over TF-A failed with %d\n", sgi_num, ret); + if (ret == -EOPNOTSUPP) + dev_err(&pdev->dev, "SGI registration not supported by TF-A or Xen\n"); + else + dev_err(&pdev->dev, "SGI %d registration failed, err %d\n", sgi_num, ret); + xlnx_event_cleanup_sgi(pdev); return ret; } diff --git a/drivers/soc/xilinx/zynqmp_power.c b/drivers/soc/xilinx/zynqmp_power.c index 07d735b38b49..965b1143936a 100644 --- a/drivers/soc/xilinx/zynqmp_power.c +++ b/drivers/soc/xilinx/zynqmp_power.c @@ -51,7 +51,7 @@ static enum pm_suspend_mode suspend_mode = PM_SUSPEND_MODE_STD; static void zynqmp_pm_get_callback_data(u32 *buf) { - zynqmp_pm_invoke_fn(GET_CALLBACK_DATA, 0, 0, 0, 0, buf); + zynqmp_pm_invoke_fn(GET_CALLBACK_DATA, buf, 0); } static void suspend_event_callback(const u32 *payload, void *data) diff --git a/drivers/spmi/Makefile b/drivers/spmi/Makefile index 9d974424c8c1..7f152167bb05 100644 --- a/drivers/spmi/Makefile +++ b/drivers/spmi/Makefile @@ -2,7 +2,7 @@ # # Makefile for kernel SPMI framework. # -obj-$(CONFIG_SPMI) += spmi.o +obj-$(CONFIG_SPMI) += spmi.o spmi-devres.o obj-$(CONFIG_SPMI_HISI3670) += hisi-spmi-controller.o obj-$(CONFIG_SPMI_MSM_PMIC_ARB) += spmi-pmic-arb.o diff --git a/drivers/spmi/hisi-spmi-controller.c b/drivers/spmi/hisi-spmi-controller.c index 9cbd473487cb..674a350cc676 100644 --- a/drivers/spmi/hisi-spmi-controller.c +++ b/drivers/spmi/hisi-spmi-controller.c @@ -267,10 +267,10 @@ static int spmi_controller_probe(struct platform_device *pdev) struct resource *iores; int ret; - ctrl = spmi_controller_alloc(&pdev->dev, sizeof(*spmi_controller)); - if (!ctrl) { + ctrl = devm_spmi_controller_alloc(&pdev->dev, sizeof(*spmi_controller)); + if (IS_ERR(ctrl)) { dev_err(&pdev->dev, "can not allocate spmi_controller data\n"); - return -ENOMEM; + return PTR_ERR(ctrl); } spmi_controller = spmi_controller_get_drvdata(ctrl); spmi_controller->controller = ctrl; @@ -278,24 +278,21 @@ static int spmi_controller_probe(struct platform_device *pdev) iores = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!iores) { dev_err(&pdev->dev, "can not get resource!\n"); - ret = -EINVAL; - goto err_put_controller; + return -EINVAL; } spmi_controller->base = devm_ioremap(&pdev->dev, iores->start, resource_size(iores)); if (!spmi_controller->base) { dev_err(&pdev->dev, "can not remap base addr!\n"); - ret = -EADDRNOTAVAIL; - goto err_put_controller; + return -EADDRNOTAVAIL; } ret = of_property_read_u32(pdev->dev.of_node, "hisilicon,spmi-channel", &spmi_controller->channel); if (ret) { dev_err(&pdev->dev, "can not get channel\n"); - ret = -ENODEV; - goto err_put_controller; + return -ENODEV; } platform_set_drvdata(pdev, spmi_controller); @@ -311,25 +308,13 @@ static int spmi_controller_probe(struct platform_device *pdev) ctrl->read_cmd = spmi_read_cmd; ctrl->write_cmd = spmi_write_cmd; - ret = spmi_controller_add(ctrl); + ret = devm_spmi_controller_add(&pdev->dev, ctrl); if (ret) { dev_err(&pdev->dev, "spmi_controller_add failed with error %d!\n", ret); - goto err_put_controller; + return ret; } return 0; - -err_put_controller: - spmi_controller_put(ctrl); - return ret; -} - -static void spmi_del_controller(struct platform_device *pdev) -{ - struct spmi_controller *ctrl = platform_get_drvdata(pdev); - - spmi_controller_remove(ctrl); - 
spmi_controller_put(ctrl); } static const struct of_device_id spmi_controller_match_table[] = { @@ -342,7 +327,6 @@ MODULE_DEVICE_TABLE(of, spmi_controller_match_table); static struct platform_driver spmi_controller_driver = { .probe = spmi_controller_probe, - .remove_new = spmi_del_controller, .driver = { .name = "hisi_spmi_controller", .of_match_table = spmi_controller_match_table, diff --git a/drivers/spmi/spmi-devres.c b/drivers/spmi/spmi-devres.c new file mode 100644 index 000000000000..62c4b3f24d06 --- /dev/null +++ b/drivers/spmi/spmi-devres.c @@ -0,0 +1,64 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright 2023 Google LLC. + */ + +#include <linux/device.h> +#include <linux/spmi.h> + +static void devm_spmi_controller_release(struct device *parent, void *res) +{ + spmi_controller_put(*(struct spmi_controller **)res); +} + +struct spmi_controller *devm_spmi_controller_alloc(struct device *parent, size_t size) +{ + struct spmi_controller **ptr, *ctrl; + + ptr = devres_alloc(devm_spmi_controller_release, sizeof(*ptr), GFP_KERNEL); + if (!ptr) + return ERR_PTR(-ENOMEM); + + ctrl = spmi_controller_alloc(parent, size); + if (IS_ERR(ctrl)) { + devres_free(ptr); + return ctrl; + } + + *ptr = ctrl; + devres_add(parent, ptr); + + return ctrl; +} +EXPORT_SYMBOL_GPL(devm_spmi_controller_alloc); + +static void devm_spmi_controller_remove(struct device *parent, void *res) +{ + spmi_controller_remove(*(struct spmi_controller **)res); +} + +int devm_spmi_controller_add(struct device *parent, struct spmi_controller *ctrl) +{ + struct spmi_controller **ptr; + int ret; + + ptr = devres_alloc(devm_spmi_controller_remove, sizeof(*ptr), GFP_KERNEL); + if (!ptr) + return -ENOMEM; + + ret = spmi_controller_add(ctrl); + if (ret) { + devres_free(ptr); + return ret; + } + + *ptr = ctrl; + devres_add(parent, ptr); + + return 0; + +} +EXPORT_SYMBOL_GPL(devm_spmi_controller_add); + +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("SPMI devres helpers"); diff --git a/drivers/spmi/spmi-mtk-pmif.c b/drivers/spmi/spmi-mtk-pmif.c index b3c991e1ea40..5079442f8ea1 100644 --- a/drivers/spmi/spmi-mtk-pmif.c +++ b/drivers/spmi/spmi-mtk-pmif.c @@ -50,6 +50,7 @@ struct pmif { struct clk_bulk_data clks[PMIF_MAX_CLKS]; size_t nclks; const struct pmif_data *data; + raw_spinlock_t lock; }; static const char * const pmif_clock_names[] = { @@ -314,6 +315,7 @@ static int pmif_spmi_read_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid, struct ch_reg *inf_reg; int ret; u32 data, cmd; + unsigned long flags; /* Check for argument validation. */ if (sid & ~0xf) { @@ -334,6 +336,7 @@ static int pmif_spmi_read_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid, else return -EINVAL; + raw_spin_lock_irqsave(&arb->lock, flags); /* Wait for Software Interface FSM state to be IDLE. */ inf_reg = &arb->chan; ret = readl_poll_timeout_atomic(arb->base + arb->data->regs[inf_reg->ch_sta], @@ -343,6 +346,7 @@ static int pmif_spmi_read_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid, /* set channel ready if the data has transferred */ if (pmif_is_fsm_vldclr(arb)) pmif_writel(arb, 1, inf_reg->ch_rdy); + raw_spin_unlock_irqrestore(&arb->lock, flags); dev_err(&ctrl->dev, "failed to wait for SWINF_IDLE\n"); return ret; } @@ -350,6 +354,7 @@ static int pmif_spmi_read_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid, /* Send the command. 
*/ cmd = (opc << 30) | (sid << 24) | ((len - 1) << 16) | addr; pmif_writel(arb, cmd, inf_reg->ch_send); + raw_spin_unlock_irqrestore(&arb->lock, flags); /* * Wait for Software Interface FSM state to be WFVLDCLR, @@ -376,7 +381,14 @@ static int pmif_spmi_write_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid, struct pmif *arb = spmi_controller_get_drvdata(ctrl); struct ch_reg *inf_reg; int ret; - u32 data, cmd; + u32 data, wdata, cmd; + unsigned long flags; + + /* Check for argument validation. */ + if (unlikely(sid & ~0xf)) { + dev_err(&ctrl->dev, "exceed the max slv id\n"); + return -EINVAL; + } if (len > 4) { dev_err(&ctrl->dev, "pmif supports 1..4 bytes per trans, but:%zu requested", len); @@ -394,6 +406,10 @@ static int pmif_spmi_write_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid, else return -EINVAL; + /* Set the write data. */ + memcpy(&wdata, buf, len); + + raw_spin_lock_irqsave(&arb->lock, flags); /* Wait for Software Interface FSM state to be IDLE. */ inf_reg = &arb->chan; ret = readl_poll_timeout_atomic(arb->base + arb->data->regs[inf_reg->ch_sta], @@ -403,17 +419,17 @@ static int pmif_spmi_write_cmd(struct spmi_controller *ctrl, u8 opc, u8 sid, /* set channel ready if the data has transferred */ if (pmif_is_fsm_vldclr(arb)) pmif_writel(arb, 1, inf_reg->ch_rdy); + raw_spin_unlock_irqrestore(&arb->lock, flags); dev_err(&ctrl->dev, "failed to wait for SWINF_IDLE\n"); return ret; } - /* Set the write data. */ - memcpy(&data, buf, len); - pmif_writel(arb, data, inf_reg->wdata); + pmif_writel(arb, wdata, inf_reg->wdata); /* Send the command. */ cmd = (opc << 30) | BIT(29) | (sid << 24) | ((len - 1) << 16) | addr; pmif_writel(arb, cmd, inf_reg->ch_send); + raw_spin_unlock_irqrestore(&arb->lock, flags); return 0; } @@ -437,44 +453,39 @@ static int mtk_spmi_probe(struct platform_device *pdev) int err, i; u32 chan_offset; - ctrl = spmi_controller_alloc(&pdev->dev, sizeof(*arb)); - if (!ctrl) - return -ENOMEM; + ctrl = devm_spmi_controller_alloc(&pdev->dev, sizeof(*arb)); + if (IS_ERR(ctrl)) + return PTR_ERR(ctrl); arb = spmi_controller_get_drvdata(ctrl); arb->data = device_get_match_data(&pdev->dev); if (!arb->data) { - err = -EINVAL; dev_err(&pdev->dev, "Cannot get drv_data\n"); - goto err_put_ctrl; + return -EINVAL; } arb->base = devm_platform_ioremap_resource_byname(pdev, "pmif"); - if (IS_ERR(arb->base)) { - err = PTR_ERR(arb->base); - goto err_put_ctrl; - } + if (IS_ERR(arb->base)) + return PTR_ERR(arb->base); arb->spmimst_base = devm_platform_ioremap_resource_byname(pdev, "spmimst"); - if (IS_ERR(arb->spmimst_base)) { - err = PTR_ERR(arb->spmimst_base); - goto err_put_ctrl; - } + if (IS_ERR(arb->spmimst_base)) + return PTR_ERR(arb->spmimst_base); arb->nclks = ARRAY_SIZE(pmif_clock_names); for (i = 0; i < arb->nclks; i++) arb->clks[i].id = pmif_clock_names[i]; - err = devm_clk_bulk_get(&pdev->dev, arb->nclks, arb->clks); + err = clk_bulk_get(&pdev->dev, arb->nclks, arb->clks); if (err) { dev_err(&pdev->dev, "Failed to get clocks: %d\n", err); - goto err_put_ctrl; + return err; } err = clk_bulk_prepare_enable(arb->nclks, arb->clks); if (err) { dev_err(&pdev->dev, "Failed to enable clocks: %d\n", err); - goto err_put_ctrl; + goto err_put_clks; } ctrl->cmd = pmif_arb_cmd; @@ -488,6 +499,8 @@ static int mtk_spmi_probe(struct platform_device *pdev) arb->chan.ch_send = PMIF_SWINF_0_ACC + chan_offset; arb->chan.ch_rdy = PMIF_SWINF_0_VLD_CLR + chan_offset; + raw_spin_lock_init(&arb->lock); + platform_set_drvdata(pdev, ctrl); err = spmi_controller_add(ctrl); @@ -498,8 +511,8 @@ static int 
mtk_spmi_probe(struct platform_device *pdev) err_domain_remove: clk_bulk_disable_unprepare(arb->nclks, arb->clks); -err_put_ctrl: - spmi_controller_put(ctrl); +err_put_clks: + clk_bulk_put(arb->nclks, arb->clks); return err; } @@ -508,9 +521,9 @@ static void mtk_spmi_remove(struct platform_device *pdev) struct spmi_controller *ctrl = platform_get_drvdata(pdev); struct pmif *arb = spmi_controller_get_drvdata(ctrl); - clk_bulk_disable_unprepare(arb->nclks, arb->clks); spmi_controller_remove(ctrl); - spmi_controller_put(ctrl); + clk_bulk_disable_unprepare(arb->nclks, arb->clks); + clk_bulk_put(arb->nclks, arb->clks); } static const struct of_device_id mtk_spmi_match_table[] = { diff --git a/drivers/spmi/spmi-pmic-arb.c b/drivers/spmi/spmi-pmic-arb.c index dcb675d980d4..9ed1180fe31f 100644 --- a/drivers/spmi/spmi-pmic-arb.c +++ b/drivers/spmi/spmi-pmic-arb.c @@ -1443,9 +1443,9 @@ static int spmi_pmic_arb_probe(struct platform_device *pdev) u32 channel, ee, hw_ver; int err; - ctrl = spmi_controller_alloc(&pdev->dev, sizeof(*pmic_arb)); - if (!ctrl) - return -ENOMEM; + ctrl = devm_spmi_controller_alloc(&pdev->dev, sizeof(*pmic_arb)); + if (IS_ERR(ctrl)) + return PTR_ERR(ctrl); pmic_arb = spmi_controller_get_drvdata(ctrl); pmic_arb->spmic = ctrl; @@ -1462,20 +1462,16 @@ static int spmi_pmic_arb_probe(struct platform_device *pdev) */ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "core"); core = devm_ioremap(&ctrl->dev, res->start, resource_size(res)); - if (IS_ERR(core)) { - err = PTR_ERR(core); - goto err_put_ctrl; - } + if (IS_ERR(core)) + return PTR_ERR(core); pmic_arb->core_size = resource_size(res); pmic_arb->ppid_to_apid = devm_kcalloc(&ctrl->dev, PMIC_ARB_MAX_PPID, sizeof(*pmic_arb->ppid_to_apid), GFP_KERNEL); - if (!pmic_arb->ppid_to_apid) { - err = -ENOMEM; - goto err_put_ctrl; - } + if (!pmic_arb->ppid_to_apid) + return -ENOMEM; hw_ver = readl_relaxed(core + PMIC_ARB_VERSION); @@ -1499,19 +1495,15 @@ static int spmi_pmic_arb_probe(struct platform_device *pdev) "obsrvr"); pmic_arb->rd_base = devm_ioremap(&ctrl->dev, res->start, resource_size(res)); - if (IS_ERR(pmic_arb->rd_base)) { - err = PTR_ERR(pmic_arb->rd_base); - goto err_put_ctrl; - } + if (IS_ERR(pmic_arb->rd_base)) + return PTR_ERR(pmic_arb->rd_base); res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "chnls"); pmic_arb->wr_base = devm_ioremap(&ctrl->dev, res->start, resource_size(res)); - if (IS_ERR(pmic_arb->wr_base)) { - err = PTR_ERR(pmic_arb->wr_base); - goto err_put_ctrl; - } + if (IS_ERR(pmic_arb->wr_base)) + return PTR_ERR(pmic_arb->wr_base); } pmic_arb->max_periphs = PMIC_ARB_MAX_PERIPHS; @@ -1522,10 +1514,9 @@ static int spmi_pmic_arb_probe(struct platform_device *pdev) of_property_read_u32(pdev->dev.of_node, "qcom,bus-id", &pmic_arb->bus_instance); if (pmic_arb->bus_instance > 1) { - err = -EINVAL; dev_err(&pdev->dev, "invalid bus instance (%u) specified\n", pmic_arb->bus_instance); - goto err_put_ctrl; + return -EINVAL; } if (pmic_arb->bus_instance == 0) { @@ -1543,10 +1534,9 @@ static int spmi_pmic_arb_probe(struct platform_device *pdev) } if (pmic_arb->base_apid + pmic_arb->apid_count > pmic_arb->max_periphs) { - err = -EINVAL; dev_err(&pdev->dev, "Unsupported APID count %d detected\n", pmic_arb->base_apid + pmic_arb->apid_count); - goto err_put_ctrl; + return -EINVAL; } } else if (hw_ver >= PMIC_ARB_VERSION_V5_MIN) { pmic_arb->base_apid = 0; @@ -1554,55 +1544,45 @@ static int spmi_pmic_arb_probe(struct platform_device *pdev) PMIC_ARB_FEATURES_PERIPH_MASK; if (pmic_arb->apid_count > 
pmic_arb->max_periphs) { - err = -EINVAL; dev_err(&pdev->dev, "Unsupported APID count %d detected\n", pmic_arb->apid_count); - goto err_put_ctrl; + return -EINVAL; } } pmic_arb->apid_data = devm_kcalloc(&ctrl->dev, pmic_arb->max_periphs, sizeof(*pmic_arb->apid_data), GFP_KERNEL); - if (!pmic_arb->apid_data) { - err = -ENOMEM; - goto err_put_ctrl; - } + if (!pmic_arb->apid_data) + return -ENOMEM; dev_info(&ctrl->dev, "PMIC arbiter version %s (0x%x)\n", pmic_arb->ver_ops->ver_str, hw_ver); res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "intr"); pmic_arb->intr = devm_ioremap_resource(&ctrl->dev, res); - if (IS_ERR(pmic_arb->intr)) { - err = PTR_ERR(pmic_arb->intr); - goto err_put_ctrl; - } + if (IS_ERR(pmic_arb->intr)) + return PTR_ERR(pmic_arb->intr); res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cnfg"); pmic_arb->cnfg = devm_ioremap_resource(&ctrl->dev, res); - if (IS_ERR(pmic_arb->cnfg)) { - err = PTR_ERR(pmic_arb->cnfg); - goto err_put_ctrl; - } + if (IS_ERR(pmic_arb->cnfg)) + return PTR_ERR(pmic_arb->cnfg); pmic_arb->irq = platform_get_irq_byname(pdev, "periph_irq"); - if (pmic_arb->irq < 0) { - err = pmic_arb->irq; - goto err_put_ctrl; - } + if (pmic_arb->irq < 0) + return pmic_arb->irq; err = of_property_read_u32(pdev->dev.of_node, "qcom,channel", &channel); if (err) { dev_err(&pdev->dev, "channel unspecified.\n"); - goto err_put_ctrl; + return err; } if (channel > 5) { dev_err(&pdev->dev, "invalid channel (%u) specified.\n", channel); - err = -EINVAL; - goto err_put_ctrl; + return -EINVAL; } pmic_arb->channel = channel; @@ -1610,22 +1590,19 @@ static int spmi_pmic_arb_probe(struct platform_device *pdev) err = of_property_read_u32(pdev->dev.of_node, "qcom,ee", &ee); if (err) { dev_err(&pdev->dev, "EE unspecified.\n"); - goto err_put_ctrl; + return err; } if (ee > 5) { dev_err(&pdev->dev, "invalid EE (%u) specified\n", ee); - err = -EINVAL; - goto err_put_ctrl; + return -EINVAL; } pmic_arb->ee = ee; mapping_table = devm_kcalloc(&ctrl->dev, pmic_arb->max_periphs, sizeof(*mapping_table), GFP_KERNEL); - if (!mapping_table) { - err = -ENOMEM; - goto err_put_ctrl; - } + if (!mapping_table) + return -ENOMEM; pmic_arb->mapping_table = mapping_table; /* Initialize max_apid/min_apid to the opposite bounds, during @@ -1645,7 +1622,7 @@ static int spmi_pmic_arb_probe(struct platform_device *pdev) if (err) { dev_err(&pdev->dev, "could not read APID->PPID mapping table, rc= %d\n", err); - goto err_put_ctrl; + return err; } } @@ -1654,8 +1631,7 @@ static int spmi_pmic_arb_probe(struct platform_device *pdev) &pmic_arb_irq_domain_ops, pmic_arb); if (!pmic_arb->domain) { dev_err(&pdev->dev, "unable to create irq_domain\n"); - err = -ENOMEM; - goto err_put_ctrl; + return -ENOMEM; } irq_set_chained_handler_and_data(pmic_arb->irq, pmic_arb_chained_irq, @@ -1669,8 +1645,6 @@ static int spmi_pmic_arb_probe(struct platform_device *pdev) err_domain_remove: irq_set_chained_handler_and_data(pmic_arb->irq, NULL, NULL); irq_domain_remove(pmic_arb->domain); -err_put_ctrl: - spmi_controller_put(ctrl); return err; } @@ -1681,7 +1655,6 @@ static void spmi_pmic_arb_remove(struct platform_device *pdev) spmi_controller_remove(ctrl); irq_set_chained_handler_and_data(pmic_arb->irq, NULL, NULL); irq_domain_remove(pmic_arb->domain); - spmi_controller_put(ctrl); } static const struct of_device_id spmi_pmic_arb_match_table[] = { diff --git a/drivers/spmi/spmi.c b/drivers/spmi/spmi.c index 93cd4a34debc..3a60fd2e09e1 100644 --- a/drivers/spmi/spmi.c +++ b/drivers/spmi/spmi.c @@ -448,11 +448,11 @@ struct 
spmi_controller *spmi_controller_alloc(struct device *parent, int id; if (WARN_ON(!parent)) - return NULL; + return ERR_PTR(-EINVAL); ctrl = kzalloc(sizeof(*ctrl) + size, GFP_KERNEL); if (!ctrl) - return NULL; + return ERR_PTR(-ENOMEM); device_initialize(&ctrl->dev); ctrl->dev.type = &spmi_ctrl_type; @@ -466,7 +466,7 @@ struct spmi_controller *spmi_controller_alloc(struct device *parent, dev_err(parent, "unable to allocate SPMI controller identifier.\n"); spmi_controller_put(ctrl); - return NULL; + return ERR_PTR(id); } ctrl->nr = id; diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c index 62082d64ece0..2d572f6c8ec8 100644 --- a/drivers/uio/uio.c +++ b/drivers/uio/uio.c @@ -466,13 +466,13 @@ static int uio_open(struct inode *inode, struct file *filep) mutex_lock(&minor_lock); idev = idr_find(&uio_idr, iminor(inode)); - mutex_unlock(&minor_lock); if (!idev) { ret = -ENODEV; + mutex_unlock(&minor_lock); goto out; } - get_device(&idev->dev); + mutex_unlock(&minor_lock); if (!try_module_get(idev->owner)) { ret = -ENODEV; @@ -1064,9 +1064,8 @@ void uio_unregister_device(struct uio_info *info) wake_up_interruptible(&idev->wait); kill_fasync(&idev->async_queue, SIGIO, POLL_HUP); - device_unregister(&idev->dev); - uio_free_minor(minor); + device_unregister(&idev->dev); return; } diff --git a/drivers/video/console/Kconfig b/drivers/video/console/Kconfig index 83c2d7329ca5..bc31db6ef7d2 100644 --- a/drivers/video/console/Kconfig +++ b/drivers/video/console/Kconfig @@ -7,7 +7,7 @@ menu "Console display driver support" config VGA_CONSOLE bool "VGA text console" if EXPERT || !X86 - depends on ALPHA || IA64 || X86 || \ + depends on ALPHA || X86 || \ (ARM && ARCH_FOOTBRIDGE) || \ (MIPS && (MIPS_MALTA || SIBYTE_BCM112X || SIBYTE_SB1250 || SIBYTE_BCM1x80 || SNI_RM)) select APERTURE_HELPERS if (DRM || FB || VFIO_PCI_CORE) diff --git a/drivers/virt/vboxguest/vboxguest_core.c b/drivers/virt/vboxguest/vboxguest_core.c index dfd69bd77f53..c6e9855998ab 100644 --- a/drivers/virt/vboxguest/vboxguest_core.c +++ b/drivers/virt/vboxguest/vboxguest_core.c @@ -33,16 +33,15 @@ VMMDEV_REQUESTOR_CON_DONT_KNOW | VMMDEV_REQUESTOR_TRUST_NOT_GIVEN) /** - * Reserves memory in which the VMM can relocate any guest mappings - * that are floating around. + * vbg_guest_mappings_init - Reserves memory in which the VMM can + * relocate any guest mappings that are floating around. + * @gdev: The Guest extension device. * * This operation is a little bit tricky since the VMM might not accept * just any address because of address clashes between the three contexts * it operates in, so we try several times. * * Failure to reserve the guest mappings is ignored. - * - * @gdev: The Guest extension device. */ static void vbg_guest_mappings_init(struct vbg_dev *gdev) { @@ -125,7 +124,7 @@ out: } /** - * Undo what vbg_guest_mappings_init did. + * vbg_guest_mappings_exit - Undo what vbg_guest_mappings_init did. * * @gdev: The Guest extension device. */ @@ -166,9 +165,10 @@ static void vbg_guest_mappings_exit(struct vbg_dev *gdev) } /** - * Report the guest information to the host. - * Return: 0 or negative errno value. + * vbg_report_guest_info - Report the guest information to the host. * @gdev: The Guest extension device. + * + * Return: %0 or negative errno value. */ static int vbg_report_guest_info(struct vbg_dev *gdev) { @@ -229,10 +229,11 @@ out_free: } /** - * Report the guest driver status to the host. - * Return: 0 or negative errno value. + * vbg_report_driver_status - Report the guest driver status to the host. 
* @gdev: The Guest extension device. * @active: Flag whether the driver is now active or not. + * + * Return: 0 or negative errno value. */ static int vbg_report_driver_status(struct vbg_dev *gdev, bool active) { @@ -261,10 +262,12 @@ static int vbg_report_driver_status(struct vbg_dev *gdev, bool active) } /** - * Inflate the balloon by one chunk. The caller owns the balloon mutex. - * Return: 0 or negative errno value. + * vbg_balloon_inflate - Inflate the balloon by one chunk. The caller + * owns the balloon mutex. * @gdev: The Guest extension device. * @chunk_idx: Index of the chunk. + * + * Return: %0 or negative errno value. */ static int vbg_balloon_inflate(struct vbg_dev *gdev, u32 chunk_idx) { @@ -312,10 +315,12 @@ out_error: } /** - * Deflate the balloon by one chunk. The caller owns the balloon mutex. - * Return: 0 or negative errno value. + * vbg_balloon_deflate - Deflate the balloon by one chunk. The caller + * owns the balloon mutex. * @gdev: The Guest extension device. * @chunk_idx: Index of the chunk. + * + * Return: %0 or negative errno value. */ static int vbg_balloon_deflate(struct vbg_dev *gdev, u32 chunk_idx) { @@ -344,7 +349,7 @@ static int vbg_balloon_deflate(struct vbg_dev *gdev, u32 chunk_idx) return 0; } -/** +/* * Respond to VMMDEV_EVENT_BALLOON_CHANGE_REQUEST events, query the size * the host wants the balloon to be and adjust accordingly. */ @@ -409,7 +414,7 @@ static void vbg_balloon_work(struct work_struct *work) } } -/** +/* * Callback for heartbeat timer. */ static void vbg_heartbeat_timer(struct timer_list *t) @@ -422,11 +427,12 @@ static void vbg_heartbeat_timer(struct timer_list *t) } /** - * Configure the host to check guest's heartbeat - * and get heartbeat interval from the host. - * Return: 0 or negative errno value. + * vbg_heartbeat_host_config - Configure the host to check guest's heartbeat + * and get heartbeat interval from the host. * @gdev: The Guest extension device. * @enabled: Set true to enable guest heartbeat checks on host. + * + * Return: %0 or negative errno value. */ static int vbg_heartbeat_host_config(struct vbg_dev *gdev, bool enabled) { @@ -449,9 +455,11 @@ static int vbg_heartbeat_host_config(struct vbg_dev *gdev, bool enabled) } /** - * Initializes the heartbeat timer. This feature may be disabled by the host. - * Return: 0 or negative errno value. + * vbg_heartbeat_init - Initializes the heartbeat timer. This feature + * may be disabled by the host. * @gdev: The Guest extension device. + * + * Return: %0 or negative errno value. */ static int vbg_heartbeat_init(struct vbg_dev *gdev) { @@ -481,7 +489,8 @@ static int vbg_heartbeat_init(struct vbg_dev *gdev) } /** - * Cleanup hearbeat code, stop HB timer and disable host heartbeat checking. + * vbg_heartbeat_exit - Cleanup heartbeat code, stop HB timer and disable + * host heartbeat checking. * @gdev: The Guest extension device. */ static void vbg_heartbeat_exit(struct vbg_dev *gdev) @@ -493,11 +502,12 @@ static void vbg_heartbeat_exit(struct vbg_dev *gdev) } /** - * Applies a change to the bit usage tracker. - * Return: true if the mask changed, false if not. + * vbg_track_bit_usage - Applies a change to the bit usage tracker. * @tracker: The bit usage tracker. * @changed: The bits to change. * @previous: The previous value of the bits. + * + * Return: %true if the mask changed, %false if not. 
*/ static bool vbg_track_bit_usage(struct vbg_bit_usage_tracker *tracker, u32 changed, u32 previous) @@ -529,10 +539,12 @@ static bool vbg_track_bit_usage(struct vbg_bit_usage_tracker *tracker, } /** - * Init and termination worker for resetting the (host) event filter on the host - * Return: 0 or negative errno value. + * vbg_reset_host_event_filter - Init and termination worker for + * resetting the (host) event filter on the host * @gdev: The Guest extension device. * @fixed_events: Fixed events (init time). + * + * Return: %0 or negative errno value. */ static int vbg_reset_host_event_filter(struct vbg_dev *gdev, u32 fixed_events) @@ -556,12 +568,8 @@ static int vbg_reset_host_event_filter(struct vbg_dev *gdev, } /** - * Changes the event filter mask for the given session. - * - * This is called in response to VBG_IOCTL_CHANGE_FILTER_MASK as well as to - * do session cleanup. Takes the session mutex. - * - * Return: 0 or negative errno value. + * vbg_set_session_event_filter - Changes the event filter mask for the + * given session. * @gdev: The Guest extension device. * @session: The session. * @or_mask: The events to add. @@ -570,6 +578,11 @@ static int vbg_reset_host_event_filter(struct vbg_dev *gdev, * This tweaks the error handling so we perform * proper session cleanup even if the host * misbehaves. + * + * This is called in response to VBG_IOCTL_CHANGE_FILTER_MASK as well as to + * do session cleanup. Takes the session mutex. + * + * Return: 0 or negative errno value. */ static int vbg_set_session_event_filter(struct vbg_dev *gdev, struct vbg_session *session, @@ -637,9 +650,11 @@ out: } /** - * Init and termination worker for set guest capabilities to zero on the host. - * Return: 0 or negative errno value. + * vbg_reset_host_capabilities - Init and termination worker for set + * guest capabilities to zero on the host. * @gdev: The Guest extension device. + * + * Return: %0 or negative errno value. */ static int vbg_reset_host_capabilities(struct vbg_dev *gdev) { @@ -662,12 +677,14 @@ static int vbg_reset_host_capabilities(struct vbg_dev *gdev) } /** - * Set guest capabilities on the host. - * Must be called with gdev->session_mutex hold. - * Return: 0 or negative errno value. + * vbg_set_host_capabilities - Set guest capabilities on the host. * @gdev: The Guest extension device. * @session: The session. * @session_termination: Set if we're called by the session cleanup code. + * + * Must be called with gdev->session_mutex hold. + * + * Return: %0 or negative errno value. */ static int vbg_set_host_capabilities(struct vbg_dev *gdev, struct vbg_session *session, @@ -704,9 +721,8 @@ static int vbg_set_host_capabilities(struct vbg_dev *gdev, } /** - * Acquire (get exclusive access) guest capabilities for a session. - * Takes the session mutex. - * Return: 0 or negative errno value. + * vbg_acquire_session_capabilities - Acquire (get exclusive access) + * guest capabilities for a session. * @gdev: The Guest extension device. * @session: The session. * @flags: Flags (VBGL_IOC_AGC_FLAGS_XXX). @@ -716,6 +732,10 @@ static int vbg_set_host_capabilities(struct vbg_dev *gdev, * This tweaks the error handling so we perform * proper session cleanup even if the host * misbehaves. + * + * Takes the session mutex. + * + * Return: %0 or negative errno value. */ static int vbg_acquire_session_capabilities(struct vbg_dev *gdev, struct vbg_session *session, @@ -811,8 +831,8 @@ out: } /** - * Sets the guest capabilities for a session. Takes the session mutex. - * Return: 0 or negative errno value. 
+ * vbg_set_session_capabilities - Sets the guest capabilities for a + * session. Takes the session mutex. * @gdev: The Guest extension device. * @session: The session. * @or_mask: The capabilities to add. @@ -821,6 +841,8 @@ out: * This tweaks the error handling so we perform * proper session cleanup even if the host * misbehaves. + * + * Return: %0 or negative errno value. */ static int vbg_set_session_capabilities(struct vbg_dev *gdev, struct vbg_session *session, @@ -866,9 +888,10 @@ out: } /** - * vbg_query_host_version get the host feature mask and version information. - * Return: 0 or negative errno value. + * vbg_query_host_version - get the host feature mask and version information. * @gdev: The Guest extension device. + * + * Return: %0 or negative errno value. */ static int vbg_query_host_version(struct vbg_dev *gdev) { @@ -905,19 +928,18 @@ out: } /** - * Initializes the VBoxGuest device extension when the - * device driver is loaded. + * vbg_core_init - Initializes the VBoxGuest device extension when the + * device driver is loaded. + * @gdev: The Guest extension device. + * @fixed_events: Events that will be enabled upon init and no client + * will ever be allowed to mask. * * The native code locates the VMMDev on the PCI bus and retrieve * the MMIO and I/O port ranges, this function will take care of * mapping the MMIO memory (if present). Upon successful return * the native code should set up the interrupt handler. * - * Return: 0 or negative errno value. - * - * @gdev: The Guest extension device. - * @fixed_events: Events that will be enabled upon init and no client - * will ever be allowed to mask. + * Return: %0 or negative errno value. */ int vbg_core_init(struct vbg_dev *gdev, u32 fixed_events) { @@ -1017,11 +1039,12 @@ err_free_reqs: } /** - * Call this on exit to clean-up vboxguest-core managed resources. + * vbg_core_exit - Call this on exit to clean-up vboxguest-core managed + * resources. + * @gdev: The Guest extension device. * * The native code should call this before the driver is loaded, * but don't call this on shutdown. - * @gdev: The Guest extension device. */ void vbg_core_exit(struct vbg_dev *gdev) { @@ -1046,12 +1069,13 @@ void vbg_core_exit(struct vbg_dev *gdev) } /** - * Creates a VBoxGuest user session. + * vbg_core_open_session - Creates a VBoxGuest user session. + * @gdev: The Guest extension device. + * @requestor: VMMDEV_REQUESTOR_* flags * * vboxguest_linux.c calls this when userspace opens the char-device. + * * Return: A pointer to the new session or an ERR_PTR on error. - * @gdev: The Guest extension device. - * @requestor: VMMDEV_REQUESTOR_* flags */ struct vbg_session *vbg_core_open_session(struct vbg_dev *gdev, u32 requestor) { @@ -1068,7 +1092,7 @@ struct vbg_session *vbg_core_open_session(struct vbg_dev *gdev, u32 requestor) } /** - * Closes a VBoxGuest session. + * vbg_core_close_session - Closes a VBoxGuest session. * @session: The session to close (and free). */ void vbg_core_close_session(struct vbg_session *session) @@ -1250,11 +1274,13 @@ static int vbg_ioctl_interrupt_all_wait_events(struct vbg_dev *gdev, } /** - * Checks if the VMM request is allowed in the context of the given session. - * Return: 0 or negative errno value. + * vbg_req_allowed - Checks if the VMM request is allowed in the + * context of the given session. * @gdev: The Guest extension device. * @session: The calling session. * @req: The request. + * + * Return: %0 or negative errno value. 
*/ static int vbg_req_allowed(struct vbg_dev *gdev, struct vbg_session *session, const struct vmmdev_request_header *req) @@ -1670,11 +1696,12 @@ static int vbg_ioctl_write_core_dump(struct vbg_dev *gdev, } /** - * Common IOCtl for user to kernel communication. - * Return: 0 or negative errno value. + * vbg_core_ioctl - Common IOCtl for user to kernel communication. * @session: The client session. * @req: The requested function. * @data: The i/o data buffer, minimum size sizeof(struct vbg_ioctl_hdr). + * + * Return: %0 or negative errno value. */ int vbg_core_ioctl(struct vbg_session *session, unsigned int req, void *data) { @@ -1744,11 +1771,12 @@ int vbg_core_ioctl(struct vbg_session *session, unsigned int req, void *data) } /** - * Report guest supported mouse-features to the host. + * vbg_core_set_mouse_status - Report guest supported mouse-features to the host. * - * Return: 0 or negative errno value. * @gdev: The Guest extension device. * @features: The set of features to report to the host. + * + * Return: %0 or negative errno value. */ int vbg_core_set_mouse_status(struct vbg_dev *gdev, u32 features) { @@ -1772,7 +1800,7 @@ int vbg_core_set_mouse_status(struct vbg_dev *gdev, u32 features) return vbg_status_code_to_errno(rc); } -/** Core interrupt service routine. */ +/* Core interrupt service routine. */ irqreturn_t vbg_core_isr(int irq, void *dev_id) { struct vbg_dev *gdev = dev_id; diff --git a/drivers/virt/vboxguest/vboxguest_linux.c b/drivers/virt/vboxguest/vboxguest_linux.c index c47e62dc55da..8c92ea5b7305 100644 --- a/drivers/virt/vboxguest/vboxguest_linux.c +++ b/drivers/virt/vboxguest/vboxguest_linux.c @@ -81,10 +81,11 @@ static int vbg_misc_device_user_open(struct inode *inode, struct file *filp) } /** - * Close device. - * Return: 0 on success, negated errno on failure. + * vbg_misc_device_close - Close device. * @inode: Pointer to inode info structure. * @filp: Associated file pointer. + * + * Return: %0 on success, negated errno on failure. */ static int vbg_misc_device_close(struct inode *inode, struct file *filp) { @@ -94,11 +95,12 @@ static int vbg_misc_device_close(struct inode *inode, struct file *filp) } /** - * Device I/O Control entry point. - * Return: 0 on success, negated errno on failure. + * vbg_misc_device_ioctl - Device I/O Control entry point. * @filp: Associated file pointer. * @req: The request specified to ioctl(). * @arg: The argument specified to ioctl(). + * + * Return: %0 on success, negated errno on failure. */ static long vbg_misc_device_ioctl(struct file *filp, unsigned int req, unsigned long arg) @@ -173,7 +175,7 @@ out: return ret; } -/** The file_operations structures. */ +/* The file_operations structures. */ static const struct file_operations vbg_misc_device_fops = { .owner = THIS_MODULE, .open = vbg_misc_device_open, @@ -193,7 +195,7 @@ static const struct file_operations vbg_misc_device_user_fops = { #endif }; -/** +/* * Called when the input device is first opened. * * Sets up absolute mouse reporting. @@ -206,7 +208,7 @@ static int vbg_input_open(struct input_dev *input) return vbg_core_set_mouse_status(gdev, feat); } -/** +/* * Called if all open handles to the input device are closed. * * Disables absolute reporting. @@ -218,7 +220,7 @@ static void vbg_input_close(struct input_dev *input) vbg_core_set_mouse_status(gdev, 0); } -/** +/* * Creates the kernel input device. * * Return: 0 on success, negated errno on failure. 
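The vboxguest hunks above all apply one editing pattern: comment blocks that are not real kernel-doc are demoted from /** to /*, and genuine kernel-doc blocks are reshaped so the function name plus a one-line summary comes first, the @parameter list follows, and locking notes and the Return: line move after the parameters. As a reference point only, a minimal sketch of the shape these hunks converge on (the function name and @flags parameter below are invented for illustration, not taken from the patch):

/**
 * vbg_example_worker - One-line summary of what the function does.
 * @gdev:   The Guest extension device.
 * @flags:  Illustrative parameter, not from the patch.
 *
 * Longer notes (locking rules, when the function is called) now sit after
 * the parameter block instead of before the function name.
 *
 * Return: %0 or negative errno value.
 */

The %0 markup on the Return: lines is kernel-doc's notation for highlighting constants in the generated documentation.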
@@ -277,7 +279,7 @@ static struct attribute *vbg_pci_attrs[] = { }; ATTRIBUTE_GROUPS(vbg_pci); -/** +/* * Does the PCI detection and init of the device. * * Return: 0 on success, negated errno on failure. @@ -453,7 +455,7 @@ void vbg_put_gdev(struct vbg_dev *gdev) } EXPORT_SYMBOL(vbg_put_gdev); -/** +/* * Callback for mouse events. * * This is called at the end of the ISR, after leaving the event spinlock, if diff --git a/drivers/virt/vboxguest/vboxguest_utils.c b/drivers/virt/vboxguest/vboxguest_utils.c index 8d195e3f8301..1c02b3c0d934 100644 --- a/drivers/virt/vboxguest/vboxguest_utils.c +++ b/drivers/virt/vboxguest/vboxguest_utils.c @@ -237,14 +237,16 @@ static int hgcm_call_preprocess_linaddr( } /** - * Preprocesses the HGCM call, validate parameters, alloc bounce buffers and - * figure out how much extra storage we need for page lists. - * Return: 0 or negative errno value. + * hgcm_call_preprocess - Preprocesses the HGCM call, validate parameters, + * alloc bounce buffers and figure out how much extra storage we need for + * page lists. * @src_parm: Pointer to source function call parameters * @parm_count: Number of function call parameters. * @bounce_bufs_ret: Where to return the allocated bouncebuffer array * @extra: Where to return the extra request space needed for * physical page lists. + * + * Return: %0 or negative errno value. */ static int hgcm_call_preprocess( const struct vmmdev_hgcm_function_parameter *src_parm, @@ -301,10 +303,11 @@ static int hgcm_call_preprocess( } /** - * Translates linear address types to page list direction flags. + * hgcm_call_linear_addr_type_to_pagelist_flags - Translates linear address + * types to page list direction flags. + * @type: The type. * * Return: page list flags. - * @type: The type. */ static u32 hgcm_call_linear_addr_type_to_pagelist_flags( enum vmmdev_hgcm_function_parameter_type type) @@ -369,7 +372,8 @@ static void hgcm_call_init_linaddr(struct vmmdev_hgcm_call *call, } /** - * Initializes the call request that we're sending to the host. + * hgcm_call_init_call - Initializes the call request that we're sending + * to the host. * @call: The call to initialize. * @client_id: The client ID of the caller. * @function: The function number of the function to call. @@ -425,7 +429,9 @@ static void hgcm_call_init_call( } /** - * Tries to cancel a pending HGCM call. + * hgcm_cancel_call - Tries to cancel a pending HGCM call. + * @gdev: The VBoxGuest device extension. + * @call: The call to cancel. * * Return: VBox status code */ @@ -459,13 +465,15 @@ static int hgcm_cancel_call(struct vbg_dev *gdev, struct vmmdev_hgcm_call *call) } /** - * Performs the call and completion wait. - * Return: 0 or negative errno value. + * vbg_hgcm_do_call - Performs the call and completion wait. * @gdev: The VBoxGuest device extension. * @call: The call to execute. * @timeout_ms: Timeout in ms. + * @interruptible: whether this call is interruptible * @leak_it: Where to return the leak it / free it, indicator. * Cancellation fun. + * + * Return: %0 or negative errno value. */ static int vbg_hgcm_do_call(struct vbg_dev *gdev, struct vmmdev_hgcm_call *call, u32 timeout_ms, bool interruptible, bool *leak_it) @@ -545,13 +553,14 @@ static int vbg_hgcm_do_call(struct vbg_dev *gdev, struct vmmdev_hgcm_call *call, } /** - * Copies the result of the call back to the caller info structure and user - * buffers. - * Return: 0 or negative errno value. + * hgcm_call_copy_back_result - Copies the result of the call back to + * the caller info structure and user buffers. 
* @call: HGCM call request. * @dst_parm: Pointer to function call parameters destination. * @parm_count: Number of function call parameters. * @bounce_bufs: The bouncebuffer array. + * + * Return: %0 or negative errno value. */ static int hgcm_call_copy_back_result( const struct vmmdev_hgcm_call *call, diff --git a/drivers/virt/vmgenid.c b/drivers/virt/vmgenid.c index a1c467a0e9f7..b67a28da4702 100644 --- a/drivers/virt/vmgenid.c +++ b/drivers/virt/vmgenid.c @@ -68,6 +68,7 @@ out: static void vmgenid_notify(struct acpi_device *device, u32 event) { struct vmgenid_state *state = acpi_driver_data(device); + char *envp[] = { "NEW_VMGENID=1", NULL }; u8 old_id[VMGENID_SIZE]; memcpy(old_id, state->this_id, sizeof(old_id)); @@ -75,6 +76,7 @@ static void vmgenid_notify(struct acpi_device *device, u32 event) if (!memcmp(old_id, state->this_id, sizeof(old_id))) return; add_vmfork_randomness(state->this_id, sizeof(state->this_id)); + kobject_uevent_env(&device->dev.kobj, KOBJ_CHANGE, envp); } static const struct acpi_device_id vmgenid_ids[] = { diff --git a/drivers/w1/masters/Kconfig b/drivers/w1/masters/Kconfig index ad316573288a..513c0b114337 100644 --- a/drivers/w1/masters/Kconfig +++ b/drivers/w1/masters/Kconfig @@ -5,6 +5,17 @@ menu "1-wire Bus Masters" +config W1_MASTER_AMD_AXI + tristate "AMD AXI 1-wire bus host" + help + Say Y here is you want to support the AMD AXI 1-wire IP core. + This driver makes use of the programmable logic IP to perform + correctly timed 1 wire transactions without relying on GPIO timing + through the kernel. + + This driver can also be built as a module. If so, the module will be + called amd_w1_axi. + config W1_MASTER_MATROX tristate "Matrox G400 transport layer for 1-wire" depends on PCI diff --git a/drivers/w1/masters/Makefile b/drivers/w1/masters/Makefile index c5d85a827e52..6c5a21f9b88c 100644 --- a/drivers/w1/masters/Makefile +++ b/drivers/w1/masters/Makefile @@ -3,6 +3,7 @@ # Makefile for 1-wire bus master drivers. # +obj-$(CONFIG_W1_MASTER_AMD_AXI) += amd_axi_w1.o obj-$(CONFIG_W1_MASTER_MATROX) += matrox_w1.o obj-$(CONFIG_W1_MASTER_DS2490) += ds2490.o obj-$(CONFIG_W1_MASTER_DS2482) += ds2482.o diff --git a/drivers/w1/masters/amd_axi_w1.c b/drivers/w1/masters/amd_axi_w1.c new file mode 100644 index 000000000000..4d3a68ca9263 --- /dev/null +++ b/drivers/w1/masters/amd_axi_w1.c @@ -0,0 +1,396 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * amd_axi_w1 - AMD 1Wire programmable logic bus host driver + * + * Copyright (C) 2022-2023 Advanced Micro Devices, Inc. All Rights Reserved. 
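Stepping back to the drivers/virt/vmgenid.c hunk above: the driver now emits a KOBJ_CHANGE uevent carrying NEW_VMGENID=1 whenever the ACPI notification reports a changed generation ID, in addition to feeding add_vmfork_randomness(). A rough userspace sketch of how a daemon might watch for that event is below; it is illustrative only (a raw NETLINK_KOBJECT_UEVENT listener with no DEVPATH filtering and minimal error handling) and is not part of the patch.

#include <linux/netlink.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];
	struct sockaddr_nl addr = {
		.nl_family = AF_NETLINK,
		.nl_groups = 1,		/* kernel uevent multicast group */
	};
	int fd = socket(AF_NETLINK, SOCK_RAW | SOCK_CLOEXEC, NETLINK_KOBJECT_UEVENT);

	if (fd < 0 || bind(fd, (struct sockaddr *)&addr, sizeof(addr)))
		return 1;

	for (;;) {
		ssize_t len = recv(fd, buf, sizeof(buf) - 1, 0);

		if (len <= 0)
			continue;
		buf[len] = '\0';
		/* The payload is a series of NUL-separated "KEY=value" strings */
		for (char *p = buf; p < buf + len; p += strlen(p) + 1) {
			if (!strcmp(p, "NEW_VMGENID=1"))
				printf("VM generation changed, refresh cached secrets\n");
		}
	}
}

A udev rule keyed on the same environment variable would be an alternative to listening on the netlink socket directly.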
+ */ + +#include <linux/atomic.h> +#include <linux/bitfield.h> +#include <linux/clk.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/jiffies.h> +#include <linux/kernel.h> +#include <linux/mod_devicetable.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/types.h> +#include <linux/wait.h> + +#include <linux/w1.h> + +/* 1-wire AMD IP definition */ +#define AXIW1_IPID 0x10ee4453 +/* Registers offset */ +#define AXIW1_INST_REG 0x0 +#define AXIW1_CTRL_REG 0x4 +#define AXIW1_IRQE_REG 0x8 +#define AXIW1_STAT_REG 0xC +#define AXIW1_DATA_REG 0x10 +#define AXIW1_IPVER_REG 0x18 +#define AXIW1_IPID_REG 0x1C +/* Instructions */ +#define AXIW1_INITPRES 0x0800 +#define AXIW1_READBIT 0x0C00 +#define AXIW1_WRITEBIT 0x0E00 +#define AXIW1_READBYTE 0x0D00 +#define AXIW1_WRITEBYTE 0x0F00 +/* Status flag masks */ +#define AXIW1_DONE BIT(0) +#define AXIW1_READY BIT(4) +#define AXIW1_PRESENCE BIT(31) +#define AXIW1_MAJORVER_MASK GENMASK(23, 8) +#define AXIW1_MINORVER_MASK GENMASK(7, 0) +/* Control flag */ +#define AXIW1_GO BIT(0) +#define AXI_CLEAR 0 +#define AXI_RESET BIT(31) +#define AXIW1_READDATA BIT(0) +/* Interrupt Enable */ +#define AXIW1_READY_IRQ_EN BIT(4) +#define AXIW1_DONE_IRQ_EN BIT(0) + +#define AXIW1_TIMEOUT msecs_to_jiffies(100) + +#define DRIVER_NAME "amd_axi_w1" + +struct amd_axi_w1_local { + struct device *dev; + void __iomem *base_addr; + int irq; + atomic_t flag; /* Set on IRQ, cleared once serviced */ + wait_queue_head_t wait_queue; + struct w1_bus_master bus_host; +}; + +/** + * amd_axi_w1_wait_irq_interruptible_timeout() - Wait for IRQ with timeout. + * + * @amd_axi_w1_local: Pointer to device structure + * @IRQ: IRQ channel to wait on + * + * Return: %0 - OK, %-EINTR - Interrupted, %-EBUSY - Timed out + */ +static int amd_axi_w1_wait_irq_interruptible_timeout(struct amd_axi_w1_local *amd_axi_w1_local, + u32 IRQ) +{ + int ret; + + /* Enable the IRQ requested and wait for flag to indicate it's been triggered */ + iowrite32(IRQ, amd_axi_w1_local->base_addr + AXIW1_IRQE_REG); + ret = wait_event_interruptible_timeout(amd_axi_w1_local->wait_queue, + atomic_read(&amd_axi_w1_local->flag) != 0, + AXIW1_TIMEOUT); + if (ret < 0) { + dev_err(amd_axi_w1_local->dev, "Wait IRQ Interrupted\n"); + return -EINTR; + } + + if (!ret) { + dev_err(amd_axi_w1_local->dev, "Wait IRQ Timeout\n"); + return -EBUSY; + } + + atomic_set(&amd_axi_w1_local->flag, 0); + return 0; +} + +/** + * amd_axi_w1_touch_bit() - Performs the touch-bit function - write a 0 or 1 and reads the level. + * + * @data: Pointer to device structure + * @bit: The level to write + * + * Return: The level read + */ +static u8 amd_axi_w1_touch_bit(void *data, u8 bit) +{ + struct amd_axi_w1_local *amd_axi_w1_local = data; + u8 val = 0; + int rc; + + /* Wait for READY signal to be 1 to ensure 1-wire IP is ready */ + while ((ioread32(amd_axi_w1_local->base_addr + AXIW1_STAT_REG) & AXIW1_READY) == 0) { + rc = amd_axi_w1_wait_irq_interruptible_timeout(amd_axi_w1_local, + AXIW1_READY_IRQ_EN); + if (rc < 0) + return 1; /* Callee doesn't test for error. Return inactive bus state */ + } + + if (bit) + /* Read. Write read Bit command in register 0 */ + iowrite32(AXIW1_READBIT, amd_axi_w1_local->base_addr + AXIW1_INST_REG); + else + /* Write. 
Write tx Bit command in instruction register with bit to transmit */ + iowrite32(AXIW1_WRITEBIT + (bit & 0x01), + amd_axi_w1_local->base_addr + AXIW1_INST_REG); + + /* Write Go signal and clear control reset signal in control register */ + iowrite32(AXIW1_GO, amd_axi_w1_local->base_addr + AXIW1_CTRL_REG); + + /* Wait for done signal to be 1 */ + while ((ioread32(amd_axi_w1_local->base_addr + AXIW1_STAT_REG) & AXIW1_DONE) != 1) { + rc = amd_axi_w1_wait_irq_interruptible_timeout(amd_axi_w1_local, AXIW1_DONE_IRQ_EN); + if (rc < 0) + return 1; /* Callee doesn't test for error. Return inactive bus state */ + } + + /* If read, Retrieve data from register */ + if (bit) + val = (u8)(ioread32(amd_axi_w1_local->base_addr + AXIW1_DATA_REG) & AXIW1_READDATA); + + /* Clear Go signal in register 1 */ + iowrite32(AXI_CLEAR, amd_axi_w1_local->base_addr + AXIW1_CTRL_REG); + + return val; +} + +/** + * amd_axi_w1_read_byte - Performs the read byte function. + * + * @data: Pointer to device structure + * Return: The value read + */ +static u8 amd_axi_w1_read_byte(void *data) +{ + struct amd_axi_w1_local *amd_axi_w1_local = data; + u8 val = 0; + int rc; + + /* Wait for READY signal to be 1 to ensure 1-wire IP is ready */ + while ((ioread32(amd_axi_w1_local->base_addr + AXIW1_STAT_REG) & AXIW1_READY) == 0) { + rc = amd_axi_w1_wait_irq_interruptible_timeout(amd_axi_w1_local, + AXIW1_READY_IRQ_EN); + if (rc < 0) + return 0xFF; /* Return inactive bus state */ + } + + /* Write read Byte command in instruction register*/ + iowrite32(AXIW1_READBYTE, amd_axi_w1_local->base_addr + AXIW1_INST_REG); + + /* Write Go signal and clear control reset signal in control register */ + iowrite32(AXIW1_GO, amd_axi_w1_local->base_addr + AXIW1_CTRL_REG); + + /* Wait for done signal to be 1 */ + while ((ioread32(amd_axi_w1_local->base_addr + AXIW1_STAT_REG) & AXIW1_DONE) != 1) { + rc = amd_axi_w1_wait_irq_interruptible_timeout(amd_axi_w1_local, AXIW1_DONE_IRQ_EN); + if (rc < 0) + return 0xFF; /* Return inactive bus state */ + } + + /* Retrieve LSB bit in data register to get RX byte */ + val = (u8)(ioread32(amd_axi_w1_local->base_addr + AXIW1_DATA_REG) & 0x000000FF); + + /* Clear Go signal in control register */ + iowrite32(AXI_CLEAR, amd_axi_w1_local->base_addr + AXIW1_CTRL_REG); + + return val; +} + +/** + * amd_axi_w1_write_byte - Performs the write byte function. 
+ * + * @data: The ds2482 channel pointer + * @val: The value to write + */ +static void amd_axi_w1_write_byte(void *data, u8 val) +{ + struct amd_axi_w1_local *amd_axi_w1_local = data; + int rc; + + /* Wait for READY signal to be 1 to ensure 1-wire IP is ready */ + while ((ioread32(amd_axi_w1_local->base_addr + AXIW1_STAT_REG) & AXIW1_READY) == 0) { + rc = amd_axi_w1_wait_irq_interruptible_timeout(amd_axi_w1_local, + AXIW1_READY_IRQ_EN); + if (rc < 0) + return; + } + + /* Write tx Byte command in instruction register with bit to transmit */ + iowrite32(AXIW1_WRITEBYTE + val, amd_axi_w1_local->base_addr + AXIW1_INST_REG); + + /* Write Go signal and clear control reset signal in register 1 */ + iowrite32(AXIW1_GO, amd_axi_w1_local->base_addr + AXIW1_CTRL_REG); + + /* Wait for done signal to be 1 */ + while ((ioread32(amd_axi_w1_local->base_addr + AXIW1_STAT_REG) & AXIW1_DONE) != 1) { + rc = amd_axi_w1_wait_irq_interruptible_timeout(amd_axi_w1_local, + AXIW1_DONE_IRQ_EN); + if (rc < 0) + return; + } + + /* Clear Go signal in control register */ + iowrite32(AXI_CLEAR, amd_axi_w1_local->base_addr + AXIW1_CTRL_REG); +} + +/** + * amd_axi_w1_reset_bus() - Issues a reset bus sequence. + * + * @data: the bus host data struct + * Return: 0=Device present, 1=No device present or error + */ +static u8 amd_axi_w1_reset_bus(void *data) +{ + struct amd_axi_w1_local *amd_axi_w1_local = data; + u8 val = 0; + int rc; + + /* Reset 1-wire Axi IP */ + iowrite32(AXI_RESET, amd_axi_w1_local->base_addr + AXIW1_CTRL_REG); + + /* Wait for READY signal to be 1 to ensure 1-wire IP is ready */ + while ((ioread32(amd_axi_w1_local->base_addr + AXIW1_STAT_REG) & AXIW1_READY) == 0) { + rc = amd_axi_w1_wait_irq_interruptible_timeout(amd_axi_w1_local, + AXIW1_READY_IRQ_EN); + if (rc < 0) + return 1; /* Something went wrong with the hardware */ + } + /* Write Initialization command in instruction register */ + iowrite32(AXIW1_INITPRES, amd_axi_w1_local->base_addr + AXIW1_INST_REG); + + /* Write Go signal and clear control reset signal in register 1 */ + iowrite32(AXIW1_GO, amd_axi_w1_local->base_addr + AXIW1_CTRL_REG); + + /* Wait for done signal to be 1 */ + while ((ioread32(amd_axi_w1_local->base_addr + AXIW1_STAT_REG) & AXIW1_DONE) != 1) { + rc = amd_axi_w1_wait_irq_interruptible_timeout(amd_axi_w1_local, AXIW1_DONE_IRQ_EN); + if (rc < 0) + return 1; /* Something went wrong with the hardware */ + } + /* Retrieve MSB bit in status register to get failure bit */ + if ((ioread32(amd_axi_w1_local->base_addr + AXIW1_STAT_REG) & AXIW1_PRESENCE) != 0) + val = 1; + + /* Clear Go signal in control register */ + iowrite32(AXI_CLEAR, amd_axi_w1_local->base_addr + AXIW1_CTRL_REG); + + return val; +} + +/* Reset the 1-wire AXI IP. 
Put the IP in reset state and clear registers */ +static void amd_axi_w1_reset(struct amd_axi_w1_local *amd_axi_w1_local) +{ + iowrite32(AXI_RESET, amd_axi_w1_local->base_addr + AXIW1_CTRL_REG); + iowrite32(AXI_CLEAR, amd_axi_w1_local->base_addr + AXIW1_INST_REG); + iowrite32(AXI_CLEAR, amd_axi_w1_local->base_addr + AXIW1_IRQE_REG); + iowrite32(AXI_CLEAR, amd_axi_w1_local->base_addr + AXIW1_STAT_REG); + iowrite32(AXI_CLEAR, amd_axi_w1_local->base_addr + AXIW1_DATA_REG); +} + +static irqreturn_t amd_axi_w1_irq(int irq, void *lp) +{ + struct amd_axi_w1_local *amd_axi_w1_local = lp; + + /* Reset interrupt trigger */ + iowrite32(AXI_CLEAR, amd_axi_w1_local->base_addr + AXIW1_IRQE_REG); + + atomic_set(&amd_axi_w1_local->flag, 1); + wake_up_interruptible(&amd_axi_w1_local->wait_queue); + + return IRQ_HANDLED; +} + +static int amd_axi_w1_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct amd_axi_w1_local *lp; + struct clk *clk; + u32 ver_major, ver_minor; + int val, rc = 0; + + lp = devm_kzalloc(dev, sizeof(*lp), GFP_KERNEL); + if (!lp) + return -ENOMEM; + + lp->dev = dev; + lp->base_addr = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(lp->base_addr)) + return PTR_ERR(lp->base_addr); + + lp->irq = platform_get_irq(pdev, 0); + if (lp->irq < 0) + return lp->irq; + + rc = devm_request_irq(dev, lp->irq, &amd_axi_w1_irq, IRQF_TRIGGER_HIGH, DRIVER_NAME, lp); + if (rc) + return rc; + + /* Initialize wait queue and flag */ + init_waitqueue_head(&lp->wait_queue); + + clk = devm_clk_get_enabled(dev, NULL); + if (IS_ERR(clk)) + return PTR_ERR(clk); + + /* Verify IP presence in HW */ + if (ioread32(lp->base_addr + AXIW1_IPID_REG) != AXIW1_IPID) { + dev_err(dev, "AMD 1-wire IP not detected in hardware\n"); + return -ENODEV; + } + + /* + * Allow for future driver expansion supporting new hardware features + * This driver currently only supports hardware 1.x, but include logic + * to detect if a potentially incompatible future version is used + * by reading major version ID. It is highly undesirable for new IP versions + * to break the API, but this code will at least allow for graceful failure + * should that happen. 
Future new features can be enabled by hardware + * incrementing the minor version and augmenting the driver to detect capability + * using the minor version number + */ + val = ioread32(lp->base_addr + AXIW1_IPVER_REG); + ver_major = FIELD_GET(AXIW1_MAJORVER_MASK, val); + ver_minor = FIELD_GET(AXIW1_MINORVER_MASK, val); + + if (ver_major != 1) { + dev_err(dev, "AMD AXI W1 host version %u.%u is not supported by this driver", + ver_major, ver_minor); + return -ENODEV; + } + + lp->bus_host.data = lp; + lp->bus_host.touch_bit = amd_axi_w1_touch_bit; + lp->bus_host.read_byte = amd_axi_w1_read_byte; + lp->bus_host.write_byte = amd_axi_w1_write_byte; + lp->bus_host.reset_bus = amd_axi_w1_reset_bus; + + amd_axi_w1_reset(lp); + + platform_set_drvdata(pdev, lp); + rc = w1_add_master_device(&lp->bus_host); + if (rc) { + dev_err(dev, "Could not add host device\n"); + return rc; + } + + return 0; +} + +static void amd_axi_w1_remove(struct platform_device *pdev) +{ + struct amd_axi_w1_local *lp = platform_get_drvdata(pdev); + + w1_remove_master_device(&lp->bus_host); +} + +static const struct of_device_id amd_axi_w1_of_match[] = { + { .compatible = "amd,axi-1wire-host" }, + { /* end of list */ }, +}; +MODULE_DEVICE_TABLE(of, amd_axi_w1_of_match); + +static struct platform_driver amd_axi_w1_driver = { + .probe = amd_axi_w1_probe, + .remove_new = amd_axi_w1_remove, + .driver = { + .name = DRIVER_NAME, + .of_match_table = amd_axi_w1_of_match, + }, +}; +module_platform_driver(amd_axi_w1_driver); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Kris Chaplin <kris.chaplin@amd.com>"); +MODULE_DESCRIPTION("Driver for AMD AXI 1 Wire IP core"); diff --git a/drivers/w1/masters/ds2490.c b/drivers/w1/masters/ds2490.c index 5f5b97e24700..e1cac0730cbb 100644 --- a/drivers/w1/masters/ds2490.c +++ b/drivers/w1/masters/ds2490.c @@ -98,6 +98,8 @@ #define ST_EPOF 0x80 /* Status transfer size, 16 bytes status, 16 byte result flags */ #define ST_SIZE 0x20 +/* 1-wire data i/o fifo size, 128 bytes */ +#define FIFO_SIZE 0x80 /* Result Register flags */ #define RR_DETECT 0xA5 /* New device detected */ @@ -614,14 +616,11 @@ static int ds_read_byte(struct ds_device *dev, u8 *byte) return 0; } -static int ds_read_block(struct ds_device *dev, u8 *buf, int len) +static int read_block_chunk(struct ds_device *dev, u8 *buf, int len) { struct ds_status st; int err; - if (len > 64*1024) - return -E2BIG; - memset(buf, 0xFF, len); err = ds_send_data(dev, buf, len); @@ -640,6 +639,24 @@ static int ds_read_block(struct ds_device *dev, u8 *buf, int len) return err; } +static int ds_read_block(struct ds_device *dev, u8 *buf, int len) +{ + int err, to_read, rem = len; + + if (len > 64 * 1024) + return -E2BIG; + + do { + to_read = rem <= FIFO_SIZE ? 
rem : FIFO_SIZE; + err = read_block_chunk(dev, &buf[len - rem], to_read); + if (err < 0) + return err; + rem -= to_read; + } while (rem); + + return err; +} + static int ds_write_block(struct ds_device *dev, u8 *buf, int len) { int err; diff --git a/drivers/w1/masters/w1-gpio.c b/drivers/w1/masters/w1-gpio.c index e45acb6d916e..05c67038ed20 100644 --- a/drivers/w1/masters/w1-gpio.c +++ b/drivers/w1/masters/w1-gpio.c @@ -9,7 +9,6 @@ #include <linux/module.h> #include <linux/platform_device.h> #include <linux/slab.h> -#include <linux/w1-gpio.h> #include <linux/gpio/consumer.h> #include <linux/of_platform.h> #include <linux/err.h> @@ -18,27 +17,33 @@ #include <linux/w1.h> +struct w1_gpio_ddata { + struct gpio_desc *gpiod; + struct gpio_desc *pullup_gpiod; + unsigned int pullup_duration; +}; + static u8 w1_gpio_set_pullup(void *data, int delay) { - struct w1_gpio_platform_data *pdata = data; + struct w1_gpio_ddata *ddata = data; if (delay) { - pdata->pullup_duration = delay; + ddata->pullup_duration = delay; } else { - if (pdata->pullup_duration) { + if (ddata->pullup_duration) { /* * This will OVERRIDE open drain emulation and force-pull * the line high for some time. */ - gpiod_set_raw_value(pdata->gpiod, 1); - msleep(pdata->pullup_duration); + gpiod_set_raw_value(ddata->gpiod, 1); + msleep(ddata->pullup_duration); /* * This will simply set the line as input since we are doing * open drain emulation in the GPIO library. */ - gpiod_set_value(pdata->gpiod, 1); + gpiod_set_value(ddata->gpiod, 1); } - pdata->pullup_duration = 0; + ddata->pullup_duration = 0; } return 0; @@ -46,16 +51,16 @@ static u8 w1_gpio_set_pullup(void *data, int delay) static void w1_gpio_write_bit(void *data, u8 bit) { - struct w1_gpio_platform_data *pdata = data; + struct w1_gpio_ddata *ddata = data; - gpiod_set_value(pdata->gpiod, bit); + gpiod_set_value(ddata->gpiod, bit); } static u8 w1_gpio_read_bit(void *data) { - struct w1_gpio_platform_data *pdata = data; + struct w1_gpio_ddata *ddata = data; - return gpiod_get_value(pdata->gpiod) ? 1 : 0; + return gpiod_get_value(ddata->gpiod) ? 1 : 0; } #if defined(CONFIG_OF) @@ -69,58 +74,48 @@ MODULE_DEVICE_TABLE(of, w1_gpio_dt_ids); static int w1_gpio_probe(struct platform_device *pdev) { struct w1_bus_master *master; - struct w1_gpio_platform_data *pdata; + struct w1_gpio_ddata *ddata; struct device *dev = &pdev->dev; struct device_node *np = dev->of_node; /* Enforce open drain mode by default */ enum gpiod_flags gflags = GPIOD_OUT_LOW_OPEN_DRAIN; int err; - if (of_have_populated_dt()) { - pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); - if (!pdata) - return -ENOMEM; - - /* - * This parameter means that something else than the gpiolib has - * already set the line into open drain mode, so we should just - * driver it high/low like we are in full control of the line and - * open drain will happen transparently. - */ - if (of_property_present(np, "linux,open-drain")) - gflags = GPIOD_OUT_LOW; - - pdev->dev.platform_data = pdata; - } - pdata = dev_get_platdata(dev); + ddata = devm_kzalloc(&pdev->dev, sizeof(*ddata), GFP_KERNEL); + if (!ddata) + return -ENOMEM; - if (!pdata) { - dev_err(dev, "No configuration data\n"); - return -ENXIO; - } + /* + * This parameter means that something else than the gpiolib has + * already set the line into open drain mode, so we should just + * driver it high/low like we are in full control of the line and + * open drain will happen transparently. 
+ */ + if (of_property_present(np, "linux,open-drain")) + gflags = GPIOD_OUT_LOW; master = devm_kzalloc(dev, sizeof(struct w1_bus_master), GFP_KERNEL); if (!master) return -ENOMEM; - pdata->gpiod = devm_gpiod_get_index(dev, NULL, 0, gflags); - if (IS_ERR(pdata->gpiod)) { + ddata->gpiod = devm_gpiod_get_index(dev, NULL, 0, gflags); + if (IS_ERR(ddata->gpiod)) { dev_err(dev, "gpio_request (pin) failed\n"); - return PTR_ERR(pdata->gpiod); + return PTR_ERR(ddata->gpiod); } - pdata->pullup_gpiod = + ddata->pullup_gpiod = devm_gpiod_get_index_optional(dev, NULL, 1, GPIOD_OUT_LOW); - if (IS_ERR(pdata->pullup_gpiod)) { + if (IS_ERR(ddata->pullup_gpiod)) { dev_err(dev, "gpio_request_one " "(ext_pullup_enable_pin) failed\n"); - return PTR_ERR(pdata->pullup_gpiod); + return PTR_ERR(ddata->pullup_gpiod); } - master->data = pdata; + master->data = ddata; master->read_bit = w1_gpio_read_bit; - gpiod_direction_output(pdata->gpiod, 1); + gpiod_direction_output(ddata->gpiod, 1); master->write_bit = w1_gpio_write_bit; /* @@ -138,11 +133,8 @@ static int w1_gpio_probe(struct platform_device *pdev) return err; } - if (pdata->enable_external_pullup) - pdata->enable_external_pullup(1); - - if (pdata->pullup_gpiod) - gpiod_set_value(pdata->pullup_gpiod, 1); + if (ddata->pullup_gpiod) + gpiod_set_value(ddata->pullup_gpiod, 1); platform_set_drvdata(pdev, master); @@ -152,45 +144,19 @@ static int w1_gpio_probe(struct platform_device *pdev) static int w1_gpio_remove(struct platform_device *pdev) { struct w1_bus_master *master = platform_get_drvdata(pdev); - struct w1_gpio_platform_data *pdata = dev_get_platdata(&pdev->dev); - - if (pdata->enable_external_pullup) - pdata->enable_external_pullup(0); + struct w1_gpio_ddata *ddata = master->data; - if (pdata->pullup_gpiod) - gpiod_set_value(pdata->pullup_gpiod, 0); + if (ddata->pullup_gpiod) + gpiod_set_value(ddata->pullup_gpiod, 0); w1_remove_master_device(master); return 0; } -static int __maybe_unused w1_gpio_suspend(struct device *dev) -{ - struct w1_gpio_platform_data *pdata = dev_get_platdata(dev); - - if (pdata->enable_external_pullup) - pdata->enable_external_pullup(0); - - return 0; -} - -static int __maybe_unused w1_gpio_resume(struct device *dev) -{ - struct w1_gpio_platform_data *pdata = dev_get_platdata(dev); - - if (pdata->enable_external_pullup) - pdata->enable_external_pullup(1); - - return 0; -} - -static SIMPLE_DEV_PM_OPS(w1_gpio_pm_ops, w1_gpio_suspend, w1_gpio_resume); - static struct platform_driver w1_gpio_driver = { .driver = { .name = "w1-gpio", - .pm = &w1_gpio_pm_ops, .of_match_table = of_match_ptr(w1_gpio_dt_ids), }, .probe = w1_gpio_probe, diff --git a/drivers/w1/slaves/w1_ds2433.c b/drivers/w1/slaves/w1_ds2433.c index 9f21fd98f799..250b7f7ec429 100644 --- a/drivers/w1/slaves/w1_ds2433.c +++ b/drivers/w1/slaves/w1_ds2433.c @@ -1,8 +1,9 @@ // SPDX-License-Identifier: GPL-2.0-only /* - * w1_ds2433.c - w1 family 23 (DS2433) driver + * w1_ds2433.c - w1 family 23 (DS2433) & 43 (DS28EC20) eeprom driver * * Copyright (c) 2005 Ben Gardner <bgardner@wabtec.com> + * Copyright (c) 2023 Marc Ferland <marc.ferland@sonatest.com> */ #include <linux/kernel.h> @@ -23,23 +24,45 @@ #include <linux/w1.h> #define W1_EEPROM_DS2433 0x23 +#define W1_EEPROM_DS28EC20 0x43 + +#define W1_EEPROM_DS2433_SIZE 512 +#define W1_EEPROM_DS28EC20_SIZE 2560 -#define W1_EEPROM_SIZE 512 -#define W1_PAGE_COUNT 16 #define W1_PAGE_SIZE 32 #define W1_PAGE_BITS 5 #define W1_PAGE_MASK 0x1F - -#define W1_F23_TIME 300 +#define W1_VALIDCRC_MAX 96 #define W1_F23_READ_EEPROM 0xF0 #define 
W1_F23_WRITE_SCRATCH 0x0F #define W1_F23_READ_SCRATCH 0xAA #define W1_F23_COPY_SCRATCH 0x55 +struct ds2433_config { + size_t eeprom_size; /* eeprom size in bytes */ + unsigned int page_count; /* number of 256 bits pages */ + unsigned int tprog; /* time in ms for page programming */ +}; + +static const struct ds2433_config config_f23 = { + .eeprom_size = W1_EEPROM_DS2433_SIZE, + .page_count = 16, + .tprog = 5, +}; + +static const struct ds2433_config config_f43 = { + .eeprom_size = W1_EEPROM_DS28EC20_SIZE, + .page_count = 80, + .tprog = 10, +}; + struct w1_f23_data { - u8 memory[W1_EEPROM_SIZE]; - u32 validcrc; +#ifdef CONFIG_W1_SLAVE_DS2433_CRC + u8 *memory; + DECLARE_BITMAP(validcrc, W1_VALIDCRC_MAX); +#endif + const struct ds2433_config *cfg; }; /* @@ -64,11 +87,11 @@ static int w1_f23_refresh_block(struct w1_slave *sl, struct w1_f23_data *data, u8 wrbuf[3]; int off = block * W1_PAGE_SIZE; - if (data->validcrc & (1 << block)) + if (test_bit(block, data->validcrc)) return 0; if (w1_reset_select_slave(sl)) { - data->validcrc = 0; + bitmap_zero(data->validcrc, data->cfg->page_count); return -EIO; } @@ -80,7 +103,7 @@ static int w1_f23_refresh_block(struct w1_slave *sl, struct w1_f23_data *data, /* cache the block if the CRC is valid */ if (crc16(CRC16_INIT, &data->memory[off], W1_PAGE_SIZE) == CRC16_VALID) - data->validcrc |= (1 << block); + set_bit(block, data->validcrc); return 0; } @@ -98,7 +121,7 @@ static ssize_t eeprom_read(struct file *filp, struct kobject *kobj, u8 wrbuf[3]; #endif - count = w1_f23_fix_count(off, count, W1_EEPROM_SIZE); + count = w1_f23_fix_count(off, count, bin_attr->size); if (!count) return 0; @@ -153,9 +176,7 @@ out_up: */ static int w1_f23_write(struct w1_slave *sl, int addr, int len, const u8 *data) { -#ifdef CONFIG_W1_SLAVE_DS2433_CRC struct w1_f23_data *f23 = sl->family_data; -#endif u8 wrbuf[4]; u8 rdbuf[W1_PAGE_SIZE + 3]; u8 es = (addr + len - 1) & 0x1f; @@ -191,13 +212,13 @@ static int w1_f23_write(struct w1_slave *sl, int addr, int len, const u8 *data) wrbuf[3] = es; w1_write_block(sl->master, wrbuf, 4); - /* Sleep for 5 ms to wait for the write to complete */ - msleep(5); + /* Sleep for tprog ms to wait for the write to complete */ + msleep(f23->cfg->tprog); /* Reset the bus to wake up the EEPROM (this may not be needed) */ w1_reset_bus(sl->master); #ifdef CONFIG_W1_SLAVE_DS2433_CRC - f23->validcrc &= ~(1 << (addr >> W1_PAGE_BITS)); + clear_bit(addr >> W1_PAGE_BITS, f23->validcrc); #endif return 0; } @@ -209,7 +230,7 @@ static ssize_t eeprom_write(struct file *filp, struct kobject *kobj, struct w1_slave *sl = kobj_to_w1_slave(kobj); int addr, len, idx; - count = w1_f23_fix_count(off, count, W1_EEPROM_SIZE); + count = w1_f23_fix_count(off, count, bin_attr->size); if (!count) return 0; @@ -253,10 +274,22 @@ out_up: return count; } -static BIN_ATTR_RW(eeprom, W1_EEPROM_SIZE); +static struct bin_attribute bin_attr_f23_eeprom = { + .attr = { .name = "eeprom", .mode = 0644 }, + .read = eeprom_read, + .write = eeprom_write, + .size = W1_EEPROM_DS2433_SIZE, +}; + +static struct bin_attribute bin_attr_f43_eeprom = { + .attr = { .name = "eeprom", .mode = 0644 }, + .read = eeprom_read, + .write = eeprom_write, + .size = W1_EEPROM_DS28EC20_SIZE, +}; static struct bin_attribute *w1_f23_bin_attributes[] = { - &bin_attr_eeprom, + &bin_attr_f23_eeprom, NULL, }; @@ -269,26 +302,63 @@ static const struct attribute_group *w1_f23_groups[] = { NULL, }; +static struct bin_attribute *w1_f43_bin_attributes[] = { + &bin_attr_f43_eeprom, + NULL, +}; + +static const struct 
attribute_group w1_f43_group = { + .bin_attrs = w1_f43_bin_attributes, +}; + +static const struct attribute_group *w1_f43_groups[] = { + &w1_f43_group, + NULL, +}; + static int w1_f23_add_slave(struct w1_slave *sl) { -#ifdef CONFIG_W1_SLAVE_DS2433_CRC struct w1_f23_data *data; data = kzalloc(sizeof(struct w1_f23_data), GFP_KERNEL); if (!data) return -ENOMEM; + + switch (sl->family->fid) { + case W1_EEPROM_DS2433: + data->cfg = &config_f23; + break; + case W1_EEPROM_DS28EC20: + data->cfg = &config_f43; + break; + } + +#ifdef CONFIG_W1_SLAVE_DS2433_CRC + if (data->cfg->page_count > W1_VALIDCRC_MAX) { + dev_err(&sl->dev, "page count too big for crc bitmap\n"); + kfree(data); + return -EINVAL; + } + data->memory = kzalloc(data->cfg->eeprom_size, GFP_KERNEL); + if (!data->memory) { + kfree(data); + return -ENOMEM; + } + bitmap_zero(data->validcrc, data->cfg->page_count); +#endif /* CONFIG_W1_SLAVE_DS2433_CRC */ sl->family_data = data; -#endif /* CONFIG_W1_SLAVE_DS2433_CRC */ return 0; } static void w1_f23_remove_slave(struct w1_slave *sl) { -#ifdef CONFIG_W1_SLAVE_DS2433_CRC - kfree(sl->family_data); + struct w1_f23_data *data = sl->family_data; sl->family_data = NULL; -#endif /* CONFIG_W1_SLAVE_DS2433_CRC */ +#ifdef CONFIG_W1_SLAVE_DS2433_CRC + kfree(data->memory); +#endif /* CONFIG_W1_SLAVE_DS2433_CRC */ + kfree(data); } static const struct w1_family_ops w1_f23_fops = { @@ -297,13 +367,53 @@ static const struct w1_family_ops w1_f23_fops = { .groups = w1_f23_groups, }; +static const struct w1_family_ops w1_f43_fops = { + .add_slave = w1_f23_add_slave, + .remove_slave = w1_f23_remove_slave, + .groups = w1_f43_groups, +}; + static struct w1_family w1_family_23 = { .fid = W1_EEPROM_DS2433, .fops = &w1_f23_fops, }; -module_w1_family(w1_family_23); + +static struct w1_family w1_family_43 = { + .fid = W1_EEPROM_DS28EC20, + .fops = &w1_f43_fops, +}; + +static int __init w1_ds2433_init(void) +{ + int err; + + err = w1_register_family(&w1_family_23); + if (err) + return err; + + err = w1_register_family(&w1_family_43); + if (err) + goto err_43; + + return 0; + +err_43: + w1_unregister_family(&w1_family_23); + return err; +} + +static void __exit w1_ds2433_exit(void) +{ + w1_unregister_family(&w1_family_23); + w1_unregister_family(&w1_family_43); +} + +module_init(w1_ds2433_init); +module_exit(w1_ds2433_exit); MODULE_AUTHOR("Ben Gardner <bgardner@wabtec.com>"); -MODULE_DESCRIPTION("w1 family 23 driver for DS2433, 4kb EEPROM"); +MODULE_AUTHOR("Marc Ferland <marc.ferland@sonatest.com>"); +MODULE_DESCRIPTION("w1 family 23/43 driver for DS2433 (4kb) and DS28EC20 (20kb)"); MODULE_LICENSE("GPL"); MODULE_ALIAS("w1-family-" __stringify(W1_EEPROM_DS2433)); +MODULE_ALIAS("w1-family-" __stringify(W1_EEPROM_DS28EC20)); diff --git a/include/linux/cdx/cdx_bus.h b/include/linux/cdx/cdx_bus.h index 94ad2c9017c9..6355a36a3f81 100644 --- a/include/linux/cdx/cdx_bus.h +++ b/include/linux/cdx/cdx_bus.h @@ -113,6 +113,7 @@ struct cdx_controller { * @dev_num: Device number for this device * @res: array of MMIO region entries * @res_attr: resource binary attribute + * @debugfs_dir: debugfs directory for this device * @res_count: number of valid MMIO regions * @dma_mask: Default DMA mask * @flags: CDX device flags @@ -135,6 +136,8 @@ struct cdx_device { u8 bus_num; u8 dev_num; struct resource res[MAX_CDX_DEV_RESOURCES]; + struct bin_attribute *res_attr[MAX_CDX_DEV_RESOURCES]; + struct dentry *debugfs_dir; u8 res_count; u64 dma_mask; u16 flags; @@ -147,6 +150,15 @@ struct cdx_device { #define to_cdx_device(_dev) \ 
container_of(_dev, struct cdx_device, dev) +#define cdx_resource_start(dev, num) ((dev)->res[(num)].start) +#define cdx_resource_end(dev, num) ((dev)->res[(num)].end) +#define cdx_resource_flags(dev, num) ((dev)->res[(num)].flags) +#define cdx_resource_len(dev, num) \ + ((cdx_resource_start((dev), (num)) == 0 && \ + cdx_resource_end((dev), (num)) == \ + cdx_resource_start((dev), (num))) ? 0 : \ + (cdx_resource_end((dev), (num)) - \ + cdx_resource_start((dev), (num)) + 1)) /** * struct cdx_driver - CDX device driver * @driver: Generic device driver diff --git a/include/linux/coresight.h b/include/linux/coresight.h index a269fffaf991..a4cb7dd6ca23 100644 --- a/include/linux/coresight.h +++ b/include/linux/coresight.h @@ -64,6 +64,7 @@ enum coresight_dev_subtype_source { CORESIGHT_DEV_SUBTYPE_SOURCE_PROC, CORESIGHT_DEV_SUBTYPE_SOURCE_BUS, CORESIGHT_DEV_SUBTYPE_SOURCE_SOFTWARE, + CORESIGHT_DEV_SUBTYPE_SOURCE_TPDM, CORESIGHT_DEV_SUBTYPE_SOURCE_OTHERS, }; diff --git a/include/linux/dma-map-ops.h b/include/linux/dma-map-ops.h index f2fc203fb8a1..e401f824a007 100644 --- a/include/linux/dma-map-ops.h +++ b/include/linux/dma-map-ops.h @@ -443,10 +443,10 @@ static inline void arch_teardown_dma_ops(struct device *dev) #endif /* CONFIG_ARCH_HAS_TEARDOWN_DMA_OPS */ #ifdef CONFIG_DMA_API_DEBUG -void dma_debug_add_bus(struct bus_type *bus); +void dma_debug_add_bus(const struct bus_type *bus); void debug_dma_dump_mappings(struct device *dev); #else -static inline void dma_debug_add_bus(struct bus_type *bus) +static inline void dma_debug_add_bus(const struct bus_type *bus) { } static inline void debug_dma_dump_mappings(struct device *dev) diff --git a/include/linux/firmware/xlnx-zynqmp.h b/include/linux/firmware/xlnx-zynqmp.h index d1ea3898564c..9a7e52739251 100644 --- a/include/linux/firmware/xlnx-zynqmp.h +++ b/include/linux/firmware/xlnx-zynqmp.h @@ -32,6 +32,7 @@ #define PM_SIP_SVC 0xC2000000 /* PM API versions */ +#define PM_API_VERSION_1 1 #define PM_API_VERSION_2 2 #define PM_PINCTRL_PARAM_SET_VERSION 2 @@ -47,6 +48,9 @@ #define FAMILY_CODE_MASK GENMASK(27, 21) #define SUB_FAMILY_CODE_MASK GENMASK(20, 19) +#define API_ID_MASK GENMASK(7, 0) +#define MODULE_ID_MASK GENMASK(11, 8) + /* ATF only commands */ #define TF_A_PM_REGISTER_SGI 0xa04 #define PM_GET_TRUSTZONE_VERSION 0xa03 @@ -91,10 +95,18 @@ /* * Node IDs for the Error Events. 
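Returning to the cdx_resource_*() helpers added to include/linux/cdx/cdx_bus.h above: cdx_resource_len() folds the empty-region encoding (start == end == 0) into a length of 0, so callers can test the length alone. One plausible use, hedged and with the wrapper name and the choice of region 0 invented for illustration:

static void __iomem *cdx_map_region0(struct cdx_device *cdx_dev)
{
	resource_size_t len = cdx_resource_len(cdx_dev, 0);

	/* A zero length also covers the "no resource" encoding */
	if (!len || !(cdx_resource_flags(cdx_dev, 0) & IORESOURCE_MEM))
		return NULL;

	return ioremap(cdx_resource_start(cdx_dev, 0), len);
}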
*/ -#define EVENT_ERROR_PMC_ERR1 (0x28100000U) -#define EVENT_ERROR_PMC_ERR2 (0x28104000U) -#define EVENT_ERROR_PSM_ERR1 (0x28108000U) -#define EVENT_ERROR_PSM_ERR2 (0x2810C000U) +#define VERSAL_EVENT_ERROR_PMC_ERR1 (0x28100000U) +#define VERSAL_EVENT_ERROR_PMC_ERR2 (0x28104000U) +#define VERSAL_EVENT_ERROR_PSM_ERR1 (0x28108000U) +#define VERSAL_EVENT_ERROR_PSM_ERR2 (0x2810C000U) + +#define VERSAL_NET_EVENT_ERROR_PMC_ERR1 (0x28100000U) +#define VERSAL_NET_EVENT_ERROR_PMC_ERR2 (0x28104000U) +#define VERSAL_NET_EVENT_ERROR_PMC_ERR3 (0x28108000U) +#define VERSAL_NET_EVENT_ERROR_PSM_ERR1 (0x2810C000U) +#define VERSAL_NET_EVENT_ERROR_PSM_ERR2 (0x28110000U) +#define VERSAL_NET_EVENT_ERROR_PSM_ERR3 (0x28114000U) +#define VERSAL_NET_EVENT_ERROR_PSM_ERR4 (0x28118000U) /* ZynqMP SD tap delay tuning */ #define SD_ITAPDLY 0xFF180314 @@ -112,6 +124,12 @@ #define XPM_EVENT_ERROR_MASK_NOC_NCR BIT(13) #define XPM_EVENT_ERROR_MASK_NOC_CR BIT(12) +enum pm_module_id { + PM_MODULE_ID = 0x0, + XSEM_MODULE_ID = 0x3, + TF_A_MODULE_ID = 0xa, +}; + enum pm_api_cb_id { PM_INIT_SUSPEND_CB = 30, PM_ACKNOWLEDGE_CB = 31, @@ -119,6 +137,7 @@ enum pm_api_cb_id { }; enum pm_api_id { + PM_API_FEATURES = 0, PM_GET_API_VERSION = 1, PM_REGISTER_NOTIFIER = 5, PM_FORCE_POWERDOWN = 8, @@ -138,7 +157,6 @@ enum pm_api_id { PM_SECURE_SHA = 26, PM_PINCTRL_REQUEST = 28, PM_PINCTRL_RELEASE = 29, - PM_PINCTRL_GET_FUNCTION = 30, PM_PINCTRL_SET_FUNCTION = 31, PM_PINCTRL_CONFIG_PARAM_GET = 32, PM_PINCTRL_CONFIG_PARAM_SET = 33, @@ -149,8 +167,6 @@ enum pm_api_id { PM_CLOCK_GETSTATE = 38, PM_CLOCK_SETDIVIDER = 39, PM_CLOCK_GETDIVIDER = 40, - PM_CLOCK_SETRATE = 41, - PM_CLOCK_GETRATE = 42, PM_CLOCK_SETPARENT = 43, PM_CLOCK_GETPARENT = 44, PM_FPGA_READ = 46, @@ -161,7 +177,9 @@ enum pm_api_id { /* PMU-FW return status codes */ enum pm_ret_status { XST_PM_SUCCESS = 0, + XST_PM_INVALID_VERSION = 4, XST_PM_NO_FEATURE = 19, + XST_PM_INVALID_CRC = 301, XST_PM_INTERNAL = 2000, XST_PM_CONFLICT = 2001, XST_PM_NO_ACCESS = 2002, @@ -509,20 +527,18 @@ struct zynqmp_pm_query_data { u32 arg3; }; -int zynqmp_pm_invoke_fn(u32 pm_api_id, u32 arg0, u32 arg1, - u32 arg2, u32 arg3, u32 *ret_payload); +int zynqmp_pm_invoke_fn(u32 pm_api_id, u32 *ret_payload, u32 num_args, ...); #if IS_REACHABLE(CONFIG_ZYNQMP_FIRMWARE) int zynqmp_pm_get_api_version(u32 *version); int zynqmp_pm_get_chipid(u32 *idcode, u32 *version); +int zynqmp_pm_get_family_info(u32 *family, u32 *subfamily); int zynqmp_pm_query_data(struct zynqmp_pm_query_data qdata, u32 *out); int zynqmp_pm_clock_enable(u32 clock_id); int zynqmp_pm_clock_disable(u32 clock_id); int zynqmp_pm_clock_getstate(u32 clock_id, u32 *state); int zynqmp_pm_clock_setdivider(u32 clock_id, u32 divider); int zynqmp_pm_clock_getdivider(u32 clock_id, u32 *divider); -int zynqmp_pm_clock_setrate(u32 clock_id, u64 rate); -int zynqmp_pm_clock_getrate(u32 clock_id, u64 *rate); int zynqmp_pm_clock_setparent(u32 clock_id, u32 parent_id); int zynqmp_pm_clock_getparent(u32 clock_id, u32 *parent_id); int zynqmp_pm_set_pll_frac_mode(u32 clk_id, u32 mode); @@ -559,7 +575,6 @@ int zynqmp_pm_system_shutdown(const u32 type, const u32 subtype); int zynqmp_pm_set_boot_health_status(u32 value); int zynqmp_pm_pinctrl_request(const u32 pin); int zynqmp_pm_pinctrl_release(const u32 pin); -int zynqmp_pm_pinctrl_get_function(const u32 pin, u32 *id); int zynqmp_pm_pinctrl_set_function(const u32 pin, const u32 id); int zynqmp_pm_pinctrl_get_config(const u32 pin, const u32 param, u32 *value); @@ -596,6 +611,11 @@ static inline int 
zynqmp_pm_get_chipid(u32 *idcode, u32 *version) return -ENODEV; } +static inline int zynqmp_pm_get_family_info(u32 *family, u32 *subfamily) +{ + return -ENODEV; +} + static inline int zynqmp_pm_query_data(struct zynqmp_pm_query_data qdata, u32 *out) { @@ -627,16 +647,6 @@ static inline int zynqmp_pm_clock_getdivider(u32 clock_id, u32 *divider) return -ENODEV; } -static inline int zynqmp_pm_clock_setrate(u32 clock_id, u64 rate) -{ - return -ENODEV; -} - -static inline int zynqmp_pm_clock_getrate(u32 clock_id, u64 *rate) -{ - return -ENODEV; -} - static inline int zynqmp_pm_clock_setparent(u32 clock_id, u32 parent_id) { return -ENODEV; @@ -806,11 +816,6 @@ static inline int zynqmp_pm_pinctrl_release(const u32 pin) return -ENODEV; } -static inline int zynqmp_pm_pinctrl_get_function(const u32 pin, u32 *id) -{ - return -ENODEV; -} - static inline int zynqmp_pm_is_function_supported(const u32 api_id, const u32 id) { return -ENODEV; diff --git a/include/linux/iio/adc/adi-axi-adc.h b/include/linux/iio/adc/adi-axi-adc.h index 52620e5b8052..b7904992d561 100644 --- a/include/linux/iio/adc/adi-axi-adc.h +++ b/include/linux/iio/adc/adi-axi-adc.h @@ -41,6 +41,7 @@ struct adi_axi_adc_chip_info { * @reg_access IIO debugfs_reg_access hook for the client ADC * @read_raw IIO read_raw hook for the client ADC * @write_raw IIO write_raw hook for the client ADC + * @read_avail IIO read_avail hook for the client ADC */ struct adi_axi_adc_conv { const struct adi_axi_adc_chip_info *chip_info; @@ -54,6 +55,9 @@ struct adi_axi_adc_conv { int (*write_raw)(struct adi_axi_adc_conv *conv, struct iio_chan_spec const *chan, int val, int val2, long mask); + int (*read_avail)(struct adi_axi_adc_conv *conv, + struct iio_chan_spec const *chan, + const int **val, int *type, int *length, long mask); }; struct adi_axi_adc_conv *devm_adi_axi_adc_conv_register(struct device *dev, diff --git a/include/linux/iio/buffer-dma.h b/include/linux/iio/buffer-dma.h index 6564bdcdac66..18d3702fa95d 100644 --- a/include/linux/iio/buffer-dma.h +++ b/include/linux/iio/buffer-dma.h @@ -19,14 +19,12 @@ struct device; /** * enum iio_block_state - State of a struct iio_dma_buffer_block - * @IIO_BLOCK_STATE_DEQUEUED: Block is not queued * @IIO_BLOCK_STATE_QUEUED: Block is on the incoming queue * @IIO_BLOCK_STATE_ACTIVE: Block is currently being processed by the DMA * @IIO_BLOCK_STATE_DONE: Block is on the outgoing queue * @IIO_BLOCK_STATE_DEAD: Block has been marked as to be freed */ enum iio_block_state { - IIO_BLOCK_STATE_DEQUEUED, IIO_BLOCK_STATE_QUEUED, IIO_BLOCK_STATE_ACTIVE, IIO_BLOCK_STATE_DONE, @@ -73,12 +71,15 @@ struct iio_dma_buffer_block { * @active_block: Block being used in read() * @pos: Read offset in the active block * @block_size: Size of each block + * @next_dequeue: index of next block that will be dequeued */ struct iio_dma_buffer_queue_fileio { struct iio_dma_buffer_block *blocks[2]; struct iio_dma_buffer_block *active_block; size_t pos; size_t block_size; + + unsigned int next_dequeue; }; /** @@ -93,7 +94,6 @@ struct iio_dma_buffer_queue_fileio { * list and typically also a list of active blocks in the part that handles * the DMA controller * @incoming: List of buffers on the incoming queue - * @outgoing: List of buffers on the outgoing queue * @active: Whether the buffer is currently active * @fileio: FileIO state */ @@ -105,7 +105,6 @@ struct iio_dma_buffer_queue { struct mutex lock; spinlock_t list_lock; struct list_head incoming; - struct list_head outgoing; bool active; diff --git a/include/linux/iio/iio.h 
b/include/linux/iio/iio.h index d0ce3b71106a..c5b36d2c1e73 100644 --- a/include/linux/iio/iio.h +++ b/include/linux/iio/iio.h @@ -434,13 +434,7 @@ struct iio_trigger; /* forward declaration */ * @update_scan_mode: function to configure device and scan buffer when * channels have changed * @debugfs_reg_access: function to read or write register value of device - * @of_xlate: function pointer to obtain channel specifier index. - * When #iio-cells is greater than '0', the driver could - * provide a custom of_xlate function that reads the - * *args* and returns the appropriate index in registered - * IIO channels array. * @fwnode_xlate: fwnode based function pointer to obtain channel specifier index. - * Functionally the same as @of_xlate. * @hwfifo_set_watermark: function pointer to set the current hardware * fifo watermark level; see hwfifo_* entries in * Documentation/ABI/testing/sysfs-bus-iio for details on diff --git a/include/linux/iio/types.h b/include/linux/iio/types.h index 117bde7d6ad7..d89982c98368 100644 --- a/include/linux/iio/types.h +++ b/include/linux/iio/types.h @@ -68,6 +68,7 @@ enum iio_chan_info_enum { IIO_CHAN_INFO_THERMOCOUPLE_TYPE, IIO_CHAN_INFO_CALIBAMBIENT, IIO_CHAN_INFO_ZEROPOINT, + IIO_CHAN_INFO_TROUGH, }; #endif /* _IIO_TYPES_H_ */ diff --git a/include/linux/maple.h b/include/linux/maple.h index 9b140272ee16..9aae44efcfd4 100644 --- a/include/linux/maple.h +++ b/include/linux/maple.h @@ -5,7 +5,6 @@ #include <mach/maple.h> struct device; -extern struct bus_type maple_bus_type; /* Maple Bus command and response codes */ enum maple_code { diff --git a/include/linux/mhi.h b/include/linux/mhi.h index 039943ec4d4e..d0f9b522f328 100644 --- a/include/linux/mhi.h +++ b/include/linux/mhi.h @@ -266,6 +266,7 @@ struct mhi_event_config { * struct mhi_controller_config - Root MHI controller configuration * @max_channels: Maximum number of channels supported * @timeout_ms: Timeout value for operations. 0 means use default + * @ready_timeout_ms: Timeout value for waiting device to be ready (optional) * @buf_len: Size of automatically allocated buffers. 
0 means use default * @num_channels: Number of channels defined in @ch_cfg * @ch_cfg: Array of defined channels @@ -277,6 +278,7 @@ struct mhi_event_config { struct mhi_controller_config { u32 max_channels; u32 timeout_ms; + u32 ready_timeout_ms; u32 buf_len; u32 num_channels; const struct mhi_channel_config *ch_cfg; @@ -330,6 +332,7 @@ struct mhi_controller_config { * @pm_mutex: Mutex for suspend/resume operation * @pm_lock: Lock for protecting MHI power management state * @timeout_ms: Timeout in ms for state transitions + * @ready_timeout_ms: Timeout in ms for waiting device to be ready (optional) * @pm_state: MHI power management state * @db_access: DB access states * @ee: MHI device execution environment @@ -419,6 +422,7 @@ struct mhi_controller { struct mutex pm_mutex; rwlock_t pm_lock; u32 timeout_ms; + u32 ready_timeout_ms; u32 pm_state; u32 db_access; enum mhi_ee_type ee; diff --git a/include/linux/mhi_ep.h b/include/linux/mhi_ep.h index f198a8ac7ee7..11bf3212f782 100644 --- a/include/linux/mhi_ep.h +++ b/include/linux/mhi_ep.h @@ -50,6 +50,27 @@ struct mhi_ep_db_info { }; /** + * struct mhi_ep_buf_info - MHI Endpoint transfer buffer info + * @mhi_dev: MHI device associated with this buffer + * @dev_addr: Address of the buffer in endpoint + * @host_addr: Address of the bufffer in host + * @size: Size of the buffer + * @code: Transfer completion code + * @cb: Callback to be executed by controller drivers after transfer completion (async) + * @cb_buf: Opaque buffer to be passed to the callback + */ +struct mhi_ep_buf_info { + struct mhi_ep_device *mhi_dev; + void *dev_addr; + u64 host_addr; + size_t size; + int code; + + void (*cb)(struct mhi_ep_buf_info *buf_info); + void *cb_buf; +}; + +/** * struct mhi_ep_cntrl - MHI Endpoint controller structure * @cntrl_dev: Pointer to the struct device of physical bus acting as the MHI * Endpoint controller @@ -82,8 +103,10 @@ struct mhi_ep_db_info { * @raise_irq: CB function for raising IRQ to the host * @alloc_map: CB function for allocating memory in endpoint for storing host context and mapping it * @unmap_free: CB function to unmap and free the allocated memory in endpoint for storing host context - * @read_from_host: CB function for reading from host memory from endpoint - * @write_to_host: CB function for writing to host memory from endpoint + * @read_sync: CB function for reading from host memory synchronously + * @write_sync: CB function for writing to host memory synchronously + * @read_async: CB function for reading from host memory asynchronously + * @write_async: CB function for writing to host memory asynchronously * @mhi_state: MHI Endpoint state * @max_chan: Maximum channels supported by the endpoint controller * @mru: MRU (Maximum Receive Unit) value of the endpoint controller @@ -128,14 +151,19 @@ struct mhi_ep_cntrl { struct work_struct reset_work; struct work_struct cmd_ring_work; struct work_struct ch_ring_work; + struct kmem_cache *ring_item_cache; + struct kmem_cache *ev_ring_el_cache; + struct kmem_cache *tre_buf_cache; void (*raise_irq)(struct mhi_ep_cntrl *mhi_cntrl, u32 vector); int (*alloc_map)(struct mhi_ep_cntrl *mhi_cntrl, u64 pci_addr, phys_addr_t *phys_ptr, void __iomem **virt, size_t size); void (*unmap_free)(struct mhi_ep_cntrl *mhi_cntrl, u64 pci_addr, phys_addr_t phys, void __iomem *virt, size_t size); - int (*read_from_host)(struct mhi_ep_cntrl *mhi_cntrl, u64 from, void *to, size_t size); - int (*write_to_host)(struct mhi_ep_cntrl *mhi_cntrl, void *from, u64 to, size_t size); + int (*read_sync)(struct 
mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_buf_info *buf_info); + int (*write_sync)(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_buf_info *buf_info); + int (*read_async)(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_buf_info *buf_info); + int (*write_async)(struct mhi_ep_cntrl *mhi_cntrl, struct mhi_ep_buf_info *buf_info); enum mhi_state mhi_state; diff --git a/include/linux/moxtet.h b/include/linux/moxtet.h index 79184948fab4..ac577699edfd 100644 --- a/include/linux/moxtet.h +++ b/include/linux/moxtet.h @@ -35,8 +35,6 @@ enum turris_mox_module_id { #define MOXTET_NIRQS 16 -extern struct bus_type moxtet_type; - struct moxtet { struct device *dev; struct mutex lock; diff --git a/include/linux/nvmem-consumer.h b/include/linux/nvmem-consumer.h index 6ec4b9743e25..34c0e58dfa26 100644 --- a/include/linux/nvmem-consumer.h +++ b/include/linux/nvmem-consumer.h @@ -81,6 +81,7 @@ int nvmem_device_cell_write(struct nvmem_device *nvmem, struct nvmem_cell_info *info, void *buf); const char *nvmem_dev_name(struct nvmem_device *nvmem); +size_t nvmem_dev_size(struct nvmem_device *nvmem); void nvmem_add_cell_lookups(struct nvmem_cell_lookup *entries, size_t nentries); @@ -247,7 +248,6 @@ struct nvmem_cell *of_nvmem_cell_get(struct device_node *np, const char *id); struct nvmem_device *of_nvmem_device_get(struct device_node *np, const char *name); -struct device_node *of_nvmem_layout_get_container(struct nvmem_device *nvmem); #else static inline struct nvmem_cell *of_nvmem_cell_get(struct device_node *np, const char *id) @@ -260,12 +260,6 @@ static inline struct nvmem_device *of_nvmem_device_get(struct device_node *np, { return ERR_PTR(-EOPNOTSUPP); } - -static inline struct device_node * -of_nvmem_layout_get_container(struct nvmem_device *nvmem) -{ - return NULL; -} #endif /* CONFIG_NVMEM && CONFIG_OF */ #endif /* ifndef _LINUX_NVMEM_CONSUMER_H */ diff --git a/include/linux/nvmem-provider.h b/include/linux/nvmem-provider.h index e3930835235b..f0ba0e03218f 100644 --- a/include/linux/nvmem-provider.h +++ b/include/linux/nvmem-provider.h @@ -9,6 +9,7 @@ #ifndef _LINUX_NVMEM_PROVIDER_H #define _LINUX_NVMEM_PROVIDER_H +#include <linux/device.h> #include <linux/device/driver.h> #include <linux/err.h> #include <linux/errno.h> @@ -83,6 +84,8 @@ struct nvmem_cell_info { * @cells: Optional array of pre-defined NVMEM cells. * @ncells: Number of elements in cells. * @add_legacy_fixed_of_cells: Read fixed NVMEM cells from old OF syntax. + * @fixup_dt_cell_info: Will be called before a cell is added. Can be + * used to modify the nvmem_cell_info. * @keepout: Optional array of keepout ranges (sorted ascending by start). * @nkeepout: Number of elements in the keepout array. * @type: Type of the nvmem storage @@ -113,6 +116,8 @@ struct nvmem_config { const struct nvmem_cell_info *cells; int ncells; bool add_legacy_fixed_of_cells; + void (*fixup_dt_cell_info)(struct nvmem_device *nvmem, + struct nvmem_cell_info *cell); const struct nvmem_keepout *keepout; unsigned int nkeepout; enum nvmem_type type; @@ -154,15 +159,11 @@ struct nvmem_cell_table { /** * struct nvmem_layout - NVMEM layout definitions * - * @name: Layout name. - * @of_match_table: Open firmware match table. + * @dev: Device-model layout device. + * @nvmem: The underlying NVMEM device * @add_cells: Will be called if a nvmem device is found which * has this layout. The function will add layout * specific cells with nvmem_add_one_cell(). - * @fixup_cell_info: Will be called before a cell is added. Can be - * used to modify the nvmem_cell_info. 
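Back to the include/linux/mhi_ep.h hunk above: the single read_from_host/write_to_host pair becomes four callbacks, and struct mhi_ep_buf_info carries a completion callback (@cb, @cb_buf) so the async variants can return before the copy finishes. The sketch below only illustrates the contract as described by those kernel-doc lines; the controller-side names (my_ctrl_*) and the DMA-queueing helper are placeholders, not real APIs.

#include <linux/mhi_ep.h>

static void my_ctrl_dma_done(void *param)
{
	struct mhi_ep_buf_info *buf_info = param;

	if (buf_info->cb)
		buf_info->cb(buf_info);	/* MHI EP stack finishes the transfer */
}

static int my_ctrl_write_async(struct mhi_ep_cntrl *mhi_cntrl,
			       struct mhi_ep_buf_info *buf_info)
{
	/* Placeholder: queue a device-to-host copy of buf_info->size bytes
	 * from buf_info->dev_addr to buf_info->host_addr and arrange for
	 * my_ctrl_dma_done(buf_info) to run once that copy completes.
	 */
	return my_ctrl_queue_copy(mhi_cntrl, buf_info, my_ctrl_dma_done);
}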
- * @owner: Pointer to struct module. - * @node: List node. * * A nvmem device can hold a well defined structure which can just be * evaluated during runtime. For example a TLV list, or a list of "name=val" @@ -170,17 +171,15 @@ struct nvmem_cell_table { * cells. */ struct nvmem_layout { - const char *name; - const struct of_device_id *of_match_table; - int (*add_cells)(struct device *dev, struct nvmem_device *nvmem, - struct nvmem_layout *layout); - void (*fixup_cell_info)(struct nvmem_device *nvmem, - struct nvmem_layout *layout, - struct nvmem_cell_info *cell); - - /* private */ - struct module *owner; - struct list_head node; + struct device dev; + struct nvmem_device *nvmem; + int (*add_cells)(struct nvmem_layout *layout); +}; + +struct nvmem_layout_driver { + struct device_driver driver; + int (*probe)(struct nvmem_layout *layout); + void (*remove)(struct nvmem_layout *layout); }; #if IS_ENABLED(CONFIG_NVMEM) @@ -197,13 +196,14 @@ void nvmem_del_cell_table(struct nvmem_cell_table *table); int nvmem_add_one_cell(struct nvmem_device *nvmem, const struct nvmem_cell_info *info); -int __nvmem_layout_register(struct nvmem_layout *layout, struct module *owner); -#define nvmem_layout_register(layout) \ - __nvmem_layout_register(layout, THIS_MODULE) +int nvmem_layout_register(struct nvmem_layout *layout); void nvmem_layout_unregister(struct nvmem_layout *layout); -const void *nvmem_layout_get_match_data(struct nvmem_device *nvmem, - struct nvmem_layout *layout); +int nvmem_layout_driver_register(struct nvmem_layout_driver *drv); +void nvmem_layout_driver_unregister(struct nvmem_layout_driver *drv); +#define module_nvmem_layout_driver(__nvmem_layout_driver) \ + module_driver(__nvmem_layout_driver, nvmem_layout_driver_register, \ + nvmem_layout_driver_unregister) #else @@ -235,17 +235,27 @@ static inline int nvmem_layout_register(struct nvmem_layout *layout) static inline void nvmem_layout_unregister(struct nvmem_layout *layout) {} -static inline const void * -nvmem_layout_get_match_data(struct nvmem_device *nvmem, - struct nvmem_layout *layout) +#endif /* CONFIG_NVMEM */ + +#if IS_ENABLED(CONFIG_NVMEM) && IS_ENABLED(CONFIG_OF) + +/** + * of_nvmem_layout_get_container() - Get OF node of layout container + * + * @nvmem: nvmem device + * + * Return: a node pointer with refcount incremented or NULL if no + * container exists. Use of_node_put() on it when done. 
+ */ +struct device_node *of_nvmem_layout_get_container(struct nvmem_device *nvmem); + +#else /* CONFIG_NVMEM && CONFIG_OF */ + +static inline struct device_node *of_nvmem_layout_get_container(struct nvmem_device *nvmem) { return NULL; } -#endif /* CONFIG_NVMEM */ - -#define module_nvmem_layout_driver(__layout_driver) \ - module_driver(__layout_driver, nvmem_layout_register, \ - nvmem_layout_unregister) +#endif /* CONFIG_NVMEM && CONFIG_OF */ #endif /* ifndef _LINUX_NVMEM_PROVIDER_H */ diff --git a/include/linux/of_device.h b/include/linux/of_device.h index 2c7a3d4bc775..a72661e47faa 100644 --- a/include/linux/of_device.h +++ b/include/linux/of_device.h @@ -40,6 +40,9 @@ static inline int of_dma_configure(struct device *dev, { return of_dma_configure_id(dev, np, force_dma, NULL); } + +void of_device_make_bus_id(struct device *dev); + #else /* CONFIG_OF */ static inline int of_driver_match_device(struct device *dev, @@ -82,6 +85,9 @@ static inline int of_dma_configure(struct device *dev, { return 0; } + +static inline void of_device_make_bus_id(struct device *dev) {} + #endif /* CONFIG_OF */ #endif /* _LINUX_OF_DEVICE_H */ diff --git a/include/linux/property.h b/include/linux/property.h index 60e35bd97398..e6516d0b7d52 100644 --- a/include/linux/property.h +++ b/include/linux/property.h @@ -124,6 +124,18 @@ static inline bool device_is_compatible(const struct device *dev, const char *co return fwnode_device_is_compatible(dev_fwnode(dev), compat); } +int fwnode_property_match_property_string(const struct fwnode_handle *fwnode, + const char *propname, + const char * const *array, size_t n); + +static inline +int device_property_match_property_string(const struct device *dev, + const char *propname, + const char * const *array, size_t n) +{ + return fwnode_property_match_property_string(dev_fwnode(dev), propname, array, n); +} + int fwnode_property_get_reference_args(const struct fwnode_handle *fwnode, const char *prop, const char *nargs_prop, unsigned int nargs, unsigned int index, diff --git a/include/linux/rtsx_pci.h b/include/linux/rtsx_pci.h index 534038d962e4..4612ef09a0c7 100644 --- a/include/linux/rtsx_pci.h +++ b/include/linux/rtsx_pci.h @@ -60,6 +60,7 @@ #define SD_EXIST (1 << 16) #define DELINK_INT GPIO0_INT #define MS_OC_INT (1 << 23) +#define SD_OVP_INT (1 << 23) #define SD_OC_INT (1 << 22) #define CARD_INT (XD_INT | MS_INT | SD_INT) @@ -80,6 +81,7 @@ #define OC_INT_EN (1 << 23) #define DELINK_INT_EN GPIO0_INT_EN #define MS_OC_INT_EN (1 << 23) +#define SD_OVP_INT_EN (1 << 23) #define SD_OC_INT_EN (1 << 22) #define RTSX_DUM_REG 0x1C @@ -583,6 +585,7 @@ #define OBFF_DISABLE 0x00 #define CDRESUMECTL 0xFE52 +#define CDGW 0xFE53 #define WAKE_SEL_CTL 0xFE54 #define PCLK_CTL 0xFE55 #define PCLK_MODE_SEL 0x20 @@ -764,6 +767,9 @@ #define SD_VIO_LDO_1V8 0x40 #define SD_VIO_LDO_3V3 0x70 +#define RTS5264_AUTOLOAD_CFG2 0xFF7D +#define RTS5264_CHIP_RST_N_SEL (1 << 6) + #define RTS5260_AUTOLOAD_CFG4 0xFF7F #define RTS5260_MIMO_DISABLE 0x8A /*RTS5261*/ @@ -1261,6 +1267,7 @@ struct rtsx_pcr { u8 dma_error_count; u8 ocp_stat; u8 ocp_stat2; + u8 ovp_stat; u8 rtd3_en; }; @@ -1271,6 +1278,7 @@ struct rtsx_pcr { #define PID_5260 0x5260 #define PID_5261 0x5261 #define PID_5228 0x5228 +#define PID_5264 0x5264 #define CHK_PCI_PID(pcr, pid) ((pcr)->pci->device == (pid)) #define PCI_VID(pcr) ((pcr)->pci->vendor) diff --git a/include/linux/spmi.h b/include/linux/spmi.h index 2a4ce4144f9f..28e8c8bd3944 100644 --- a/include/linux/spmi.h +++ b/include/linux/spmi.h @@ -120,6 +120,9 @@ static inline void 
spmi_controller_put(struct spmi_controller *ctrl) int spmi_controller_add(struct spmi_controller *ctrl); void spmi_controller_remove(struct spmi_controller *ctrl); +struct spmi_controller *devm_spmi_controller_alloc(struct device *parent, size_t size); +int devm_spmi_controller_add(struct device *parent, struct spmi_controller *ctrl); + /** * struct spmi_driver - SPMI slave device driver * @driver: SPMI device drivers should initialize name and owner field of diff --git a/include/linux/surface_aggregator/device.h b/include/linux/surface_aggregator/device.h index 42b249b4c24b..8cd8c38cf3f3 100644 --- a/include/linux/surface_aggregator/device.h +++ b/include/linux/surface_aggregator/device.h @@ -193,7 +193,6 @@ struct ssam_device_driver { #ifdef CONFIG_SURFACE_AGGREGATOR_BUS -extern struct bus_type ssam_bus_type; extern const struct device_type ssam_device_type; /** diff --git a/include/linux/w1-gpio.h b/include/linux/w1-gpio.h deleted file mode 100644 index 3495fd0dc790..000000000000 --- a/include/linux/w1-gpio.h +++ /dev/null @@ -1,22 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * w1-gpio interface to platform code - * - * Copyright (C) 2007 Ville Syrjala <syrjala@sci.fi> - */ -#ifndef _LINUX_W1_GPIO_H -#define _LINUX_W1_GPIO_H - -struct gpio_desc; - -/** - * struct w1_gpio_platform_data - Platform-dependent data for w1-gpio - */ -struct w1_gpio_platform_data { - struct gpio_desc *gpiod; - struct gpio_desc *pullup_gpiod; - void (*enable_external_pullup)(int enable); - unsigned int pullup_duration; -}; - -#endif /* _LINUX_W1_GPIO_H */ diff --git a/include/uapi/linux/android/binder.h b/include/uapi/linux/android/binder.h index 5f636b5afcd7..d44a8118b2ed 100644 --- a/include/uapi/linux/android/binder.h +++ b/include/uapi/linux/android/binder.h @@ -251,20 +251,22 @@ struct binder_extended_error { __s32 param; }; -#define BINDER_WRITE_READ _IOWR('b', 1, struct binder_write_read) -#define BINDER_SET_IDLE_TIMEOUT _IOW('b', 3, __s64) -#define BINDER_SET_MAX_THREADS _IOW('b', 5, __u32) -#define BINDER_SET_IDLE_PRIORITY _IOW('b', 6, __s32) -#define BINDER_SET_CONTEXT_MGR _IOW('b', 7, __s32) -#define BINDER_THREAD_EXIT _IOW('b', 8, __s32) -#define BINDER_VERSION _IOWR('b', 9, struct binder_version) -#define BINDER_GET_NODE_DEBUG_INFO _IOWR('b', 11, struct binder_node_debug_info) -#define BINDER_GET_NODE_INFO_FOR_REF _IOWR('b', 12, struct binder_node_info_for_ref) -#define BINDER_SET_CONTEXT_MGR_EXT _IOW('b', 13, struct flat_binder_object) -#define BINDER_FREEZE _IOW('b', 14, struct binder_freeze_info) -#define BINDER_GET_FROZEN_INFO _IOWR('b', 15, struct binder_frozen_status_info) -#define BINDER_ENABLE_ONEWAY_SPAM_DETECTION _IOW('b', 16, __u32) -#define BINDER_GET_EXTENDED_ERROR _IOWR('b', 17, struct binder_extended_error) +enum { + BINDER_WRITE_READ = _IOWR('b', 1, struct binder_write_read), + BINDER_SET_IDLE_TIMEOUT = _IOW('b', 3, __s64), + BINDER_SET_MAX_THREADS = _IOW('b', 5, __u32), + BINDER_SET_IDLE_PRIORITY = _IOW('b', 6, __s32), + BINDER_SET_CONTEXT_MGR = _IOW('b', 7, __s32), + BINDER_THREAD_EXIT = _IOW('b', 8, __s32), + BINDER_VERSION = _IOWR('b', 9, struct binder_version), + BINDER_GET_NODE_DEBUG_INFO = _IOWR('b', 11, struct binder_node_debug_info), + BINDER_GET_NODE_INFO_FOR_REF = _IOWR('b', 12, struct binder_node_info_for_ref), + BINDER_SET_CONTEXT_MGR_EXT = _IOW('b', 13, struct flat_binder_object), + BINDER_FREEZE = _IOW('b', 14, struct binder_freeze_info), + BINDER_GET_FROZEN_INFO = _IOWR('b', 15, struct binder_frozen_status_info), + BINDER_ENABLE_ONEWAY_SPAM_DETECTION = 
_IOW('b', 16, __u32), + BINDER_GET_EXTENDED_ERROR = _IOWR('b', 17, struct binder_extended_error), +}; /* * NOTE: Two special error codes you should check for when calling diff --git a/include/uapi/linux/iio/types.h b/include/uapi/linux/iio/types.h index 9c2ffdcd6623..5060963707b1 100644 --- a/include/uapi/linux/iio/types.h +++ b/include/uapi/linux/iio/types.h @@ -91,6 +91,8 @@ enum iio_modifier { IIO_MOD_CO2, IIO_MOD_VOC, IIO_MOD_LIGHT_UV, + IIO_MOD_LIGHT_UVA, + IIO_MOD_LIGHT_UVB, IIO_MOD_LIGHT_DUV, IIO_MOD_PM1, IIO_MOD_PM2P5, diff --git a/include/uapi/linux/mei.h b/include/uapi/linux/mei.h index 171c5cce3641..68a0272e99b7 100644 --- a/include/uapi/linux/mei.h +++ b/include/uapi/linux/mei.h @@ -100,14 +100,14 @@ struct mei_connect_client_data_vtag { * a FW client on a tagged channel. From this point on, every read * and write will communicate with the associated FW client * on the tagged channel. - * Upone close() the communication is terminated. + * Upon close() the communication is terminated. * * The IOCTL argument is a struct with a union that contains * the input parameter and the output parameter for this IOCTL. * * The input parameter is UUID of the FW Client, a vtag [0,255]. * The output parameter is the properties of the FW client - * (FW protocool version and max message size). + * (FW protocol version and max message size). * * Clients that do not support tagged connection * will respond with -EOPNOTSUPP. diff --git a/include/uapi/linux/nsm.h b/include/uapi/linux/nsm.h new file mode 100644 index 000000000000..e529f232f6c0 --- /dev/null +++ b/include/uapi/linux/nsm.h @@ -0,0 +1,31 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* + * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. + */ + +#ifndef __UAPI_LINUX_NSM_H +#define __UAPI_LINUX_NSM_H + +#include <linux/ioctl.h> +#include <linux/types.h> + +#define NSM_MAGIC 0x0A + +#define NSM_REQUEST_MAX_SIZE 0x1000 +#define NSM_RESPONSE_MAX_SIZE 0x3000 + +struct nsm_iovec { + __u64 addr; /* Virtual address of target buffer */ + __u64 len; /* Length of target buffer */ +}; + +/* Raw NSM message. Only available with CAP_SYS_ADMIN. 
*/ +struct nsm_raw { + /* Request from user */ + struct nsm_iovec request; + /* Response to user */ + struct nsm_iovec response; +}; +#define NSM_IOCTL_RAW _IOWR(NSM_MAGIC, 0x0, struct nsm_raw) + +#endif /* __UAPI_LINUX_NSM_H */ diff --git a/init/do_mounts.c b/init/do_mounts.c index 5fdef94f0864..279ad28bf4fb 100644 --- a/init/do_mounts.c +++ b/init/do_mounts.c @@ -510,7 +510,10 @@ struct file_system_type rootfs_fs_type = { void __init init_rootfs(void) { - if (IS_ENABLED(CONFIG_TMPFS) && !saved_root_name[0] && - (!root_fs_names || strstr(root_fs_names, "tmpfs"))) - is_tmpfs = true; + if (IS_ENABLED(CONFIG_TMPFS)) { + if (!saved_root_name[0] && !root_fs_names) + is_tmpfs = true; + else if (root_fs_names && !!strstr(root_fs_names, "tmpfs")) + is_tmpfs = true; + } } diff --git a/kernel/dma/debug.c b/kernel/dma/debug.c index 3de494375b7b..1a5c86dd87d5 100644 --- a/kernel/dma/debug.c +++ b/kernel/dma/debug.c @@ -876,7 +876,7 @@ static int dma_debug_device_change(struct notifier_block *nb, unsigned long acti return 0; } -void dma_debug_add_bus(struct bus_type *bus) +void dma_debug_add_bus(const struct bus_type *bus) { struct notifier_block *nb; diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl index f8343b34a28b..9c4c4a61bc83 100755 --- a/scripts/checkpatch.pl +++ b/scripts/checkpatch.pl @@ -512,6 +512,7 @@ our $Attribute = qr{ __ro_after_init| __kprobes| $InitAttribute| + __aligned\s*\(.*\)| ____cacheline_aligned| ____cacheline_aligned_in_smp| ____cacheline_internodealigned_in_smp| diff --git a/scripts/tags.sh b/scripts/tags.sh index a70d43723146..191e0461d6d5 100755 --- a/scripts/tags.sh +++ b/scripts/tags.sh @@ -3,7 +3,7 @@ # Generate tags or cscope files # Usage tags.sh <mode> # -# mode may be any of: tags, TAGS, cscope +# mode may be any of: tags, gtags, TAGS, cscope # # Uses the following environment variables: # SUBARCH, SRCARCH, srctree @@ -50,7 +50,7 @@ fi find_arch_sources() { for i in $archincludedir; do - prune="$prune -wholename $i -prune -o" + local prune="$prune ( -path $i ) -prune -o" done find ${tree}arch/$1 $ignore $prune -name "$2" -not -type l -print; } @@ -58,7 +58,7 @@ find_arch_sources() # find sources in arch/$1/include find_arch_include_sources() { - include=$(find ${tree}arch/$1/ -name include -type d -print); + local include=$(find ${tree}arch/$1/ -name include -type d -print); if [ -n "$include" ]; then archincludedir="$archincludedir $include" find $include $ignore -name "$2" -not -type l -print; @@ -81,21 +81,16 @@ find_other_sources() -name "$1" -not -type l -print; } -find_sources() -{ - find_arch_sources $1 "$2" -} - all_sources() { find_arch_include_sources ${SRCARCH} '*.[chS]' - if [ ! 
-z "$archinclude" ]; then + if [ -n "$archinclude" ]; then find_arch_include_sources $archinclude '*.[chS]' fi find_include_sources '*.[chS]' for arch in $ALLSOURCE_ARCHS do - find_sources $arch '*.[chS]' + find_arch_sources $arch '*.[chS]' done find_other_sources '*.[chS]' } @@ -125,7 +120,7 @@ all_kconfigs() find ${tree}arch/ -maxdepth 1 $ignore \ -name "Kconfig*" -not -type l -print; for arch in $ALLSOURCE_ARCHS; do - find_sources $arch 'Kconfig*' + find_arch_sources $arch 'Kconfig*' done find_other_sources 'Kconfig*' } diff --git a/tools/counter/Build b/tools/counter/Build index 33f4a51d715e..4bbadb7ec93a 100644 --- a/tools/counter/Build +++ b/tools/counter/Build @@ -1 +1,2 @@ counter_example-y += counter_example.o +counter_watch_events-y += counter_watch_events.o diff --git a/tools/counter/Makefile b/tools/counter/Makefile index b2c2946f44c9..d82d35a520f6 100644 --- a/tools/counter/Makefile +++ b/tools/counter/Makefile @@ -12,9 +12,10 @@ endif # (this improves performance and avoids hard-to-debug behaviour); MAKEFLAGS += -r -override CFLAGS += -O2 -Wall -g -D_GNU_SOURCE -I$(OUTPUT)include +override CFLAGS += -O2 -Wall -g -D_GNU_SOURCE -I$(OUTPUT)include \ + -I$(srctree)/tools/include -ALL_TARGETS := counter_example +ALL_TARGETS := counter_example counter_watch_events ALL_PROGRAMS := $(patsubst %,$(OUTPUT)%,$(ALL_TARGETS)) all: $(ALL_PROGRAMS) @@ -37,12 +38,19 @@ $(COUNTER_EXAMPLE): prepare FORCE $(OUTPUT)counter_example: $(COUNTER_EXAMPLE) $(QUIET_LINK)$(CC) $(CFLAGS) $(LDFLAGS) $< -o $@ +COUNTER_WATCH_EVENTS := $(OUTPUT)counter_watch_events.o +$(COUNTER_WATCH_EVENTS): prepare FORCE + $(Q)$(MAKE) $(build)=counter_watch_events +$(OUTPUT)counter_watch_events: $(COUNTER_WATCH_EVENTS) + $(QUIET_LINK)$(CC) $(CFLAGS) $(LDFLAGS) $< -o $@ + clean: rm -f $(ALL_PROGRAMS) rm -rf $(OUTPUT)include/linux/counter.h rm -df $(OUTPUT)include/linux rm -df $(OUTPUT)include find $(or $(OUTPUT),.) -name '*.o' -delete -o -name '\.*.d' -delete + find $(or $(OUTPUT),.) -name '\.*.o.cmd' -delete install: $(ALL_PROGRAMS) install -d -m 755 $(DESTDIR)$(bindir); \ diff --git a/tools/counter/counter_watch_events.c b/tools/counter/counter_watch_events.c new file mode 100644 index 000000000000..107631e0f2e3 --- /dev/null +++ b/tools/counter/counter_watch_events.c @@ -0,0 +1,406 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Counter Watch Events - Test various counter watch events in a userspace application + * + * Copyright (C) STMicroelectronics 2023 - All Rights Reserved + * Author: Fabrice Gasnier <fabrice.gasnier@foss.st.com>. 
+ */ + +#include <errno.h> +#include <fcntl.h> +#include <getopt.h> +#include <linux/counter.h> +#include <linux/kernel.h> +#include <stdlib.h> +#include <stdio.h> +#include <string.h> +#include <sys/ioctl.h> +#include <unistd.h> + +static struct counter_watch simple_watch[] = { + { + /* Component data: Count 0 count */ + .component.type = COUNTER_COMPONENT_COUNT, + .component.scope = COUNTER_SCOPE_COUNT, + .component.parent = 0, + /* Event type: overflow or underflow */ + .event = COUNTER_EVENT_OVERFLOW_UNDERFLOW, + /* Device event channel 0 */ + .channel = 0, + }, +}; + +static const char * const counter_event_type_name[] = { + "COUNTER_EVENT_OVERFLOW", + "COUNTER_EVENT_UNDERFLOW", + "COUNTER_EVENT_OVERFLOW_UNDERFLOW", + "COUNTER_EVENT_THRESHOLD", + "COUNTER_EVENT_INDEX", + "COUNTER_EVENT_CHANGE_OF_STATE", + "COUNTER_EVENT_CAPTURE", +}; + +static const char * const counter_component_type_name[] = { + "COUNTER_COMPONENT_NONE", + "COUNTER_COMPONENT_SIGNAL", + "COUNTER_COMPONENT_COUNT", + "COUNTER_COMPONENT_FUNCTION", + "COUNTER_COMPONENT_SYNAPSE_ACTION", + "COUNTER_COMPONENT_EXTENSION", +}; + +static const char * const counter_scope_name[] = { + "COUNTER_SCOPE_DEVICE", + "COUNTER_SCOPE_SIGNAL", + "COUNTER_SCOPE_COUNT", +}; + +static void print_watch(struct counter_watch *watch, int nwatch) +{ + int i; + + /* prints the watch array in C-like structure */ + printf("watch[%d] = {\n", nwatch); + for (i = 0; i < nwatch; i++) { + printf(" [%d] =\t{\n" + "\t\t.component.type = %s\n" + "\t\t.component.scope = %s\n" + "\t\t.component.parent = %d\n" + "\t\t.component.id = %d\n" + "\t\t.event = %s\n" + "\t\t.channel = %d\n" + "\t},\n", + i, + counter_component_type_name[watch[i].component.type], + counter_scope_name[watch[i].component.scope], + watch[i].component.parent, + watch[i].component.id, + counter_event_type_name[watch[i].event], + watch[i].channel); + } + printf("};\n"); +} + +static void print_usage(void) +{ + fprintf(stderr, "Usage:\n\n" + "counter_watch_events [options] [-w <watchoptions>]\n" + "counter_watch_events [options] [-w <watch1 options>] [-w <watch2 options>]...\n" + "\n" + "When no --watch option has been provided, simple watch example is used:\n" + "counter_watch_events [options] -w comp_count,scope_count,evt_ovf_udf\n" + "\n" + "Test various watch events for given counter device.\n" + "\n" + "Options:\n" + " -d, --debug Prints debug information\n" + " -h, --help Prints usage\n" + " -n, --device-num <n> Use /dev/counter<n> [default: /dev/counter0]\n" + " -l, --loop <n> Loop for <n> events [default: 0 (forever)]\n" + " -w, --watch <watchoptions> comma-separated list of watch options\n" + "\n" + "Watch options:\n" + " scope_device (COUNTER_SCOPE_DEVICE) [default: scope_device]\n" + " scope_signal (COUNTER_SCOPE_SIGNAL)\n" + " scope_count (COUNTER_SCOPE_COUNT)\n" + "\n" + " comp_none (COUNTER_COMPONENT_NONE) [default: comp_none]\n" + " comp_signal (COUNTER_COMPONENT_SIGNAL)\n" + " comp_count (COUNTER_COMPONENT_COUNT)\n" + " comp_function (COUNTER_COMPONENT_FUNCTION)\n" + " comp_synapse_action (COUNTER_COMPONENT_SYNAPSE_ACTION)\n" + " comp_extension (COUNTER_COMPONENT_EXTENSION)\n" + "\n" + " evt_ovf (COUNTER_EVENT_OVERFLOW) [default: evt_ovf]\n" + " evt_udf (COUNTER_EVENT_UNDERFLOW)\n" + " evt_ovf_udf (COUNTER_EVENT_OVERFLOW_UNDERFLOW)\n" + " evt_threshold (COUNTER_EVENT_THRESHOLD)\n" + " evt_index (COUNTER_EVENT_INDEX)\n" + " evt_change_of_state (COUNTER_EVENT_CHANGE_OF_STATE)\n" + " evt_capture (COUNTER_EVENT_CAPTURE)\n" + "\n" + " chan=<n> channel <n> for this watch [default: 
0]\n" + " id=<n> component id <n> for this watch [default: 0]\n" + " parent=<n> component parent <n> for this watch [default: 0]\n" + "\n" + "Example with two watched events:\n\n" + "counter_watch_events -d \\\n" + "\t-w comp_count,scope_count,evt_ovf_udf \\\n" + "\t-w comp_extension,scope_count,evt_capture,id=7,chan=3\n" + ); +} + +static const struct option longopts[] = { + { "debug", no_argument, 0, 'd' }, + { "help", no_argument, 0, 'h' }, + { "device-num", required_argument, 0, 'n' }, + { "loop", required_argument, 0, 'l' }, + { "watch", required_argument, 0, 'w' }, + { }, +}; + +/* counter watch subopts */ +enum { + WATCH_SCOPE_DEVICE, + WATCH_SCOPE_SIGNAL, + WATCH_SCOPE_COUNT, + WATCH_COMPONENT_NONE, + WATCH_COMPONENT_SIGNAL, + WATCH_COMPONENT_COUNT, + WATCH_COMPONENT_FUNCTION, + WATCH_COMPONENT_SYNAPSE_ACTION, + WATCH_COMPONENT_EXTENSION, + WATCH_EVENT_OVERFLOW, + WATCH_EVENT_UNDERFLOW, + WATCH_EVENT_OVERFLOW_UNDERFLOW, + WATCH_EVENT_THRESHOLD, + WATCH_EVENT_INDEX, + WATCH_EVENT_CHANGE_OF_STATE, + WATCH_EVENT_CAPTURE, + WATCH_CHANNEL, + WATCH_ID, + WATCH_PARENT, + WATCH_SUBOPTS_MAX, +}; + +static char * const counter_watch_subopts[WATCH_SUBOPTS_MAX + 1] = { + /* component.scope */ + [WATCH_SCOPE_DEVICE] = "scope_device", + [WATCH_SCOPE_SIGNAL] = "scope_signal", + [WATCH_SCOPE_COUNT] = "scope_count", + /* component.type */ + [WATCH_COMPONENT_NONE] = "comp_none", + [WATCH_COMPONENT_SIGNAL] = "comp_signal", + [WATCH_COMPONENT_COUNT] = "comp_count", + [WATCH_COMPONENT_FUNCTION] = "comp_function", + [WATCH_COMPONENT_SYNAPSE_ACTION] = "comp_synapse_action", + [WATCH_COMPONENT_EXTENSION] = "comp_extension", + /* event */ + [WATCH_EVENT_OVERFLOW] = "evt_ovf", + [WATCH_EVENT_UNDERFLOW] = "evt_udf", + [WATCH_EVENT_OVERFLOW_UNDERFLOW] = "evt_ovf_udf", + [WATCH_EVENT_THRESHOLD] = "evt_threshold", + [WATCH_EVENT_INDEX] = "evt_index", + [WATCH_EVENT_CHANGE_OF_STATE] = "evt_change_of_state", + [WATCH_EVENT_CAPTURE] = "evt_capture", + /* channel, id, parent */ + [WATCH_CHANNEL] = "chan", + [WATCH_ID] = "id", + [WATCH_PARENT] = "parent", + /* Empty entry ends the opts array */ + NULL +}; + +int main(int argc, char **argv) +{ + int c, fd, i, ret, rc = 0, debug = 0, loop = 0, dev_num = 0, nwatch = 0; + struct counter_event event_data; + char *device_name = NULL, *subopts, *value; + struct counter_watch *watches; + + /* + * 1st pass: + * - list watch events number to allocate the watch array. 
+ * - parse normal options (other than watch options) + */ + while ((c = getopt_long(argc, argv, "dhn:l:w:", longopts, NULL)) != -1) { + switch (c) { + case 'd': + debug = 1; + break; + case 'h': + print_usage(); + return EXIT_SUCCESS; + case 'n': + dev_num = strtoul(optarg, NULL, 10); + if (errno) { + perror("strtol failed: --device-num <n>\n"); + return EXIT_FAILURE; + } + break; + case 'l': + loop = strtol(optarg, NULL, 10); + if (errno) { + perror("strtol failed: --loop <n>\n"); + return EXIT_FAILURE; + } + break; + case 'w': + nwatch++; + break; + default: + return EXIT_FAILURE; + } + } + + if (nwatch) { + watches = calloc(nwatch, sizeof(*watches)); + if (!watches) { + perror("Error allocating watches\n"); + return EXIT_FAILURE; + } + } else { + /* default to simple watch example */ + watches = simple_watch; + nwatch = ARRAY_SIZE(simple_watch); + } + + /* 2nd pass: parse watch sub-options to fill in watch array */ + optind = 1; + i = 0; + while ((c = getopt_long(argc, argv, "dhn:l:w:", longopts, NULL)) != -1) { + switch (c) { + case 'w': + subopts = optarg; + while (*subopts != '\0') { + ret = getsubopt(&subopts, counter_watch_subopts, &value); + switch (ret) { + case WATCH_SCOPE_DEVICE: + case WATCH_SCOPE_SIGNAL: + case WATCH_SCOPE_COUNT: + /* match with counter_scope */ + watches[i].component.scope = ret; + break; + case WATCH_COMPONENT_NONE: + case WATCH_COMPONENT_SIGNAL: + case WATCH_COMPONENT_COUNT: + case WATCH_COMPONENT_FUNCTION: + case WATCH_COMPONENT_SYNAPSE_ACTION: + case WATCH_COMPONENT_EXTENSION: + /* match counter_component_type: subtract enum value */ + ret -= WATCH_COMPONENT_NONE; + watches[i].component.type = ret; + break; + case WATCH_EVENT_OVERFLOW: + case WATCH_EVENT_UNDERFLOW: + case WATCH_EVENT_OVERFLOW_UNDERFLOW: + case WATCH_EVENT_THRESHOLD: + case WATCH_EVENT_INDEX: + case WATCH_EVENT_CHANGE_OF_STATE: + case WATCH_EVENT_CAPTURE: + /* match counter_event_type: subtract enum value */ + ret -= WATCH_EVENT_OVERFLOW; + watches[i].event = ret; + break; + case WATCH_CHANNEL: + if (!value) { + fprintf(stderr, "Invalid chan=<number>\n"); + rc = EXIT_FAILURE; + goto err_free_watches; + } + watches[i].channel = strtoul(value, NULL, 10); + if (errno) { + perror("strtoul failed: chan=<number>\n"); + rc = EXIT_FAILURE; + goto err_free_watches; + } + break; + case WATCH_ID: + if (!value) { + fprintf(stderr, "Invalid id=<number>\n"); + rc = EXIT_FAILURE; + goto err_free_watches; + } + watches[i].component.id = strtoul(value, NULL, 10); + if (errno) { + perror("strtoul failed: id=<number>\n"); + rc = EXIT_FAILURE; + goto err_free_watches; + } + break; + case WATCH_PARENT: + if (!value) { + fprintf(stderr, "Invalid parent=<number>\n"); + rc = EXIT_FAILURE; + goto err_free_watches; + } + watches[i].component.parent = strtoul(value, NULL, 10); + if (errno) { + perror("strtoul failed: parent=<number>\n"); + rc = EXIT_FAILURE; + goto err_free_watches; + } + break; + default: + fprintf(stderr, "Unknown suboption '%s'\n", value); + rc = EXIT_FAILURE; + goto err_free_watches; + } + } + i++; + break; + } + } + + if (debug) + print_watch(watches, nwatch); + + ret = asprintf(&device_name, "/dev/counter%d", dev_num); + if (ret < 0) { + fprintf(stderr, "asprintf failed\n"); + rc = EXIT_FAILURE; + goto err_free_watches; + } + + if (debug) + printf("Opening %s\n", device_name); + + fd = open(device_name, O_RDWR); + if (fd == -1) { + fprintf(stderr, "Unable to open %s: %s\n", device_name, strerror(errno)); + free(device_name); + rc = EXIT_FAILURE; + goto err_free_watches; + } + 
free(device_name); + + for (i = 0; i < nwatch; i++) { + ret = ioctl(fd, COUNTER_ADD_WATCH_IOCTL, watches + i); + if (ret == -1) { + fprintf(stderr, "Error adding watches[%d]: %s\n", i, + strerror(errno)); + rc = EXIT_FAILURE; + goto err_close; + } + } + + ret = ioctl(fd, COUNTER_ENABLE_EVENTS_IOCTL); + if (ret == -1) { + perror("Error enabling events"); + rc = EXIT_FAILURE; + goto err_close; + } + + for (i = 0; loop <= 0 || i < loop; i++) { + ret = read(fd, &event_data, sizeof(event_data)); + if (ret == -1) { + perror("Failed to read event data"); + rc = EXIT_FAILURE; + goto err_close; + } + + if (ret != sizeof(event_data)) { + fprintf(stderr, "Failed to read event data (got: %d)\n", ret); + rc = EXIT_FAILURE; + goto err_close; + } + + printf("Timestamp: %llu\tData: %llu\t event: %s\tch: %d\n", + event_data.timestamp, event_data.value, + counter_event_type_name[event_data.watch.event], + event_data.watch.channel); + + if (event_data.status) { + fprintf(stderr, "Error %d: %s\n", event_data.status, + strerror(event_data.status)); + } + } + +err_close: + close(fd); +err_free_watches: + if (watches != simple_watch) + free(watches); + + return rc; +} diff --git a/tools/iio/iio_event_monitor.c b/tools/iio/iio_event_monitor.c index 2eaaa7123b04..8073c9e4fe46 100644 --- a/tools/iio/iio_event_monitor.c +++ b/tools/iio/iio_event_monitor.c @@ -105,6 +105,8 @@ static const char * const iio_modifier_names[] = { [IIO_MOD_LIGHT_GREEN] = "green", [IIO_MOD_LIGHT_BLUE] = "blue", [IIO_MOD_LIGHT_UV] = "uv", + [IIO_MOD_LIGHT_UVA] = "uva", + [IIO_MOD_LIGHT_UVB] = "uvb", [IIO_MOD_LIGHT_DUV] = "duv", [IIO_MOD_QUATERNION] = "quaternion", [IIO_MOD_TEMP_AMBIENT] = "ambient", diff --git a/tools/testing/nvdimm/test/Kbuild b/tools/testing/nvdimm/test/Kbuild index 197bcb2b7f35..003d48f5f24f 100644 --- a/tools/testing/nvdimm/test/Kbuild +++ b/tools/testing/nvdimm/test/Kbuild @@ -7,6 +7,7 @@ obj-m += nfit_test_iomap.o ifeq ($(CONFIG_ACPI_NFIT),m) nfit_test-y := nfit.o + obj-m += ndtest.o else nfit_test-y := ndtest.o endif |
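
The mhi_ep.h hunk above replaces the synchronous read_from_host()/write_to_host() hooks with read_sync/write_sync plus read_async/write_async variants that complete through the new struct mhi_ep_buf_info. The following is a minimal sketch of the async contract only, assuming a hypothetical controller driver with no DMA engine that simply falls back to its synchronous path before signalling completion; it is not taken from the patch.

#include <linux/mhi_ep.h>

/*
 * Degenerate read_async implementation: do the copy synchronously, then
 * run the completion callback the MHI endpoint stack placed in
 * buf_info->cb. Purely illustrative.
 */
static int foo_ep_read_async(struct mhi_ep_cntrl *mhi_cntrl,
                             struct mhi_ep_buf_info *buf_info)
{
        int ret;

        ret = mhi_cntrl->read_sync(mhi_cntrl, buf_info);
        if (ret)
                return ret;

        if (buf_info->cb)
                buf_info->cb(buf_info);

        return 0;
}

A real controller driver would instead hand the transfer described by buf_info (host_addr, dev_addr, size) to its DMA engine and invoke buf_info->cb() from the completion handler, with the hooks presumably installed in struct mhi_ep_cntrl before registering the controller with mhi_ep_register_controller().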
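
The nvmem-provider.h rework above turns layouts into devices on their own bus: the per-layout name/of_match_table/owner fields are gone, and a layout module now pairs a struct nvmem_layout_driver with module_nvmem_layout_driver(), calling nvmem_layout_register() from its probe. A skeleton written against those declarations follows; the "foo" names, cell offsets and compatible string are placeholders, not from the patch.

#include <linux/module.h>
#include <linux/nvmem-provider.h>
#include <linux/of.h>

static int foo_layout_add_cells(struct nvmem_layout *layout)
{
        struct nvmem_cell_info info = {
                .name   = "serial-number",      /* example cell */
                .offset = 0x10,
                .bytes  = 8,
        };

        /* Parse the storage behind layout->nvmem and describe its cells. */
        return nvmem_add_one_cell(layout->nvmem, &info);
}

static int foo_layout_probe(struct nvmem_layout *layout)
{
        layout->add_cells = foo_layout_add_cells;

        return nvmem_layout_register(layout);
}

static void foo_layout_remove(struct nvmem_layout *layout)
{
        nvmem_layout_unregister(layout);
}

static const struct of_device_id foo_layout_of_match[] = {
        { .compatible = "vendor,foo-layout" },
        { /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, foo_layout_of_match);

static struct nvmem_layout_driver foo_layout_driver = {
        .driver = {
                .owner          = THIS_MODULE,
                .name           = "foo-layout",
                .of_match_table = foo_layout_of_match,
        },
        .probe  = foo_layout_probe,
        .remove = foo_layout_remove,
};
module_nvmem_layout_driver(foo_layout_driver);

MODULE_LICENSE("GPL");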
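
fwnode_property_match_property_string() and its device_property_* wrapper from the property.h hunk read a string property and match it against a caller-supplied table, returning the matching index or a negative errno. A small sketch with made-up property and mode names:

#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/property.h>

static const char * const foo_modes[] = { "fast", "accurate", "low-power" };

static int foo_get_mode(struct device *dev)
{
        int ret;

        /* Returns the index of the matching entry, or a negative errno. */
        ret = device_property_match_property_string(dev, "vendor,operating-mode",
                                                    foo_modes, ARRAY_SIZE(foo_modes));
        if (ret < 0)
                return ret;     /* a driver could also fall back to a default */

        return ret;             /* index into foo_modes[] */
}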
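
The two devm_* helpers declared in the spmi.h hunk give controller drivers device-managed allocation and registration, so error and remove paths no longer need explicit spmi_controller_put()/spmi_controller_remove() calls. A hedged probe sketch with a hypothetical "foo" platform driver; the ERR_PTR-style error return of the allocator is an assumption, since the diff only shows the prototypes.

#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/spmi.h>

struct foo_spmi {               /* hypothetical driver state */
        void __iomem *base;
};

static int foo_spmi_probe(struct platform_device *pdev)
{
        struct spmi_controller *ctrl;
        struct foo_spmi *priv;

        /* Device-managed allocation: no spmi_controller_put() on error paths. */
        ctrl = devm_spmi_controller_alloc(&pdev->dev, sizeof(*priv));
        if (IS_ERR(ctrl))
                return PTR_ERR(ctrl);

        priv = spmi_controller_get_drvdata(ctrl);
        /* ... map registers, install the controller's read/write hooks ... */

        /* Device-managed registration: no remove() callback needed for teardown. */
        return devm_spmi_controller_add(&pdev->dev, ctrl);
}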
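
The new include/uapi/linux/nsm.h defines only the raw passthrough interface: two iovecs and one ioctl, restricted to CAP_SYS_ADMIN. A userspace sketch follows; the /dev/nsm node name and the opaque request payload are assumptions on top of what the header provides, and the sketch further assumes the driver rewrites response.len with the actual response size.

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/nsm.h>

int main(void)
{
        uint8_t req[NSM_REQUEST_MAX_SIZE] = { 0 };      /* opaque, driver-defined request bytes */
        uint8_t rsp[NSM_RESPONSE_MAX_SIZE];
        struct nsm_raw raw = {
                .request  = { .addr = (uint64_t)(uintptr_t)req, .len = sizeof(req) },
                .response = { .addr = (uint64_t)(uintptr_t)rsp, .len = sizeof(rsp) },
        };
        int fd = open("/dev/nsm", O_RDWR);              /* assumed node name; needs CAP_SYS_ADMIN */

        if (fd < 0 || ioctl(fd, NSM_IOCTL_RAW, &raw) < 0) {
                perror("nsm raw request");
                return 1;
        }

        printf("response length: %llu\n", (unsigned long long)raw.response.len);
        close(fd);
        return 0;
}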