Diffstat (limited to 'drivers/bus')
-rw-r--r--  drivers/bus/Kconfig                      10
-rw-r--r--  drivers/bus/Makefile                      1
-rw-r--r--  drivers/bus/fsl-mc/fsl-mc-allocator.c     1
-rw-r--r--  drivers/bus/fsl-mc/mc-io.c                1
-rw-r--r--  drivers/bus/hisi_lpc.c                   47
-rw-r--r--  drivers/bus/imx-weim.c                   34
-rw-r--r--  drivers/bus/moxtet.c                    885
-rw-r--r--  drivers/bus/sunxi-rsb.c                   4
-rw-r--r--  drivers/bus/ti-sysc.c                   177
-rw-r--r--  drivers/bus/uniphier-system-bus.c         4
10 files changed, 1064 insertions, 100 deletions
diff --git a/drivers/bus/Kconfig b/drivers/bus/Kconfig
index 1851112ccc29..6b331061d34b 100644
--- a/drivers/bus/Kconfig
+++ b/drivers/bus/Kconfig
@@ -29,6 +29,16 @@ config BRCMSTB_GISB_ARB
arbiter. This driver provides timeout and target abort error handling
and internal bus master decoding.
+config MOXTET
+ tristate "CZ.NIC Turris Mox module configuration bus"
+ depends on SPI_MASTER && OF
+ help
+ Say yes here to add support for the module configuration bus found
+ on CZ.NIC's Turris Mox. This is needed to discover the order in
+ which the modules are connected and to get/set some of their
+ settings. For example, the GPIOs on the Mox SFP module are
+ configured through this bus.
+
config HISILICON_LPC
bool "Support for ISA I/O space on HiSilicon Hip06/7"
depends on ARM64 && (ARCH_HISI || COMPILE_TEST)
diff --git a/drivers/bus/Makefile b/drivers/bus/Makefile
index ca300b1914ce..16b43d3468c6 100644
--- a/drivers/bus/Makefile
+++ b/drivers/bus/Makefile
@@ -8,6 +8,7 @@ obj-$(CONFIG_ARM_CCI) += arm-cci.o
obj-$(CONFIG_HISILICON_LPC) += hisi_lpc.o
obj-$(CONFIG_BRCMSTB_GISB_ARB) += brcmstb_gisb.o
+obj-$(CONFIG_MOXTET) += moxtet.o
# DPAA2 fsl-mc bus
obj-$(CONFIG_FSL_MC_BUS) += fsl-mc/
diff --git a/drivers/bus/fsl-mc/fsl-mc-allocator.c b/drivers/bus/fsl-mc/fsl-mc-allocator.c
index 8ad77246f322..cc7bb900f524 100644
--- a/drivers/bus/fsl-mc/fsl-mc-allocator.c
+++ b/drivers/bus/fsl-mc/fsl-mc-allocator.c
@@ -330,7 +330,6 @@ void fsl_mc_object_free(struct fsl_mc_device *mc_adev)
fsl_mc_resource_free(resource);
- device_link_del(mc_adev->consumer_link);
mc_adev->consumer_link = NULL;
}
EXPORT_SYMBOL_GPL(fsl_mc_object_free);
diff --git a/drivers/bus/fsl-mc/mc-io.c b/drivers/bus/fsl-mc/mc-io.c
index 3ae574a58cce..d9629fc13a15 100644
--- a/drivers/bus/fsl-mc/mc-io.c
+++ b/drivers/bus/fsl-mc/mc-io.c
@@ -255,7 +255,6 @@ void fsl_mc_portal_free(struct fsl_mc_io *mc_io)
fsl_destroy_mc_io(mc_io);
fsl_mc_resource_free(resource);
- device_link_del(dpmcp_dev->consumer_link);
dpmcp_dev->consumer_link = NULL;
}
EXPORT_SYMBOL_GPL(fsl_mc_portal_free);
diff --git a/drivers/bus/hisi_lpc.c b/drivers/bus/hisi_lpc.c
index 19d7b6ff2f17..20c957185af2 100644
--- a/drivers/bus/hisi_lpc.c
+++ b/drivers/bus/hisi_lpc.c
@@ -456,6 +456,17 @@ struct hisi_lpc_acpi_cell {
size_t pdata_size;
};
+static void hisi_lpc_acpi_remove(struct device *hostdev)
+{
+ struct acpi_device *adev = ACPI_COMPANION(hostdev);
+ struct acpi_device *child;
+
+ device_for_each_child(hostdev, NULL, hisi_lpc_acpi_remove_subdev);
+
+ list_for_each_entry(child, &adev->children, node)
+ acpi_device_clear_enumerated(child);
+}
+
/*
* hisi_lpc_acpi_probe - probe children for ACPI FW
* @hostdev: LPC host device pointer
@@ -555,8 +566,7 @@ static int hisi_lpc_acpi_probe(struct device *hostdev)
return 0;
fail:
- device_for_each_child(hostdev, NULL,
- hisi_lpc_acpi_remove_subdev);
+ hisi_lpc_acpi_remove(hostdev);
return ret;
}
@@ -569,6 +579,10 @@ static int hisi_lpc_acpi_probe(struct device *dev)
{
return -ENODEV;
}
+
+static void hisi_lpc_acpi_remove(struct device *hostdev)
+{
+}
#endif // CONFIG_ACPI
/*
@@ -606,24 +620,27 @@ static int hisi_lpc_probe(struct platform_device *pdev)
range->fwnode = dev->fwnode;
range->flags = LOGIC_PIO_INDIRECT;
range->size = PIO_INDIRECT_SIZE;
+ range->hostdata = lpcdev;
+ range->ops = &hisi_lpc_ops;
+ lpcdev->io_host = range;
ret = logic_pio_register_range(range);
if (ret) {
dev_err(dev, "register IO range failed (%d)!\n", ret);
return ret;
}
- lpcdev->io_host = range;
/* register the LPC host PIO resources */
if (acpi_device)
ret = hisi_lpc_acpi_probe(dev);
else
ret = of_platform_populate(dev->of_node, NULL, NULL, dev);
- if (ret)
+ if (ret) {
+ logic_pio_unregister_range(range);
return ret;
+ }
- lpcdev->io_host->hostdata = lpcdev;
- lpcdev->io_host->ops = &hisi_lpc_ops;
+ dev_set_drvdata(dev, lpcdev);
io_end = lpcdev->io_host->io_start + lpcdev->io_host->size;
dev_info(dev, "registered range [%pa - %pa]\n",
@@ -632,6 +649,23 @@ static int hisi_lpc_probe(struct platform_device *pdev)
return ret;
}
+static int hisi_lpc_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct acpi_device *acpi_device = ACPI_COMPANION(dev);
+ struct hisi_lpc_dev *lpcdev = dev_get_drvdata(dev);
+ struct logic_pio_hwaddr *range = lpcdev->io_host;
+
+ if (acpi_device)
+ hisi_lpc_acpi_remove(dev);
+ else
+ of_platform_depopulate(dev);
+
+ logic_pio_unregister_range(range);
+
+ return 0;
+}
+
static const struct of_device_id hisi_lpc_of_match[] = {
{ .compatible = "hisilicon,hip06-lpc", },
{ .compatible = "hisilicon,hip07-lpc", },
@@ -645,5 +679,6 @@ static struct platform_driver hisi_lpc_driver = {
.acpi_match_table = ACPI_PTR(hisi_lpc_acpi_match),
},
.probe = hisi_lpc_probe,
+ .remove = hisi_lpc_remove,
};
builtin_platform_driver(hisi_lpc_driver);
diff --git a/drivers/bus/imx-weim.c b/drivers/bus/imx-weim.c
index db74334ca5ef..28bb65a5613f 100644
--- a/drivers/bus/imx-weim.c
+++ b/drivers/bus/imx-weim.c
@@ -19,6 +19,8 @@ struct imx_weim_devtype {
unsigned int cs_count;
unsigned int cs_regs_count;
unsigned int cs_stride;
+ unsigned int wcr_offset;
+ unsigned int wcr_bcm;
};
static const struct imx_weim_devtype imx1_weim_devtype = {
@@ -37,6 +39,8 @@ static const struct imx_weim_devtype imx50_weim_devtype = {
.cs_count = 4,
.cs_regs_count = 6,
.cs_stride = 0x18,
+ .wcr_offset = 0x90,
+ .wcr_bcm = BIT(0),
};
static const struct imx_weim_devtype imx51_weim_devtype = {
@@ -72,7 +76,7 @@ static const struct of_device_id weim_id_table[] = {
};
MODULE_DEVICE_TABLE(of, weim_id_table);
-static int __init imx_weim_gpr_setup(struct platform_device *pdev)
+static int imx_weim_gpr_setup(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
struct property *prop;
@@ -122,10 +126,10 @@ err:
}
/* Parse and set the timing for this device. */
-static int __init weim_timing_setup(struct device *dev,
- struct device_node *np, void __iomem *base,
- const struct imx_weim_devtype *devtype,
- struct cs_timing_state *ts)
+static int weim_timing_setup(struct device *dev,
+ struct device_node *np, void __iomem *base,
+ const struct imx_weim_devtype *devtype,
+ struct cs_timing_state *ts)
{
u32 cs_idx, value[MAX_CS_REGS_COUNT];
int i, ret;
@@ -183,8 +187,7 @@ static int __init weim_timing_setup(struct device *dev,
return 0;
}
-static int __init weim_parse_dt(struct platform_device *pdev,
- void __iomem *base)
+static int weim_parse_dt(struct platform_device *pdev, void __iomem *base)
{
const struct of_device_id *of_id = of_match_device(weim_id_table,
&pdev->dev);
@@ -192,6 +195,7 @@ static int __init weim_parse_dt(struct platform_device *pdev,
struct device_node *child;
int ret, have_child = 0;
struct cs_timing_state ts = {};
+ u32 reg;
if (devtype == &imx50_weim_devtype) {
ret = imx_weim_gpr_setup(pdev);
@@ -199,6 +203,17 @@ static int __init weim_parse_dt(struct platform_device *pdev,
return ret;
}
+ if (of_property_read_bool(pdev->dev.of_node, "fsl,burst-clk-enable")) {
+ if (devtype->wcr_bcm) {
+ reg = readl(base + devtype->wcr_offset);
+ writel(reg | devtype->wcr_bcm,
+ base + devtype->wcr_offset);
+ } else {
+ dev_err(&pdev->dev, "burst clk mode not supported.\n");
+ return -EINVAL;
+ }
+ }
+
for_each_available_child_of_node(pdev->dev.of_node, child) {
ret = weim_timing_setup(&pdev->dev, child, base, devtype, &ts);
if (ret)
@@ -217,7 +232,7 @@ static int __init weim_parse_dt(struct platform_device *pdev,
return ret;
}
-static int __init weim_probe(struct platform_device *pdev)
+static int weim_probe(struct platform_device *pdev)
{
struct resource *res;
struct clk *clk;
@@ -254,8 +269,9 @@ static struct platform_driver weim_driver = {
.name = "imx-weim",
.of_match_table = weim_id_table,
},
+ .probe = weim_probe,
};
-module_platform_driver_probe(weim_driver, weim_probe);
+module_platform_driver(weim_driver);
MODULE_AUTHOR("Freescale Semiconductor Inc.");
MODULE_DESCRIPTION("i.MX EIM Controller Driver");
diff --git a/drivers/bus/moxtet.c b/drivers/bus/moxtet.c
new file mode 100644
index 000000000000..36cf13eee6b8
--- /dev/null
+++ b/drivers/bus/moxtet.c
@@ -0,0 +1,885 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Turris Mox module configuration bus driver
+ *
+ * Copyright (C) 2019 Marek Behun <marek.behun@nic.cz>
+ */
+
+#include <dt-bindings/bus/moxtet.h>
+#include <linux/bitops.h>
+#include <linux/debugfs.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/moxtet.h>
+#include <linux/mutex.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/spi/spi.h>
+
+/*
+ * @name: module name for sysfs
+ * @hwirq_base: base index for IRQ for this module (-1 if no IRQs)
+ * @nirqs: how many interrupts does the shift register provide
+ * @desc: module description for kernel log
+ */
+static const struct {
+ const char *name;
+ int hwirq_base;
+ int nirqs;
+ const char *desc;
+} mox_module_table[] = {
+ /* do not change order of this array! */
+ { NULL, 0, 0, NULL },
+ { "sfp", -1, 0, "MOX D (SFP cage)" },
+ { "pci", MOXTET_IRQ_PCI, 1, "MOX B (Mini-PCIe)" },
+ { "topaz", MOXTET_IRQ_TOPAZ, 1, "MOX C (4 port switch)" },
+ { "peridot", MOXTET_IRQ_PERIDOT(0), 1, "MOX E (8 port switch)" },
+ { "usb3", MOXTET_IRQ_USB3, 2, "MOX F (USB 3.0)" },
+ { "pci-bridge", -1, 0, "MOX G (Mini-PCIe bridge)" },
+};
+
+static inline bool mox_module_known(unsigned int id)
+{
+ return id >= TURRIS_MOX_MODULE_FIRST && id <= TURRIS_MOX_MODULE_LAST;
+}
+
+static inline const char *mox_module_name(unsigned int id)
+{
+ if (mox_module_known(id))
+ return mox_module_table[id].name;
+ else
+ return "unknown";
+}
+
+#define DEF_MODULE_ATTR(name, fmt, ...) \
+static ssize_t \
+module_##name##_show(struct device *dev, struct device_attribute *a, \
+ char *buf) \
+{ \
+ struct moxtet_device *mdev = to_moxtet_device(dev); \
+ return sprintf(buf, (fmt), __VA_ARGS__); \
+} \
+static DEVICE_ATTR_RO(module_##name)
+
+DEF_MODULE_ATTR(id, "0x%x\n", mdev->id);
+DEF_MODULE_ATTR(name, "%s\n", mox_module_name(mdev->id));
+DEF_MODULE_ATTR(description, "%s\n",
+ mox_module_known(mdev->id) ? mox_module_table[mdev->id].desc
+ : "");
+
+static struct attribute *moxtet_dev_attrs[] = {
+ &dev_attr_module_id.attr,
+ &dev_attr_module_name.attr,
+ &dev_attr_module_description.attr,
+ NULL,
+};
+
+static const struct attribute_group moxtet_dev_group = {
+ .attrs = moxtet_dev_attrs,
+};
+
+static const struct attribute_group *moxtet_dev_groups[] = {
+ &moxtet_dev_group,
+ NULL,
+};
+
+static int moxtet_match(struct device *dev, struct device_driver *drv)
+{
+ struct moxtet_device *mdev = to_moxtet_device(dev);
+ struct moxtet_driver *tdrv = to_moxtet_driver(drv);
+ const enum turris_mox_module_id *t;
+
+ if (of_driver_match_device(dev, drv))
+ return 1;
+
+ if (!tdrv->id_table)
+ return 0;
+
+ for (t = tdrv->id_table; *t; ++t)
+ if (*t == mdev->id)
+ return 1;
+
+ return 0;
+}
+
+struct bus_type moxtet_bus_type = {
+ .name = "moxtet",
+ .dev_groups = moxtet_dev_groups,
+ .match = moxtet_match,
+};
+EXPORT_SYMBOL_GPL(moxtet_bus_type);
+
+int __moxtet_register_driver(struct module *owner,
+ struct moxtet_driver *mdrv)
+{
+ mdrv->driver.owner = owner;
+ mdrv->driver.bus = &moxtet_bus_type;
+ return driver_register(&mdrv->driver);
+}
+EXPORT_SYMBOL_GPL(__moxtet_register_driver);
+
+static int moxtet_dev_check(struct device *dev, void *data)
+{
+ struct moxtet_device *mdev = to_moxtet_device(dev);
+ struct moxtet_device *new_dev = data;
+
+ if (mdev->moxtet == new_dev->moxtet && mdev->id == new_dev->id &&
+ mdev->idx == new_dev->idx)
+ return -EBUSY;
+ return 0;
+}
+
+static void moxtet_dev_release(struct device *dev)
+{
+ struct moxtet_device *mdev = to_moxtet_device(dev);
+
+ put_device(mdev->moxtet->dev);
+ kfree(mdev);
+}
+
+static struct moxtet_device *
+moxtet_alloc_device(struct moxtet *moxtet)
+{
+ struct moxtet_device *dev;
+
+ if (!get_device(moxtet->dev))
+ return NULL;
+
+ dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+ if (!dev) {
+ put_device(moxtet->dev);
+ return NULL;
+ }
+
+ dev->moxtet = moxtet;
+ dev->dev.parent = moxtet->dev;
+ dev->dev.bus = &moxtet_bus_type;
+ dev->dev.release = moxtet_dev_release;
+
+ device_initialize(&dev->dev);
+
+ return dev;
+}
+
+static int moxtet_add_device(struct moxtet_device *dev)
+{
+ static DEFINE_MUTEX(add_mutex);
+ int ret;
+
+ if (dev->idx >= TURRIS_MOX_MAX_MODULES || dev->id > 0xf)
+ return -EINVAL;
+
+ dev_set_name(&dev->dev, "moxtet-%s.%u", mox_module_name(dev->id),
+ dev->idx);
+
+ mutex_lock(&add_mutex);
+
+ ret = bus_for_each_dev(&moxtet_bus_type, NULL, dev,
+ moxtet_dev_check);
+ if (ret)
+ goto done;
+
+ ret = device_add(&dev->dev);
+ if (ret < 0)
+ dev_err(dev->moxtet->dev, "can't add %s, status %d\n",
+ dev_name(&dev->dev), ret);
+
+done:
+ mutex_unlock(&add_mutex);
+ return ret;
+}
+
+static int __unregister(struct device *dev, void *null)
+{
+ if (dev->of_node) {
+ of_node_clear_flag(dev->of_node, OF_POPULATED);
+ of_node_put(dev->of_node);
+ }
+
+ device_unregister(dev);
+
+ return 0;
+}
+
+static struct moxtet_device *
+of_register_moxtet_device(struct moxtet *moxtet, struct device_node *nc)
+{
+ struct moxtet_device *dev;
+ u32 val;
+ int ret;
+
+ dev = moxtet_alloc_device(moxtet);
+ if (!dev) {
+ dev_err(moxtet->dev,
+ "Moxtet device alloc error for %pOF\n", nc);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ ret = of_property_read_u32(nc, "reg", &val);
+ if (ret) {
+ dev_err(moxtet->dev, "%pOF has no valid 'reg' property (%d)\n",
+ nc, ret);
+ goto err_put;
+ }
+
+ dev->idx = val;
+
+ if (dev->idx >= TURRIS_MOX_MAX_MODULES) {
+ dev_err(moxtet->dev, "%pOF Moxtet address 0x%x out of range\n",
+ nc, dev->idx);
+ ret = -EINVAL;
+ goto err_put;
+ }
+
+ dev->id = moxtet->modules[dev->idx];
+
+ if (!dev->id) {
+ dev_err(moxtet->dev, "%pOF Moxtet address 0x%x is empty\n", nc,
+ dev->idx);
+ ret = -ENODEV;
+ goto err_put;
+ }
+
+ of_node_get(nc);
+ dev->dev.of_node = nc;
+
+ ret = moxtet_add_device(dev);
+ if (ret) {
+ dev_err(moxtet->dev,
+ "Moxtet device register error for %pOF\n", nc);
+ of_node_put(nc);
+ goto err_put;
+ }
+
+ return dev;
+
+err_put:
+ put_device(&dev->dev);
+ return ERR_PTR(ret);
+}
+
+static void of_register_moxtet_devices(struct moxtet *moxtet)
+{
+ struct moxtet_device *dev;
+ struct device_node *nc;
+
+ if (!moxtet->dev->of_node)
+ return;
+
+ for_each_available_child_of_node(moxtet->dev->of_node, nc) {
+ if (of_node_test_and_set_flag(nc, OF_POPULATED))
+ continue;
+ dev = of_register_moxtet_device(moxtet, nc);
+ if (IS_ERR(dev)) {
+ dev_warn(moxtet->dev,
+ "Failed to create Moxtet device for %pOF\n",
+ nc);
+ of_node_clear_flag(nc, OF_POPULATED);
+ }
+ }
+}
+
+static void
+moxtet_register_devices_from_topology(struct moxtet *moxtet)
+{
+ struct moxtet_device *dev;
+ int i, ret;
+
+ for (i = 0; i < moxtet->count; ++i) {
+ dev = moxtet_alloc_device(moxtet);
+ if (!dev) {
+ dev_err(moxtet->dev, "Moxtet device %u alloc error\n",
+ i);
+ continue;
+ }
+
+ dev->idx = i;
+ dev->id = moxtet->modules[i];
+
+ ret = moxtet_add_device(dev);
+ if (ret && ret != -EBUSY) {
+ put_device(&dev->dev);
+ dev_err(moxtet->dev,
+ "Moxtet device %u register error: %i\n", i,
+ ret);
+ }
+ }
+}
+
+/*
+ * @nsame: how many modules with same id are already in moxtet->modules
+ */
+static int moxtet_set_irq(struct moxtet *moxtet, int idx, int id, int nsame)
+{
+ int i, first;
+ struct moxtet_irqpos *pos;
+
+ first = mox_module_table[id].hwirq_base +
+ nsame * mox_module_table[id].nirqs;
+
+ if (first + mox_module_table[id].nirqs > MOXTET_NIRQS)
+ return -EINVAL;
+
+ for (i = 0; i < mox_module_table[id].nirqs; ++i) {
+ pos = &moxtet->irq.position[first + i];
+ pos->idx = idx;
+ pos->bit = i;
+ moxtet->irq.exists |= BIT(first + i);
+ }
+
+ return 0;
+}
+
+static int moxtet_find_topology(struct moxtet *moxtet)
+{
+ u8 buf[TURRIS_MOX_MAX_MODULES];
+ int cnts[TURRIS_MOX_MODULE_LAST];
+ int i, ret;
+
+ memset(cnts, 0, sizeof(cnts));
+
+ ret = spi_read(to_spi_device(moxtet->dev), buf, TURRIS_MOX_MAX_MODULES);
+ if (ret < 0)
+ return ret;
+
+ if (buf[0] == TURRIS_MOX_CPU_ID_EMMC) {
+ dev_info(moxtet->dev, "Found MOX A (eMMC CPU) module\n");
+ } else if (buf[0] == TURRIS_MOX_CPU_ID_SD) {
+ dev_info(moxtet->dev, "Found MOX A (CPU) module\n");
+ } else {
+ dev_err(moxtet->dev, "Invalid Turris MOX A CPU module 0x%02x\n",
+ buf[0]);
+ return -ENODEV;
+ }
+
+ moxtet->count = 0;
+
+ for (i = 1; i < TURRIS_MOX_MAX_MODULES; ++i) {
+ int id;
+
+ if (buf[i] == 0xff)
+ break;
+
+ id = buf[i] & 0xf;
+
+ moxtet->modules[i-1] = id;
+ ++moxtet->count;
+
+ if (mox_module_known(id)) {
+ dev_info(moxtet->dev, "Found %s module\n",
+ mox_module_table[id].desc);
+
+ if (moxtet_set_irq(moxtet, i-1, id, cnts[id]++) < 0)
+ dev_err(moxtet->dev,
+ " Cannot set IRQ for module %s\n",
+ mox_module_table[id].desc);
+ } else {
+ dev_warn(moxtet->dev,
+ "Unknown Moxtet module found (ID 0x%02x)\n",
+ id);
+ }
+ }
+
+ return 0;
+}
+
+static int moxtet_spi_read(struct moxtet *moxtet, u8 *buf)
+{
+ struct spi_transfer xfer = {
+ .rx_buf = buf,
+ .tx_buf = moxtet->tx,
+ .len = moxtet->count + 1
+ };
+ int ret;
+
+ mutex_lock(&moxtet->lock);
+
+ ret = spi_sync_transfer(to_spi_device(moxtet->dev), &xfer, 1);
+
+ mutex_unlock(&moxtet->lock);
+
+ return ret;
+}
+
+int moxtet_device_read(struct device *dev)
+{
+ struct moxtet_device *mdev = to_moxtet_device(dev);
+ struct moxtet *moxtet = mdev->moxtet;
+ u8 buf[TURRIS_MOX_MAX_MODULES];
+ int ret;
+
+ if (mdev->idx >= moxtet->count)
+ return -EINVAL;
+
+ ret = moxtet_spi_read(moxtet, buf);
+ if (ret < 0)
+ return ret;
+
+ return buf[mdev->idx + 1] >> 4;
+}
+EXPORT_SYMBOL_GPL(moxtet_device_read);
+
+int moxtet_device_write(struct device *dev, u8 val)
+{
+ struct moxtet_device *mdev = to_moxtet_device(dev);
+ struct moxtet *moxtet = mdev->moxtet;
+ int ret;
+
+ if (mdev->idx >= moxtet->count)
+ return -EINVAL;
+
+ mutex_lock(&moxtet->lock);
+
+ moxtet->tx[moxtet->count - mdev->idx] = val;
+
+ ret = spi_write(to_spi_device(moxtet->dev), moxtet->tx,
+ moxtet->count + 1);
+
+ mutex_unlock(&moxtet->lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(moxtet_device_write);
+
+int moxtet_device_written(struct device *dev)
+{
+ struct moxtet_device *mdev = to_moxtet_device(dev);
+ struct moxtet *moxtet = mdev->moxtet;
+
+ if (mdev->idx >= moxtet->count)
+ return -EINVAL;
+
+ return moxtet->tx[moxtet->count - mdev->idx];
+}
+EXPORT_SYMBOL_GPL(moxtet_device_written);
+
+#ifdef CONFIG_DEBUG_FS
+static int moxtet_debug_open(struct inode *inode, struct file *file)
+{
+ file->private_data = inode->i_private;
+
+ return nonseekable_open(inode, file);
+}
+
+static ssize_t input_read(struct file *file, char __user *buf, size_t len,
+ loff_t *ppos)
+{
+ struct moxtet *moxtet = file->private_data;
+ u8 bin[TURRIS_MOX_MAX_MODULES];
+ u8 hex[sizeof(bin) * 2 + 1];
+ int ret, n;
+
+ ret = moxtet_spi_read(moxtet, bin);
+ if (ret < 0)
+ return ret;
+
+ n = moxtet->count + 1;
+ bin2hex(hex, bin, n);
+
+ hex[2*n] = '\n';
+
+ return simple_read_from_buffer(buf, len, ppos, hex, 2*n + 1);
+}
+
+static const struct file_operations input_fops = {
+ .owner = THIS_MODULE,
+ .open = moxtet_debug_open,
+ .read = input_read,
+ .llseek = no_llseek,
+};
+
+static ssize_t output_read(struct file *file, char __user *buf, size_t len,
+ loff_t *ppos)
+{
+ struct moxtet *moxtet = file->private_data;
+ u8 hex[TURRIS_MOX_MAX_MODULES * 2 + 1];
+ u8 *p = hex;
+ int i;
+
+ mutex_lock(&moxtet->lock);
+
+ for (i = 0; i < moxtet->count; ++i)
+ p = hex_byte_pack(p, moxtet->tx[moxtet->count - i]);
+
+ mutex_unlock(&moxtet->lock);
+
+ *p++ = '\n';
+
+ return simple_read_from_buffer(buf, len, ppos, hex, p - hex);
+}
+
+static ssize_t output_write(struct file *file, const char __user *buf,
+ size_t len, loff_t *ppos)
+{
+ struct moxtet *moxtet = file->private_data;
+ u8 bin[TURRIS_MOX_MAX_MODULES];
+ u8 hex[sizeof(bin) * 2 + 1];
+ ssize_t res;
+ loff_t dummy = 0;
+ int err, i;
+
+ if (len > 2 * moxtet->count + 1 || len < 2 * moxtet->count)
+ return -EINVAL;
+
+ res = simple_write_to_buffer(hex, sizeof(hex), &dummy, buf, len);
+ if (res < 0)
+ return res;
+
+ if (len % 2 == 1 && hex[len - 1] != '\n')
+ return -EINVAL;
+
+ err = hex2bin(bin, hex, moxtet->count);
+ if (err < 0)
+ return -EINVAL;
+
+ mutex_lock(&moxtet->lock);
+
+ for (i = 0; i < moxtet->count; ++i)
+ moxtet->tx[moxtet->count - i] = bin[i];
+
+ err = spi_write(to_spi_device(moxtet->dev), moxtet->tx,
+ moxtet->count + 1);
+
+ mutex_unlock(&moxtet->lock);
+
+ return err < 0 ? err : len;
+}
+
+static const struct file_operations output_fops = {
+ .owner = THIS_MODULE,
+ .open = moxtet_debug_open,
+ .read = output_read,
+ .write = output_write,
+ .llseek = no_llseek,
+};
+
+static int moxtet_register_debugfs(struct moxtet *moxtet)
+{
+ struct dentry *root, *entry;
+
+ root = debugfs_create_dir("moxtet", NULL);
+
+ if (IS_ERR(root))
+ return PTR_ERR(root);
+
+ entry = debugfs_create_file_unsafe("input", 0444, root, moxtet,
+ &input_fops);
+ if (IS_ERR(entry))
+ goto err_remove;
+
+ entry = debugfs_create_file_unsafe("output", 0644, root, moxtet,
+ &output_fops);
+ if (IS_ERR(entry))
+ goto err_remove;
+
+ moxtet->debugfs_root = root;
+
+ return 0;
+err_remove:
+ debugfs_remove_recursive(root);
+ return PTR_ERR(entry);
+}
+
+static void moxtet_unregister_debugfs(struct moxtet *moxtet)
+{
+ debugfs_remove_recursive(moxtet->debugfs_root);
+}
+#else
+static inline int moxtet_register_debugfs(struct moxtet *moxtet)
+{
+ return 0;
+}
+
+static inline void moxtet_unregister_debugfs(struct moxtet *moxtet)
+{
+}
+#endif
+
+static int moxtet_irq_domain_map(struct irq_domain *d, unsigned int irq,
+ irq_hw_number_t hw)
+{
+ struct moxtet *moxtet = d->host_data;
+
+ if (hw >= MOXTET_NIRQS || !(moxtet->irq.exists & BIT(hw))) {
+ dev_err(moxtet->dev, "Invalid hw irq number\n");
+ return -EINVAL;
+ }
+
+ irq_set_chip_data(irq, d->host_data);
+ irq_set_chip_and_handler(irq, &moxtet->irq.chip, handle_level_irq);
+
+ return 0;
+}
+
+static int moxtet_irq_domain_xlate(struct irq_domain *d,
+ struct device_node *ctrlr,
+ const u32 *intspec, unsigned int intsize,
+ unsigned long *out_hwirq,
+ unsigned int *out_type)
+{
+ struct moxtet *moxtet = d->host_data;
+ int irq;
+
+ if (WARN_ON(intsize < 1))
+ return -EINVAL;
+
+ irq = intspec[0];
+
+ if (irq >= MOXTET_NIRQS || !(moxtet->irq.exists & BIT(irq)))
+ return -EINVAL;
+
+ *out_hwirq = irq;
+ *out_type = IRQ_TYPE_NONE;
+ return 0;
+}
+
+static const struct irq_domain_ops moxtet_irq_domain = {
+ .map = moxtet_irq_domain_map,
+ .xlate = moxtet_irq_domain_xlate,
+};
+
+static void moxtet_irq_mask(struct irq_data *d)
+{
+ struct moxtet *moxtet = irq_data_get_irq_chip_data(d);
+
+ moxtet->irq.masked |= BIT(d->hwirq);
+}
+
+static void moxtet_irq_unmask(struct irq_data *d)
+{
+ struct moxtet *moxtet = irq_data_get_irq_chip_data(d);
+
+ moxtet->irq.masked &= ~BIT(d->hwirq);
+}
+
+static void moxtet_irq_print_chip(struct irq_data *d, struct seq_file *p)
+{
+ struct moxtet *moxtet = irq_data_get_irq_chip_data(d);
+ struct moxtet_irqpos *pos = &moxtet->irq.position[d->hwirq];
+ int id;
+
+ id = moxtet->modules[pos->idx];
+
+ seq_printf(p, " moxtet-%s.%i#%i", mox_module_name(id), pos->idx,
+ pos->bit);
+}
+
+static const struct irq_chip moxtet_irq_chip = {
+ .name = "moxtet",
+ .irq_mask = moxtet_irq_mask,
+ .irq_unmask = moxtet_irq_unmask,
+ .irq_print_chip = moxtet_irq_print_chip,
+};
+
+static int moxtet_irq_read(struct moxtet *moxtet, unsigned long *map)
+{
+ struct moxtet_irqpos *pos = moxtet->irq.position;
+ u8 buf[TURRIS_MOX_MAX_MODULES];
+ int i, ret;
+
+ ret = moxtet_spi_read(moxtet, buf);
+ if (ret < 0)
+ return ret;
+
+ *map = 0;
+
+ for_each_set_bit(i, &moxtet->irq.exists, MOXTET_NIRQS) {
+ if (!(buf[pos[i].idx + 1] & BIT(4 + pos[i].bit)))
+ set_bit(i, map);
+ }
+
+ return 0;
+}
+
+static irqreturn_t moxtet_irq_thread_fn(int irq, void *data)
+{
+ struct moxtet *moxtet = data;
+ unsigned long set;
+ int nhandled = 0, i, sub_irq, ret;
+
+ ret = moxtet_irq_read(moxtet, &set);
+ if (ret < 0)
+ goto out;
+
+ set &= ~moxtet->irq.masked;
+
+ do {
+ for_each_set_bit(i, &set, MOXTET_NIRQS) {
+ sub_irq = irq_find_mapping(moxtet->irq.domain, i);
+ handle_nested_irq(sub_irq);
+ dev_dbg(moxtet->dev, "%i irq\n", i);
+ ++nhandled;
+ }
+
+ ret = moxtet_irq_read(moxtet, &set);
+ if (ret < 0)
+ goto out;
+
+ set &= ~moxtet->irq.masked;
+ } while (set);
+
+out:
+ return (nhandled > 0 ? IRQ_HANDLED : IRQ_NONE);
+}
+
+static void moxtet_irq_free(struct moxtet *moxtet)
+{
+ int i, irq;
+
+ for (i = 0; i < MOXTET_NIRQS; ++i) {
+ if (moxtet->irq.exists & BIT(i)) {
+ irq = irq_find_mapping(moxtet->irq.domain, i);
+ irq_dispose_mapping(irq);
+ }
+ }
+
+ irq_domain_remove(moxtet->irq.domain);
+}
+
+static int moxtet_irq_setup(struct moxtet *moxtet)
+{
+ int i, ret;
+
+ moxtet->irq.domain = irq_domain_add_simple(moxtet->dev->of_node,
+ MOXTET_NIRQS, 0,
+ &moxtet_irq_domain, moxtet);
+ if (moxtet->irq.domain == NULL) {
+ dev_err(moxtet->dev, "Could not add IRQ domain\n");
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < MOXTET_NIRQS; ++i)
+ if (moxtet->irq.exists & BIT(i))
+ irq_create_mapping(moxtet->irq.domain, i);
+
+ moxtet->irq.chip = moxtet_irq_chip;
+ moxtet->irq.masked = ~0;
+
+ ret = request_threaded_irq(moxtet->dev_irq, NULL, moxtet_irq_thread_fn,
+ IRQF_ONESHOT, "moxtet", moxtet);
+ if (ret < 0)
+ goto err_free;
+
+ return 0;
+
+err_free:
+ moxtet_irq_free(moxtet);
+ return ret;
+}
+
+static int moxtet_probe(struct spi_device *spi)
+{
+ struct moxtet *moxtet;
+ int ret;
+
+ ret = spi_setup(spi);
+ if (ret < 0)
+ return ret;
+
+ moxtet = devm_kzalloc(&spi->dev, sizeof(struct moxtet),
+ GFP_KERNEL);
+ if (!moxtet)
+ return -ENOMEM;
+
+ moxtet->dev = &spi->dev;
+ spi_set_drvdata(spi, moxtet);
+
+ mutex_init(&moxtet->lock);
+
+ moxtet->dev_irq = of_irq_get(moxtet->dev->of_node, 0);
+ if (moxtet->dev_irq == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+
+ if (moxtet->dev_irq <= 0) {
+ dev_err(moxtet->dev, "No IRQ resource found\n");
+ return -ENXIO;
+ }
+
+ ret = moxtet_find_topology(moxtet);
+ if (ret < 0)
+ return ret;
+
+ if (moxtet->irq.exists) {
+ ret = moxtet_irq_setup(moxtet);
+ if (ret < 0)
+ return ret;
+ }
+
+ of_register_moxtet_devices(moxtet);
+ moxtet_register_devices_from_topology(moxtet);
+
+ ret = moxtet_register_debugfs(moxtet);
+ if (ret < 0)
+ dev_warn(moxtet->dev, "Failed creating debugfs entries: %i\n",
+ ret);
+
+ return 0;
+}
+
+static int moxtet_remove(struct spi_device *spi)
+{
+ struct moxtet *moxtet = spi_get_drvdata(spi);
+
+ free_irq(moxtet->dev_irq, moxtet);
+
+ moxtet_irq_free(moxtet);
+
+ moxtet_unregister_debugfs(moxtet);
+
+ device_for_each_child(moxtet->dev, NULL, __unregister);
+
+ mutex_destroy(&moxtet->lock);
+
+ return 0;
+}
+
+static const struct of_device_id moxtet_dt_ids[] = {
+ { .compatible = "cznic,moxtet" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, moxtet_dt_ids);
+
+static struct spi_driver moxtet_spi_driver = {
+ .driver = {
+ .name = "moxtet",
+ .of_match_table = moxtet_dt_ids,
+ },
+ .probe = moxtet_probe,
+ .remove = moxtet_remove,
+};
+
+static int __init moxtet_init(void)
+{
+ int ret;
+
+ ret = bus_register(&moxtet_bus_type);
+ if (ret < 0) {
+ pr_err("moxtet bus registration failed: %d\n", ret);
+ goto error;
+ }
+
+ ret = spi_register_driver(&moxtet_spi_driver);
+ if (ret < 0) {
+ pr_err("moxtet spi driver registration failed: %d\n", ret);
+ goto error_bus;
+ }
+
+ return 0;
+
+error_bus:
+ bus_unregister(&moxtet_bus_type);
+error:
+ return ret;
+}
+postcore_initcall_sync(moxtet_init);
+
+static void __exit moxtet_exit(void)
+{
+ spi_unregister_driver(&moxtet_spi_driver);
+ bus_unregister(&moxtet_bus_type);
+}
+module_exit(moxtet_exit);
+
+MODULE_AUTHOR("Marek Behun <marek.behun@nic.cz>");
+MODULE_DESCRIPTION("CZ.NIC's Turris Mox module configuration bus");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/bus/sunxi-rsb.c b/drivers/bus/sunxi-rsb.c
index 1b76d9585902..be79d6c6a4e4 100644
--- a/drivers/bus/sunxi-rsb.c
+++ b/drivers/bus/sunxi-rsb.c
@@ -651,10 +651,8 @@ static int sunxi_rsb_probe(struct platform_device *pdev)
return PTR_ERR(rsb->regs);
irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(dev, "failed to retrieve irq: %d\n", irq);
+ if (irq < 0)
return irq;
- }
rsb->clk = devm_clk_get(dev, NULL);
if (IS_ERR(rsb->clk)) {
diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c
index e6deabd8305d..ad50efb470aa 100644
--- a/drivers/bus/ti-sysc.c
+++ b/drivers/bus/ti-sysc.c
@@ -1,14 +1,6 @@
+// SPDX-License-Identifier: GPL-2.0
/*
* ti-sysc.c - Texas Instruments sysc interconnect target driver
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
*/
#include <linux/io.h>
@@ -62,18 +54,26 @@ static const char * const clock_names[SYSC_MAX_CLOCKS] = {
* @module_size: size of the interconnect target module
* @module_va: virtual address of the interconnect target module
* @offsets: register offsets from module base
+ * @mdata: ti-sysc to hwmod translation data for a module
* @clocks: clocks used by the interconnect target module
* @clock_roles: clock role names for the found clocks
* @nr_clocks: number of clocks used by the interconnect target module
+ * @rsts: resets used by the interconnect target module
* @legacy_mode: configured for legacy mode if set
* @cap: interconnect target module capabilities
* @cfg: interconnect target module configuration
+ * @cookie: data used by legacy platform callbacks
* @name: name if available
* @revision: interconnect target module revision
+ * @enabled: sysc runtime enabled status
* @needs_resume: runtime resume needed on resume from suspend
+ * @child_needs_resume: runtime resume needed for child on resume from suspend
+ * @disable_on_idle: status flag used for disabling modules with resets
+ * @idle_work: work structure used to perform delayed idle on a module
* @clk_enable_quirk: module specific clock enable quirk
* @clk_disable_quirk: module specific clock disable quirk
* @reset_done_quirk: module specific reset done quirk
+ * @module_enable_quirk: module specific enable quirk
*/
struct sysc {
struct device *dev;
@@ -95,11 +95,11 @@ struct sysc {
unsigned int enabled:1;
unsigned int needs_resume:1;
unsigned int child_needs_resume:1;
- unsigned int disable_on_idle:1;
struct delayed_work idle_work;
void (*clk_enable_quirk)(struct sysc *sysc);
void (*clk_disable_quirk)(struct sysc *sysc);
void (*reset_done_quirk)(struct sysc *sysc);
+ void (*module_enable_quirk)(struct sysc *sysc);
};
static void sysc_parse_dts_quirks(struct sysc *ddata, struct device_node *np,
@@ -280,9 +280,6 @@ static int sysc_get_one_clock(struct sysc *ddata, const char *name)
ddata->clocks[index] = devm_clk_get(ddata->dev, name);
if (IS_ERR(ddata->clocks[index])) {
- if (PTR_ERR(ddata->clocks[index]) == -ENOENT)
- return 0;
-
dev_err(ddata->dev, "clock get error for %s: %li\n",
name, PTR_ERR(ddata->clocks[index]));
@@ -357,7 +354,7 @@ static int sysc_get_clocks(struct sysc *ddata)
continue;
error = sysc_get_one_clock(ddata, name);
- if (error && error != -ENOENT)
+ if (error)
return error;
}
@@ -503,7 +500,7 @@ static void sysc_clkdm_allow_idle(struct sysc *ddata)
static int sysc_init_resets(struct sysc *ddata)
{
ddata->rsts =
- devm_reset_control_get_optional(ddata->dev, "rstctrl");
+ devm_reset_control_get_optional_shared(ddata->dev, "rstctrl");
if (IS_ERR(ddata->rsts))
return PTR_ERR(ddata->rsts);
@@ -615,8 +612,8 @@ static void sysc_check_quirk_stdout(struct sysc *ddata,
* node but children have "ti,hwmods". These belong to the interconnect
* target node and are managed by this driver.
*/
-static int sysc_check_one_child(struct sysc *ddata,
- struct device_node *np)
+static void sysc_check_one_child(struct sysc *ddata,
+ struct device_node *np)
{
const char *name;
@@ -626,22 +623,14 @@ static int sysc_check_one_child(struct sysc *ddata,
sysc_check_quirk_stdout(ddata, np);
sysc_parse_dts_quirks(ddata, np, true);
-
- return 0;
}
-static int sysc_check_children(struct sysc *ddata)
+static void sysc_check_children(struct sysc *ddata)
{
struct device_node *child;
- int error;
-
- for_each_child_of_node(ddata->dev->of_node, child) {
- error = sysc_check_one_child(ddata, child);
- if (error)
- return error;
- }
- return 0;
+ for_each_child_of_node(ddata->dev->of_node, child)
+ sysc_check_one_child(ddata, child);
}
/*
@@ -794,9 +783,7 @@ static int sysc_map_and_check_registers(struct sysc *ddata)
if (error)
return error;
- error = sysc_check_children(ddata);
- if (error)
- return error;
+ sysc_check_children(ddata);
error = sysc_parse_registers(ddata);
if (error)
@@ -940,6 +927,9 @@ set_autoidle:
sysc_write(ddata, ddata->offsets[SYSC_SYSCONFIG], reg);
}
+ if (ddata->module_enable_quirk)
+ ddata->module_enable_quirk(ddata);
+
return 0;
}
@@ -949,7 +939,7 @@ static int sysc_best_idle_mode(u32 idlemodes, u32 *best_mode)
*best_mode = SYSC_IDLE_SMART_WKUP;
else if (idlemodes & BIT(SYSC_IDLE_SMART))
*best_mode = SYSC_IDLE_SMART;
- else if (idlemodes & SYSC_IDLE_FORCE)
+ else if (idlemodes & BIT(SYSC_IDLE_FORCE))
*best_mode = SYSC_IDLE_FORCE;
else
return -EINVAL;
@@ -1031,8 +1021,7 @@ static int __maybe_unused sysc_runtime_suspend_legacy(struct device *dev,
dev_err(dev, "%s: could not idle: %i\n",
__func__, error);
- if (ddata->disable_on_idle)
- reset_control_assert(ddata->rsts);
+ reset_control_assert(ddata->rsts);
return 0;
}
@@ -1043,8 +1032,7 @@ static int __maybe_unused sysc_runtime_resume_legacy(struct device *dev,
struct ti_sysc_platform_data *pdata;
int error;
- if (ddata->disable_on_idle)
- reset_control_deassert(ddata->rsts);
+ reset_control_deassert(ddata->rsts);
pdata = dev_get_platdata(ddata->dev);
if (!pdata)
@@ -1091,10 +1079,9 @@ static int __maybe_unused sysc_runtime_suspend(struct device *dev)
ddata->enabled = false;
err_allow_idle:
- sysc_clkdm_allow_idle(ddata);
+ reset_control_assert(ddata->rsts);
- if (ddata->disable_on_idle)
- reset_control_assert(ddata->rsts);
+ sysc_clkdm_allow_idle(ddata);
return error;
}
@@ -1109,11 +1096,11 @@ static int __maybe_unused sysc_runtime_resume(struct device *dev)
if (ddata->enabled)
return 0;
- if (ddata->disable_on_idle)
- reset_control_deassert(ddata->rsts);
sysc_clkdm_deny_idle(ddata);
+ reset_control_deassert(ddata->rsts);
+
if (sysc_opt_clks_needed(ddata)) {
error = sysc_enable_opt_clocks(ddata);
if (error)
@@ -1256,6 +1243,9 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = {
SYSC_MODULE_QUIRK_I2C),
SYSC_QUIRK("i2c", 0, 0, 0x10, 0x90, 0x5040000a, 0xfffff0f0,
SYSC_MODULE_QUIRK_I2C),
+ SYSC_QUIRK("gpu", 0x50000000, 0x14, -1, -1, 0x00010201, 0xffffffff, 0),
+ SYSC_QUIRK("gpu", 0x50000000, 0xfe00, 0xfe10, -1, 0x40000000 , 0xffffffff,
+ SYSC_MODULE_QUIRK_SGX),
SYSC_QUIRK("wdt", 0, 0, 0x10, 0x14, 0x502a0500, 0xfffff0f0,
SYSC_MODULE_QUIRK_WDT),
@@ -1267,11 +1257,15 @@ static const struct sysc_revision_quirk sysc_revision_quirks[] = {
SYSC_QUIRK("control", 0, 0, 0x10, -1, 0x40000900, 0xffffffff, 0),
SYSC_QUIRK("cpgmac", 0, 0x1200, 0x1208, 0x1204, 0x4edb1902,
0xffff00f0, 0),
- SYSC_QUIRK("dcan", 0, 0, -1, -1, 0xffffffff, 0xffffffff, 0),
+ SYSC_QUIRK("dcan", 0, 0x20, -1, -1, 0xa3170504, 0xffffffff, 0),
+ SYSC_QUIRK("dcan", 0, 0x20, -1, -1, 0x4edb1902, 0xffffffff, 0),
SYSC_QUIRK("dmic", 0, 0, 0x10, -1, 0x50010000, 0xffffffff, 0),
SYSC_QUIRK("dwc3", 0, 0, 0x10, -1, 0x500a0200, 0xffffffff, 0),
+ SYSC_QUIRK("d2d", 0x4a0b6000, 0, 0x10, 0x14, 0x00000010, 0xffffffff, 0),
+ SYSC_QUIRK("d2d", 0x4a0cd000, 0, 0x10, 0x14, 0x00000010, 0xffffffff, 0),
SYSC_QUIRK("epwmss", 0, 0, 0x4, -1, 0x47400001, 0xffffffff, 0),
SYSC_QUIRK("gpu", 0, 0x1fc00, 0x1fc10, -1, 0, 0, 0),
+ SYSC_QUIRK("gpu", 0, 0xfe00, 0xfe10, -1, 0x40000000 , 0xffffffff, 0),
SYSC_QUIRK("hsi", 0, 0, 0x10, 0x14, 0x50043101, 0xffffffff, 0),
SYSC_QUIRK("iss", 0, 0, 0x10, -1, 0x40000101, 0xffffffff, 0),
SYSC_QUIRK("lcdc", 0, 0, 0x54, -1, 0x4f201000, 0xffffffff, 0),
@@ -1423,6 +1417,15 @@ static void sysc_clk_disable_quirk_i2c(struct sysc *ddata)
sysc_clk_quirk_i2c(ddata, false);
}
+/* 36xx SGX needs a quirk to bypass OCP IPG interrupt logic */
+static void sysc_module_enable_quirk_sgx(struct sysc *ddata)
+{
+ int offset = 0xff08; /* OCP_DEBUG_CONFIG */
+ u32 val = BIT(31); /* THALIA_INT_BYPASS */
+
+ sysc_write(ddata, offset, val);
+}
+
/* Watchdog timer needs a disable sequence after reset */
static void sysc_reset_done_quirk_wdt(struct sysc *ddata)
{
@@ -1465,6 +1468,9 @@ static void sysc_init_module_quirks(struct sysc *ddata)
return;
}
+ if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_SGX)
+ ddata->module_enable_quirk = sysc_module_enable_quirk_sgx;
+
if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_WDT)
ddata->reset_done_quirk = sysc_reset_done_quirk_wdt;
}
@@ -1531,7 +1537,7 @@ static int sysc_legacy_init(struct sysc *ddata)
*/
static int sysc_rstctrl_reset_deassert(struct sysc *ddata, bool reset)
{
- int error, val;
+ int error;
if (!ddata->rsts)
return 0;
@@ -1542,14 +1548,9 @@ static int sysc_rstctrl_reset_deassert(struct sysc *ddata, bool reset)
return error;
}
- error = reset_control_deassert(ddata->rsts);
- if (error == -EEXIST)
- return 0;
-
- error = readx_poll_timeout(reset_control_status, ddata->rsts, val,
- val == 0, 100, MAX_MODULE_SOFTRESET_WAIT);
+ reset_control_deassert(ddata->rsts);
- return error;
+ return 0;
}
/*
@@ -1558,12 +1559,11 @@ static int sysc_rstctrl_reset_deassert(struct sysc *ddata, bool reset)
*/
static int sysc_reset(struct sysc *ddata)
{
- int sysc_offset, syss_offset, sysc_val, rstval, quirks, error = 0;
+ int sysc_offset, syss_offset, sysc_val, rstval, error = 0;
u32 sysc_mask, syss_done;
sysc_offset = ddata->offsets[SYSC_SYSCONFIG];
syss_offset = ddata->offsets[SYSC_SYSSTATUS];
- quirks = ddata->cfg.quirks;
if (ddata->legacy_mode || sysc_offset < 0 ||
ddata->cap->regbits->srst_shift < 0 ||
@@ -1629,17 +1629,19 @@ static int sysc_init_module(struct sysc *ddata)
if (error)
return error;
- if (manage_clocks) {
- sysc_clkdm_deny_idle(ddata);
+ sysc_clkdm_deny_idle(ddata);
- error = sysc_enable_opt_clocks(ddata);
- if (error)
- return error;
+ /*
+ * Always enable clocks. The bootloader may or may not have enabled
+ * the related clocks.
+ */
+ error = sysc_enable_opt_clocks(ddata);
+ if (error)
+ return error;
- error = sysc_enable_main_clocks(ddata);
- if (error)
- goto err_opt_clocks;
- }
+ error = sysc_enable_main_clocks(ddata);
+ if (error)
+ goto err_opt_clocks;
if (!(ddata->cfg.quirks & SYSC_QUIRK_NO_RESET_ON_INIT)) {
error = sysc_rstctrl_reset_deassert(ddata, true);
@@ -1657,7 +1659,7 @@ static int sysc_init_module(struct sysc *ddata)
goto err_main_clocks;
}
- if (!ddata->legacy_mode && manage_clocks) {
+ if (!ddata->legacy_mode) {
error = sysc_enable_module(ddata->dev);
if (error)
goto err_main_clocks;
@@ -1674,6 +1676,7 @@ err_main_clocks:
if (manage_clocks)
sysc_disable_main_clocks(ddata);
err_opt_clocks:
+ /* No re-enable of clockdomain autoidle to prevent module autoidle */
if (manage_clocks) {
sysc_disable_opt_clocks(ddata);
sysc_clkdm_allow_idle(ddata);
@@ -1692,10 +1695,7 @@ static int sysc_init_sysc_mask(struct sysc *ddata)
if (error)
return 0;
- if (val)
- ddata->cfg.sysc_val = val & ddata->cap->sysc_mask;
- else
- ddata->cfg.sysc_val = ddata->cap->sysc_mask;
+ ddata->cfg.sysc_val = val & ddata->cap->sysc_mask;
return 0;
}
@@ -2357,6 +2357,27 @@ static void ti_sysc_idle(struct work_struct *work)
ddata = container_of(work, struct sysc, idle_work.work);
+ /*
+ * One time decrement of clock usage counts if left on from init.
+ * Note that we disable opt clocks unconditionally in this case
+ * as they are enabled unconditionally during init without
+ * considering sysc_opt_clks_needed() at that point.
+ */
+ if (ddata->cfg.quirks & (SYSC_QUIRK_NO_IDLE |
+ SYSC_QUIRK_NO_IDLE_ON_INIT)) {
+ sysc_disable_main_clocks(ddata);
+ sysc_disable_opt_clocks(ddata);
+ sysc_clkdm_allow_idle(ddata);
+ }
+
+ /* Keep permanent PM runtime usage count for SYSC_QUIRK_NO_IDLE */
+ if (ddata->cfg.quirks & SYSC_QUIRK_NO_IDLE)
+ return;
+
+ /*
+ * Decrement PM runtime usage count for SYSC_QUIRK_NO_IDLE_ON_INIT
+ * and SYSC_QUIRK_NO_RESET_ON_INIT
+ */
if (pm_runtime_active(ddata->dev))
pm_runtime_put_sync(ddata->dev);
}
@@ -2385,27 +2406,27 @@ static int sysc_probe(struct platform_device *pdev)
error = sysc_init_dts_quirks(ddata);
if (error)
- goto unprepare;
+ return error;
error = sysc_map_and_check_registers(ddata);
if (error)
- goto unprepare;
+ return error;
error = sysc_init_sysc_mask(ddata);
if (error)
- goto unprepare;
+ return error;
error = sysc_init_idlemodes(ddata);
if (error)
- goto unprepare;
+ return error;
error = sysc_init_syss_mask(ddata);
if (error)
- goto unprepare;
+ return error;
error = sysc_init_pdata(ddata);
if (error)
- goto unprepare;
+ return error;
sysc_init_early_quirks(ddata);
@@ -2415,7 +2436,7 @@ static int sysc_probe(struct platform_device *pdev)
error = sysc_init_resets(ddata);
if (error)
- return error;
+ goto unprepare;
error = sysc_init_module(ddata);
if (error)
@@ -2429,6 +2450,10 @@ static int sysc_probe(struct platform_device *pdev)
goto unprepare;
}
+ /* Balance reset counts */
+ if (ddata->rsts)
+ reset_control_assert(ddata->rsts);
+
sysc_show_registers(ddata);
ddata->dev->type = &sysc_device_type;
@@ -2441,16 +2466,14 @@ static int sysc_probe(struct platform_device *pdev)
INIT_DELAYED_WORK(&ddata->idle_work, ti_sysc_idle);
/* At least earlycon won't survive without deferred idle */
- if (ddata->cfg.quirks & (SYSC_QUIRK_NO_IDLE_ON_INIT |
+ if (ddata->cfg.quirks & (SYSC_QUIRK_NO_IDLE |
+ SYSC_QUIRK_NO_IDLE_ON_INIT |
SYSC_QUIRK_NO_RESET_ON_INIT)) {
schedule_delayed_work(&ddata->idle_work, 3000);
} else {
pm_runtime_put(&pdev->dev);
}
- if (!of_get_available_child_count(ddata->dev->of_node))
- ddata->disable_on_idle = true;
-
return 0;
err:
diff --git a/drivers/bus/uniphier-system-bus.c b/drivers/bus/uniphier-system-bus.c
index e845c1a93f21..f70dedace20b 100644
--- a/drivers/bus/uniphier-system-bus.c
+++ b/drivers/bus/uniphier-system-bus.c
@@ -176,7 +176,6 @@ static int uniphier_system_bus_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct uniphier_system_bus_priv *priv;
- struct resource *regs;
const __be32 *ranges;
u32 cells, addr, size;
u64 paddr;
@@ -186,8 +185,7 @@ static int uniphier_system_bus_probe(struct platform_device *pdev)
if (!priv)
return -ENOMEM;
- regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- priv->membase = devm_ioremap_resource(dev, regs);
+ priv->membase = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(priv->membase))
return PTR_ERR(priv->membase);