author | David S. Miller <davem@davemloft.net> | 2021-07-23 15:59:46 +0100
committer | David S. Miller <davem@davemloft.net> | 2021-07-23 16:13:06 +0100
commit | 5af84df962dd6699e3972fda7a0c8b579fb3ab04 (patch)
tree | 0a66f54c99c0c0d22588304d030ecb752487dfa1 /drivers
parent | 090597b4a9c1b81b03fd7cfb4ba458a0e7a78b31 (diff)
parent | 9f42f674a89200d4f465a7db6070e079f3c6145f (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net
Conflicts are simple overlapping changes.
Signed-off-by: David S. Miller <davem@davemloft.net>
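Editor's note: several of the driver fixes pulled in below (powernv-op-panel, ipu_idmac, mpc512x_dma, k3-udma, longhaul) simply add an explicit `break;` where a switch case previously fell through into the next label. The following is a minimal userspace sketch of that bug class, not code from this merge; the function names and values are illustrative only.

```c
/*
 * Illustrative only: shows why the drivers below gained explicit break
 * statements. A case that does real work but lacks a break silently
 * falls into the next label and clobbers its own result.
 */
#include <stdio.h>

static int handle_event_buggy(int event)
{
	int priority = 0;

	switch (event) {
	case 1:
		priority = 4;	/* missing break: falls into default */
	default:
		priority = -1;	/* overwrites the value set for case 1 */
	}
	return priority;
}

static int handle_event_fixed(int event)
{
	int priority = 0;

	switch (event) {
	case 1:
		priority = 4;
		break;		/* explicit break, as added in the fixes */
	default:
		priority = -1;
		break;
	}
	return priority;
}

int main(void)
{
	printf("buggy: %d\n", handle_event_buggy(1));	/* prints -1 */
	printf("fixed: %d\n", handle_event_fixed(1));	/* prints 4 */
	return 0;
}
```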
Diffstat (limited to 'drivers')
145 files changed, 1493 insertions, 2177 deletions
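Editor's note: the arm_ffa and arm_scmi hunks below both tighten an error-code translation helper (ffa_to_linux_errno(), scmi_to_linux_errno()) so the negated firmware error is range-checked against the lookup table before it is used as an index. The sketch below is a standalone userspace illustration of that bounds-check pattern, assuming a made-up `fw_errmap` table; it is not the kernel implementation.

```c
/*
 * Illustrative userspace sketch of the bounds-check pattern applied in
 * this pull: negate the firmware error code into an array index and
 * validate it against the table size before dereferencing.
 */
#include <stdio.h>
#include <errno.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

/* Hypothetical firmware-error-to-errno table, for illustration only. */
static const int fw_errmap[] = {
	0,		/* SUCCESS */
	-EINVAL,	/* INVALID_PARAMETERS */
	-ENOMEM,	/* NO_MEMORY */
	-EBUSY,		/* BUSY */
};

static int fw_to_linux_errno(int fw_errno)
{
	int err_idx = -fw_errno;

	/* Reject anything outside the table instead of indexing blindly. */
	if (err_idx >= 0 && err_idx < (int)ARRAY_SIZE(fw_errmap))
		return fw_errmap[err_idx];
	return -EINVAL;	/* unknown firmware error */
}

int main(void)
{
	printf("%d %d %d\n",
	       fw_to_linux_errno(0),	/* 0 */
	       fw_to_linux_errno(-2),	/* -ENOMEM */
	       fw_to_linux_errno(-99));	/* -EINVAL: out of range */
	return 0;
}
```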
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c index b7d663736d35..c38317979f74 100644 --- a/drivers/block/nbd.c +++ b/drivers/block/nbd.c @@ -239,8 +239,8 @@ static void nbd_dev_remove(struct nbd_device *nbd) if (disk) { del_gendisk(disk); - blk_mq_free_tag_set(&nbd->tag_set); blk_cleanup_disk(disk); + blk_mq_free_tag_set(&nbd->tag_set); } /* diff --git a/drivers/block/paride/pd.c b/drivers/block/paride/pd.c index 3b2b8e872beb..9b3298926356 100644 --- a/drivers/block/paride/pd.c +++ b/drivers/block/paride/pd.c @@ -1014,8 +1014,8 @@ static void __exit pd_exit(void) if (p) { disk->gd = NULL; del_gendisk(p); - blk_mq_free_tag_set(&disk->tag_set); blk_cleanup_disk(p); + blk_mq_free_tag_set(&disk->tag_set); pi_release(disk->pi); } } diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c index 8d49f8fa98bb..d83fee21f6c5 100644 --- a/drivers/block/xen-blkfront.c +++ b/drivers/block/xen-blkfront.c @@ -502,34 +502,21 @@ static int blkif_getgeo(struct block_device *bd, struct hd_geometry *hg) static int blkif_ioctl(struct block_device *bdev, fmode_t mode, unsigned command, unsigned long argument) { - struct blkfront_info *info = bdev->bd_disk->private_data; int i; - dev_dbg(&info->xbdev->dev, "command: 0x%x, argument: 0x%lx\n", - command, (long)argument); - switch (command) { case CDROMMULTISESSION: - dev_dbg(&info->xbdev->dev, "FIXME: support multisession CDs later\n"); for (i = 0; i < sizeof(struct cdrom_multisession); i++) if (put_user(0, (char __user *)(argument + i))) return -EFAULT; return 0; - - case CDROM_GET_CAPABILITY: { - struct gendisk *gd = info->gd; - if (gd->flags & GENHD_FL_CD) + case CDROM_GET_CAPABILITY: + if (bdev->bd_disk->flags & GENHD_FL_CD) return 0; return -EINVAL; - } - default: - /*printk(KERN_ALERT "ioctl %08x not supported by Xen blkdev\n", - command);*/ - return -EINVAL; /* same return as native Linux */ + return -EINVAL; } - - return 0; } static unsigned long blkif_ring_get_request(struct blkfront_ring_info *rinfo, @@ -1177,36 +1164,6 @@ out_release_minors: return err; } -static void xlvbd_release_gendisk(struct blkfront_info *info) -{ - unsigned int minor, nr_minors, i; - struct blkfront_ring_info *rinfo; - - if (info->rq == NULL) - return; - - /* No more blkif_request(). */ - blk_mq_stop_hw_queues(info->rq); - - for_each_rinfo(info, rinfo, i) { - /* No more gnttab callback work. */ - gnttab_cancel_free_callback(&rinfo->callback); - - /* Flush gnttab callback work. Must be done with no locks held. */ - flush_work(&rinfo->work); - } - - del_gendisk(info->gd); - - minor = info->gd->first_minor; - nr_minors = info->gd->minors; - xlbd_release_minors(minor, nr_minors); - - blk_cleanup_disk(info->gd); - info->gd = NULL; - blk_mq_free_tag_set(&info->tag_set); -} - /* Already hold rinfo->ring_lock. */ static inline void kick_pending_request_queues_locked(struct blkfront_ring_info *rinfo) { @@ -1756,12 +1713,6 @@ abort_transaction: return err; } -static void free_info(struct blkfront_info *info) -{ - list_del(&info->info_list); - kfree(info); -} - /* Common code used when first setting up, and when resuming. 
*/ static int talk_to_blkback(struct xenbus_device *dev, struct blkfront_info *info) @@ -1880,13 +1831,6 @@ again: xenbus_dev_fatal(dev, err, "%s", message); destroy_blkring: blkif_free(info, 0); - - mutex_lock(&blkfront_mutex); - free_info(info); - mutex_unlock(&blkfront_mutex); - - dev_set_drvdata(&dev->dev, NULL); - return err; } @@ -2126,38 +2070,26 @@ static int blkfront_resume(struct xenbus_device *dev) static void blkfront_closing(struct blkfront_info *info) { struct xenbus_device *xbdev = info->xbdev; - struct block_device *bdev = NULL; - - mutex_lock(&info->mutex); + struct blkfront_ring_info *rinfo; + unsigned int i; - if (xbdev->state == XenbusStateClosing) { - mutex_unlock(&info->mutex); + if (xbdev->state == XenbusStateClosing) return; - } - if (info->gd) - bdev = bdgrab(info->gd->part0); - - mutex_unlock(&info->mutex); - - if (!bdev) { - xenbus_frontend_closed(xbdev); - return; - } + /* No more blkif_request(). */ + blk_mq_stop_hw_queues(info->rq); + blk_set_queue_dying(info->rq); + set_capacity(info->gd, 0); - mutex_lock(&bdev->bd_disk->open_mutex); + for_each_rinfo(info, rinfo, i) { + /* No more gnttab callback work. */ + gnttab_cancel_free_callback(&rinfo->callback); - if (bdev->bd_openers) { - xenbus_dev_error(xbdev, -EBUSY, - "Device in use; refusing to close"); - xenbus_switch_state(xbdev, XenbusStateClosing); - } else { - xlvbd_release_gendisk(info); - xenbus_frontend_closed(xbdev); + /* Flush gnttab callback work. Must be done with no locks held. */ + flush_work(&rinfo->work); } - mutex_unlock(&bdev->bd_disk->open_mutex); - bdput(bdev); + xenbus_frontend_closed(xbdev); } static void blkfront_setup_discard(struct blkfront_info *info) @@ -2472,8 +2404,7 @@ static void blkback_changed(struct xenbus_device *dev, break; fallthrough; case XenbusStateClosing: - if (info) - blkfront_closing(info); + blkfront_closing(info); break; } } @@ -2481,56 +2412,21 @@ static void blkback_changed(struct xenbus_device *dev, static int blkfront_remove(struct xenbus_device *xbdev) { struct blkfront_info *info = dev_get_drvdata(&xbdev->dev); - struct block_device *bdev = NULL; - struct gendisk *disk; dev_dbg(&xbdev->dev, "%s removed", xbdev->nodename); - if (!info) - return 0; - - blkif_free(info, 0); - - mutex_lock(&info->mutex); - - disk = info->gd; - if (disk) - bdev = bdgrab(disk->part0); - - info->xbdev = NULL; - mutex_unlock(&info->mutex); - - if (!bdev) { - mutex_lock(&blkfront_mutex); - free_info(info); - mutex_unlock(&blkfront_mutex); - return 0; - } - - /* - * The xbdev was removed before we reached the Closed - * state. See if it's safe to remove the disk. If the bdev - * isn't closed yet, we let release take care of it. 
- */ - - mutex_lock(&disk->open_mutex); - info = disk->private_data; - - dev_warn(disk_to_dev(disk), - "%s was hot-unplugged, %d stale handles\n", - xbdev->nodename, bdev->bd_openers); + del_gendisk(info->gd); - if (info && !bdev->bd_openers) { - xlvbd_release_gendisk(info); - disk->private_data = NULL; - mutex_lock(&blkfront_mutex); - free_info(info); - mutex_unlock(&blkfront_mutex); - } + mutex_lock(&blkfront_mutex); + list_del(&info->info_list); + mutex_unlock(&blkfront_mutex); - mutex_unlock(&disk->open_mutex); - bdput(bdev); + blkif_free(info, 0); + xlbd_release_minors(info->gd->first_minor, info->gd->minors); + blk_cleanup_disk(info->gd); + blk_mq_free_tag_set(&info->tag_set); + kfree(info); return 0; } @@ -2541,77 +2437,9 @@ static int blkfront_is_ready(struct xenbus_device *dev) return info->is_ready && info->xbdev; } -static int blkif_open(struct block_device *bdev, fmode_t mode) -{ - struct gendisk *disk = bdev->bd_disk; - struct blkfront_info *info; - int err = 0; - - mutex_lock(&blkfront_mutex); - - info = disk->private_data; - if (!info) { - /* xbdev gone */ - err = -ERESTARTSYS; - goto out; - } - - mutex_lock(&info->mutex); - - if (!info->gd) - /* xbdev is closed */ - err = -ERESTARTSYS; - - mutex_unlock(&info->mutex); - -out: - mutex_unlock(&blkfront_mutex); - return err; -} - -static void blkif_release(struct gendisk *disk, fmode_t mode) -{ - struct blkfront_info *info = disk->private_data; - struct xenbus_device *xbdev; - - mutex_lock(&blkfront_mutex); - if (disk->part0->bd_openers) - goto out_mutex; - - /* - * Check if we have been instructed to close. We will have - * deferred this request, because the bdev was still open. - */ - - mutex_lock(&info->mutex); - xbdev = info->xbdev; - - if (xbdev && xbdev->state == XenbusStateClosing) { - /* pending switch to state closed */ - dev_info(disk_to_dev(disk), "releasing disk\n"); - xlvbd_release_gendisk(info); - xenbus_frontend_closed(info->xbdev); - } - - mutex_unlock(&info->mutex); - - if (!xbdev) { - /* sudden device removal */ - dev_info(disk_to_dev(disk), "releasing disk\n"); - xlvbd_release_gendisk(info); - disk->private_data = NULL; - free_info(info); - } - -out_mutex: - mutex_unlock(&blkfront_mutex); -} - static const struct block_device_operations xlvbd_block_fops = { .owner = THIS_MODULE, - .open = blkif_open, - .release = blkif_release, .getgeo = blkif_getgeo, .ioctl = blkif_ioctl, .compat_ioctl = blkdev_compat_ptr_ioctl, diff --git a/drivers/char/powernv-op-panel.c b/drivers/char/powernv-op-panel.c index 027484ecfb0d..3c99696b145e 100644 --- a/drivers/char/powernv-op-panel.c +++ b/drivers/char/powernv-op-panel.c @@ -75,6 +75,7 @@ static int __op_panel_update_display(void) rc); break; } + break; case OPAL_SUCCESS: break; default: diff --git a/drivers/clk/renesas/r9a07g044-cpg.c b/drivers/clk/renesas/r9a07g044-cpg.c index 50b5269586a4..ae24e0397d3c 100644 --- a/drivers/clk/renesas/r9a07g044-cpg.c +++ b/drivers/clk/renesas/r9a07g044-cpg.c @@ -30,8 +30,9 @@ enum clk_ids { CLK_PLL2_DIV20, CLK_PLL3, CLK_PLL3_DIV2, + CLK_PLL3_DIV2_4, + CLK_PLL3_DIV2_4_2, CLK_PLL3_DIV4, - CLK_PLL3_DIV8, CLK_PLL4, CLK_PLL5, CLK_PLL5_DIV2, @@ -42,12 +43,13 @@ enum clk_ids { }; /* Divider tables */ -static const struct clk_div_table dtable_3b[] = { +static const struct clk_div_table dtable_1_32[] = { {0, 1}, {1, 2}, {2, 4}, {3, 8}, {4, 32}, + {0, 0}, }; static const struct cpg_core_clk r9a07g044_core_clks[] __initconst = { @@ -66,47 +68,56 @@ static const struct cpg_core_clk r9a07g044_core_clks[] __initconst = { DEF_FIXED(".pll2_div20", 
CLK_PLL2_DIV20, CLK_PLL2, 1, 20), DEF_FIXED(".pll3_div2", CLK_PLL3_DIV2, CLK_PLL3, 1, 2), + DEF_FIXED(".pll3_div2_4", CLK_PLL3_DIV2_4, CLK_PLL3_DIV2, 1, 4), + DEF_FIXED(".pll3_div2_4_2", CLK_PLL3_DIV2_4_2, CLK_PLL3_DIV2_4, 1, 2), DEF_FIXED(".pll3_div4", CLK_PLL3_DIV4, CLK_PLL3, 1, 4), - DEF_FIXED(".pll3_div8", CLK_PLL3_DIV8, CLK_PLL3, 1, 8), /* Core output clk */ DEF_FIXED("I", R9A07G044_CLK_I, CLK_PLL1, 1, 1), DEF_DIV("P0", R9A07G044_CLK_P0, CLK_PLL2_DIV16, DIVPL2A, - dtable_3b, CLK_DIVIDER_HIWORD_MASK), + dtable_1_32, CLK_DIVIDER_HIWORD_MASK), DEF_FIXED("TSU", R9A07G044_CLK_TSU, CLK_PLL2_DIV20, 1, 1), - DEF_DIV("P1", R9A07G044_CLK_P1, CLK_PLL3_DIV8, - DIVPL3B, dtable_3b, CLK_DIVIDER_HIWORD_MASK), + DEF_DIV("P1", R9A07G044_CLK_P1, CLK_PLL3_DIV2_4, + DIVPL3B, dtable_1_32, CLK_DIVIDER_HIWORD_MASK), + DEF_DIV("P2", R9A07G044_CLK_P2, CLK_PLL3_DIV2_4_2, + DIVPL3A, dtable_1_32, CLK_DIVIDER_HIWORD_MASK), }; static struct rzg2l_mod_clk r9a07g044_mod_clks[] = { - DEF_MOD("gic", R9A07G044_CLK_GIC600, - R9A07G044_CLK_P1, - 0x514, BIT(0), (BIT(0) | BIT(1))), - DEF_MOD("ia55", R9A07G044_CLK_IA55, - R9A07G044_CLK_P1, - 0x518, (BIT(0) | BIT(1)), BIT(0)), - DEF_MOD("scif0", R9A07G044_CLK_SCIF0, - R9A07G044_CLK_P0, - 0x584, BIT(0), BIT(0)), - DEF_MOD("scif1", R9A07G044_CLK_SCIF1, - R9A07G044_CLK_P0, - 0x584, BIT(1), BIT(1)), - DEF_MOD("scif2", R9A07G044_CLK_SCIF2, - R9A07G044_CLK_P0, - 0x584, BIT(2), BIT(2)), - DEF_MOD("scif3", R9A07G044_CLK_SCIF3, - R9A07G044_CLK_P0, - 0x584, BIT(3), BIT(3)), - DEF_MOD("scif4", R9A07G044_CLK_SCIF4, - R9A07G044_CLK_P0, - 0x584, BIT(4), BIT(4)), - DEF_MOD("sci0", R9A07G044_CLK_SCI0, - R9A07G044_CLK_P0, - 0x588, BIT(0), BIT(0)), + DEF_MOD("gic", R9A07G044_GIC600_GICCLK, R9A07G044_CLK_P1, + 0x514, 0), + DEF_MOD("ia55_pclk", R9A07G044_IA55_PCLK, R9A07G044_CLK_P2, + 0x518, 0), + DEF_MOD("ia55_clk", R9A07G044_IA55_CLK, R9A07G044_CLK_P1, + 0x518, 1), + DEF_MOD("scif0", R9A07G044_SCIF0_CLK_PCK, R9A07G044_CLK_P0, + 0x584, 0), + DEF_MOD("scif1", R9A07G044_SCIF1_CLK_PCK, R9A07G044_CLK_P0, + 0x584, 1), + DEF_MOD("scif2", R9A07G044_SCIF2_CLK_PCK, R9A07G044_CLK_P0, + 0x584, 2), + DEF_MOD("scif3", R9A07G044_SCIF3_CLK_PCK, R9A07G044_CLK_P0, + 0x584, 3), + DEF_MOD("scif4", R9A07G044_SCIF4_CLK_PCK, R9A07G044_CLK_P0, + 0x584, 4), + DEF_MOD("sci0", R9A07G044_SCI0_CLKP, R9A07G044_CLK_P0, + 0x588, 0), +}; + +static struct rzg2l_reset r9a07g044_resets[] = { + DEF_RST(R9A07G044_GIC600_GICRESET_N, 0x814, 0), + DEF_RST(R9A07G044_GIC600_DBG_GICRESET_N, 0x814, 1), + DEF_RST(R9A07G044_IA55_RESETN, 0x818, 0), + DEF_RST(R9A07G044_SCIF0_RST_SYSTEM_N, 0x884, 0), + DEF_RST(R9A07G044_SCIF1_RST_SYSTEM_N, 0x884, 1), + DEF_RST(R9A07G044_SCIF2_RST_SYSTEM_N, 0x884, 2), + DEF_RST(R9A07G044_SCIF3_RST_SYSTEM_N, 0x884, 3), + DEF_RST(R9A07G044_SCIF4_RST_SYSTEM_N, 0x884, 4), + DEF_RST(R9A07G044_SCI0_RST, 0x888, 0), }; static const unsigned int r9a07g044_crit_mod_clks[] __initconst = { - MOD_CLK_BASE + R9A07G044_CLK_GIC600, + MOD_CLK_BASE + R9A07G044_GIC600_GICCLK, }; const struct rzg2l_cpg_info r9a07g044_cpg_info = { @@ -123,5 +134,9 @@ const struct rzg2l_cpg_info r9a07g044_cpg_info = { /* Module Clocks */ .mod_clks = r9a07g044_mod_clks, .num_mod_clks = ARRAY_SIZE(r9a07g044_mod_clks), - .num_hw_mod_clks = R9A07G044_CLK_MIPI_DSI_PIN + 1, + .num_hw_mod_clks = R9A07G044_TSU_PCLK + 1, + + /* Resets */ + .resets = r9a07g044_resets, + .num_resets = ARRAY_SIZE(r9a07g044_resets), }; diff --git a/drivers/clk/renesas/renesas-rzg2l-cpg.c b/drivers/clk/renesas/renesas-rzg2l-cpg.c index 5009b9e48b13..e7c59af2a1d8 100644 --- 
a/drivers/clk/renesas/renesas-rzg2l-cpg.c +++ b/drivers/clk/renesas/renesas-rzg2l-cpg.c @@ -47,9 +47,9 @@ #define SDIV(val) DIV_RSMASK(val, 0, 0x7) #define CLK_ON_R(reg) (reg) -#define CLK_MON_R(reg) (0x680 - 0x500 + (reg)) -#define CLK_RST_R(reg) (0x800 - 0x500 + (reg)) -#define CLK_MRST_R(reg) (0x980 - 0x500 + (reg)) +#define CLK_MON_R(reg) (0x180 + (reg)) +#define CLK_RST_R(reg) (reg) +#define CLK_MRST_R(reg) (0x180 + (reg)) #define GET_REG_OFFSET(val) ((val >> 20) & 0xfff) #define GET_REG_SAMPLL_CLK1(val) ((val >> 22) & 0xfff) @@ -78,6 +78,7 @@ struct rzg2l_cpg_priv { struct clk **clks; unsigned int num_core_clks; unsigned int num_mod_clks; + unsigned int num_resets; unsigned int last_dt_core_clk; struct raw_notifier_head notifiers; @@ -315,15 +316,13 @@ fail: * * @hw: handle between common and hardware-specific interfaces * @off: register offset - * @onoff: ON/MON bits - * @reset: reset bits + * @bit: ON/MON bit * @priv: CPG/MSTP private data */ struct mstp_clock { struct clk_hw hw; u16 off; - u8 onoff; - u8 reset; + u8 bit; struct rzg2l_cpg_priv *priv; }; @@ -337,6 +336,7 @@ static int rzg2l_mod_clock_endisable(struct clk_hw *hw, bool enable) struct device *dev = priv->dev; unsigned long flags; unsigned int i; + u32 bitmask = BIT(clock->bit); u32 value; if (!clock->off) { @@ -349,9 +349,9 @@ static int rzg2l_mod_clock_endisable(struct clk_hw *hw, bool enable) spin_lock_irqsave(&priv->rmw_lock, flags); if (enable) - value = (clock->onoff << 16) | clock->onoff; + value = (bitmask << 16) | bitmask; else - value = clock->onoff << 16; + value = bitmask << 16; writel(value, priv->base + CLK_ON_R(reg)); spin_unlock_irqrestore(&priv->rmw_lock, flags); @@ -360,7 +360,7 @@ static int rzg2l_mod_clock_endisable(struct clk_hw *hw, bool enable) return 0; for (i = 1000; i > 0; --i) { - if (((readl(priv->base + CLK_MON_R(reg))) & clock->onoff)) + if (((readl(priv->base + CLK_MON_R(reg))) & bitmask)) break; cpu_relax(); } @@ -388,6 +388,7 @@ static int rzg2l_mod_clock_is_enabled(struct clk_hw *hw) { struct mstp_clock *clock = to_mod_clock(hw); struct rzg2l_cpg_priv *priv = clock->priv; + u32 bitmask = BIT(clock->bit); u32 value; if (!clock->off) { @@ -397,7 +398,7 @@ static int rzg2l_mod_clock_is_enabled(struct clk_hw *hw) value = readl(priv->base + CLK_MON_R(clock->off)); - return !(value & clock->onoff); + return !(value & bitmask); } static const struct clk_ops rzg2l_mod_clock_ops = { @@ -457,8 +458,7 @@ rzg2l_cpg_register_mod_clk(const struct rzg2l_mod_clk *mod, init.num_parents = 1; clock->off = mod->off; - clock->onoff = mod->onoff; - clock->reset = mod->reset; + clock->bit = mod->bit; clock->priv = priv; clock->hw.init = &init; @@ -483,12 +483,11 @@ static int rzg2l_cpg_reset(struct reset_controller_dev *rcdev, { struct rzg2l_cpg_priv *priv = rcdev_to_priv(rcdev); const struct rzg2l_cpg_info *info = priv->info; - unsigned int reg = info->mod_clks[id].off; - u32 dis = info->mod_clks[id].reset; + unsigned int reg = info->resets[id].off; + u32 dis = BIT(info->resets[id].bit); u32 we = dis << 16; - dev_dbg(rcdev->dev, "reset name:%s id:%ld offset:0x%x\n", - info->mod_clks[id].name, id, CLK_RST_R(reg)); + dev_dbg(rcdev->dev, "reset id:%ld offset:0x%x\n", id, CLK_RST_R(reg)); /* Reset module */ writel(we, priv->base + CLK_RST_R(reg)); @@ -507,11 +506,10 @@ static int rzg2l_cpg_assert(struct reset_controller_dev *rcdev, { struct rzg2l_cpg_priv *priv = rcdev_to_priv(rcdev); const struct rzg2l_cpg_info *info = priv->info; - unsigned int reg = info->mod_clks[id].off; - u32 value = 
info->mod_clks[id].reset << 16; + unsigned int reg = info->resets[id].off; + u32 value = BIT(info->resets[id].bit) << 16; - dev_dbg(rcdev->dev, "assert name:%s id:%ld offset:0x%x\n", - info->mod_clks[id].name, id, CLK_RST_R(reg)); + dev_dbg(rcdev->dev, "assert id:%ld offset:0x%x\n", id, CLK_RST_R(reg)); writel(value, priv->base + CLK_RST_R(reg)); return 0; @@ -522,12 +520,12 @@ static int rzg2l_cpg_deassert(struct reset_controller_dev *rcdev, { struct rzg2l_cpg_priv *priv = rcdev_to_priv(rcdev); const struct rzg2l_cpg_info *info = priv->info; - unsigned int reg = info->mod_clks[id].off; - u32 dis = info->mod_clks[id].reset; + unsigned int reg = info->resets[id].off; + u32 dis = BIT(info->resets[id].bit); u32 value = (dis << 16) | dis; - dev_dbg(rcdev->dev, "deassert name:%s id:%ld offset:0x%x\n", - info->mod_clks[id].name, id, CLK_RST_R(reg)); + dev_dbg(rcdev->dev, "deassert id:%ld offset:0x%x\n", id, + CLK_RST_R(reg)); writel(value, priv->base + CLK_RST_R(reg)); return 0; @@ -538,8 +536,8 @@ static int rzg2l_cpg_status(struct reset_controller_dev *rcdev, { struct rzg2l_cpg_priv *priv = rcdev_to_priv(rcdev); const struct rzg2l_cpg_info *info = priv->info; - unsigned int reg = info->mod_clks[id].off; - u32 bitmask = info->mod_clks[id].reset; + unsigned int reg = info->resets[id].off; + u32 bitmask = BIT(info->resets[id].bit); return !(readl(priv->base + CLK_MRST_R(reg)) & bitmask); } @@ -554,9 +552,11 @@ static const struct reset_control_ops rzg2l_cpg_reset_ops = { static int rzg2l_cpg_reset_xlate(struct reset_controller_dev *rcdev, const struct of_phandle_args *reset_spec) { + struct rzg2l_cpg_priv *priv = rcdev_to_priv(rcdev); + const struct rzg2l_cpg_info *info = priv->info; unsigned int id = reset_spec->args[0]; - if (id >= rcdev->nr_resets) { + if (id >= rcdev->nr_resets || !info->resets[id].off) { dev_err(rcdev->dev, "Invalid reset index %u\n", id); return -EINVAL; } @@ -571,7 +571,7 @@ static int rzg2l_cpg_reset_controller_register(struct rzg2l_cpg_priv *priv) priv->rcdev.dev = priv->dev; priv->rcdev.of_reset_n_cells = 1; priv->rcdev.of_xlate = rzg2l_cpg_reset_xlate; - priv->rcdev.nr_resets = priv->num_mod_clks; + priv->rcdev.nr_resets = priv->num_resets; return devm_reset_controller_register(priv->dev, &priv->rcdev); } @@ -594,42 +594,49 @@ static int rzg2l_cpg_attach_dev(struct generic_pm_domain *unused, struct device { struct device_node *np = dev->of_node; struct of_phandle_args clkspec; + bool once = true; struct clk *clk; int error; int i = 0; while (!of_parse_phandle_with_args(np, "clocks", "#clock-cells", i, &clkspec)) { - if (rzg2l_cpg_is_pm_clk(&clkspec)) - goto found; - - of_node_put(clkspec.np); + if (rzg2l_cpg_is_pm_clk(&clkspec)) { + if (once) { + once = false; + error = pm_clk_create(dev); + if (error) { + of_node_put(clkspec.np); + goto err; + } + } + clk = of_clk_get_from_provider(&clkspec); + of_node_put(clkspec.np); + if (IS_ERR(clk)) { + error = PTR_ERR(clk); + goto fail_destroy; + } + + error = pm_clk_add_clk(dev, clk); + if (error) { + dev_err(dev, "pm_clk_add_clk failed %d\n", + error); + goto fail_put; + } + } else { + of_node_put(clkspec.np); + } i++; } return 0; -found: - clk = of_clk_get_from_provider(&clkspec); - of_node_put(clkspec.np); - - if (IS_ERR(clk)) - return PTR_ERR(clk); - - error = pm_clk_create(dev); - if (error) - goto fail_put; - - error = pm_clk_add_clk(dev, clk); - if (error) - goto fail_destroy; - - return 0; +fail_put: + clk_put(clk); fail_destroy: pm_clk_destroy(dev); -fail_put: - clk_put(clk); +err: return error; } @@ -692,6 +699,7 @@ 
static int __init rzg2l_cpg_probe(struct platform_device *pdev) priv->clks = clks; priv->num_core_clks = info->num_total_core_clks; priv->num_mod_clks = info->num_hw_mod_clks; + priv->num_resets = info->num_resets; priv->last_dt_core_clk = info->last_dt_core_clk; for (i = 0; i < nclks; i++) diff --git a/drivers/clk/renesas/renesas-rzg2l-cpg.h b/drivers/clk/renesas/renesas-rzg2l-cpg.h index 3948bdd8afc9..63695280ce8b 100644 --- a/drivers/clk/renesas/renesas-rzg2l-cpg.h +++ b/drivers/clk/renesas/renesas-rzg2l-cpg.h @@ -21,6 +21,7 @@ #define DDIV_PACK(offset, bitpos, size) \ (((offset) << 20) | ((bitpos) << 12) | ((size) << 8)) #define DIVPL2A DDIV_PACK(CPG_PL2_DDIV, 0, 3) +#define DIVPL3A DDIV_PACK(CPG_PL3A_DDIV, 0, 3) #define DIVPL3B DDIV_PACK(CPG_PL3A_DDIV, 4, 3) /** @@ -76,26 +77,40 @@ enum clk_types { * @id: clock index in array containing all Core and Module Clocks * @parent: id of parent clock * @off: register offset - * @onoff: ON/MON bits - * @reset: reset bits + * @bit: ON/MON bit */ struct rzg2l_mod_clk { const char *name; unsigned int id; unsigned int parent; u16 off; - u8 onoff; - u8 reset; + u8 bit; }; -#define DEF_MOD(_name, _id, _parent, _off, _onoff, _reset) \ - [_id] = { \ +#define DEF_MOD(_name, _id, _parent, _off, _bit) \ + { \ .name = _name, \ - .id = MOD_CLK_BASE + _id, \ + .id = MOD_CLK_BASE + (_id), \ .parent = (_parent), \ .off = (_off), \ - .onoff = (_onoff), \ - .reset = (_reset) \ + .bit = (_bit), \ + } + +/** + * struct rzg2l_reset - Reset definitions + * + * @off: register offset + * @bit: reset bit + */ +struct rzg2l_reset { + u16 off; + u8 bit; +}; + +#define DEF_RST(_id, _off, _bit) \ + [_id] = { \ + .off = (_off), \ + .bit = (_bit) \ } /** @@ -126,6 +141,10 @@ struct rzg2l_cpg_info { unsigned int num_mod_clks; unsigned int num_hw_mod_clks; + /* Resets */ + const struct rzg2l_reset *resets; + unsigned int num_resets; + /* Critical Module Clocks that should not be disabled */ const unsigned int *crit_mod_clks; unsigned int num_crit_mod_clks; diff --git a/drivers/cpufreq/longhaul.c b/drivers/cpufreq/longhaul.c index 182a4dbca095..c538a153ee82 100644 --- a/drivers/cpufreq/longhaul.c +++ b/drivers/cpufreq/longhaul.c @@ -942,8 +942,6 @@ static int __init longhaul_init(void) return cpufreq_register_driver(&longhaul_driver); case 10: pr_err("Use acpi-cpufreq driver for VIA C7\n"); - default: - ; } return -ENODEV; diff --git a/drivers/dma-buf/sync_file.c b/drivers/dma-buf/sync_file.c index 20d9bddbb985..394e6e1e9686 100644 --- a/drivers/dma-buf/sync_file.c +++ b/drivers/dma-buf/sync_file.c @@ -211,8 +211,8 @@ static struct sync_file *sync_file_merge(const char *name, struct sync_file *a, struct sync_file *b) { struct sync_file *sync_file; - struct dma_fence **fences, **nfences, **a_fences, **b_fences; - int i, i_a, i_b, num_fences, a_num_fences, b_num_fences; + struct dma_fence **fences = NULL, **nfences, **a_fences, **b_fences; + int i = 0, i_a, i_b, num_fences, a_num_fences, b_num_fences; sync_file = sync_file_alloc(); if (!sync_file) @@ -236,7 +236,7 @@ static struct sync_file *sync_file_merge(const char *name, struct sync_file *a, * If a sync_file can only be created with sync_file_merge * and sync_file_create, this is a reasonable assumption. 
*/ - for (i = i_a = i_b = 0; i_a < a_num_fences && i_b < b_num_fences; ) { + for (i_a = i_b = 0; i_a < a_num_fences && i_b < b_num_fences; ) { struct dma_fence *pt_a = a_fences[i_a]; struct dma_fence *pt_b = b_fences[i_b]; @@ -277,15 +277,16 @@ static struct sync_file *sync_file_merge(const char *name, struct sync_file *a, fences = nfences; } - if (sync_file_set_fence(sync_file, fences, i) < 0) { - kfree(fences); + if (sync_file_set_fence(sync_file, fences, i) < 0) goto err; - } strlcpy(sync_file->user_name, name, sizeof(sync_file->user_name)); return sync_file; err: + while (i) + dma_fence_put(fences[--i]); + kfree(fences); fput(sync_file->file); return NULL; diff --git a/drivers/dma/ipu/ipu_idmac.c b/drivers/dma/ipu/ipu_idmac.c index 104ad420abbe..baab1ca9f621 100644 --- a/drivers/dma/ipu/ipu_idmac.c +++ b/drivers/dma/ipu/ipu_idmac.c @@ -618,6 +618,7 @@ static int ipu_enable_channel(struct idmac *idmac, struct idmac_channel *ichan) case IDMAC_SDC_1: case IDMAC_IC_7: ipu_channel_set_priority(ipu, channel, true); + break; default: break; } @@ -978,6 +979,7 @@ static int ipu_init_channel(struct idmac *idmac, struct idmac_channel *ichan) case IDMAC_SDC_0: case IDMAC_SDC_1: n_desc = 4; + break; default: break; } diff --git a/drivers/dma/mpc512x_dma.c b/drivers/dma/mpc512x_dma.c index c1a69149c8bf..4a51fdbf5aa9 100644 --- a/drivers/dma/mpc512x_dma.c +++ b/drivers/dma/mpc512x_dma.c @@ -813,6 +813,7 @@ inline bool is_buswidth_valid(u8 buswidth, bool is_mpc8308) case 16: if (is_mpc8308) return false; + break; case 1: case 2: case 4: diff --git a/drivers/dma/ti/k3-udma.c b/drivers/dma/ti/k3-udma.c index 96ad21869ba7..a35858610780 100644 --- a/drivers/dma/ti/k3-udma.c +++ b/drivers/dma/ti/k3-udma.c @@ -4948,6 +4948,7 @@ static int setup_resources(struct udma_dev *ud) ud->tchan_cnt), ud->rchan_cnt - bitmap_weight(ud->rchan_map, ud->rchan_cnt)); + break; default: break; } diff --git a/drivers/edac/Kconfig b/drivers/edac/Kconfig index 91164c5f0757..2fc4c3f91fd5 100644 --- a/drivers/edac/Kconfig +++ b/drivers/edac/Kconfig @@ -271,7 +271,7 @@ config EDAC_PND2 config EDAC_IGEN6 tristate "Intel client SoC Integrated MC" depends on PCI && PCI_MMCONFIG && ARCH_HAVE_NMI_SAFE_CMPXCHG - depends on X64_64 && X86_MCE_INTEL + depends on X86_64 && X86_MCE_INTEL help Support for error detection and correction on the Intel client SoC Integrated Memory Controller using In-Band ECC IP. diff --git a/drivers/firmware/arm_ffa/bus.c b/drivers/firmware/arm_ffa/bus.c index 83166e02b191..00fe595a5bc8 100644 --- a/drivers/firmware/arm_ffa/bus.c +++ b/drivers/firmware/arm_ffa/bus.c @@ -46,9 +46,6 @@ static int ffa_device_probe(struct device *dev) struct ffa_driver *ffa_drv = to_ffa_driver(dev->driver); struct ffa_device *ffa_dev = to_ffa_dev(dev); - if (!ffa_device_match(dev, dev->driver)) - return -ENODEV; - return ffa_drv->probe(ffa_dev); } @@ -99,6 +96,9 @@ int ffa_driver_register(struct ffa_driver *driver, struct module *owner, { int ret; + if (!driver->probe) + return -EINVAL; + driver->driver.bus = &ffa_bus_type; driver->driver.name = driver->name; driver->driver.owner = owner; diff --git a/drivers/firmware/arm_ffa/driver.c b/drivers/firmware/arm_ffa/driver.c index b1edb4b2e94a..c9fb56afbcb4 100644 --- a/drivers/firmware/arm_ffa/driver.c +++ b/drivers/firmware/arm_ffa/driver.c @@ -120,7 +120,7 @@ #define PACK_TARGET_INFO(s, r) \ (FIELD_PREP(SENDER_ID_MASK, (s)) | FIELD_PREP(RECEIVER_ID_MASK, (r))) -/** +/* * FF-A specification mentions explicitly about '4K pages'. 
This should * not be confused with the kernel PAGE_SIZE, which is the translation * granule kernel is configured and may be one among 4K, 16K and 64K. @@ -149,8 +149,10 @@ static const int ffa_linux_errmap[] = { static inline int ffa_to_linux_errno(int errno) { - if (errno < FFA_RET_SUCCESS && errno >= -ARRAY_SIZE(ffa_linux_errmap)) - return ffa_linux_errmap[-errno]; + int err_idx = -errno; + + if (err_idx >= 0 && err_idx < ARRAY_SIZE(ffa_linux_errmap)) + return ffa_linux_errmap[err_idx]; return -EINVAL; } diff --git a/drivers/firmware/arm_scmi/bus.c b/drivers/firmware/arm_scmi/bus.c index 784cf0027da3..6c7e24935eca 100644 --- a/drivers/firmware/arm_scmi/bus.c +++ b/drivers/firmware/arm_scmi/bus.c @@ -104,11 +104,6 @@ static int scmi_dev_probe(struct device *dev) { struct scmi_driver *scmi_drv = to_scmi_driver(dev->driver); struct scmi_device *scmi_dev = to_scmi_dev(dev); - const struct scmi_device_id *id; - - id = scmi_dev_match_id(scmi_dev, scmi_drv); - if (!id) - return -ENODEV; if (!scmi_dev->handle) return -EPROBE_DEFER; @@ -139,6 +134,9 @@ int scmi_driver_register(struct scmi_driver *driver, struct module *owner, { int retval; + if (!driver->probe) + return -EINVAL; + retval = scmi_protocol_device_request(driver->id_table); if (retval) return retval; diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c index 66e5e694be7d..9b2e8d42a992 100644 --- a/drivers/firmware/arm_scmi/driver.c +++ b/drivers/firmware/arm_scmi/driver.c @@ -47,7 +47,6 @@ enum scmi_error_codes { SCMI_ERR_GENERIC = -8, /* Generic Error */ SCMI_ERR_HARDWARE = -9, /* Hardware Error */ SCMI_ERR_PROTOCOL = -10,/* Protocol Error */ - SCMI_ERR_MAX }; /* List of all SCMI devices active in system */ @@ -166,8 +165,10 @@ static const int scmi_linux_errmap[] = { static inline int scmi_to_linux_errno(int errno) { - if (errno < SCMI_SUCCESS && errno > SCMI_ERR_MAX) - return scmi_linux_errmap[-errno]; + int err_idx = -errno; + + if (err_idx >= SCMI_SUCCESS && err_idx < ARRAY_SIZE(scmi_linux_errmap)) + return scmi_linux_errmap[err_idx]; return -EIO; } @@ -1025,8 +1026,9 @@ static int __scmi_xfer_info_init(struct scmi_info *sinfo, const struct scmi_desc *desc = sinfo->desc; /* Pre-allocated messages, no more than what hdr.seq can support */ - if (WARN_ON(desc->max_msg >= MSG_TOKEN_MAX)) { - dev_err(dev, "Maximum message of %d exceeds supported %ld\n", + if (WARN_ON(!desc->max_msg || desc->max_msg > MSG_TOKEN_MAX)) { + dev_err(dev, + "Invalid maximum messages %d, not in range [1 - %lu]\n", desc->max_msg, MSG_TOKEN_MAX); return -EINVAL; } @@ -1137,6 +1139,8 @@ scmi_txrx_setup(struct scmi_info *info, struct device *dev, int prot_id) * @proto_id and @name: if device was still not existent it is created as a * child of the specified SCMI instance @info and its transport properly * initialized as usual. + * + * Return: A properly initialized scmi device, NULL otherwise. */ static inline struct scmi_device * scmi_get_protocol_device(struct device_node *np, struct scmi_info *info, diff --git a/drivers/firmware/arm_scmi/notify.c b/drivers/firmware/arm_scmi/notify.c index d860bebd984a..0efd20cd9d69 100644 --- a/drivers/firmware/arm_scmi/notify.c +++ b/drivers/firmware/arm_scmi/notify.c @@ -1457,6 +1457,8 @@ static void scmi_devm_release_notifier(struct device *dev, void *res) * * Generic devres managed helper to register a notifier_block against a * protocol event. 
+ * + * Return: 0 on Success */ static int scmi_devm_notifier_register(struct scmi_device *sdev, u8 proto_id, u8 evt_id, @@ -1523,6 +1525,8 @@ static int scmi_devm_notifier_match(struct device *dev, void *res, void *data) * Generic devres managed helper to explicitly un-register a notifier_block * against a protocol event, which was previously registered using the above * @scmi_devm_notifier_register. + * + * Return: 0 on Success */ static int scmi_devm_notifier_unregister(struct scmi_device *sdev, u8 proto_id, u8 evt_id, diff --git a/drivers/firmware/arm_scmi/sensors.c b/drivers/firmware/arm_scmi/sensors.c index 2c88aa221559..308471586381 100644 --- a/drivers/firmware/arm_scmi/sensors.c +++ b/drivers/firmware/arm_scmi/sensors.c @@ -166,7 +166,8 @@ struct scmi_msg_sensor_reading_get { struct scmi_resp_sensor_reading_complete { __le32 id; - __le64 readings; + __le32 readings_low; + __le32 readings_high; }; struct scmi_sensor_reading_resp { @@ -717,7 +718,8 @@ static int scmi_sensor_reading_get(const struct scmi_protocol_handle *ph, resp = t->rx.buf; if (le32_to_cpu(resp->id) == sensor_id) - *value = get_unaligned_le64(&resp->readings); + *value = + get_unaligned_le64(&resp->readings_low); else ret = -EPROTO; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h index db16b3e83694..cf62f43a03da 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h @@ -269,7 +269,7 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu( struct kgd_dev *kgd, struct kgd_mem *mem, void *drm_priv, uint64_t *size); int amdgpu_amdkfd_gpuvm_map_memory_to_gpu( - struct kgd_dev *kgd, struct kgd_mem *mem, void *drm_priv, bool *table_freed); + struct kgd_dev *kgd, struct kgd_mem *mem, void *drm_priv); int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu( struct kgd_dev *kgd, struct kgd_mem *mem, void *drm_priv); int amdgpu_amdkfd_gpuvm_sync_memory( diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c index 3b8e1ee8c475..4fb15750b9bb 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c @@ -1057,8 +1057,7 @@ static void unmap_bo_from_gpuvm(struct kgd_mem *mem, static int update_gpuvm_pte(struct kgd_mem *mem, struct kfd_mem_attachment *entry, - struct amdgpu_sync *sync, - bool *table_freed) + struct amdgpu_sync *sync) { struct amdgpu_bo_va *bo_va = entry->bo_va; struct amdgpu_device *adev = entry->adev; @@ -1069,7 +1068,7 @@ static int update_gpuvm_pte(struct kgd_mem *mem, return ret; /* Update the page tables */ - ret = amdgpu_vm_bo_update(adev, bo_va, false, table_freed); + ret = amdgpu_vm_bo_update(adev, bo_va, false); if (ret) { pr_err("amdgpu_vm_bo_update failed\n"); return ret; @@ -1081,8 +1080,7 @@ static int update_gpuvm_pte(struct kgd_mem *mem, static int map_bo_to_gpuvm(struct kgd_mem *mem, struct kfd_mem_attachment *entry, struct amdgpu_sync *sync, - bool no_update_pte, - bool *table_freed) + bool no_update_pte) { int ret; @@ -1099,7 +1097,7 @@ static int map_bo_to_gpuvm(struct kgd_mem *mem, if (no_update_pte) return 0; - ret = update_gpuvm_pte(mem, entry, sync, table_freed); + ret = update_gpuvm_pte(mem, entry, sync); if (ret) { pr_err("update_gpuvm_pte() failed\n"); goto update_gpuvm_pte_failed; @@ -1393,8 +1391,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu( domain = alloc_domain = AMDGPU_GEM_DOMAIN_VRAM; alloc_flags = AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE; alloc_flags |= (flags & 
KFD_IOC_ALLOC_MEM_FLAGS_PUBLIC) ? - AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED : - AMDGPU_GEM_CREATE_NO_CPU_ACCESS; + AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED : 0; } else if (flags & KFD_IOC_ALLOC_MEM_FLAGS_GTT) { domain = alloc_domain = AMDGPU_GEM_DOMAIN_GTT; alloc_flags = 0; @@ -1597,8 +1594,7 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu( } int amdgpu_amdkfd_gpuvm_map_memory_to_gpu( - struct kgd_dev *kgd, struct kgd_mem *mem, - void *drm_priv, bool *table_freed) + struct kgd_dev *kgd, struct kgd_mem *mem, void *drm_priv) { struct amdgpu_device *adev = get_amdgpu_device(kgd); struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv); @@ -1686,7 +1682,7 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu( entry->va, entry->va + bo_size, entry); ret = map_bo_to_gpuvm(mem, entry, ctx.sync, - is_invalid_userptr, table_freed); + is_invalid_userptr); if (ret) { pr_err("Failed to map bo to gpuvm\n"); goto out_unreserve; @@ -2136,7 +2132,7 @@ static int validate_invalid_user_pages(struct amdkfd_process_info *process_info) continue; kfd_mem_dmaunmap_attachment(mem, attachment); - ret = update_gpuvm_pte(mem, attachment, &sync, NULL); + ret = update_gpuvm_pte(mem, attachment, &sync); if (ret) { pr_err("%s: update PTE failed\n", __func__); /* make sure this gets validated again */ @@ -2342,7 +2338,7 @@ int amdgpu_amdkfd_gpuvm_restore_process_bos(void *info, struct dma_fence **ef) continue; kfd_mem_dmaunmap_attachment(mem, attachment); - ret = update_gpuvm_pte(mem, attachment, &sync_obj, NULL); + ret = update_gpuvm_pte(mem, attachment, &sync_obj); if (ret) { pr_debug("Memory eviction: update PTE failed. Try again\n"); goto validate_map_fail; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 76fe5b71e35d..30fa1f61e0e5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -781,7 +781,7 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p) if (r) return r; - r = amdgpu_vm_bo_update(adev, fpriv->prt_va, false, NULL); + r = amdgpu_vm_bo_update(adev, fpriv->prt_va, false); if (r) return r; @@ -792,7 +792,7 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p) if (amdgpu_mcbp || amdgpu_sriov_vf(adev)) { bo_va = fpriv->csa_va; BUG_ON(!bo_va); - r = amdgpu_vm_bo_update(adev, bo_va, false, NULL); + r = amdgpu_vm_bo_update(adev, bo_va, false); if (r) return r; @@ -811,7 +811,7 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p) if (bo_va == NULL) continue; - r = amdgpu_vm_bo_update(adev, bo_va, false, NULL); + r = amdgpu_vm_bo_update(adev, bo_va, false); if (r) return r; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 71beb0db0125..abb928894eac 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -1168,6 +1168,7 @@ static const struct pci_device_id pciidlist[] = { {0x1002, 0x734F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI14}, /* Renoir */ + {0x1002, 0x15E7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RENOIR|AMD_IS_APU}, {0x1002, 0x1636, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RENOIR|AMD_IS_APU}, {0x1002, 0x1638, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RENOIR|AMD_IS_APU}, {0x1002, 0x164C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RENOIR|AMD_IS_APU}, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c index b3404c43a911..d0d9bc445d7b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c @@ -612,7 +612,7 @@ static void amdgpu_gem_va_update_vm(struct 
amdgpu_device *adev, if (operation == AMDGPU_VA_OP_MAP || operation == AMDGPU_VA_OP_REPLACE) { - r = amdgpu_vm_bo_update(adev, bo_va, false, NULL); + r = amdgpu_vm_bo_update(adev, bo_va, false); if (r) goto error; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c index 32ce0e679dc7..83af307e97cd 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c @@ -278,6 +278,21 @@ static bool amdgpu_msi_ok(struct amdgpu_device *adev) return true; } +static void amdgpu_restore_msix(struct amdgpu_device *adev) +{ + u16 ctrl; + + pci_read_config_word(adev->pdev, adev->pdev->msix_cap + PCI_MSIX_FLAGS, &ctrl); + if (!(ctrl & PCI_MSIX_FLAGS_ENABLE)) + return; + + /* VF FLR */ + ctrl &= ~PCI_MSIX_FLAGS_ENABLE; + pci_write_config_word(adev->pdev, adev->pdev->msix_cap + PCI_MSIX_FLAGS, ctrl); + ctrl |= PCI_MSIX_FLAGS_ENABLE; + pci_write_config_word(adev->pdev, adev->pdev->msix_cap + PCI_MSIX_FLAGS, ctrl); +} + /** * amdgpu_irq_init - initialize interrupt handling * @@ -569,6 +584,9 @@ void amdgpu_irq_gpu_reset_resume_helper(struct amdgpu_device *adev) { int i, j, k; + if (amdgpu_sriov_vf(adev)) + amdgpu_restore_msix(adev); + for (i = 0; i < AMDGPU_IRQ_CLIENTID_MAX; ++i) { if (!adev->irq.client[i].sources) continue; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c index c13b02caf8c3..fc66aca28594 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c @@ -809,7 +809,7 @@ static int amdgpu_ras_enable_all_features(struct amdgpu_device *adev, /* query/inject/cure begin */ int amdgpu_ras_query_error_status(struct amdgpu_device *adev, - struct ras_query_if *info) + struct ras_query_if *info) { struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head); struct ras_err_data err_data = {0, 0, 0, NULL}; @@ -1043,17 +1043,32 @@ int amdgpu_ras_error_inject(struct amdgpu_device *adev, return ret; } -/* get the total error counts on all IPs */ -void amdgpu_ras_query_error_count(struct amdgpu_device *adev, - unsigned long *ce_count, - unsigned long *ue_count) +/** + * amdgpu_ras_query_error_count -- Get error counts of all IPs + * adev: pointer to AMD GPU device + * ce_count: pointer to an integer to be set to the count of correctible errors. + * ue_count: pointer to an integer to be set to the count of uncorrectible + * errors. + * + * If set, @ce_count or @ue_count, count and return the corresponding + * error counts in those integer pointers. Return 0 if the device + * supports RAS. Return -EOPNOTSUPP if the device doesn't support RAS. + */ +int amdgpu_ras_query_error_count(struct amdgpu_device *adev, + unsigned long *ce_count, + unsigned long *ue_count) { struct amdgpu_ras *con = amdgpu_ras_get_context(adev); struct ras_manager *obj; unsigned long ce, ue; if (!adev->ras_enabled || !con) - return; + return -EOPNOTSUPP; + + /* Don't count since no reporting. 
+ */ + if (!ce_count && !ue_count) + return 0; ce = 0; ue = 0; @@ -1061,9 +1076,11 @@ void amdgpu_ras_query_error_count(struct amdgpu_device *adev, struct ras_query_if info = { .head = obj->head, }; + int res; - if (amdgpu_ras_query_error_status(adev, &info)) - return; + res = amdgpu_ras_query_error_status(adev, &info); + if (res) + return res; ce += info.ce_count; ue += info.ue_count; @@ -1074,6 +1091,8 @@ void amdgpu_ras_query_error_count(struct amdgpu_device *adev, if (ue_count) *ue_count = ue; + + return 0; } /* query/inject/cure end */ @@ -2137,9 +2156,10 @@ static void amdgpu_ras_counte_dw(struct work_struct *work) /* Cache new values. */ - amdgpu_ras_query_error_count(adev, &ce_count, &ue_count); - atomic_set(&con->ras_ce_count, ce_count); - atomic_set(&con->ras_ue_count, ue_count); + if (amdgpu_ras_query_error_count(adev, &ce_count, &ue_count) == 0) { + atomic_set(&con->ras_ce_count, ce_count); + atomic_set(&con->ras_ue_count, ue_count); + } pm_runtime_mark_last_busy(dev->dev); Out: @@ -2312,9 +2332,10 @@ int amdgpu_ras_late_init(struct amdgpu_device *adev, /* Those are the cached values at init. */ - amdgpu_ras_query_error_count(adev, &ce_count, &ue_count); - atomic_set(&con->ras_ce_count, ce_count); - atomic_set(&con->ras_ue_count, ue_count); + if (amdgpu_ras_query_error_count(adev, &ce_count, &ue_count) == 0) { + atomic_set(&con->ras_ce_count, ce_count); + atomic_set(&con->ras_ue_count, ue_count); + } return 0; cleanup: diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h index 256cea5d34f2..b504ed8c9b50 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h @@ -490,9 +490,9 @@ int amdgpu_ras_request_reset_on_boot(struct amdgpu_device *adev, void amdgpu_ras_resume(struct amdgpu_device *adev); void amdgpu_ras_suspend(struct amdgpu_device *adev); -void amdgpu_ras_query_error_count(struct amdgpu_device *adev, - unsigned long *ce_count, - unsigned long *ue_count); +int amdgpu_ras_query_error_count(struct amdgpu_device *adev, + unsigned long *ce_count, + unsigned long *ue_count); /* error handling functions */ int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 79cfa2d68487..078c068937fe 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -1758,7 +1758,7 @@ int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev, r = vm->update_funcs->commit(¶ms, fence); if (table_freed) - *table_freed = *table_freed || params.table_freed; + *table_freed = params.table_freed; error_unlock: amdgpu_vm_eviction_unlock(vm); @@ -1816,7 +1816,6 @@ void amdgpu_vm_get_memory(struct amdgpu_vm *vm, uint64_t *vram_mem, * @adev: amdgpu_device pointer * @bo_va: requested BO and VM object * @clear: if true clear the entries - * @table_freed: return true if page table is freed * * Fill in the page table entries for @bo_va. * @@ -1824,7 +1823,7 @@ void amdgpu_vm_get_memory(struct amdgpu_vm *vm, uint64_t *vram_mem, * 0 for success, -EINVAL for failure. 
*/ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va, - bool clear, bool *table_freed) + bool clear) { struct amdgpu_bo *bo = bo_va->base.bo; struct amdgpu_vm *vm = bo_va->base.vm; @@ -1903,7 +1902,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va, resv, mapping->start, mapping->last, update_flags, mapping->offset, mem, - pages_addr, last_update, table_freed); + pages_addr, last_update, NULL); if (r) return r; } @@ -2155,7 +2154,7 @@ int amdgpu_vm_handle_moved(struct amdgpu_device *adev, list_for_each_entry_safe(bo_va, tmp, &vm->moved, base.vm_status) { /* Per VM BOs never need to bo cleared in the page tables */ - r = amdgpu_vm_bo_update(adev, bo_va, false, NULL); + r = amdgpu_vm_bo_update(adev, bo_va, false); if (r) return r; } @@ -2174,7 +2173,7 @@ int amdgpu_vm_handle_moved(struct amdgpu_device *adev, else clear = true; - r = amdgpu_vm_bo_update(adev, bo_va, clear, NULL); + r = amdgpu_vm_bo_update(adev, bo_va, clear); if (r) return r; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h index ddb85a85cbba..f8fa653d4da7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h @@ -406,7 +406,7 @@ int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev, struct dma_fence **fence, bool *free_table); int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va, - bool clear, bool *table_freed); + bool clear); bool amdgpu_vm_evictable(struct amdgpu_bo *bo); void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev, struct amdgpu_bo *bo, bool evicted); diff --git a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c index 33324427b555..7e0d8c092c7e 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c @@ -766,7 +766,7 @@ static const struct amdgpu_irq_src_funcs dce_virtual_crtc_irq_funcs = { static void dce_virtual_set_irq_funcs(struct amdgpu_device *adev) { - adev->crtc_irq.num_types = AMDGPU_CRTC_IRQ_VBLANK6 + 1; + adev->crtc_irq.num_types = adev->mode_info.num_crtc; adev->crtc_irq.funcs = &dce_virtual_crtc_irq_funcs; } diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c index 3ee481557fc9..ff2307d7ee0f 100644 --- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c @@ -252,7 +252,7 @@ static void xgpu_ai_mailbox_flr_work(struct work_struct *work) * otherwise the mailbox msg will be ruined/reseted by * the VF FLR. */ - if (!down_read_trylock(&adev->reset_sem)) + if (!down_write_trylock(&adev->reset_sem)) return; amdgpu_virt_fini_data_exchange(adev); @@ -268,7 +268,7 @@ static void xgpu_ai_mailbox_flr_work(struct work_struct *work) flr_done: atomic_set(&adev->in_gpu_reset, 0); - up_read(&adev->reset_sem); + up_write(&adev->reset_sem); /* Trigger recovery for world switch failure if no TDR */ if (amdgpu_device_should_recover_gpu(adev) diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c index 48e588d3c409..9f7aac435d69 100644 --- a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c @@ -273,7 +273,7 @@ static void xgpu_nv_mailbox_flr_work(struct work_struct *work) * otherwise the mailbox msg will be ruined/reseted by * the VF FLR. 
*/ - if (!down_read_trylock(&adev->reset_sem)) + if (!down_write_trylock(&adev->reset_sem)) return; amdgpu_virt_fini_data_exchange(adev); @@ -289,7 +289,7 @@ static void xgpu_nv_mailbox_flr_work(struct work_struct *work) flr_done: atomic_set(&adev->in_gpu_reset, 0); - up_read(&adev->reset_sem); + up_write(&adev->reset_sem); /* Trigger recovery for world switch failure if no TDR */ if (amdgpu_device_should_recover_gpu(adev) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c index 67541c30327a..e48acdd03c1a 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c @@ -1393,7 +1393,6 @@ static int kfd_ioctl_map_memory_to_gpu(struct file *filep, long err = 0; int i; uint32_t *devices_arr = NULL; - bool table_freed = false; dev = kfd_device_by_id(GET_GPU_ID(args->handle)); if (!dev) @@ -1451,8 +1450,7 @@ static int kfd_ioctl_map_memory_to_gpu(struct file *filep, goto get_mem_obj_from_handle_failed; } err = amdgpu_amdkfd_gpuvm_map_memory_to_gpu( - peer->kgd, (struct kgd_mem *)mem, - peer_pdd->drm_priv, &table_freed); + peer->kgd, (struct kgd_mem *)mem, peer_pdd->drm_priv); if (err) { pr_err("Failed to map to gpu %d/%d\n", i, args->n_devices); @@ -1470,17 +1468,16 @@ static int kfd_ioctl_map_memory_to_gpu(struct file *filep, } /* Flush TLBs after waiting for the page table updates to complete */ - if (table_freed) { - for (i = 0; i < args->n_devices; i++) { - peer = kfd_device_by_id(devices_arr[i]); - if (WARN_ON_ONCE(!peer)) - continue; - peer_pdd = kfd_get_process_device_data(peer, p); - if (WARN_ON_ONCE(!peer_pdd)) - continue; - kfd_flush_tlb(peer_pdd, TLB_FLUSH_LEGACY); - } + for (i = 0; i < args->n_devices; i++) { + peer = kfd_device_by_id(devices_arr[i]); + if (WARN_ON_ONCE(!peer)) + continue; + peer_pdd = kfd_get_process_device_data(peer, p); + if (WARN_ON_ONCE(!peer_pdd)) + continue; + kfd_flush_tlb(peer_pdd, TLB_FLUSH_LEGACY); } + kfree(devices_arr); return err; @@ -1568,27 +1565,10 @@ static int kfd_ioctl_unmap_memory_from_gpu(struct file *filep, } args->n_success = i+1; } - mutex_unlock(&p->mutex); - - err = amdgpu_amdkfd_gpuvm_sync_memory(dev->kgd, (struct kgd_mem *) mem, true); - if (err) { - pr_debug("Sync memory failed, wait interrupted by user signal\n"); - goto sync_memory_failed; - } - - /* Flush TLBs after waiting for the page table updates to complete */ - for (i = 0; i < args->n_devices; i++) { - peer = kfd_device_by_id(devices_arr[i]); - if (WARN_ON_ONCE(!peer)) - continue; - peer_pdd = kfd_get_process_device_data(peer, p); - if (WARN_ON_ONCE(!peer_pdd)) - continue; - kfd_flush_tlb(peer_pdd, TLB_FLUSH_HEAVYWEIGHT); - } - kfree(devices_arr); + mutex_unlock(&p->mutex); + return 0; bind_process_to_device_failed: @@ -1596,7 +1576,6 @@ get_mem_obj_from_handle_failed: unmap_memory_from_gpu_failed: mutex_unlock(&p->mutex); copy_from_user_failed: -sync_memory_failed: kfree(devices_arr); return err; } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c index 21ec8a18cad2..8a2c6fc438c0 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c @@ -714,8 +714,7 @@ static int kfd_process_alloc_gpuvm(struct kfd_process_device *pdd, if (err) goto err_alloc_mem; - err = amdgpu_amdkfd_gpuvm_map_memory_to_gpu(kdev->kgd, mem, - pdd->drm_priv, NULL); + err = amdgpu_amdkfd_gpuvm_map_memory_to_gpu(kdev->kgd, mem, pdd->drm_priv); if (err) goto err_map_mem; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c 
b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c index 9a71d8919bd6..c7b364e4a287 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c @@ -2375,21 +2375,27 @@ static bool svm_range_skip_recover(struct svm_range *prange) static void svm_range_count_fault(struct amdgpu_device *adev, struct kfd_process *p, - struct svm_range *prange, int32_t gpuidx) + int32_t gpuidx) { struct kfd_process_device *pdd; - if (gpuidx == MAX_GPU_INSTANCE) - /* fault is on different page of same range - * or fault is skipped to recover later - */ - pdd = svm_range_get_pdd_by_adev(prange, adev); - else - /* fault recovered - * or fault cannot recover because GPU no access on the range - */ - pdd = kfd_process_device_from_gpuidx(p, gpuidx); + /* fault is on different page of same range + * or fault is skipped to recover later + * or fault is on invalid virtual address + */ + if (gpuidx == MAX_GPU_INSTANCE) { + uint32_t gpuid; + int r; + r = kfd_process_gpuid_from_kgd(p, adev, &gpuid, &gpuidx); + if (r < 0) + return; + } + + /* fault is recovered + * or fault cannot recover because GPU no access on the range + */ + pdd = kfd_process_device_from_gpuidx(p, gpuidx); if (pdd) WRITE_ONCE(pdd->faults, pdd->faults + 1); } @@ -2525,7 +2531,7 @@ out_unlock_svms: mutex_unlock(&svms->lock); mmap_read_unlock(mm); - svm_range_count_fault(adev, p, prange, gpuidx); + svm_range_count_fault(adev, p, gpuidx); mmput(mm); out: diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 01e1062dc235..d3a2a5ff57e9 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -9191,7 +9191,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) #if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || \ defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE) /* restore the backlight level */ - if (dm->backlight_dev) + if (dm->backlight_dev && (amdgpu_dm_backlight_get_level(dm) != dm->brightness[0])) amdgpu_dm_backlight_set_level(dm, dm->brightness[0]); #endif /* diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c index 66db5e988bc1..dad4a4c18bcf 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c @@ -31,8 +31,8 @@ #include "dcn31_smu.h" #include "yellow_carp_offset.h" -#include "mp/mp_13_0_1_offset.h" -#include "mp/mp_13_0_1_sh_mask.h" +#include "mp/mp_13_0_2_offset.h" +#include "mp/mp_13_0_2_sh_mask.h" #define REG(reg_name) \ (MP0_BASE.instance[0].segment[reg ## reg_name ## _BASE_IDX] + reg ## reg_name) diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c index b8832bdde2bc..6da226bf11d5 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c @@ -1620,11 +1620,12 @@ enum dc_status dpcd_configure_lttpr_mode(struct dc_link *link, struct link_train { enum dc_status status = DC_OK; - if (lt_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) - status = configure_lttpr_mode_non_transparent(link, lt_settings); - else + if (lt_settings->lttpr_mode == LTTPR_MODE_TRANSPARENT) status = configure_lttpr_mode_transparent(link); + else if (lt_settings->lttpr_mode == LTTPR_MODE_NON_TRANSPARENT) + status = configure_lttpr_mode_non_transparent(link, lt_settings); + return status; } @@ -1784,7 +1785,6 @@ bool 
perform_link_training_with_retries( link_enc = stream->link_enc; else link_enc = link->link_enc; - ASSERT(link_enc); /* We need to do this before the link training to ensure the idle pattern in SST * mode will be sent right after the link training diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c index fc1fc1a4bf8b..836864a5a5dc 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c @@ -390,7 +390,7 @@ void dcn31_update_info_frame(struct pipe_ctx *pipe_ctx) is_hdmi_tmds = dc_is_hdmi_tmds_signal(pipe_ctx->stream->signal); is_dp = dc_is_dp_signal(pipe_ctx->stream->signal); - if (!is_hdmi_tmds) + if (!is_hdmi_tmds && !is_dp) return; if (is_hdmi_tmds) diff --git a/drivers/gpu/drm/amd/include/asic_reg/mp/mp_13_0_1_offset.h b/drivers/gpu/drm/amd/include/asic_reg/mp/mp_13_0_1_offset.h deleted file mode 100644 index dfacc6b5d89d..000000000000 --- a/drivers/gpu/drm/amd/include/asic_reg/mp/mp_13_0_1_offset.h +++ /dev/null @@ -1,355 +0,0 @@ -/* - * Copyright 2020 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - * - */ -#ifndef _mp_13_0_1_OFFSET_HEADER -#define _mp_13_0_1_OFFSET_HEADER - - - -// addressBlock: mp_SmuMp0_SmnDec -// base address: 0x0 -#define regMP0_SMN_C2PMSG_32 0x0060 -#define regMP0_SMN_C2PMSG_32_BASE_IDX 0 -#define regMP0_SMN_C2PMSG_33 0x0061 -#define regMP0_SMN_C2PMSG_33_BASE_IDX 0 -#define regMP0_SMN_C2PMSG_34 0x0062 -#define regMP0_SMN_C2PMSG_34_BASE_IDX 0 -#define regMP0_SMN_C2PMSG_35 0x0063 -#define regMP0_SMN_C2PMSG_35_BASE_IDX 0 -#define regMP0_SMN_C2PMSG_36 0x0064 -#define regMP0_SMN_C2PMSG_36_BASE_IDX 0 -#define regMP0_SMN_C2PMSG_37 0x0065 -#define regMP0_SMN_C2PMSG_37_BASE_IDX 0 -#define regMP0_SMN_C2PMSG_38 0x0066 -#define regMP0_SMN_C2PMSG_38_BASE_IDX 0 -#define regMP0_SMN_C2PMSG_39 0x0067 -#define regMP0_SMN_C2PMSG_39_BASE_IDX 0 -#define regMP0_SMN_C2PMSG_40 0x0068 -#define regMP0_SMN_C2PMSG_40_BASE_IDX 0 -#define regMP0_SMN_C2PMSG_41 0x0069 -#define regMP0_SMN_C2PMSG_41_BASE_IDX 0 -#define regMP0_SMN_C2PMSG_42 0x006a -#define regMP0_SMN_C2PMSG_42_BASE_IDX 0 -#define regMP0_SMN_C2PMSG_43 0x006b -#define regMP0_SMN_C2PMSG_43_BASE_IDX 0 -#define regMP0_SMN_C2PMSG_44 0x006c -#define regMP0_SMN_C2PMSG_44_BASE_IDX 0 -#define regMP0_SMN_C2PMSG_45 0x006d -#define regMP0_SMN_C2PMSG_45_BASE_IDX 0 -#define regMP0_SMN_C2PMSG_46 0x006e -#define regMP0_SMN_C2PMSG_46_BASE_IDX 0 -#define regMP0_SMN_C2PMSG_47 0x006f -#define regMP0_SMN_C2PMSG_47_BASE_IDX 0 -#define regMP0_SMN_C2PMSG_48 0x0070 -#define regMP0_SMN_C2PMSG_48_BASE_IDX 0 -#define regMP0_SMN_C2PMSG_49 0x0071 -#define regMP0_SMN_C2PMSG_49_BASE_IDX 0 -#define regMP0_SMN_C2PMSG_50 0x0072 -#define regMP0_SMN_C2PMSG_50_BASE_IDX 0 -#define regMP0_SMN_C2PMSG_51 0x0073 -#define regMP0_SMN_C2PMSG_51_BASE_IDX 0 -#define regMP0_SMN_C2PMSG_52 0x0074 -#define regMP0_SMN_C2PMSG_52_BASE_IDX 0 -#define regMP0_SMN_C2PMSG_53 0x0075 -#define regMP0_SMN_C2PMSG_53_BASE_IDX 0 -#define regMP0_SMN_C2PMSG_54 0x0076 -#define regMP0_SMN_C2PMSG_54_BASE_IDX 0 -#define regMP0_SMN_C2PMSG_55 0x0077 -#define regMP0_SMN_C2PMSG_55_BASE_IDX 0 -#define regMP0_SMN_C2PMSG_56 0x0078 -#define regMP0_SMN_C2PMSG_56_BASE_IDX 0 -#define regMP0_SMN_C2PMSG_57 0x0079 -#define regMP0_SMN_C2PMSG_57_BASE_IDX 0 -#define regMP0_SMN_C2PMSG_58 0x007a -#define regMP0_SMN_C2PMSG_58_BASE_IDX 0 -#define regMP0_SMN_C2PMSG_59 0x007b -#define regMP0_SMN_C2PMSG_59_BASE_IDX 0 -#define regMP0_SMN_C2PMSG_60 0x007c -#define regMP0_SMN_C2PMSG_60_BASE_IDX 0 -#define regMP0_SMN_C2PMSG_61 0x007d -#define regMP0_SMN_C2PMSG_61_BASE_IDX 0 -#define regMP0_SMN_C2PMSG_62 0x007e -#define regMP0_SMN_C2PMSG_62_BASE_IDX 0 -#define regMP0_SMN_C2PMSG_63 0x007f -#define regMP0_SMN_C2PMSG_63_BASE_IDX 0 -#define regMP0_SMN_C2PMSG_64 0x0080 -#define regMP0_SMN_C2PMSG_64_BASE_IDX 0 -#define regMP0_SMN_C2PMSG_65 0x0081 -#define regMP0_SMN_C2PMSG_65_BASE_IDX 0 -#define regMP0_SMN_C2PMSG_66 0x0082 -#define regMP0_SMN_C2PMSG_66_BASE_IDX 0 -#define regMP0_SMN_C2PMSG_67 0x0083 -#define regMP0_SMN_C2PMSG_67_BASE_IDX 0 -#define regMP0_SMN_C2PMSG_68 0x0084 -#define regMP0_SMN_C2PMSG_68_BASE_IDX 0 -#define regMP0_SMN_C2PMSG_69 0x0085 -#define regMP0_SMN_C2PMSG_69_BASE_IDX 0 -#define regMP0_SMN_C2PMSG_70 0x0086 -#define regMP0_SMN_C2PMSG_70_BASE_IDX 0 -#define regMP0_SMN_C2PMSG_71 0x0087 -#define regMP0_SMN_C2PMSG_71_BASE_IDX 0 -#define regMP0_SMN_C2PMSG_72 0x0088 -#define regMP0_SMN_C2PMSG_72_BASE_IDX 0 -#define regMP0_SMN_C2PMSG_73 0x0089 -#define regMP0_SMN_C2PMSG_73_BASE_IDX 0 -#define regMP0_SMN_C2PMSG_74 0x008a -#define regMP0_SMN_C2PMSG_74_BASE_IDX 0 -#define regMP0_SMN_C2PMSG_75 0x008b -#define 
regMP0_SMN_C2PMSG_75_BASE_IDX 0 -#define regMP0_SMN_C2PMSG_76 0x008c -#define regMP0_SMN_C2PMSG_76_BASE_IDX 0 -#define regMP0_SMN_C2PMSG_77 0x008d -#define regMP0_SMN_C2PMSG_77_BASE_IDX 0 -#define regMP0_SMN_C2PMSG_78 0x008e -#define regMP0_SMN_C2PMSG_78_BASE_IDX 0 -#define regMP0_SMN_C2PMSG_79 0x008f -#define regMP0_SMN_C2PMSG_79_BASE_IDX 0 -#define regMP0_SMN_C2PMSG_80 0x0090 -#define regMP0_SMN_C2PMSG_80_BASE_IDX 0 -#define regMP0_SMN_C2PMSG_81 0x0091 -#define regMP0_SMN_C2PMSG_81_BASE_IDX 0 -#define regMP0_SMN_C2PMSG_82 0x0092 -#define regMP0_SMN_C2PMSG_82_BASE_IDX 0 -#define regMP0_SMN_C2PMSG_83 0x0093 -#define regMP0_SMN_C2PMSG_83_BASE_IDX 0 -#define regMP0_SMN_C2PMSG_84 0x0094 -#define regMP0_SMN_C2PMSG_84_BASE_IDX 0 -#define regMP0_SMN_C2PMSG_85 0x0095 -#define regMP0_SMN_C2PMSG_85_BASE_IDX 0 -#define regMP0_SMN_C2PMSG_86 0x0096 -#define regMP0_SMN_C2PMSG_86_BASE_IDX 0 -#define regMP0_SMN_C2PMSG_87 0x0097 -#define regMP0_SMN_C2PMSG_87_BASE_IDX 0 -#define regMP0_SMN_C2PMSG_88 0x0098 -#define regMP0_SMN_C2PMSG_88_BASE_IDX 0 -#define regMP0_SMN_C2PMSG_89 0x0099 -#define regMP0_SMN_C2PMSG_89_BASE_IDX 0 -#define regMP0_SMN_C2PMSG_90 0x009a -#define regMP0_SMN_C2PMSG_90_BASE_IDX 0 -#define regMP0_SMN_C2PMSG_91 0x009b -#define regMP0_SMN_C2PMSG_91_BASE_IDX 0 -#define regMP0_SMN_C2PMSG_92 0x009c -#define regMP0_SMN_C2PMSG_92_BASE_IDX 0 -#define regMP0_SMN_C2PMSG_93 0x009d -#define regMP0_SMN_C2PMSG_93_BASE_IDX 0 -#define regMP0_SMN_C2PMSG_94 0x009e -#define regMP0_SMN_C2PMSG_94_BASE_IDX 0 -#define regMP0_SMN_C2PMSG_95 0x009f -#define regMP0_SMN_C2PMSG_95_BASE_IDX 0 -#define regMP0_SMN_C2PMSG_96 0x00a0 -#define regMP0_SMN_C2PMSG_96_BASE_IDX 0 -#define regMP0_SMN_C2PMSG_97 0x00a1 -#define regMP0_SMN_C2PMSG_97_BASE_IDX 0 -#define regMP0_SMN_C2PMSG_98 0x00a2 -#define regMP0_SMN_C2PMSG_98_BASE_IDX 0 -#define regMP0_SMN_C2PMSG_99 0x00a3 -#define regMP0_SMN_C2PMSG_99_BASE_IDX 0 -#define regMP0_SMN_C2PMSG_100 0x00a4 -#define regMP0_SMN_C2PMSG_100_BASE_IDX 0 -#define regMP0_SMN_C2PMSG_101 0x00a5 -#define regMP0_SMN_C2PMSG_101_BASE_IDX 0 -#define regMP0_SMN_C2PMSG_102 0x00a6 -#define regMP0_SMN_C2PMSG_102_BASE_IDX 0 -#define regMP0_SMN_C2PMSG_103 0x00a7 -#define regMP0_SMN_C2PMSG_103_BASE_IDX 0 -#define regMP0_SMN_IH_CREDIT 0x00c1 -#define regMP0_SMN_IH_CREDIT_BASE_IDX 0 -#define regMP0_SMN_IH_SW_INT 0x00c2 -#define regMP0_SMN_IH_SW_INT_BASE_IDX 0 -#define regMP0_SMN_IH_SW_INT_CTRL 0x00c3 -#define regMP0_SMN_IH_SW_INT_CTRL_BASE_IDX 0 - - -// addressBlock: mp_SmuMp1_SmnDec -// base address: 0x0 -#define regMP1_SMN_C2PMSG_32 0x0260 -#define regMP1_SMN_C2PMSG_32_BASE_IDX 0 -#define regMP1_SMN_C2PMSG_33 0x0261 -#define regMP1_SMN_C2PMSG_33_BASE_IDX 0 -#define regMP1_SMN_C2PMSG_34 0x0262 -#define regMP1_SMN_C2PMSG_34_BASE_IDX 0 -#define regMP1_SMN_C2PMSG_35 0x0263 -#define regMP1_SMN_C2PMSG_35_BASE_IDX 0 -#define regMP1_SMN_C2PMSG_36 0x0264 -#define regMP1_SMN_C2PMSG_36_BASE_IDX 0 -#define regMP1_SMN_C2PMSG_37 0x0265 -#define regMP1_SMN_C2PMSG_37_BASE_IDX 0 -#define regMP1_SMN_C2PMSG_38 0x0266 -#define regMP1_SMN_C2PMSG_38_BASE_IDX 0 -#define regMP1_SMN_C2PMSG_39 0x0267 -#define regMP1_SMN_C2PMSG_39_BASE_IDX 0 -#define regMP1_SMN_C2PMSG_40 0x0268 -#define regMP1_SMN_C2PMSG_40_BASE_IDX 0 -#define regMP1_SMN_C2PMSG_41 0x0269 -#define regMP1_SMN_C2PMSG_41_BASE_IDX 0 -#define regMP1_SMN_C2PMSG_42 0x026a -#define regMP1_SMN_C2PMSG_42_BASE_IDX 0 -#define regMP1_SMN_C2PMSG_43 0x026b -#define regMP1_SMN_C2PMSG_43_BASE_IDX 0 -#define regMP1_SMN_C2PMSG_44 0x026c -#define regMP1_SMN_C2PMSG_44_BASE_IDX 0 -#define 
regMP1_SMN_C2PMSG_45 0x026d -#define regMP1_SMN_C2PMSG_45_BASE_IDX 0 -#define regMP1_SMN_C2PMSG_46 0x026e -#define regMP1_SMN_C2PMSG_46_BASE_IDX 0 -#define regMP1_SMN_C2PMSG_47 0x026f -#define regMP1_SMN_C2PMSG_47_BASE_IDX 0 -#define regMP1_SMN_C2PMSG_48 0x0270 -#define regMP1_SMN_C2PMSG_48_BASE_IDX 0 -#define regMP1_SMN_C2PMSG_49 0x0271 -#define regMP1_SMN_C2PMSG_49_BASE_IDX 0 -#define regMP1_SMN_C2PMSG_50 0x0272 -#define regMP1_SMN_C2PMSG_50_BASE_IDX 0 -#define regMP1_SMN_C2PMSG_51 0x0273 -#define regMP1_SMN_C2PMSG_51_BASE_IDX 0 -#define regMP1_SMN_C2PMSG_52 0x0274 -#define regMP1_SMN_C2PMSG_52_BASE_IDX 0 -#define regMP1_SMN_C2PMSG_53 0x0275 -#define regMP1_SMN_C2PMSG_53_BASE_IDX 0 -#define regMP1_SMN_C2PMSG_54 0x0276 -#define regMP1_SMN_C2PMSG_54_BASE_IDX 0 -#define regMP1_SMN_C2PMSG_55 0x0277 -#define regMP1_SMN_C2PMSG_55_BASE_IDX 0 -#define regMP1_SMN_C2PMSG_56 0x0278 -#define regMP1_SMN_C2PMSG_56_BASE_IDX 0 -#define regMP1_SMN_C2PMSG_57 0x0279 -#define regMP1_SMN_C2PMSG_57_BASE_IDX 0 -#define regMP1_SMN_C2PMSG_58 0x027a -#define regMP1_SMN_C2PMSG_58_BASE_IDX 0 -#define regMP1_SMN_C2PMSG_59 0x027b -#define regMP1_SMN_C2PMSG_59_BASE_IDX 0 -#define regMP1_SMN_C2PMSG_60 0x027c -#define regMP1_SMN_C2PMSG_60_BASE_IDX 0 -#define regMP1_SMN_C2PMSG_61 0x027d -#define regMP1_SMN_C2PMSG_61_BASE_IDX 0 -#define regMP1_SMN_C2PMSG_62 0x027e -#define regMP1_SMN_C2PMSG_62_BASE_IDX 0 -#define regMP1_SMN_C2PMSG_63 0x027f -#define regMP1_SMN_C2PMSG_63_BASE_IDX 0 -#define regMP1_SMN_C2PMSG_64 0x0280 -#define regMP1_SMN_C2PMSG_64_BASE_IDX 0 -#define regMP1_SMN_C2PMSG_65 0x0281 -#define regMP1_SMN_C2PMSG_65_BASE_IDX 0 -#define regMP1_SMN_C2PMSG_66 0x0282 -#define regMP1_SMN_C2PMSG_66_BASE_IDX 0 -#define regMP1_SMN_C2PMSG_67 0x0283 -#define regMP1_SMN_C2PMSG_67_BASE_IDX 0 -#define regMP1_SMN_C2PMSG_68 0x0284 -#define regMP1_SMN_C2PMSG_68_BASE_IDX 0 -#define regMP1_SMN_C2PMSG_69 0x0285 -#define regMP1_SMN_C2PMSG_69_BASE_IDX 0 -#define regMP1_SMN_C2PMSG_70 0x0286 -#define regMP1_SMN_C2PMSG_70_BASE_IDX 0 -#define regMP1_SMN_C2PMSG_71 0x0287 -#define regMP1_SMN_C2PMSG_71_BASE_IDX 0 -#define regMP1_SMN_C2PMSG_72 0x0288 -#define regMP1_SMN_C2PMSG_72_BASE_IDX 0 -#define regMP1_SMN_C2PMSG_73 0x0289 -#define regMP1_SMN_C2PMSG_73_BASE_IDX 0 -#define regMP1_SMN_C2PMSG_74 0x028a -#define regMP1_SMN_C2PMSG_74_BASE_IDX 0 -#define regMP1_SMN_C2PMSG_75 0x028b -#define regMP1_SMN_C2PMSG_75_BASE_IDX 0 -#define regMP1_SMN_C2PMSG_76 0x028c -#define regMP1_SMN_C2PMSG_76_BASE_IDX 0 -#define regMP1_SMN_C2PMSG_77 0x028d -#define regMP1_SMN_C2PMSG_77_BASE_IDX 0 -#define regMP1_SMN_C2PMSG_78 0x028e -#define regMP1_SMN_C2PMSG_78_BASE_IDX 0 -#define regMP1_SMN_C2PMSG_79 0x028f -#define regMP1_SMN_C2PMSG_79_BASE_IDX 0 -#define regMP1_SMN_C2PMSG_80 0x0290 -#define regMP1_SMN_C2PMSG_80_BASE_IDX 0 -#define regMP1_SMN_C2PMSG_81 0x0291 -#define regMP1_SMN_C2PMSG_81_BASE_IDX 0 -#define regMP1_SMN_C2PMSG_82 0x0292 -#define regMP1_SMN_C2PMSG_82_BASE_IDX 0 -#define regMP1_SMN_C2PMSG_83 0x0293 -#define regMP1_SMN_C2PMSG_83_BASE_IDX 0 -#define regMP1_SMN_C2PMSG_84 0x0294 -#define regMP1_SMN_C2PMSG_84_BASE_IDX 0 -#define regMP1_SMN_C2PMSG_85 0x0295 -#define regMP1_SMN_C2PMSG_85_BASE_IDX 0 -#define regMP1_SMN_C2PMSG_86 0x0296 -#define regMP1_SMN_C2PMSG_86_BASE_IDX 0 -#define regMP1_SMN_C2PMSG_87 0x0297 -#define regMP1_SMN_C2PMSG_87_BASE_IDX 0 -#define regMP1_SMN_C2PMSG_88 0x0298 -#define regMP1_SMN_C2PMSG_88_BASE_IDX 0 -#define regMP1_SMN_C2PMSG_89 0x0299 -#define regMP1_SMN_C2PMSG_89_BASE_IDX 0 -#define regMP1_SMN_C2PMSG_90 0x029a -#define 
regMP1_SMN_C2PMSG_90_BASE_IDX 0 -#define regMP1_SMN_C2PMSG_91 0x029b -#define regMP1_SMN_C2PMSG_91_BASE_IDX 0 -#define regMP1_SMN_C2PMSG_92 0x029c -#define regMP1_SMN_C2PMSG_92_BASE_IDX 0 -#define regMP1_SMN_C2PMSG_93 0x029d -#define regMP1_SMN_C2PMSG_93_BASE_IDX 0 -#define regMP1_SMN_C2PMSG_94 0x029e -#define regMP1_SMN_C2PMSG_94_BASE_IDX 0 -#define regMP1_SMN_C2PMSG_95 0x029f -#define regMP1_SMN_C2PMSG_95_BASE_IDX 0 -#define regMP1_SMN_C2PMSG_96 0x02a0 -#define regMP1_SMN_C2PMSG_96_BASE_IDX 0 -#define regMP1_SMN_C2PMSG_97 0x02a1 -#define regMP1_SMN_C2PMSG_97_BASE_IDX 0 -#define regMP1_SMN_C2PMSG_98 0x02a2 -#define regMP1_SMN_C2PMSG_98_BASE_IDX 0 -#define regMP1_SMN_C2PMSG_99 0x02a3 -#define regMP1_SMN_C2PMSG_99_BASE_IDX 0 -#define regMP1_SMN_C2PMSG_100 0x02a4 -#define regMP1_SMN_C2PMSG_100_BASE_IDX 0 -#define regMP1_SMN_C2PMSG_101 0x02a5 -#define regMP1_SMN_C2PMSG_101_BASE_IDX 0 -#define regMP1_SMN_C2PMSG_102 0x02a6 -#define regMP1_SMN_C2PMSG_102_BASE_IDX 0 -#define regMP1_SMN_C2PMSG_103 0x02a7 -#define regMP1_SMN_C2PMSG_103_BASE_IDX 0 -#define regMP1_SMN_IH_CREDIT 0x02c1 -#define regMP1_SMN_IH_CREDIT_BASE_IDX 0 -#define regMP1_SMN_IH_SW_INT 0x02c2 -#define regMP1_SMN_IH_SW_INT_BASE_IDX 0 -#define regMP1_SMN_IH_SW_INT_CTRL 0x02c3 -#define regMP1_SMN_IH_SW_INT_CTRL_BASE_IDX 0 -#define regMP1_SMN_FPS_CNT 0x02c4 -#define regMP1_SMN_FPS_CNT_BASE_IDX 0 -#define regMP1_SMN_EXT_SCRATCH0 0x0340 -#define regMP1_SMN_EXT_SCRATCH0_BASE_IDX 0 -#define regMP1_SMN_EXT_SCRATCH1 0x0341 -#define regMP1_SMN_EXT_SCRATCH1_BASE_IDX 0 -#define regMP1_SMN_EXT_SCRATCH2 0x0342 -#define regMP1_SMN_EXT_SCRATCH2_BASE_IDX 0 -#define regMP1_SMN_EXT_SCRATCH3 0x0343 -#define regMP1_SMN_EXT_SCRATCH3_BASE_IDX 0 -#define regMP1_SMN_EXT_SCRATCH4 0x0344 -#define regMP1_SMN_EXT_SCRATCH4_BASE_IDX 0 -#define regMP1_SMN_EXT_SCRATCH5 0x0345 -#define regMP1_SMN_EXT_SCRATCH5_BASE_IDX 0 -#define regMP1_SMN_EXT_SCRATCH6 0x0346 -#define regMP1_SMN_EXT_SCRATCH6_BASE_IDX 0 -#define regMP1_SMN_EXT_SCRATCH7 0x0347 -#define regMP1_SMN_EXT_SCRATCH7_BASE_IDX 0 - - -#endif diff --git a/drivers/gpu/drm/amd/include/asic_reg/mp/mp_13_0_1_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/mp/mp_13_0_1_sh_mask.h deleted file mode 100644 index 2d5e8b58e693..000000000000 --- a/drivers/gpu/drm/amd/include/asic_reg/mp/mp_13_0_1_sh_mask.h +++ /dev/null @@ -1,531 +0,0 @@ -/* - * Copyright 2020 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - * - */ -#ifndef _mp_13_0_1_SH_MASK_HEADER -#define _mp_13_0_1_SH_MASK_HEADER - - -// addressBlock: mp_SmuMp0_SmnDec -//MP0_SMN_C2PMSG_32 -#define MP0_SMN_C2PMSG_32__CONTENT__SHIFT 0x0 -#define MP0_SMN_C2PMSG_32__CONTENT_MASK 0xFFFFFFFFL -//MP0_SMN_C2PMSG_33 -#define MP0_SMN_C2PMSG_33__CONTENT__SHIFT 0x0 -#define MP0_SMN_C2PMSG_33__CONTENT_MASK 0xFFFFFFFFL -//MP0_SMN_C2PMSG_34 -#define MP0_SMN_C2PMSG_34__CONTENT__SHIFT 0x0 -#define MP0_SMN_C2PMSG_34__CONTENT_MASK 0xFFFFFFFFL -//MP0_SMN_C2PMSG_35 -#define MP0_SMN_C2PMSG_35__CONTENT__SHIFT 0x0 -#define MP0_SMN_C2PMSG_35__CONTENT_MASK 0xFFFFFFFFL -//MP0_SMN_C2PMSG_36 -#define MP0_SMN_C2PMSG_36__CONTENT__SHIFT 0x0 -#define MP0_SMN_C2PMSG_36__CONTENT_MASK 0xFFFFFFFFL -//MP0_SMN_C2PMSG_37 -#define MP0_SMN_C2PMSG_37__CONTENT__SHIFT 0x0 -#define MP0_SMN_C2PMSG_37__CONTENT_MASK 0xFFFFFFFFL -//MP0_SMN_C2PMSG_38 -#define MP0_SMN_C2PMSG_38__CONTENT__SHIFT 0x0 -#define MP0_SMN_C2PMSG_38__CONTENT_MASK 0xFFFFFFFFL -//MP0_SMN_C2PMSG_39 -#define MP0_SMN_C2PMSG_39__CONTENT__SHIFT 0x0 -#define MP0_SMN_C2PMSG_39__CONTENT_MASK 0xFFFFFFFFL -//MP0_SMN_C2PMSG_40 -#define MP0_SMN_C2PMSG_40__CONTENT__SHIFT 0x0 -#define MP0_SMN_C2PMSG_40__CONTENT_MASK 0xFFFFFFFFL -//MP0_SMN_C2PMSG_41 -#define MP0_SMN_C2PMSG_41__CONTENT__SHIFT 0x0 -#define MP0_SMN_C2PMSG_41__CONTENT_MASK 0xFFFFFFFFL -//MP0_SMN_C2PMSG_42 -#define MP0_SMN_C2PMSG_42__CONTENT__SHIFT 0x0 -#define MP0_SMN_C2PMSG_42__CONTENT_MASK 0xFFFFFFFFL -//MP0_SMN_C2PMSG_43 -#define MP0_SMN_C2PMSG_43__CONTENT__SHIFT 0x0 -#define MP0_SMN_C2PMSG_43__CONTENT_MASK 0xFFFFFFFFL -//MP0_SMN_C2PMSG_44 -#define MP0_SMN_C2PMSG_44__CONTENT__SHIFT 0x0 -#define MP0_SMN_C2PMSG_44__CONTENT_MASK 0xFFFFFFFFL -//MP0_SMN_C2PMSG_45 -#define MP0_SMN_C2PMSG_45__CONTENT__SHIFT 0x0 -#define MP0_SMN_C2PMSG_45__CONTENT_MASK 0xFFFFFFFFL -//MP0_SMN_C2PMSG_46 -#define MP0_SMN_C2PMSG_46__CONTENT__SHIFT 0x0 -#define MP0_SMN_C2PMSG_46__CONTENT_MASK 0xFFFFFFFFL -//MP0_SMN_C2PMSG_47 -#define MP0_SMN_C2PMSG_47__CONTENT__SHIFT 0x0 -#define MP0_SMN_C2PMSG_47__CONTENT_MASK 0xFFFFFFFFL -//MP0_SMN_C2PMSG_48 -#define MP0_SMN_C2PMSG_48__CONTENT__SHIFT 0x0 -#define MP0_SMN_C2PMSG_48__CONTENT_MASK 0xFFFFFFFFL -//MP0_SMN_C2PMSG_49 -#define MP0_SMN_C2PMSG_49__CONTENT__SHIFT 0x0 -#define MP0_SMN_C2PMSG_49__CONTENT_MASK 0xFFFFFFFFL -//MP0_SMN_C2PMSG_50 -#define MP0_SMN_C2PMSG_50__CONTENT__SHIFT 0x0 -#define MP0_SMN_C2PMSG_50__CONTENT_MASK 0xFFFFFFFFL -//MP0_SMN_C2PMSG_51 -#define MP0_SMN_C2PMSG_51__CONTENT__SHIFT 0x0 -#define MP0_SMN_C2PMSG_51__CONTENT_MASK 0xFFFFFFFFL -//MP0_SMN_C2PMSG_52 -#define MP0_SMN_C2PMSG_52__CONTENT__SHIFT 0x0 -#define MP0_SMN_C2PMSG_52__CONTENT_MASK 0xFFFFFFFFL -//MP0_SMN_C2PMSG_53 -#define MP0_SMN_C2PMSG_53__CONTENT__SHIFT 0x0 -#define MP0_SMN_C2PMSG_53__CONTENT_MASK 0xFFFFFFFFL -//MP0_SMN_C2PMSG_54 -#define MP0_SMN_C2PMSG_54__CONTENT__SHIFT 0x0 -#define MP0_SMN_C2PMSG_54__CONTENT_MASK 0xFFFFFFFFL -//MP0_SMN_C2PMSG_55 -#define MP0_SMN_C2PMSG_55__CONTENT__SHIFT 0x0 -#define MP0_SMN_C2PMSG_55__CONTENT_MASK 0xFFFFFFFFL -//MP0_SMN_C2PMSG_56 -#define MP0_SMN_C2PMSG_56__CONTENT__SHIFT 0x0 -#define MP0_SMN_C2PMSG_56__CONTENT_MASK 0xFFFFFFFFL -//MP0_SMN_C2PMSG_57 -#define MP0_SMN_C2PMSG_57__CONTENT__SHIFT 0x0 -#define MP0_SMN_C2PMSG_57__CONTENT_MASK 0xFFFFFFFFL -//MP0_SMN_C2PMSG_58 -#define MP0_SMN_C2PMSG_58__CONTENT__SHIFT 0x0 -#define MP0_SMN_C2PMSG_58__CONTENT_MASK 0xFFFFFFFFL -//MP0_SMN_C2PMSG_59 -#define MP0_SMN_C2PMSG_59__CONTENT__SHIFT 0x0 -#define MP0_SMN_C2PMSG_59__CONTENT_MASK 0xFFFFFFFFL -//MP0_SMN_C2PMSG_60 -#define 
MP0_SMN_C2PMSG_60__CONTENT__SHIFT 0x0 -#define MP0_SMN_C2PMSG_60__CONTENT_MASK 0xFFFFFFFFL -//MP0_SMN_C2PMSG_61 -#define MP0_SMN_C2PMSG_61__CONTENT__SHIFT 0x0 -#define MP0_SMN_C2PMSG_61__CONTENT_MASK 0xFFFFFFFFL -//MP0_SMN_C2PMSG_62 -#define MP0_SMN_C2PMSG_62__CONTENT__SHIFT 0x0 -#define MP0_SMN_C2PMSG_62__CONTENT_MASK 0xFFFFFFFFL -//MP0_SMN_C2PMSG_63 -#define MP0_SMN_C2PMSG_63__CONTENT__SHIFT 0x0 -#define MP0_SMN_C2PMSG_63__CONTENT_MASK 0xFFFFFFFFL -//MP0_SMN_C2PMSG_64 -#define MP0_SMN_C2PMSG_64__CONTENT__SHIFT 0x0 -#define MP0_SMN_C2PMSG_64__CONTENT_MASK 0xFFFFFFFFL -//MP0_SMN_C2PMSG_65 -#define MP0_SMN_C2PMSG_65__CONTENT__SHIFT 0x0 -#define MP0_SMN_C2PMSG_65__CONTENT_MASK 0xFFFFFFFFL -//MP0_SMN_C2PMSG_66 -#define MP0_SMN_C2PMSG_66__CONTENT__SHIFT 0x0 -#define MP0_SMN_C2PMSG_66__CONTENT_MASK 0xFFFFFFFFL -//MP0_SMN_C2PMSG_67 -#define MP0_SMN_C2PMSG_67__CONTENT__SHIFT 0x0 -#define MP0_SMN_C2PMSG_67__CONTENT_MASK 0xFFFFFFFFL -//MP0_SMN_C2PMSG_68 -#define MP0_SMN_C2PMSG_68__CONTENT__SHIFT 0x0 -#define MP0_SMN_C2PMSG_68__CONTENT_MASK 0xFFFFFFFFL -//MP0_SMN_C2PMSG_69 -#define MP0_SMN_C2PMSG_69__CONTENT__SHIFT 0x0 -#define MP0_SMN_C2PMSG_69__CONTENT_MASK 0xFFFFFFFFL -//MP0_SMN_C2PMSG_70 -#define MP0_SMN_C2PMSG_70__CONTENT__SHIFT 0x0 -#define MP0_SMN_C2PMSG_70__CONTENT_MASK 0xFFFFFFFFL -//MP0_SMN_C2PMSG_71 -#define MP0_SMN_C2PMSG_71__CONTENT__SHIFT 0x0 -#define MP0_SMN_C2PMSG_71__CONTENT_MASK 0xFFFFFFFFL -//MP0_SMN_C2PMSG_72 -#define MP0_SMN_C2PMSG_72__CONTENT__SHIFT 0x0 -#define MP0_SMN_C2PMSG_72__CONTENT_MASK 0xFFFFFFFFL -//MP0_SMN_C2PMSG_73 -#define MP0_SMN_C2PMSG_73__CONTENT__SHIFT 0x0 -#define MP0_SMN_C2PMSG_73__CONTENT_MASK 0xFFFFFFFFL -//MP0_SMN_C2PMSG_74 -#define MP0_SMN_C2PMSG_74__CONTENT__SHIFT 0x0 -#define MP0_SMN_C2PMSG_74__CONTENT_MASK 0xFFFFFFFFL -//MP0_SMN_C2PMSG_75 -#define MP0_SMN_C2PMSG_75__CONTENT__SHIFT 0x0 -#define MP0_SMN_C2PMSG_75__CONTENT_MASK 0xFFFFFFFFL -//MP0_SMN_C2PMSG_76 -#define MP0_SMN_C2PMSG_76__CONTENT__SHIFT 0x0 -#define MP0_SMN_C2PMSG_76__CONTENT_MASK 0xFFFFFFFFL -//MP0_SMN_C2PMSG_77 -#define MP0_SMN_C2PMSG_77__CONTENT__SHIFT 0x0 -#define MP0_SMN_C2PMSG_77__CONTENT_MASK 0xFFFFFFFFL -//MP0_SMN_C2PMSG_78 -#define MP0_SMN_C2PMSG_78__CONTENT__SHIFT 0x0 -#define MP0_SMN_C2PMSG_78__CONTENT_MASK 0xFFFFFFFFL -//MP0_SMN_C2PMSG_79 -#define MP0_SMN_C2PMSG_79__CONTENT__SHIFT 0x0 -#define MP0_SMN_C2PMSG_79__CONTENT_MASK 0xFFFFFFFFL -//MP0_SMN_C2PMSG_80 -#define MP0_SMN_C2PMSG_80__CONTENT__SHIFT 0x0 -#define MP0_SMN_C2PMSG_80__CONTENT_MASK 0xFFFFFFFFL -//MP0_SMN_C2PMSG_81 -#define MP0_SMN_C2PMSG_81__CONTENT__SHIFT 0x0 -#define MP0_SMN_C2PMSG_81__CONTENT_MASK 0xFFFFFFFFL -//MP0_SMN_C2PMSG_82 -#define MP0_SMN_C2PMSG_82__CONTENT__SHIFT 0x0 -#define MP0_SMN_C2PMSG_82__CONTENT_MASK 0xFFFFFFFFL -//MP0_SMN_C2PMSG_83 -#define MP0_SMN_C2PMSG_83__CONTENT__SHIFT 0x0 -#define MP0_SMN_C2PMSG_83__CONTENT_MASK 0xFFFFFFFFL -//MP0_SMN_C2PMSG_84 -#define MP0_SMN_C2PMSG_84__CONTENT__SHIFT 0x0 -#define MP0_SMN_C2PMSG_84__CONTENT_MASK 0xFFFFFFFFL -//MP0_SMN_C2PMSG_85 -#define MP0_SMN_C2PMSG_85__CONTENT__SHIFT 0x0 -#define MP0_SMN_C2PMSG_85__CONTENT_MASK 0xFFFFFFFFL -//MP0_SMN_C2PMSG_86 -#define MP0_SMN_C2PMSG_86__CONTENT__SHIFT 0x0 -#define MP0_SMN_C2PMSG_86__CONTENT_MASK 0xFFFFFFFFL -//MP0_SMN_C2PMSG_87 -#define MP0_SMN_C2PMSG_87__CONTENT__SHIFT 0x0 -#define MP0_SMN_C2PMSG_87__CONTENT_MASK 0xFFFFFFFFL -//MP0_SMN_C2PMSG_88 -#define MP0_SMN_C2PMSG_88__CONTENT__SHIFT 0x0 -#define MP0_SMN_C2PMSG_88__CONTENT_MASK 0xFFFFFFFFL -//MP0_SMN_C2PMSG_89 -#define MP0_SMN_C2PMSG_89__CONTENT__SHIFT 0x0 
-#define MP0_SMN_C2PMSG_89__CONTENT_MASK 0xFFFFFFFFL -//MP0_SMN_C2PMSG_90 -#define MP0_SMN_C2PMSG_90__CONTENT__SHIFT 0x0 -#define MP0_SMN_C2PMSG_90__CONTENT_MASK 0xFFFFFFFFL -//MP0_SMN_C2PMSG_91 -#define MP0_SMN_C2PMSG_91__CONTENT__SHIFT 0x0 -#define MP0_SMN_C2PMSG_91__CONTENT_MASK 0xFFFFFFFFL -//MP0_SMN_C2PMSG_92 -#define MP0_SMN_C2PMSG_92__CONTENT__SHIFT 0x0 -#define MP0_SMN_C2PMSG_92__CONTENT_MASK 0xFFFFFFFFL -//MP0_SMN_C2PMSG_93 -#define MP0_SMN_C2PMSG_93__CONTENT__SHIFT 0x0 -#define MP0_SMN_C2PMSG_93__CONTENT_MASK 0xFFFFFFFFL -//MP0_SMN_C2PMSG_94 -#define MP0_SMN_C2PMSG_94__CONTENT__SHIFT 0x0 -#define MP0_SMN_C2PMSG_94__CONTENT_MASK 0xFFFFFFFFL -//MP0_SMN_C2PMSG_95 -#define MP0_SMN_C2PMSG_95__CONTENT__SHIFT 0x0 -#define MP0_SMN_C2PMSG_95__CONTENT_MASK 0xFFFFFFFFL -//MP0_SMN_C2PMSG_96 -#define MP0_SMN_C2PMSG_96__CONTENT__SHIFT 0x0 -#define MP0_SMN_C2PMSG_96__CONTENT_MASK 0xFFFFFFFFL -//MP0_SMN_C2PMSG_97 -#define MP0_SMN_C2PMSG_97__CONTENT__SHIFT 0x0 -#define MP0_SMN_C2PMSG_97__CONTENT_MASK 0xFFFFFFFFL -//MP0_SMN_C2PMSG_98 -#define MP0_SMN_C2PMSG_98__CONTENT__SHIFT 0x0 -#define MP0_SMN_C2PMSG_98__CONTENT_MASK 0xFFFFFFFFL -//MP0_SMN_C2PMSG_99 -#define MP0_SMN_C2PMSG_99__CONTENT__SHIFT 0x0 -#define MP0_SMN_C2PMSG_99__CONTENT_MASK 0xFFFFFFFFL -//MP0_SMN_C2PMSG_100 -#define MP0_SMN_C2PMSG_100__CONTENT__SHIFT 0x0 -#define MP0_SMN_C2PMSG_100__CONTENT_MASK 0xFFFFFFFFL -//MP0_SMN_C2PMSG_101 -#define MP0_SMN_C2PMSG_101__CONTENT__SHIFT 0x0 -#define MP0_SMN_C2PMSG_101__CONTENT_MASK 0xFFFFFFFFL -//MP0_SMN_C2PMSG_102 -#define MP0_SMN_C2PMSG_102__CONTENT__SHIFT 0x0 -#define MP0_SMN_C2PMSG_102__CONTENT_MASK 0xFFFFFFFFL -//MP0_SMN_C2PMSG_103 -#define MP0_SMN_C2PMSG_103__CONTENT__SHIFT 0x0 -#define MP0_SMN_C2PMSG_103__CONTENT_MASK 0xFFFFFFFFL -//MP0_SMN_IH_CREDIT -#define MP0_SMN_IH_CREDIT__CREDIT_VALUE__SHIFT 0x0 -#define MP0_SMN_IH_CREDIT__CLIENT_ID__SHIFT 0x10 -#define MP0_SMN_IH_CREDIT__CREDIT_VALUE_MASK 0x00000003L -#define MP0_SMN_IH_CREDIT__CLIENT_ID_MASK 0x00FF0000L -//MP0_SMN_IH_SW_INT -#define MP0_SMN_IH_SW_INT__ID__SHIFT 0x0 -#define MP0_SMN_IH_SW_INT__VALID__SHIFT 0x8 -#define MP0_SMN_IH_SW_INT__ID_MASK 0x000000FFL -#define MP0_SMN_IH_SW_INT__VALID_MASK 0x00000100L -//MP0_SMN_IH_SW_INT_CTRL -#define MP0_SMN_IH_SW_INT_CTRL__INT_MASK__SHIFT 0x0 -#define MP0_SMN_IH_SW_INT_CTRL__INT_ACK__SHIFT 0x8 -#define MP0_SMN_IH_SW_INT_CTRL__INT_MASK_MASK 0x00000001L -#define MP0_SMN_IH_SW_INT_CTRL__INT_ACK_MASK 0x00000100L - - -// addressBlock: mp_SmuMp1Pub_CruDec -//MP1_FIRMWARE_FLAGS -#define MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT 0x0 -#define MP1_FIRMWARE_FLAGS__RESERVED__SHIFT 0x1 -#define MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK 0x00000001L -#define MP1_FIRMWARE_FLAGS__RESERVED_MASK 0xFFFFFFFEL - - -// addressBlock: mp_SmuMp1_SmnDec -//MP1_SMN_C2PMSG_32 -#define MP1_SMN_C2PMSG_32__CONTENT__SHIFT 0x0 -#define MP1_SMN_C2PMSG_32__CONTENT_MASK 0xFFFFFFFFL -//MP1_SMN_C2PMSG_33 -#define MP1_SMN_C2PMSG_33__CONTENT__SHIFT 0x0 -#define MP1_SMN_C2PMSG_33__CONTENT_MASK 0xFFFFFFFFL -//MP1_SMN_C2PMSG_34 -#define MP1_SMN_C2PMSG_34__CONTENT__SHIFT 0x0 -#define MP1_SMN_C2PMSG_34__CONTENT_MASK 0xFFFFFFFFL -//MP1_SMN_C2PMSG_35 -#define MP1_SMN_C2PMSG_35__CONTENT__SHIFT 0x0 -#define MP1_SMN_C2PMSG_35__CONTENT_MASK 0xFFFFFFFFL -//MP1_SMN_C2PMSG_36 -#define MP1_SMN_C2PMSG_36__CONTENT__SHIFT 0x0 -#define MP1_SMN_C2PMSG_36__CONTENT_MASK 0xFFFFFFFFL -//MP1_SMN_C2PMSG_37 -#define MP1_SMN_C2PMSG_37__CONTENT__SHIFT 0x0 -#define MP1_SMN_C2PMSG_37__CONTENT_MASK 0xFFFFFFFFL -//MP1_SMN_C2PMSG_38 -#define 
MP1_SMN_C2PMSG_38__CONTENT__SHIFT 0x0 -#define MP1_SMN_C2PMSG_38__CONTENT_MASK 0xFFFFFFFFL -//MP1_SMN_C2PMSG_39 -#define MP1_SMN_C2PMSG_39__CONTENT__SHIFT 0x0 -#define MP1_SMN_C2PMSG_39__CONTENT_MASK 0xFFFFFFFFL -//MP1_SMN_C2PMSG_40 -#define MP1_SMN_C2PMSG_40__CONTENT__SHIFT 0x0 -#define MP1_SMN_C2PMSG_40__CONTENT_MASK 0xFFFFFFFFL -//MP1_SMN_C2PMSG_41 -#define MP1_SMN_C2PMSG_41__CONTENT__SHIFT 0x0 -#define MP1_SMN_C2PMSG_41__CONTENT_MASK 0xFFFFFFFFL -//MP1_SMN_C2PMSG_42 -#define MP1_SMN_C2PMSG_42__CONTENT__SHIFT 0x0 -#define MP1_SMN_C2PMSG_42__CONTENT_MASK 0xFFFFFFFFL -//MP1_SMN_C2PMSG_43 -#define MP1_SMN_C2PMSG_43__CONTENT__SHIFT 0x0 -#define MP1_SMN_C2PMSG_43__CONTENT_MASK 0xFFFFFFFFL -//MP1_SMN_C2PMSG_44 -#define MP1_SMN_C2PMSG_44__CONTENT__SHIFT 0x0 -#define MP1_SMN_C2PMSG_44__CONTENT_MASK 0xFFFFFFFFL -//MP1_SMN_C2PMSG_45 -#define MP1_SMN_C2PMSG_45__CONTENT__SHIFT 0x0 -#define MP1_SMN_C2PMSG_45__CONTENT_MASK 0xFFFFFFFFL -//MP1_SMN_C2PMSG_46 -#define MP1_SMN_C2PMSG_46__CONTENT__SHIFT 0x0 -#define MP1_SMN_C2PMSG_46__CONTENT_MASK 0xFFFFFFFFL -//MP1_SMN_C2PMSG_47 -#define MP1_SMN_C2PMSG_47__CONTENT__SHIFT 0x0 -#define MP1_SMN_C2PMSG_47__CONTENT_MASK 0xFFFFFFFFL -//MP1_SMN_C2PMSG_48 -#define MP1_SMN_C2PMSG_48__CONTENT__SHIFT 0x0 -#define MP1_SMN_C2PMSG_48__CONTENT_MASK 0xFFFFFFFFL -//MP1_SMN_C2PMSG_49 -#define MP1_SMN_C2PMSG_49__CONTENT__SHIFT 0x0 -#define MP1_SMN_C2PMSG_49__CONTENT_MASK 0xFFFFFFFFL -//MP1_SMN_C2PMSG_50 -#define MP1_SMN_C2PMSG_50__CONTENT__SHIFT 0x0 -#define MP1_SMN_C2PMSG_50__CONTENT_MASK 0xFFFFFFFFL -//MP1_SMN_C2PMSG_51 -#define MP1_SMN_C2PMSG_51__CONTENT__SHIFT 0x0 -#define MP1_SMN_C2PMSG_51__CONTENT_MASK 0xFFFFFFFFL -//MP1_SMN_C2PMSG_52 -#define MP1_SMN_C2PMSG_52__CONTENT__SHIFT 0x0 -#define MP1_SMN_C2PMSG_52__CONTENT_MASK 0xFFFFFFFFL -//MP1_SMN_C2PMSG_53 -#define MP1_SMN_C2PMSG_53__CONTENT__SHIFT 0x0 -#define MP1_SMN_C2PMSG_53__CONTENT_MASK 0xFFFFFFFFL -//MP1_SMN_C2PMSG_54 -#define MP1_SMN_C2PMSG_54__CONTENT__SHIFT 0x0 -#define MP1_SMN_C2PMSG_54__CONTENT_MASK 0xFFFFFFFFL -//MP1_SMN_C2PMSG_55 -#define MP1_SMN_C2PMSG_55__CONTENT__SHIFT 0x0 -#define MP1_SMN_C2PMSG_55__CONTENT_MASK 0xFFFFFFFFL -//MP1_SMN_C2PMSG_56 -#define MP1_SMN_C2PMSG_56__CONTENT__SHIFT 0x0 -#define MP1_SMN_C2PMSG_56__CONTENT_MASK 0xFFFFFFFFL -//MP1_SMN_C2PMSG_57 -#define MP1_SMN_C2PMSG_57__CONTENT__SHIFT 0x0 -#define MP1_SMN_C2PMSG_57__CONTENT_MASK 0xFFFFFFFFL -//MP1_SMN_C2PMSG_58 -#define MP1_SMN_C2PMSG_58__CONTENT__SHIFT 0x0 -#define MP1_SMN_C2PMSG_58__CONTENT_MASK 0xFFFFFFFFL -//MP1_SMN_C2PMSG_59 -#define MP1_SMN_C2PMSG_59__CONTENT__SHIFT 0x0 -#define MP1_SMN_C2PMSG_59__CONTENT_MASK 0xFFFFFFFFL -//MP1_SMN_C2PMSG_60 -#define MP1_SMN_C2PMSG_60__CONTENT__SHIFT 0x0 -#define MP1_SMN_C2PMSG_60__CONTENT_MASK 0xFFFFFFFFL -//MP1_SMN_C2PMSG_61 -#define MP1_SMN_C2PMSG_61__CONTENT__SHIFT 0x0 -#define MP1_SMN_C2PMSG_61__CONTENT_MASK 0xFFFFFFFFL -//MP1_SMN_C2PMSG_62 -#define MP1_SMN_C2PMSG_62__CONTENT__SHIFT 0x0 -#define MP1_SMN_C2PMSG_62__CONTENT_MASK 0xFFFFFFFFL -//MP1_SMN_C2PMSG_63 -#define MP1_SMN_C2PMSG_63__CONTENT__SHIFT 0x0 -#define MP1_SMN_C2PMSG_63__CONTENT_MASK 0xFFFFFFFFL -//MP1_SMN_C2PMSG_64 -#define MP1_SMN_C2PMSG_64__CONTENT__SHIFT 0x0 -#define MP1_SMN_C2PMSG_64__CONTENT_MASK 0xFFFFFFFFL -//MP1_SMN_C2PMSG_65 -#define MP1_SMN_C2PMSG_65__CONTENT__SHIFT 0x0 -#define MP1_SMN_C2PMSG_65__CONTENT_MASK 0xFFFFFFFFL -//MP1_SMN_C2PMSG_66 -#define MP1_SMN_C2PMSG_66__CONTENT__SHIFT 0x0 -#define MP1_SMN_C2PMSG_66__CONTENT_MASK 0xFFFFFFFFL -//MP1_SMN_C2PMSG_67 -#define MP1_SMN_C2PMSG_67__CONTENT__SHIFT 0x0 
-#define MP1_SMN_C2PMSG_67__CONTENT_MASK 0xFFFFFFFFL -//MP1_SMN_C2PMSG_68 -#define MP1_SMN_C2PMSG_68__CONTENT__SHIFT 0x0 -#define MP1_SMN_C2PMSG_68__CONTENT_MASK 0xFFFFFFFFL -//MP1_SMN_C2PMSG_69 -#define MP1_SMN_C2PMSG_69__CONTENT__SHIFT 0x0 -#define MP1_SMN_C2PMSG_69__CONTENT_MASK 0xFFFFFFFFL -//MP1_SMN_C2PMSG_70 -#define MP1_SMN_C2PMSG_70__CONTENT__SHIFT 0x0 -#define MP1_SMN_C2PMSG_70__CONTENT_MASK 0xFFFFFFFFL -//MP1_SMN_C2PMSG_71 -#define MP1_SMN_C2PMSG_71__CONTENT__SHIFT 0x0 -#define MP1_SMN_C2PMSG_71__CONTENT_MASK 0xFFFFFFFFL -//MP1_SMN_C2PMSG_72 -#define MP1_SMN_C2PMSG_72__CONTENT__SHIFT 0x0 -#define MP1_SMN_C2PMSG_72__CONTENT_MASK 0xFFFFFFFFL -//MP1_SMN_C2PMSG_73 -#define MP1_SMN_C2PMSG_73__CONTENT__SHIFT 0x0 -#define MP1_SMN_C2PMSG_73__CONTENT_MASK 0xFFFFFFFFL -//MP1_SMN_C2PMSG_74 -#define MP1_SMN_C2PMSG_74__CONTENT__SHIFT 0x0 -#define MP1_SMN_C2PMSG_74__CONTENT_MASK 0xFFFFFFFFL -//MP1_SMN_C2PMSG_75 -#define MP1_SMN_C2PMSG_75__CONTENT__SHIFT 0x0 -#define MP1_SMN_C2PMSG_75__CONTENT_MASK 0xFFFFFFFFL -//MP1_SMN_C2PMSG_76 -#define MP1_SMN_C2PMSG_76__CONTENT__SHIFT 0x0 -#define MP1_SMN_C2PMSG_76__CONTENT_MASK 0xFFFFFFFFL -//MP1_SMN_C2PMSG_77 -#define MP1_SMN_C2PMSG_77__CONTENT__SHIFT 0x0 -#define MP1_SMN_C2PMSG_77__CONTENT_MASK 0xFFFFFFFFL -//MP1_SMN_C2PMSG_78 -#define MP1_SMN_C2PMSG_78__CONTENT__SHIFT 0x0 -#define MP1_SMN_C2PMSG_78__CONTENT_MASK 0xFFFFFFFFL -//MP1_SMN_C2PMSG_79 -#define MP1_SMN_C2PMSG_79__CONTENT__SHIFT 0x0 -#define MP1_SMN_C2PMSG_79__CONTENT_MASK 0xFFFFFFFFL -//MP1_SMN_C2PMSG_80 -#define MP1_SMN_C2PMSG_80__CONTENT__SHIFT 0x0 -#define MP1_SMN_C2PMSG_80__CONTENT_MASK 0xFFFFFFFFL -//MP1_SMN_C2PMSG_81 -#define MP1_SMN_C2PMSG_81__CONTENT__SHIFT 0x0 -#define MP1_SMN_C2PMSG_81__CONTENT_MASK 0xFFFFFFFFL -//MP1_SMN_C2PMSG_82 -#define MP1_SMN_C2PMSG_82__CONTENT__SHIFT 0x0 -#define MP1_SMN_C2PMSG_82__CONTENT_MASK 0xFFFFFFFFL -//MP1_SMN_C2PMSG_83 -#define MP1_SMN_C2PMSG_83__CONTENT__SHIFT 0x0 -#define MP1_SMN_C2PMSG_83__CONTENT_MASK 0xFFFFFFFFL -//MP1_SMN_C2PMSG_84 -#define MP1_SMN_C2PMSG_84__CONTENT__SHIFT 0x0 -#define MP1_SMN_C2PMSG_84__CONTENT_MASK 0xFFFFFFFFL -//MP1_SMN_C2PMSG_85 -#define MP1_SMN_C2PMSG_85__CONTENT__SHIFT 0x0 -#define MP1_SMN_C2PMSG_85__CONTENT_MASK 0xFFFFFFFFL -//MP1_SMN_C2PMSG_86 -#define MP1_SMN_C2PMSG_86__CONTENT__SHIFT 0x0 -#define MP1_SMN_C2PMSG_86__CONTENT_MASK 0xFFFFFFFFL -//MP1_SMN_C2PMSG_87 -#define MP1_SMN_C2PMSG_87__CONTENT__SHIFT 0x0 -#define MP1_SMN_C2PMSG_87__CONTENT_MASK 0xFFFFFFFFL -//MP1_SMN_C2PMSG_88 -#define MP1_SMN_C2PMSG_88__CONTENT__SHIFT 0x0 -#define MP1_SMN_C2PMSG_88__CONTENT_MASK 0xFFFFFFFFL -//MP1_SMN_C2PMSG_89 -#define MP1_SMN_C2PMSG_89__CONTENT__SHIFT 0x0 -#define MP1_SMN_C2PMSG_89__CONTENT_MASK 0xFFFFFFFFL -//MP1_SMN_C2PMSG_90 -#define MP1_SMN_C2PMSG_90__CONTENT__SHIFT 0x0 -#define MP1_SMN_C2PMSG_90__CONTENT_MASK 0xFFFFFFFFL -//MP1_SMN_C2PMSG_91 -#define MP1_SMN_C2PMSG_91__CONTENT__SHIFT 0x0 -#define MP1_SMN_C2PMSG_91__CONTENT_MASK 0xFFFFFFFFL -//MP1_SMN_C2PMSG_92 -#define MP1_SMN_C2PMSG_92__CONTENT__SHIFT 0x0 -#define MP1_SMN_C2PMSG_92__CONTENT_MASK 0xFFFFFFFFL -//MP1_SMN_C2PMSG_93 -#define MP1_SMN_C2PMSG_93__CONTENT__SHIFT 0x0 -#define MP1_SMN_C2PMSG_93__CONTENT_MASK 0xFFFFFFFFL -//MP1_SMN_C2PMSG_94 -#define MP1_SMN_C2PMSG_94__CONTENT__SHIFT 0x0 -#define MP1_SMN_C2PMSG_94__CONTENT_MASK 0xFFFFFFFFL -//MP1_SMN_C2PMSG_95 -#define MP1_SMN_C2PMSG_95__CONTENT__SHIFT 0x0 -#define MP1_SMN_C2PMSG_95__CONTENT_MASK 0xFFFFFFFFL -//MP1_SMN_C2PMSG_96 -#define MP1_SMN_C2PMSG_96__CONTENT__SHIFT 0x0 -#define MP1_SMN_C2PMSG_96__CONTENT_MASK 
0xFFFFFFFFL -//MP1_SMN_C2PMSG_97 -#define MP1_SMN_C2PMSG_97__CONTENT__SHIFT 0x0 -#define MP1_SMN_C2PMSG_97__CONTENT_MASK 0xFFFFFFFFL -//MP1_SMN_C2PMSG_98 -#define MP1_SMN_C2PMSG_98__CONTENT__SHIFT 0x0 -#define MP1_SMN_C2PMSG_98__CONTENT_MASK 0xFFFFFFFFL -//MP1_SMN_C2PMSG_99 -#define MP1_SMN_C2PMSG_99__CONTENT__SHIFT 0x0 -#define MP1_SMN_C2PMSG_99__CONTENT_MASK 0xFFFFFFFFL -//MP1_SMN_C2PMSG_100 -#define MP1_SMN_C2PMSG_100__CONTENT__SHIFT 0x0 -#define MP1_SMN_C2PMSG_100__CONTENT_MASK 0xFFFFFFFFL -//MP1_SMN_C2PMSG_101 -#define MP1_SMN_C2PMSG_101__CONTENT__SHIFT 0x0 -#define MP1_SMN_C2PMSG_101__CONTENT_MASK 0xFFFFFFFFL -//MP1_SMN_C2PMSG_102 -#define MP1_SMN_C2PMSG_102__CONTENT__SHIFT 0x0 -#define MP1_SMN_C2PMSG_102__CONTENT_MASK 0xFFFFFFFFL -//MP1_SMN_C2PMSG_103 -#define MP1_SMN_C2PMSG_103__CONTENT__SHIFT 0x0 -#define MP1_SMN_C2PMSG_103__CONTENT_MASK 0xFFFFFFFFL -//MP1_SMN_IH_CREDIT -#define MP1_SMN_IH_CREDIT__CREDIT_VALUE__SHIFT 0x0 -#define MP1_SMN_IH_CREDIT__CLIENT_ID__SHIFT 0x10 -#define MP1_SMN_IH_CREDIT__CREDIT_VALUE_MASK 0x00000003L -#define MP1_SMN_IH_CREDIT__CLIENT_ID_MASK 0x00FF0000L -//MP1_SMN_IH_SW_INT -#define MP1_SMN_IH_SW_INT__ID__SHIFT 0x0 -#define MP1_SMN_IH_SW_INT__VALID__SHIFT 0x8 -#define MP1_SMN_IH_SW_INT__ID_MASK 0x000000FFL -#define MP1_SMN_IH_SW_INT__VALID_MASK 0x00000100L -//MP1_SMN_IH_SW_INT_CTRL -#define MP1_SMN_IH_SW_INT_CTRL__INT_MASK__SHIFT 0x0 -#define MP1_SMN_IH_SW_INT_CTRL__INT_ACK__SHIFT 0x8 -#define MP1_SMN_IH_SW_INT_CTRL__INT_MASK_MASK 0x00000001L -#define MP1_SMN_IH_SW_INT_CTRL__INT_ACK_MASK 0x00000100L -//MP1_SMN_FPS_CNT -#define MP1_SMN_FPS_CNT__COUNT__SHIFT 0x0 -#define MP1_SMN_FPS_CNT__COUNT_MASK 0xFFFFFFFFL -//MP1_SMN_EXT_SCRATCH0 -#define MP1_SMN_EXT_SCRATCH0__DATA__SHIFT 0x0 -#define MP1_SMN_EXT_SCRATCH0__DATA_MASK 0xFFFFFFFFL -//MP1_SMN_EXT_SCRATCH1 -#define MP1_SMN_EXT_SCRATCH1__DATA__SHIFT 0x0 -#define MP1_SMN_EXT_SCRATCH1__DATA_MASK 0xFFFFFFFFL -//MP1_SMN_EXT_SCRATCH2 -#define MP1_SMN_EXT_SCRATCH2__DATA__SHIFT 0x0 -#define MP1_SMN_EXT_SCRATCH2__DATA_MASK 0xFFFFFFFFL -//MP1_SMN_EXT_SCRATCH3 -#define MP1_SMN_EXT_SCRATCH3__DATA__SHIFT 0x0 -#define MP1_SMN_EXT_SCRATCH3__DATA_MASK 0xFFFFFFFFL -//MP1_SMN_EXT_SCRATCH4 -#define MP1_SMN_EXT_SCRATCH4__DATA__SHIFT 0x0 -#define MP1_SMN_EXT_SCRATCH4__DATA_MASK 0xFFFFFFFFL -//MP1_SMN_EXT_SCRATCH5 -#define MP1_SMN_EXT_SCRATCH5__DATA__SHIFT 0x0 -#define MP1_SMN_EXT_SCRATCH5__DATA_MASK 0xFFFFFFFFL -//MP1_SMN_EXT_SCRATCH6 -#define MP1_SMN_EXT_SCRATCH6__DATA__SHIFT 0x0 -#define MP1_SMN_EXT_SCRATCH6__DATA_MASK 0xFFFFFFFFL -//MP1_SMN_EXT_SCRATCH7 -#define MP1_SMN_EXT_SCRATCH7__DATA__SHIFT 0x0 -#define MP1_SMN_EXT_SCRATCH7__DATA_MASK 0xFFFFFFFFL - - -#endif diff --git a/drivers/gpu/drm/amd/pm/inc/smu_v13_0.h b/drivers/gpu/drm/amd/pm/inc/smu_v13_0.h index 6119a36b2cba..3fea2430dec0 100644 --- a/drivers/gpu/drm/amd/pm/inc/smu_v13_0.h +++ b/drivers/gpu/drm/amd/pm/inc/smu_v13_0.h @@ -26,6 +26,7 @@ #include "amdgpu_smu.h" #define SMU13_DRIVER_IF_VERSION_INV 0xFFFFFFFF +#define SMU13_DRIVER_IF_VERSION_YELLOW_CARP 0x03 #define SMU13_DRIVER_IF_VERSION_ALDE 0x07 /* MP Apertures */ diff --git a/drivers/gpu/drm/amd/pm/inc/smu_v13_0_1.h b/drivers/gpu/drm/amd/pm/inc/smu_v13_0_1.h deleted file mode 100644 index b6c976a4d578..000000000000 --- a/drivers/gpu/drm/amd/pm/inc/smu_v13_0_1.h +++ /dev/null @@ -1,57 +0,0 @@ -/* - * Copyright 2020 Advanced Micro Devices, Inc. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - */ -#ifndef __SMU_V13_0_1_H__ -#define __SMU_V13_0_1_H__ - -#include "amdgpu_smu.h" - -#define SMU13_0_1_DRIVER_IF_VERSION_INV 0xFFFFFFFF -#define SMU13_0_1_DRIVER_IF_VERSION_YELLOW_CARP 0x3 - -/* MP Apertures */ -#define MP0_Public 0x03800000 -#define MP0_SRAM 0x03900000 -#define MP1_Public 0x03b00000 -#define MP1_SRAM 0x03c00004 - -/* address block */ -#define smnMP1_FIRMWARE_FLAGS 0x3010024 - - -#if defined(SWSMU_CODE_LAYER_L2) || defined(SWSMU_CODE_LAYER_L3) - -int smu_v13_0_1_check_fw_status(struct smu_context *smu); - -int smu_v13_0_1_check_fw_version(struct smu_context *smu); - -int smu_v13_0_1_fini_smc_tables(struct smu_context *smu); - -int smu_v13_0_1_get_vbios_bootup_values(struct smu_context *smu); - -int smu_v13_0_1_set_default_dpm_tables(struct smu_context *smu); - -int smu_v13_0_1_set_driver_table_location(struct smu_context *smu); - -int smu_v13_0_1_gfx_off_control(struct smu_context *smu, bool enable); -#endif -#endif diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c index 388c5cb5c647..0a5d46ac9ccd 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c @@ -1528,6 +1528,7 @@ int smu_v11_0_baco_set_state(struct smu_context *smu, enum smu_baco_state state) case CHIP_SIENNA_CICHLID: case CHIP_NAVY_FLOUNDER: case CHIP_DIMGREY_CAVEFISH: + case CHIP_BEIGE_GOBY: if (amdgpu_runtime_pm == 2) ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_EnterBaco, diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/Makefile b/drivers/gpu/drm/amd/pm/swsmu/smu13/Makefile index 9b3a8503f5cd..d4c4c495762c 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/Makefile +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/Makefile @@ -23,7 +23,7 @@ # Makefile for the 'smu manager' sub-component of powerplay. # It provides the smu management services for the driver. 
-SMU13_MGR = smu_v13_0.o aldebaran_ppt.o smu_v13_0_1.o yellow_carp_ppt.o +SMU13_MGR = smu_v13_0.o aldebaran_ppt.o yellow_carp_ppt.o AMD_SWSMU_SMU13MGR = $(addprefix $(AMD_SWSMU_PATH)/smu13/,$(SMU13_MGR)) diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c index a3dc7194aaf8..a421ba85bd6d 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c @@ -210,6 +210,9 @@ int smu_v13_0_check_fw_version(struct smu_context *smu) case CHIP_ALDEBARAN: smu->smc_driver_if_version = SMU13_DRIVER_IF_VERSION_ALDE; break; + case CHIP_YELLOW_CARP: + smu->smc_driver_if_version = SMU13_DRIVER_IF_VERSION_YELLOW_CARP; + break; default: dev_err(smu->adev->dev, "smu unsupported asic type:%d.\n", smu->adev->asic_type); smu->smc_driver_if_version = SMU13_DRIVER_IF_VERSION_INV; @@ -694,6 +697,27 @@ failed: return ret; } +int smu_v13_0_gfx_off_control(struct smu_context *smu, bool enable) +{ + int ret = 0; + struct amdgpu_device *adev = smu->adev; + + switch (adev->asic_type) { + case CHIP_YELLOW_CARP: + if (!(adev->pm.pp_feature & PP_GFXOFF_MASK)) + return 0; + if (enable) + ret = smu_cmn_send_smc_msg(smu, SMU_MSG_AllowGfxOff, NULL); + else + ret = smu_cmn_send_smc_msg(smu, SMU_MSG_DisallowGfxOff, NULL); + break; + default: + break; + } + + return ret; +} + int smu_v13_0_system_features_control(struct smu_context *smu, bool en) { diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_1.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_1.c deleted file mode 100644 index 61917b49f2bf..000000000000 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_1.c +++ /dev/null @@ -1,311 +0,0 @@ -/* - * Copyright 2020 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - */ - -//#include <linux/reboot.h> - -#define SWSMU_CODE_LAYER_L3 - -#include "amdgpu.h" -#include "amdgpu_smu.h" -#include "smu_v13_0_1.h" -#include "soc15_common.h" -#include "smu_cmn.h" -#include "atomfirmware.h" -#include "amdgpu_atomfirmware.h" -#include "amdgpu_atombios.h" -#include "atom.h" - -#include "asic_reg/mp/mp_13_0_1_offset.h" -#include "asic_reg/mp/mp_13_0_1_sh_mask.h" - -/* - * DO NOT use these for err/warn/info/debug messages. - * Use dev_err, dev_warn, dev_info and dev_dbg instead. - * They are more MGPU friendly. 
- */ -#undef pr_err -#undef pr_warn -#undef pr_info -#undef pr_debug - -int smu_v13_0_1_check_fw_status(struct smu_context *smu) -{ - struct amdgpu_device *adev = smu->adev; - uint32_t mp1_fw_flags; - - mp1_fw_flags = RREG32_PCIE(MP1_Public | - (smnMP1_FIRMWARE_FLAGS & 0xffffffff)); - - if ((mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >> - MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT) - return 0; - - return -EIO; -} - -int smu_v13_0_1_check_fw_version(struct smu_context *smu) -{ - uint32_t if_version = 0xff, smu_version = 0xff; - uint16_t smu_major; - uint8_t smu_minor, smu_debug; - int ret = 0; - - ret = smu_cmn_get_smc_version(smu, &if_version, &smu_version); - if (ret) - return ret; - - smu_major = (smu_version >> 16) & 0xffff; - smu_minor = (smu_version >> 8) & 0xff; - smu_debug = (smu_version >> 0) & 0xff; - - switch (smu->adev->asic_type) { - case CHIP_YELLOW_CARP: - smu->smc_driver_if_version = SMU13_0_1_DRIVER_IF_VERSION_YELLOW_CARP; - break; - - default: - dev_err(smu->adev->dev, "smu unsupported asic type:%d.\n", smu->adev->asic_type); - smu->smc_driver_if_version = SMU13_0_1_DRIVER_IF_VERSION_INV; - break; - } - - dev_info(smu->adev->dev, "smu fw reported version = 0x%08x (%d.%d.%d)\n", - smu_version, smu_major, smu_minor, smu_debug); - - /* - * 1. if_version mismatch is not critical as our fw is designed - * to be backward compatible. - * 2. New fw usually brings some optimizations. But that's visible - * only on the paired driver. - * Considering above, we just leave user a warning message instead - * of halt driver loading. - */ - if (if_version != smu->smc_driver_if_version) { - dev_info(smu->adev->dev, "smu driver if version = 0x%08x, smu fw if version = 0x%08x, " - "smu fw version = 0x%08x (%d.%d.%d)\n", - smu->smc_driver_if_version, if_version, - smu_version, smu_major, smu_minor, smu_debug); - dev_warn(smu->adev->dev, "SMU driver if version not matched\n"); - } - - return ret; -} - -int smu_v13_0_1_fini_smc_tables(struct smu_context *smu) -{ - struct smu_table_context *smu_table = &smu->smu_table; - - kfree(smu_table->clocks_table); - smu_table->clocks_table = NULL; - - kfree(smu_table->metrics_table); - smu_table->metrics_table = NULL; - - kfree(smu_table->watermarks_table); - smu_table->watermarks_table = NULL; - - return 0; -} - -static int smu_v13_0_1_atom_get_smu_clockinfo(struct amdgpu_device *adev, - uint8_t clk_id, - uint8_t syspll_id, - uint32_t *clk_freq) -{ - struct atom_get_smu_clock_info_parameters_v3_1 input = {0}; - struct atom_get_smu_clock_info_output_parameters_v3_1 *output; - int ret, index; - - input.clk_id = clk_id; - input.syspll_id = syspll_id; - input.command = GET_SMU_CLOCK_INFO_V3_1_GET_CLOCK_FREQ; - index = get_index_into_master_table(atom_master_list_of_command_functions_v2_1, - getsmuclockinfo); - - ret = amdgpu_atom_execute_table(adev->mode_info.atom_context, index, - (uint32_t *)&input); - if (ret) - return -EINVAL; - - output = (struct atom_get_smu_clock_info_output_parameters_v3_1 *)&input; - *clk_freq = le32_to_cpu(output->atom_smu_outputclkfreq.smu_clock_freq_hz) / 10000; - - return 0; -} - -int smu_v13_0_1_get_vbios_bootup_values(struct smu_context *smu) -{ - int ret, index; - uint16_t size; - uint8_t frev, crev; - struct atom_common_table_header *header; - struct atom_firmware_info_v3_4 *v_3_4; - struct atom_firmware_info_v3_3 *v_3_3; - struct atom_firmware_info_v3_1 *v_3_1; - - index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1, - firmwareinfo); - - ret = 
amdgpu_atombios_get_data_table(smu->adev, index, &size, &frev, &crev, - (uint8_t **)&header); - if (ret) - return ret; - - if (header->format_revision != 3) { - dev_err(smu->adev->dev, "unknown atom_firmware_info version! for smu13\n"); - return -EINVAL; - } - - switch (header->content_revision) { - case 0: - case 1: - case 2: - v_3_1 = (struct atom_firmware_info_v3_1 *)header; - smu->smu_table.boot_values.revision = v_3_1->firmware_revision; - smu->smu_table.boot_values.gfxclk = v_3_1->bootup_sclk_in10khz; - smu->smu_table.boot_values.uclk = v_3_1->bootup_mclk_in10khz; - smu->smu_table.boot_values.socclk = 0; - smu->smu_table.boot_values.dcefclk = 0; - smu->smu_table.boot_values.vddc = v_3_1->bootup_vddc_mv; - smu->smu_table.boot_values.vddci = v_3_1->bootup_vddci_mv; - smu->smu_table.boot_values.mvddc = v_3_1->bootup_mvddc_mv; - smu->smu_table.boot_values.vdd_gfx = v_3_1->bootup_vddgfx_mv; - smu->smu_table.boot_values.cooling_id = v_3_1->coolingsolution_id; - break; - case 3: - v_3_3 = (struct atom_firmware_info_v3_3 *)header; - smu->smu_table.boot_values.revision = v_3_3->firmware_revision; - smu->smu_table.boot_values.gfxclk = v_3_3->bootup_sclk_in10khz; - smu->smu_table.boot_values.uclk = v_3_3->bootup_mclk_in10khz; - smu->smu_table.boot_values.socclk = 0; - smu->smu_table.boot_values.dcefclk = 0; - smu->smu_table.boot_values.vddc = v_3_3->bootup_vddc_mv; - smu->smu_table.boot_values.vddci = v_3_3->bootup_vddci_mv; - smu->smu_table.boot_values.mvddc = v_3_3->bootup_mvddc_mv; - smu->smu_table.boot_values.vdd_gfx = v_3_3->bootup_vddgfx_mv; - smu->smu_table.boot_values.cooling_id = v_3_3->coolingsolution_id; - break; - case 4: - default: - v_3_4 = (struct atom_firmware_info_v3_4 *)header; - smu->smu_table.boot_values.revision = v_3_4->firmware_revision; - smu->smu_table.boot_values.gfxclk = v_3_4->bootup_sclk_in10khz; - smu->smu_table.boot_values.uclk = v_3_4->bootup_mclk_in10khz; - smu->smu_table.boot_values.socclk = 0; - smu->smu_table.boot_values.dcefclk = 0; - smu->smu_table.boot_values.vddc = v_3_4->bootup_vddc_mv; - smu->smu_table.boot_values.vddci = v_3_4->bootup_vddci_mv; - smu->smu_table.boot_values.mvddc = v_3_4->bootup_mvddc_mv; - smu->smu_table.boot_values.vdd_gfx = v_3_4->bootup_vddgfx_mv; - smu->smu_table.boot_values.cooling_id = v_3_4->coolingsolution_id; - break; - } - - smu->smu_table.boot_values.format_revision = header->format_revision; - smu->smu_table.boot_values.content_revision = header->content_revision; - - smu_v13_0_1_atom_get_smu_clockinfo(smu->adev, - (uint8_t)SMU11_SYSPLL0_SOCCLK_ID, - (uint8_t)0, - &smu->smu_table.boot_values.socclk); - - smu_v13_0_1_atom_get_smu_clockinfo(smu->adev, - (uint8_t)SMU11_SYSPLL0_DCEFCLK_ID, - (uint8_t)0, - &smu->smu_table.boot_values.dcefclk); - - smu_v13_0_1_atom_get_smu_clockinfo(smu->adev, - (uint8_t)SMU11_SYSPLL0_ECLK_ID, - (uint8_t)0, - &smu->smu_table.boot_values.eclk); - - smu_v13_0_1_atom_get_smu_clockinfo(smu->adev, - (uint8_t)SMU11_SYSPLL0_VCLK_ID, - (uint8_t)0, - &smu->smu_table.boot_values.vclk); - - smu_v13_0_1_atom_get_smu_clockinfo(smu->adev, - (uint8_t)SMU11_SYSPLL0_DCLK_ID, - (uint8_t)0, - &smu->smu_table.boot_values.dclk); - - if ((smu->smu_table.boot_values.format_revision == 3) && - (smu->smu_table.boot_values.content_revision >= 2)) - smu_v13_0_1_atom_get_smu_clockinfo(smu->adev, - (uint8_t)SMU11_SYSPLL1_0_FCLK_ID, - (uint8_t)SMU11_SYSPLL1_2_ID, - &smu->smu_table.boot_values.fclk); - - return 0; -} - -int smu_v13_0_1_set_default_dpm_tables(struct smu_context *smu) -{ - struct smu_table_context *smu_table = 
&smu->smu_table; - - return smu_cmn_update_table(smu, SMU_TABLE_DPMCLOCKS, 0, smu_table->clocks_table, false); -} - -int smu_v13_0_1_set_driver_table_location(struct smu_context *smu) -{ - struct smu_table *driver_table = &smu->smu_table.driver_table; - int ret = 0; - - if (!driver_table->mc_address) - return 0; - - ret = smu_cmn_send_smc_msg_with_param(smu, - SMU_MSG_SetDriverDramAddrHigh, - upper_32_bits(driver_table->mc_address), - NULL); - - if (ret) - return ret; - - ret = smu_cmn_send_smc_msg_with_param(smu, - SMU_MSG_SetDriverDramAddrLow, - lower_32_bits(driver_table->mc_address), - NULL); - - return ret; -} - -int smu_v13_0_1_gfx_off_control(struct smu_context *smu, bool enable) -{ - int ret = 0; - struct amdgpu_device *adev = smu->adev; - - switch (adev->asic_type) { - case CHIP_YELLOW_CARP: - if (!(adev->pm.pp_feature & PP_GFXOFF_MASK)) - return 0; - if (enable) - ret = smu_cmn_send_smc_msg(smu, SMU_MSG_AllowGfxOff, NULL); - else - ret = smu_cmn_send_smc_msg(smu, SMU_MSG_DisallowGfxOff, NULL); - break; - default: - break; - } - - return ret; -} diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c index 18a1ffdca227..0cfeb9fc7c03 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c @@ -25,7 +25,7 @@ #include "amdgpu.h" #include "amdgpu_smu.h" -#include "smu_v13_0_1.h" +#include "smu_v13_0.h" #include "smu13_driver_if_yellow_carp.h" #include "yellow_carp_ppt.h" #include "smu_v13_0_1_ppsmc.h" @@ -186,6 +186,22 @@ err0_out: return -ENOMEM; } +static int yellow_carp_fini_smc_tables(struct smu_context *smu) +{ + struct smu_table_context *smu_table = &smu->smu_table; + + kfree(smu_table->clocks_table); + smu_table->clocks_table = NULL; + + kfree(smu_table->metrics_table); + smu_table->metrics_table = NULL; + + kfree(smu_table->watermarks_table); + smu_table->watermarks_table = NULL; + + return 0; +} + static int yellow_carp_system_features_control(struct smu_context *smu, bool en) { struct smu_feature *feature = &smu->smu_feature; @@ -282,13 +298,9 @@ static int yellow_carp_mode_reset(struct smu_context *smu, int type) if (index < 0) return index == -EACCES ? 
0 : index; - mutex_lock(&smu->message_lock); - - ret = smu_cmn_send_msg_without_waiting(smu, (uint16_t)index, type); - - mutex_unlock(&smu->message_lock); - - mdelay(10); + ret = smu_cmn_send_smc_msg_with_param(smu, (uint16_t)index, type, NULL); + if (ret) + dev_err(smu->adev->dev, "Failed to mode reset!\n"); return ret; } @@ -659,6 +671,13 @@ static ssize_t yellow_carp_get_gpu_metrics(struct smu_context *smu, return sizeof(struct gpu_metrics_v2_1); } +static int yellow_carp_set_default_dpm_tables(struct smu_context *smu) +{ + struct smu_table_context *smu_table = &smu->smu_table; + + return smu_cmn_update_table(smu, SMU_TABLE_DPMCLOCKS, 0, smu_table->clocks_table, false); +} + static int yellow_carp_od_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_TABLE_COMMAND type, long input[], uint32_t size) { @@ -1203,17 +1222,17 @@ static int yellow_carp_set_fine_grain_gfx_freq_parameters(struct smu_context *sm } static const struct pptable_funcs yellow_carp_ppt_funcs = { - .check_fw_status = smu_v13_0_1_check_fw_status, - .check_fw_version = smu_v13_0_1_check_fw_version, + .check_fw_status = smu_v13_0_check_fw_status, + .check_fw_version = smu_v13_0_check_fw_version, .init_smc_tables = yellow_carp_init_smc_tables, - .fini_smc_tables = smu_v13_0_1_fini_smc_tables, - .get_vbios_bootup_values = smu_v13_0_1_get_vbios_bootup_values, + .fini_smc_tables = yellow_carp_fini_smc_tables, + .get_vbios_bootup_values = smu_v13_0_get_vbios_bootup_values, .system_features_control = yellow_carp_system_features_control, .send_smc_msg_with_param = smu_cmn_send_smc_msg_with_param, .send_smc_msg = smu_cmn_send_smc_msg, .dpm_set_vcn_enable = yellow_carp_dpm_set_vcn_enable, .dpm_set_jpeg_enable = yellow_carp_dpm_set_jpeg_enable, - .set_default_dpm_table = smu_v13_0_1_set_default_dpm_tables, + .set_default_dpm_table = yellow_carp_set_default_dpm_tables, .read_sensor = yellow_carp_read_sensor, .is_dpm_running = yellow_carp_is_dpm_running, .set_watermarks_table = yellow_carp_set_watermarks_table, @@ -1222,8 +1241,8 @@ static const struct pptable_funcs yellow_carp_ppt_funcs = { .get_gpu_metrics = yellow_carp_get_gpu_metrics, .get_enabled_mask = smu_cmn_get_enabled_32_bits_mask, .get_pp_feature_mask = smu_cmn_get_pp_feature_mask, - .set_driver_table_location = smu_v13_0_1_set_driver_table_location, - .gfx_off_control = smu_v13_0_1_gfx_off_control, + .set_driver_table_location = smu_v13_0_set_driver_table_location, + .gfx_off_control = smu_v13_0_gfx_off_control, .post_init = yellow_carp_post_smu_init, .mode2_reset = yellow_carp_mode2_reset, .get_dpm_ultimate_freq = yellow_carp_get_dpm_ultimate_freq, diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c index f4fb68e8955a..e382b7f2353b 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c @@ -62,6 +62,7 @@ static void try_to_writeback(struct drm_i915_gem_object *obj, switch (obj->mm.madv) { case I915_MADV_DONTNEED: i915_gem_object_truncate(obj); + return; case __I915_MADV_PURGED: return; } diff --git a/drivers/gpu/drm/i915/gt/gen8_ppgtt.c b/drivers/gpu/drm/i915/gt/gen8_ppgtt.c index 21c8b7350b7a..da4f5eb43ac2 100644 --- a/drivers/gpu/drm/i915/gt/gen8_ppgtt.c +++ b/drivers/gpu/drm/i915/gt/gen8_ppgtt.c @@ -303,10 +303,7 @@ static void __gen8_ppgtt_alloc(struct i915_address_space * const vm, __i915_gem_object_pin_pages(pt->base); i915_gem_object_make_unshrinkable(pt->base); - if (lvl || - gen8_pt_count(*start, end) < I915_PDES || - intel_vgpu_active(vm->i915)) - 
fill_px(pt, vm->scratch[lvl]->encode); + fill_px(pt, vm->scratch[lvl]->encode); spin_lock(&pd->lock); if (likely(!pd->entry[idx])) { diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c b/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c index cac7f3f44642..f8948de72036 100644 --- a/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c +++ b/drivers/gpu/drm/i915/gt/intel_ggtt_fencing.c @@ -348,7 +348,7 @@ static struct i915_fence_reg *fence_find(struct i915_ggtt *ggtt) if (intel_has_pending_fb_unpin(ggtt->vm.i915)) return ERR_PTR(-EAGAIN); - return ERR_PTR(-EDEADLK); + return ERR_PTR(-ENOBUFS); } int __i915_vma_pin_fence(struct i915_vma *vma) diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c index 141178754231..1e8a971a86f2 100644 --- a/drivers/gpu/drm/msm/msm_gem.c +++ b/drivers/gpu/drm/msm/msm_gem.c @@ -1169,7 +1169,7 @@ static int msm_gem_new_impl(struct drm_device *dev, case MSM_BO_CACHED_COHERENT: if (priv->has_cached_coherent) break; - /* fallthrough */ + fallthrough; default: DRM_DEV_ERROR(dev->dev, "invalid cache flag: %x\n", (flags & MSM_BO_CACHE_MASK)); diff --git a/drivers/gpu/drm/panel/panel-novatek-nt35510.c b/drivers/gpu/drm/panel/panel-novatek-nt35510.c index ef70140c5b09..873cbd38e6d3 100644 --- a/drivers/gpu/drm/panel/panel-novatek-nt35510.c +++ b/drivers/gpu/drm/panel/panel-novatek-nt35510.c @@ -706,9 +706,7 @@ static int nt35510_power_on(struct nt35510 *nt) if (ret) return ret; - ret = nt35510_read_id(nt); - if (ret) - return ret; + nt35510_read_id(nt); /* Set up stuff in manufacturer control, page 1 */ ret = nt35510_send_long(nt, dsi, MCS_CMD_MAUCCTR, diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c index 19fd39d9a00c..37a1b6a6ad6d 100644 --- a/drivers/gpu/drm/qxl/qxl_ttm.c +++ b/drivers/gpu/drm/qxl/qxl_ttm.c @@ -127,7 +127,7 @@ static void qxl_bo_move_notify(struct ttm_buffer_object *bo, struct qxl_bo *qbo; struct qxl_device *qdev; - if (!qxl_ttm_bo_is_qxl_bo(bo)) + if (!qxl_ttm_bo_is_qxl_bo(bo) || !bo->resource) return; qbo = to_qxl_bo(bo); qdev = to_qxl(qbo->tbo.base.dev); diff --git a/drivers/gpu/drm/ttm/ttm_range_manager.c b/drivers/gpu/drm/ttm/ttm_range_manager.c index 03395386e8a7..f4b08a8705b3 100644 --- a/drivers/gpu/drm/ttm/ttm_range_manager.c +++ b/drivers/gpu/drm/ttm/ttm_range_manager.c @@ -181,6 +181,9 @@ int ttm_range_man_fini(struct ttm_device *bdev, struct drm_mm *mm = &rman->mm; int ret; + if (!man) + return 0; + ttm_resource_manager_set_used(man, false); ret = ttm_resource_manager_evict_all(bdev, man); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index 6f5ea00973e0..45aeeca9b8f6 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c @@ -36,6 +36,7 @@ #include <drm/drm_ioctl.h> #include <drm/drm_sysfs.h> #include <drm/ttm/ttm_bo_driver.h> +#include <drm/ttm/ttm_range_manager.h> #include <drm/ttm/ttm_placement.h> #include <generated/utsrelease.h> diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c index 5648664f71bc..f2d625415458 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c @@ -354,7 +354,6 @@ static void vmw_otable_batch_takedown(struct vmw_private *dev_priv, ttm_bo_unpin(bo); ttm_bo_unreserve(bo); - ttm_bo_unpin(batch->otable_bo); ttm_bo_put(batch->otable_bo); batch->otable_bo = NULL; } diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c index caf6d0c4bc1b..142308526ec6 100644 --- a/drivers/hv/channel_mgmt.c +++ b/drivers/hv/channel_mgmt.c 
@@ -605,6 +605,17 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel) */ mutex_lock(&vmbus_connection.channel_mutex); + list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) { + if (guid_equal(&channel->offermsg.offer.if_type, + &newchannel->offermsg.offer.if_type) && + guid_equal(&channel->offermsg.offer.if_instance, + &newchannel->offermsg.offer.if_instance)) { + fnew = false; + newchannel->primary_channel = channel; + break; + } + } + init_vp_index(newchannel); /* Remember the channels that should be cleaned up upon suspend. */ @@ -617,16 +628,6 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel) */ atomic_dec(&vmbus_connection.offer_in_progress); - list_for_each_entry(channel, &vmbus_connection.chn_list, listentry) { - if (guid_equal(&channel->offermsg.offer.if_type, - &newchannel->offermsg.offer.if_type) && - guid_equal(&channel->offermsg.offer.if_instance, - &newchannel->offermsg.offer.if_instance)) { - fnew = false; - break; - } - } - if (fnew) { list_add_tail(&newchannel->listentry, &vmbus_connection.chn_list); @@ -647,7 +648,6 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel) /* * Process the sub-channel. */ - newchannel->primary_channel = channel; list_add_tail(&newchannel->sc_list, &channel->sc_list); } @@ -684,6 +684,30 @@ static void vmbus_process_offer(struct vmbus_channel *newchannel) } /* + * Check if CPUs used by other channels of the same device. + * It should only be called by init_vp_index(). + */ +static bool hv_cpuself_used(u32 cpu, struct vmbus_channel *chn) +{ + struct vmbus_channel *primary = chn->primary_channel; + struct vmbus_channel *sc; + + lockdep_assert_held(&vmbus_connection.channel_mutex); + + if (!primary) + return false; + + if (primary->target_cpu == cpu) + return true; + + list_for_each_entry(sc, &primary->sc_list, sc_list) + if (sc != chn && sc->target_cpu == cpu) + return true; + + return false; +} + +/* * We use this state to statically distribute the channel interrupt load. */ static int next_numa_node_id; @@ -702,6 +726,7 @@ static int next_numa_node_id; static void init_vp_index(struct vmbus_channel *channel) { bool perf_chn = hv_is_perf_channel(channel); + u32 i, ncpu = num_online_cpus(); cpumask_var_t available_mask; struct cpumask *alloced_mask; u32 target_cpu; @@ -724,31 +749,38 @@ static void init_vp_index(struct vmbus_channel *channel) return; } - while (true) { - numa_node = next_numa_node_id++; - if (numa_node == nr_node_ids) { - next_numa_node_id = 0; - continue; + for (i = 1; i <= ncpu + 1; i++) { + while (true) { + numa_node = next_numa_node_id++; + if (numa_node == nr_node_ids) { + next_numa_node_id = 0; + continue; + } + if (cpumask_empty(cpumask_of_node(numa_node))) + continue; + break; + } + alloced_mask = &hv_context.hv_numa_map[numa_node]; + + if (cpumask_weight(alloced_mask) == + cpumask_weight(cpumask_of_node(numa_node))) { + /* + * We have cycled through all the CPUs in the node; + * reset the alloced map. + */ + cpumask_clear(alloced_mask); } - if (cpumask_empty(cpumask_of_node(numa_node))) - continue; - break; - } - alloced_mask = &hv_context.hv_numa_map[numa_node]; - if (cpumask_weight(alloced_mask) == - cpumask_weight(cpumask_of_node(numa_node))) { - /* - * We have cycled through all the CPUs in the node; - * reset the alloced map. 
- */ - cpumask_clear(alloced_mask); - } + cpumask_xor(available_mask, alloced_mask, + cpumask_of_node(numa_node)); - cpumask_xor(available_mask, alloced_mask, cpumask_of_node(numa_node)); + target_cpu = cpumask_first(available_mask); + cpumask_set_cpu(target_cpu, alloced_mask); - target_cpu = cpumask_first(available_mask); - cpumask_set_cpu(target_cpu, alloced_mask); + if (channel->offermsg.offer.sub_channel_index >= ncpu || + i > ncpu || !hv_cpuself_used(target_cpu, channel)) + break; + } channel->target_cpu = target_cpu; diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c index dd20b01771c4..235f9bdaeaf2 100644 --- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c +++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c @@ -379,6 +379,7 @@ static void arm_smmu_cmdq_skip_err(struct arm_smmu_device *smmu) switch (idx) { case CMDQ_ERR_CERROR_ABT_IDX: dev_err(smmu->dev, "retrying command fetch\n"); + return; case CMDQ_ERR_CERROR_NONE_IDX: return; case CMDQ_ERR_CERROR_ATC_INV_IDX: diff --git a/drivers/iommu/arm/arm-smmu/qcom_iommu.c b/drivers/iommu/arm/arm-smmu/qcom_iommu.c index 25ed444ff94d..021cf8f65ffc 100644 --- a/drivers/iommu/arm/arm-smmu/qcom_iommu.c +++ b/drivers/iommu/arm/arm-smmu/qcom_iommu.c @@ -849,12 +849,10 @@ static int qcom_iommu_device_probe(struct platform_device *pdev) ret = iommu_device_register(&qcom_iommu->iommu, &qcom_iommu_ops, dev); if (ret) { dev_err(dev, "Failed to register iommu\n"); - goto err_sysfs_remove; + return ret; } - ret = bus_set_iommu(&platform_bus_type, &qcom_iommu_ops); - if (ret) - goto err_unregister_device; + bus_set_iommu(&platform_bus_type, &qcom_iommu_ops); if (qcom_iommu->local_base) { pm_runtime_get_sync(dev); @@ -863,13 +861,6 @@ static int qcom_iommu_device_probe(struct platform_device *pdev) } return 0; - -err_unregister_device: - iommu_device_unregister(&qcom_iommu->iommu); - -err_sysfs_remove: - iommu_device_sysfs_remove(&qcom_iommu->iommu); - return ret; } static int qcom_iommu_device_remove(struct platform_device *pdev) diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c index a6a07d985709..dd22fc7d5176 100644 --- a/drivers/iommu/intel/iommu.c +++ b/drivers/iommu/intel/iommu.c @@ -2429,10 +2429,11 @@ __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn, return 0; } -static void domain_context_clear_one(struct intel_iommu *iommu, u8 bus, u8 devfn) +static void domain_context_clear_one(struct device_domain_info *info, u8 bus, u8 devfn) { - unsigned long flags; + struct intel_iommu *iommu = info->iommu; struct context_entry *context; + unsigned long flags; u16 did_old; if (!iommu) @@ -2444,7 +2445,16 @@ static void domain_context_clear_one(struct intel_iommu *iommu, u8 bus, u8 devfn spin_unlock_irqrestore(&iommu->lock, flags); return; } - did_old = context_domain_id(context); + + if (sm_supported(iommu)) { + if (hw_pass_through && domain_type_is_si(info->domain)) + did_old = FLPT_DEFAULT_DID; + else + did_old = info->domain->iommu_did[iommu->seq_id]; + } else { + did_old = context_domain_id(context); + } + context_clear_entry(context); __iommu_flush_cache(iommu, context, sizeof(*context)); spin_unlock_irqrestore(&iommu->lock, flags); @@ -2462,6 +2472,8 @@ static void domain_context_clear_one(struct intel_iommu *iommu, u8 bus, u8 devfn 0, 0, DMA_TLB_DSI_FLUSH); + + __iommu_flush_dev_iotlb(info, 0, MAX_AGAW_PFN_WIDTH); } static inline void unlink_domain_info(struct device_domain_info *info) @@ -4425,9 +4437,9 @@ out_free_dmar: static int 
domain_context_clear_one_cb(struct pci_dev *pdev, u16 alias, void *opaque) { - struct intel_iommu *iommu = opaque; + struct device_domain_info *info = opaque; - domain_context_clear_one(iommu, PCI_BUS_NUM(alias), alias & 0xff); + domain_context_clear_one(info, PCI_BUS_NUM(alias), alias & 0xff); return 0; } @@ -4437,12 +4449,13 @@ static int domain_context_clear_one_cb(struct pci_dev *pdev, u16 alias, void *op * devices, unbinding the driver from any one of them will possibly leave * the others unable to operate. */ -static void domain_context_clear(struct intel_iommu *iommu, struct device *dev) +static void domain_context_clear(struct device_domain_info *info) { - if (!iommu || !dev || !dev_is_pci(dev)) + if (!info->iommu || !info->dev || !dev_is_pci(info->dev)) return; - pci_for_each_dma_alias(to_pci_dev(dev), &domain_context_clear_one_cb, iommu); + pci_for_each_dma_alias(to_pci_dev(info->dev), + &domain_context_clear_one_cb, info); } static void __dmar_remove_one_dev_info(struct device_domain_info *info) @@ -4459,14 +4472,13 @@ static void __dmar_remove_one_dev_info(struct device_domain_info *info) iommu = info->iommu; domain = info->domain; - if (info->dev) { + if (info->dev && !dev_is_real_dma_subdevice(info->dev)) { if (dev_is_pci(info->dev) && sm_supported(iommu)) intel_pasid_tear_down_entry(iommu, info->dev, PASID_RID2PASID, false); iommu_disable_dev_iotlb(info); - if (!dev_is_real_dma_subdevice(info->dev)) - domain_context_clear(iommu, info->dev); + domain_context_clear(info); intel_pasid_free_table(info->dev); } diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c index 94b9d8e5b9a4..9febfb7f3025 100644 --- a/drivers/iommu/rockchip-iommu.c +++ b/drivers/iommu/rockchip-iommu.c @@ -544,12 +544,14 @@ static inline u32 rk_dma_addr_dte(dma_addr_t dt_dma) } #define DT_HI_MASK GENMASK_ULL(39, 32) +#define DTE_BASE_HI_MASK GENMASK(11, 4) #define DT_SHIFT 28 static inline phys_addr_t rk_dte_addr_phys_v2(u32 addr) { - return (phys_addr_t)(addr & RK_DTE_PT_ADDRESS_MASK) | - ((addr & DT_HI_MASK) << DT_SHIFT); + u64 addr64 = addr; + return (phys_addr_t)(addr64 & RK_DTE_PT_ADDRESS_MASK) | + ((addr64 & DTE_BASE_HI_MASK) << DT_SHIFT); } static inline u32 rk_dma_addr_dte_v2(dma_addr_t dt_dma) diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c index 9890a1532cb0..ce8aed562929 100644 --- a/drivers/mmc/core/block.c +++ b/drivers/mmc/core/block.c @@ -28,6 +28,7 @@ #include <linux/errno.h> #include <linux/hdreg.h> #include <linux/kdev_t.h> +#include <linux/kref.h> #include <linux/blkdev.h> #include <linux/cdev.h> #include <linux/mutex.h> @@ -111,7 +112,7 @@ struct mmc_blk_data { #define MMC_BLK_CMD23 (1 << 0) /* Can do SET_BLOCK_COUNT for multiblock */ #define MMC_BLK_REL_WR (1 << 1) /* MMC Reliable write support */ - unsigned int usage; + struct kref kref; unsigned int read_only; unsigned int part_type; unsigned int reset_done; @@ -181,10 +182,8 @@ static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk) mutex_lock(&open_lock); md = disk->private_data; - if (md && md->usage == 0) + if (md && !kref_get_unless_zero(&md->kref)) md = NULL; - if (md) - md->usage++; mutex_unlock(&open_lock); return md; @@ -196,18 +195,25 @@ static inline int mmc_get_devidx(struct gendisk *disk) return devidx; } -static void mmc_blk_put(struct mmc_blk_data *md) +static void mmc_blk_kref_release(struct kref *ref) { - mutex_lock(&open_lock); - md->usage--; - if (md->usage == 0) { - int devidx = mmc_get_devidx(md->disk); + struct mmc_blk_data *md = container_of(ref, struct 
mmc_blk_data, kref); + int devidx; - ida_simple_remove(&mmc_blk_ida, devidx); - put_disk(md->disk); - kfree(md); - } + devidx = mmc_get_devidx(md->disk); + ida_simple_remove(&mmc_blk_ida, devidx); + + mutex_lock(&open_lock); + md->disk->private_data = NULL; mutex_unlock(&open_lock); + + put_disk(md->disk); + kfree(md); +} + +static void mmc_blk_put(struct mmc_blk_data *md) +{ + kref_put(&md->kref, mmc_blk_kref_release); } static ssize_t power_ro_lock_show(struct device *dev, @@ -2327,7 +2333,8 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card, INIT_LIST_HEAD(&md->part); INIT_LIST_HEAD(&md->rpmbs); - md->usage = 1; + kref_init(&md->kref); + md->queue.blkdata = md; md->disk->major = MMC_BLOCK_MAJOR; diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c index eda4a1892c33..0475d96047c4 100644 --- a/drivers/mmc/core/host.c +++ b/drivers/mmc/core/host.c @@ -75,7 +75,8 @@ static void mmc_host_classdev_release(struct device *dev) { struct mmc_host *host = cls_dev_to_mmc_host(dev); wakeup_source_unregister(host->ws); - ida_simple_remove(&mmc_host_ida, host->index); + if (of_alias_get_id(host->parent->of_node, "mmc") < 0) + ida_simple_remove(&mmc_host_ida, host->index); kfree(host); } @@ -502,7 +503,7 @@ static int mmc_first_nonreserved_index(void) */ struct mmc_host *mmc_alloc_host(int extra, struct device *dev) { - int err; + int index; struct mmc_host *host; int alias_id, min_idx, max_idx; @@ -515,20 +516,19 @@ struct mmc_host *mmc_alloc_host(int extra, struct device *dev) alias_id = of_alias_get_id(dev->of_node, "mmc"); if (alias_id >= 0) { - min_idx = alias_id; - max_idx = alias_id + 1; + index = alias_id; } else { min_idx = mmc_first_nonreserved_index(); max_idx = 0; - } - err = ida_simple_get(&mmc_host_ida, min_idx, max_idx, GFP_KERNEL); - if (err < 0) { - kfree(host); - return NULL; + index = ida_simple_get(&mmc_host_ida, min_idx, max_idx, GFP_KERNEL); + if (index < 0) { + kfree(host); + return NULL; + } } - host->index = err; + host->index = index; dev_set_name(&host->class_dev, "mmc%d", host->index); host->ws = wakeup_source_register(NULL, dev_name(&host->class_dev)); diff --git a/drivers/mmc/host/jz4740_mmc.c b/drivers/mmc/host/jz4740_mmc.c index 0db17bcc9c16..cb1a64a5c256 100644 --- a/drivers/mmc/host/jz4740_mmc.c +++ b/drivers/mmc/host/jz4740_mmc.c @@ -789,6 +789,8 @@ static irqreturn_t jz_mmc_irq_worker(int irq, void *devid) break; } } + fallthrough; + case JZ4740_MMC_STATE_DONE: break; } diff --git a/drivers/mtd/chips/cfi_util.c b/drivers/mtd/chips/cfi_util.c index 99b7986002f0..6a6a2a21d2ed 100644 --- a/drivers/mtd/chips/cfi_util.c +++ b/drivers/mtd/chips/cfi_util.c @@ -108,8 +108,8 @@ map_word cfi_build_cmd(u_long cmd, struct map_info *map, struct cfi_private *cfi #if BITS_PER_LONG >= 64 case 8: onecmd |= (onecmd << (chip_mode * 32)); -#endif fallthrough; +#endif case 4: onecmd |= (onecmd << (chip_mode * 16)); fallthrough; @@ -164,8 +164,8 @@ unsigned long cfi_merge_status(map_word val, struct map_info *map, #if BITS_PER_LONG >= 64 case 8: res |= (onestat >> (chip_mode * 32)); -#endif fallthrough; +#endif case 4: res |= (onestat >> (chip_mode * 16)); fallthrough; diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index d22d78303311..31730efa7538 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -3450,7 +3450,9 @@ static int bond_master_netdev_event(unsigned long event, return bond_event_changename(event_bond); case NETDEV_UNREGISTER: bond_remove_proc_entry(event_bond); +#ifdef 
CONFIG_XFRM_OFFLOAD xfrm_dev_state_flush(dev_net(bond_dev), bond_dev, true); +#endif /* CONFIG_XFRM_OFFLOAD */ break; case NETDEV_REGISTER: bond_create_proc_entry(event_bond); diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c index 93136f7e69f5..69f21b71614c 100644 --- a/drivers/net/dsa/mt7530.c +++ b/drivers/net/dsa/mt7530.c @@ -366,6 +366,8 @@ mt7530_fdb_write(struct mt7530_priv *priv, u16 vid, int i; reg[1] |= vid & CVID_MASK; + if (vid > 1) + reg[1] |= ATA2_IVL; reg[2] |= (aging & AGE_TIMER_MASK) << AGE_TIMER; reg[2] |= (port_mask & PORT_MAP_MASK) << PORT_MAP; /* STATIC_ENT indicate that entry is static wouldn't diff --git a/drivers/net/dsa/mt7530.h b/drivers/net/dsa/mt7530.h index 334d610a503d..b19b389ff10a 100644 --- a/drivers/net/dsa/mt7530.h +++ b/drivers/net/dsa/mt7530.h @@ -79,6 +79,7 @@ enum mt753x_bpdu_port_fw { #define STATIC_EMP 0 #define STATIC_ENT 3 #define MT7530_ATA2 0x78 +#define ATA2_IVL BIT(15) /* Register for address table write data */ #define MT7530_ATWD 0x7c diff --git a/drivers/net/dsa/mv88e6xxx/Kconfig b/drivers/net/dsa/mv88e6xxx/Kconfig index 05af632b0f59..634a48e6616b 100644 --- a/drivers/net/dsa/mv88e6xxx/Kconfig +++ b/drivers/net/dsa/mv88e6xxx/Kconfig @@ -12,7 +12,7 @@ config NET_DSA_MV88E6XXX config NET_DSA_MV88E6XXX_PTP bool "PTP support for Marvell 88E6xxx" default n - depends on PTP_1588_CLOCK + depends on NET_DSA_MV88E6XXX && PTP_1588_CLOCK help Say Y to enable PTP hardware timestamping on Marvell 88E6xxx switch chips that support it. diff --git a/drivers/net/dsa/sja1105/sja1105_main.c b/drivers/net/dsa/sja1105/sja1105_main.c index 6618abba23b3..07bb65a36083 100644 --- a/drivers/net/dsa/sja1105/sja1105_main.c +++ b/drivers/net/dsa/sja1105/sja1105_main.c @@ -397,6 +397,12 @@ static int sja1105_init_static_vlan(struct sja1105_private *priv) if (dsa_is_cpu_port(ds, port)) v->pvid = true; list_add(&v->list, &priv->dsa_8021q_vlans); + + v = kmemdup(v, sizeof(*v), GFP_KERNEL); + if (!v) + return -ENOMEM; + + list_add(&v->list, &priv->bridge_vlans); } ((struct sja1105_vlan_lookup_entry *)table->entries)[0] = pvid; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index f56245eeef7b..4db162cee911 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -1671,11 +1671,16 @@ static inline struct sk_buff *bnxt_tpa_end(struct bnxt *bp, if ((tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) && (skb->dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)) { - u16 vlan_proto = tpa_info->metadata >> - RX_CMP_FLAGS2_METADATA_TPID_SFT; + __be16 vlan_proto = htons(tpa_info->metadata >> + RX_CMP_FLAGS2_METADATA_TPID_SFT); u16 vtag = tpa_info->metadata & RX_CMP_FLAGS2_METADATA_TCI_MASK; - __vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag); + if (eth_type_vlan(vlan_proto)) { + __vlan_hwaccel_put_tag(skb, vlan_proto, vtag); + } else { + dev_kfree_skb(skb); + return NULL; + } } skb_checksum_none_assert(skb); @@ -1897,9 +1902,15 @@ static int bnxt_rx_pkt(struct bnxt *bp, struct bnxt_cp_ring_info *cpr, (skb->dev->features & BNXT_HW_FEATURE_VLAN_ALL_RX)) { u32 meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data); u16 vtag = meta_data & RX_CMP_FLAGS2_METADATA_TCI_MASK; - u16 vlan_proto = meta_data >> RX_CMP_FLAGS2_METADATA_TPID_SFT; + __be16 vlan_proto = htons(meta_data >> + RX_CMP_FLAGS2_METADATA_TPID_SFT); - __vlan_hwaccel_put_tag(skb, htons(vlan_proto), vtag); + if (eth_type_vlan(vlan_proto)) { + __vlan_hwaccel_put_tag(skb, vlan_proto, vtag); + } else { + dev_kfree_skb(skb); + 
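The two bnxt hunks around this point (bnxt_tpa_end() and bnxt_rx_pkt()) stop passing the hardware-reported TPID straight to __vlan_hwaccel_put_tag(); they first check it with eth_type_vlan(), which accepts only the 802.1Q and 802.1ad ethertypes, and drop the packet otherwise. A standalone model of that check, assuming those are the only TPIDs that should be treated as VLAN tags:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>   /* htons/ntohs */

#define ETH_P_8021Q  0x8100
#define ETH_P_8021AD 0x88A8

/* Mirrors the intent of the kernel's eth_type_vlan() helper. */
static bool tpid_is_vlan(uint16_t proto_be)
{
    switch (ntohs(proto_be)) {
    case ETH_P_8021Q:
    case ETH_P_8021AD:
        return true;
    default:
        return false;
    }
}

int main(void)
{
    printf("%d %d\n", tpid_is_vlan(htons(0x8100)), tpid_is_vlan(htons(0x0800)));
    return 0;
}

Anything else found in the metadata field is treated as corrupt and the frame is dropped rather than blindly stuffed into the skb's VLAN accessors.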
goto next_rx; + } } skb_checksum_none_assert(skb); @@ -7563,8 +7574,12 @@ static int __bnxt_hwrm_func_qcaps(struct bnxt *bp) bp->flags &= ~BNXT_FLAG_WOL_CAP; if (flags & FUNC_QCAPS_RESP_FLAGS_WOL_MAGICPKT_SUPPORTED) bp->flags |= BNXT_FLAG_WOL_CAP; - if (flags & FUNC_QCAPS_RESP_FLAGS_PTP_SUPPORTED) + if (flags & FUNC_QCAPS_RESP_FLAGS_PTP_SUPPORTED) { __bnxt_hwrm_ptp_qcfg(bp); + } else { + kfree(bp->ptp_cfg); + bp->ptp_cfg = NULL; + } } else { #ifdef CONFIG_BNXT_SRIOV struct bnxt_vf_info *vf = &bp->vf; @@ -10123,7 +10138,6 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init) } } - bnxt_ptp_start(bp); rc = bnxt_init_nic(bp, irq_re_init); if (rc) { netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc); @@ -10197,6 +10211,12 @@ int bnxt_half_open_nic(struct bnxt *bp) { int rc = 0; + if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) { + netdev_err(bp->dev, "A previous firmware reset has not completed, aborting half open\n"); + rc = -ENODEV; + goto half_open_err; + } + rc = bnxt_alloc_mem(bp, false); if (rc) { netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc); @@ -10256,9 +10276,16 @@ static int bnxt_open(struct net_device *dev) rc = bnxt_hwrm_if_change(bp, true); if (rc) return rc; + + if (bnxt_ptp_init(bp)) { + netdev_warn(dev, "PTP initialization failed.\n"); + kfree(bp->ptp_cfg); + bp->ptp_cfg = NULL; + } rc = __bnxt_open_nic(bp, true, true); if (rc) { bnxt_hwrm_if_change(bp, false); + bnxt_ptp_clear(bp); } else { if (test_and_clear_bit(BNXT_STATE_FW_RESET_DET, &bp->state)) { if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) { @@ -10349,6 +10376,7 @@ static int bnxt_close(struct net_device *dev) { struct bnxt *bp = netdev_priv(dev); + bnxt_ptp_clear(bp); bnxt_hwmon_close(bp); bnxt_close_nic(bp, true, true); bnxt_hwrm_shutdown_link(bp); @@ -11335,6 +11363,7 @@ static void bnxt_fw_reset_close(struct bnxt *bp) bnxt_clear_int_mode(bp); pci_disable_device(bp->pdev); } + bnxt_ptp_clear(bp); __bnxt_close_nic(bp, true, false); bnxt_vf_reps_free(bp); bnxt_clear_int_mode(bp); @@ -11959,10 +11988,21 @@ static bool bnxt_fw_reset_timeout(struct bnxt *bp) (bp->fw_reset_max_dsecs * HZ / 10)); } +static void bnxt_fw_reset_abort(struct bnxt *bp, int rc) +{ + clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state); + if (bp->fw_reset_state != BNXT_FW_RESET_STATE_POLL_VF) { + bnxt_ulp_start(bp, rc); + bnxt_dl_health_status_update(bp, false); + } + bp->fw_reset_state = 0; + dev_close(bp->dev); +} + static void bnxt_fw_reset_task(struct work_struct *work) { struct bnxt *bp = container_of(work, struct bnxt, fw_reset_task.work); - int rc; + int rc = 0; if (!test_bit(BNXT_STATE_IN_FW_RESET, &bp->state)) { netdev_err(bp->dev, "bnxt_fw_reset_task() called when not in fw reset mode!\n"); @@ -11992,6 +12032,11 @@ static void bnxt_fw_reset_task(struct work_struct *work) } bp->fw_reset_timestamp = jiffies; rtnl_lock(); + if (test_bit(BNXT_STATE_ABORT_ERR, &bp->state)) { + bnxt_fw_reset_abort(bp, rc); + rtnl_unlock(); + return; + } bnxt_fw_reset_close(bp); if (bp->fw_cap & BNXT_FW_CAP_ERR_RECOVER_RELOAD) { bp->fw_reset_state = BNXT_FW_RESET_STATE_POLL_FW_DOWN; @@ -12039,6 +12084,7 @@ static void bnxt_fw_reset_task(struct work_struct *work) if (val == 0xffff) { if (bnxt_fw_reset_timeout(bp)) { netdev_err(bp->dev, "Firmware reset aborted, PCI config space invalid\n"); + rc = -ETIMEDOUT; goto fw_reset_abort; } bnxt_queue_fw_reset_work(bp, HZ / 1000); @@ -12048,6 +12094,7 @@ static void bnxt_fw_reset_task(struct work_struct *work) clear_bit(BNXT_STATE_FW_FATAL_COND, &bp->state); if 
(pci_enable_device(bp->pdev)) { netdev_err(bp->dev, "Cannot re-enable PCI device\n"); + rc = -ENODEV; goto fw_reset_abort; } pci_set_master(bp->pdev); @@ -12074,9 +12121,10 @@ static void bnxt_fw_reset_task(struct work_struct *work) } rc = bnxt_open(bp->dev); if (rc) { - netdev_err(bp->dev, "bnxt_open_nic() failed\n"); - clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state); - dev_close(bp->dev); + netdev_err(bp->dev, "bnxt_open() failed during FW reset\n"); + bnxt_fw_reset_abort(bp, rc); + rtnl_unlock(); + return; } bp->fw_reset_state = 0; @@ -12103,12 +12151,8 @@ fw_reset_abort_status: netdev_err(bp->dev, "fw_health_status 0x%x\n", sts); } fw_reset_abort: - clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state); - if (bp->fw_reset_state != BNXT_FW_RESET_STATE_POLL_VF) - bnxt_dl_health_status_update(bp, false); - bp->fw_reset_state = 0; rtnl_lock(); - dev_close(bp->dev); + bnxt_fw_reset_abort(bp, rc); rtnl_unlock(); } @@ -12662,7 +12706,6 @@ static void bnxt_remove_one(struct pci_dev *pdev) if (BNXT_PF(bp)) devlink_port_type_clear(&bp->dl_port); - bnxt_ptp_clear(bp); pci_disable_pcie_error_reporting(pdev); unregister_netdev(dev); clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state); @@ -13246,11 +13289,6 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) rc); } - if (bnxt_ptp_init(bp)) { - netdev_warn(dev, "PTP initialization failed.\n"); - kfree(bp->ptp_cfg); - bp->ptp_cfg = NULL; - } bnxt_inv_fw_health_reg(bp); bnxt_dl_register(bp); @@ -13436,7 +13474,8 @@ static pci_ers_result_t bnxt_io_error_detected(struct pci_dev *pdev, if (netif_running(netdev)) bnxt_close(netdev); - pci_disable_device(pdev); + if (pci_is_enabled(pdev)) + pci_disable_device(pdev); bnxt_free_ctx_mem(bp); kfree(bp->ctx); bp->ctx = NULL; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c index 8e90224c43a2..8a68df4d9e59 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c @@ -433,6 +433,7 @@ static int bnxt_hwrm_queue_dscp2pri_cfg(struct bnxt *bp, struct dcb_app *app, static int bnxt_ets_validate(struct bnxt *bp, struct ieee_ets *ets, u8 *tc) { int total_ets_bw = 0; + bool zero = false; u8 max_tc = 0; int i; @@ -453,13 +454,20 @@ static int bnxt_ets_validate(struct bnxt *bp, struct ieee_ets *ets, u8 *tc) break; case IEEE_8021QAZ_TSA_ETS: total_ets_bw += ets->tc_tx_bw[i]; + zero = zero || !ets->tc_tx_bw[i]; break; default: return -ENOTSUPP; } } - if (total_ets_bw > 100) + if (total_ets_bw > 100) { + netdev_warn(bp->dev, "rejecting ETS config exceeding available bandwidth\n"); return -EINVAL; + } + if (zero && total_ets_bw == 100) { + netdev_warn(bp->dev, "rejecting ETS config starving a TC\n"); + return -EINVAL; + } if (max_tc >= bp->max_tc) *tc = bp->max_tc; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c index f698b6bd4ff8..9089e7f3fbd4 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c @@ -385,22 +385,6 @@ int bnxt_get_rx_ts_p5(struct bnxt *bp, u64 *ts, u32 pkt_ts) return 0; } -void bnxt_ptp_start(struct bnxt *bp) -{ - struct bnxt_ptp_cfg *ptp = bp->ptp_cfg; - - if (!ptp) - return; - - if (bp->flags & BNXT_FLAG_CHIP_P5) { - spin_lock_bh(&ptp->ptp_lock); - ptp->current_time = bnxt_refclk_read(bp, NULL); - WRITE_ONCE(ptp->old_time, ptp->current_time); - spin_unlock_bh(&ptp->ptp_lock); - ptp_schedule_worker(ptp->ptp_clock, 0); - } -} - static const struct ptp_clock_info 
bnxt_ptp_caps = { .owner = THIS_MODULE, .name = "bnxt clock", @@ -450,7 +434,13 @@ int bnxt_ptp_init(struct bnxt *bp) bnxt_unmap_ptp_regs(bp); return err; } - + if (bp->flags & BNXT_FLAG_CHIP_P5) { + spin_lock_bh(&ptp->ptp_lock); + ptp->current_time = bnxt_refclk_read(bp, NULL); + WRITE_ONCE(ptp->old_time, ptp->current_time); + spin_unlock_bh(&ptp->ptp_lock); + ptp_schedule_worker(ptp->ptp_clock, 0); + } return 0; } diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h index 6b6245750e20..4135ea3ec788 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.h @@ -75,7 +75,6 @@ int bnxt_hwtstamp_set(struct net_device *dev, struct ifreq *ifr); int bnxt_hwtstamp_get(struct net_device *dev, struct ifreq *ifr); int bnxt_get_tx_ts_p5(struct bnxt *bp, struct sk_buff *skb); int bnxt_get_rx_ts_p5(struct bnxt *bp, u64 *ts, u32 pkt_ts); -void bnxt_ptp_start(struct bnxt *bp); int bnxt_ptp_init(struct bnxt *bp); void bnxt_ptp_clear(struct bnxt *bp); #endif diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c index a918e374f3c5..187ff643ad2a 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c @@ -479,16 +479,17 @@ struct bnxt_en_dev *bnxt_ulp_probe(struct net_device *dev) if (!edev) return ERR_PTR(-ENOMEM); edev->en_ops = &bnxt_en_ops_tbl; - if (bp->flags & BNXT_FLAG_ROCEV1_CAP) - edev->flags |= BNXT_EN_FLAG_ROCEV1_CAP; - if (bp->flags & BNXT_FLAG_ROCEV2_CAP) - edev->flags |= BNXT_EN_FLAG_ROCEV2_CAP; edev->net = dev; edev->pdev = bp->pdev; edev->l2_db_size = bp->db_size; edev->l2_db_size_nc = bp->db_size; bp->edev = edev; } + edev->flags &= ~BNXT_EN_FLAG_ROCE_CAP; + if (bp->flags & BNXT_FLAG_ROCEV1_CAP) + edev->flags |= BNXT_EN_FLAG_ROCEV1_CAP; + if (bp->flags & BNXT_FLAG_ROCEV2_CAP) + edev->flags |= BNXT_EN_FLAG_ROCEV2_CAP; return bp->edev; } EXPORT_SYMBOL(bnxt_ulp_probe); diff --git a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c index 4cddd628d41b..9ed3d1ab2ca5 100644 --- a/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c +++ b/drivers/net/ethernet/cavium/liquidio/cn23xx_pf_device.c @@ -420,7 +420,7 @@ static int cn23xx_pf_setup_global_input_regs(struct octeon_device *oct) * bits 32:47 indicate the PVF num. */ for (q_no = 0; q_no < ern; q_no++) { - reg_val = oct->pcie_port << CN23XX_PKT_INPUT_CTL_MAC_NUM_POS; + reg_val = (u64)oct->pcie_port << CN23XX_PKT_INPUT_CTL_MAC_NUM_POS; /* for VF assigned queues. 
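A few hunks back, the liquidio change in cn23xx_pf_setup_global_input_regs() adds a (u64) cast before shifting oct->pcie_port into a 64-bit register value; without it the shift is evaluated in 32 bits and any bits pushed past bit 31 are lost before the widening assignment. A small demonstration follows; the shift amount is chosen only to make the truncation visible and is not taken from the register layout.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t port = 3;
    const unsigned int shift = 31;   /* illustrative position only */

    uint64_t truncated = port << shift;            /* evaluated in 32 bits, top bit lost */
    uint64_t widened   = (uint64_t)port << shift;  /* widen first, then shift */

    printf("truncated=%#llx widened=%#llx\n",
           (unsigned long long)truncated, (unsigned long long)widened);
    return 0;
}

The same reasoning applies to any u32-typed value shifted toward the upper half of a u64 register image.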
*/ if (q_no < oct->sriov_info.pf_srn) { diff --git a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c index 2138239facfd..3d021edb78e6 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c +++ b/drivers/net/ethernet/freescale/dpaa2/dpaa2-switch.c @@ -2809,32 +2809,32 @@ static int dpaa2_switch_ctrl_if_setup(struct ethsw_core *ethsw) if (err) return err; - err = dpaa2_switch_seed_bp(ethsw); - if (err) - goto err_free_dpbp; - err = dpaa2_switch_alloc_rings(ethsw); if (err) - goto err_drain_dpbp; + goto err_free_dpbp; err = dpaa2_switch_setup_dpio(ethsw); if (err) goto err_destroy_rings; + err = dpaa2_switch_seed_bp(ethsw); + if (err) + goto err_deregister_dpio; + err = dpsw_ctrl_if_enable(ethsw->mc_io, 0, ethsw->dpsw_handle); if (err) { dev_err(ethsw->dev, "dpsw_ctrl_if_enable err %d\n", err); - goto err_deregister_dpio; + goto err_drain_dpbp; } return 0; +err_drain_dpbp: + dpaa2_switch_drain_bp(ethsw); err_deregister_dpio: dpaa2_switch_free_dpio(ethsw); err_destroy_rings: dpaa2_switch_destroy_rings(ethsw); -err_drain_dpbp: - dpaa2_switch_drain_bp(ethsw); err_free_dpbp: dpaa2_switch_free_dpbp(ethsw); diff --git a/drivers/net/ethernet/freescale/fman/mac.c b/drivers/net/ethernet/freescale/fman/mac.c index 46ecb42f2ef8..d9fc5c456bf3 100644 --- a/drivers/net/ethernet/freescale/fman/mac.c +++ b/drivers/net/ethernet/freescale/fman/mac.c @@ -524,6 +524,7 @@ static void setup_memac(struct mac_device *mac_dev) | SUPPORTED_Autoneg \ | SUPPORTED_Pause \ | SUPPORTED_Asym_Pause \ + | SUPPORTED_FIBRE \ | SUPPORTED_MII) static DEFINE_MUTEX(eth_lock); diff --git a/drivers/net/ethernet/hisilicon/hip04_eth.c b/drivers/net/ethernet/hisilicon/hip04_eth.c index 12f6c2442a7a..e53512f6878a 100644 --- a/drivers/net/ethernet/hisilicon/hip04_eth.c +++ b/drivers/net/ethernet/hisilicon/hip04_eth.c @@ -131,7 +131,7 @@ /* buf unit size is cache_line_size, which is 64, so the shift is 6 */ #define PPE_BUF_SIZE_SHIFT 6 #define PPE_TX_BUF_HOLD BIT(31) -#define CACHE_LINE_MASK 0x3F +#define SOC_CACHE_LINE_MASK 0x3F #else #define PPE_CFG_QOS_VMID_GRP_SHIFT 8 #define PPE_CFG_RX_CTRL_ALIGN_SHIFT 11 @@ -531,8 +531,8 @@ hip04_mac_start_xmit(struct sk_buff *skb, struct net_device *ndev) #if defined(CONFIG_HI13X1_GMAC) desc->cfg = (__force u32)cpu_to_be32(TX_CLEAR_WB | TX_FINISH_CACHE_INV | TX_RELEASE_TO_PPE | priv->port << TX_POOL_SHIFT); - desc->data_offset = (__force u32)cpu_to_be32(phys & CACHE_LINE_MASK); - desc->send_addr = (__force u32)cpu_to_be32(phys & ~CACHE_LINE_MASK); + desc->data_offset = (__force u32)cpu_to_be32(phys & SOC_CACHE_LINE_MASK); + desc->send_addr = (__force u32)cpu_to_be32(phys & ~SOC_CACHE_LINE_MASK); #else desc->cfg = (__force u32)cpu_to_be32(TX_CLEAR_WB | TX_FINISH_CACHE_INV); desc->send_addr = (__force u32)cpu_to_be32(phys); diff --git a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h index 0a6cda309b24..aa86a81c8f4a 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h +++ b/drivers/net/ethernet/hisilicon/hns3/hclge_mbx.h @@ -98,6 +98,7 @@ struct hclgevf_mbx_resp_status { u32 origin_mbx_msg; bool received_resp; int resp_status; + u16 match_id; u8 additional_info[HCLGE_MBX_MAX_RESP_DATA_SIZE]; }; @@ -143,7 +144,8 @@ struct hclge_mbx_vf_to_pf_cmd { u8 mbx_need_resp; u8 rsv1[1]; u8 msg_len; - u8 rsv2[3]; + u8 rsv2; + u16 match_id; struct hclge_vf_to_pf_msg msg; }; @@ -153,7 +155,8 @@ struct hclge_mbx_pf_to_vf_cmd { u8 dest_vfid; u8 rsv[3]; u8 msg_len; - u8 rsv1[3]; + u8 rsv1; + u16 
match_id; struct hclge_pf_to_vf_msg msg; }; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index dd3354a57c62..ebeaf12e409b 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -9552,13 +9552,17 @@ static int hclge_set_vport_vlan_filter(struct hclge_vport *vport, bool enable) if (ret) return ret; - if (test_bit(HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B, ae_dev->caps)) + if (test_bit(HNAE3_DEV_SUPPORT_PORT_VLAN_BYPASS_B, ae_dev->caps)) { ret = hclge_set_port_vlan_filter_bypass(hdev, vport->vport_id, !enable); - else if (!vport->vport_id) + } else if (!vport->vport_id) { + if (test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps)) + enable = false; + ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT, HCLGE_FILTER_FE_INGRESS, enable, 0); + } return ret; } diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c index e10a2c36b706..c0a478ae9583 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_mbx.c @@ -47,6 +47,7 @@ static int hclge_gen_resp_to_vf(struct hclge_vport *vport, resp_pf_to_vf->dest_vfid = vf_to_pf_req->mbx_src_vfid; resp_pf_to_vf->msg_len = vf_to_pf_req->msg_len; + resp_pf_to_vf->match_id = vf_to_pf_req->match_id; resp_pf_to_vf->msg.code = HCLGE_MBX_PF_VF_RESP; resp_pf_to_vf->msg.vf_mbx_msg_code = vf_to_pf_req->msg.code; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c index 52eaf82b7cd7..8784d61e833f 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_main.c @@ -2641,6 +2641,16 @@ static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev) static int hclgevf_init_vlan_config(struct hclgevf_dev *hdev) { + struct hnae3_handle *nic = &hdev->nic; + int ret; + + ret = hclgevf_en_hw_strip_rxvtag(nic, true); + if (ret) { + dev_err(&hdev->pdev->dev, + "failed to enable rx vlan offload, ret = %d\n", ret); + return ret; + } + return hclgevf_set_vlan_filter(&hdev->nic, htons(ETH_P_8021Q), 0, false); } diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c index 9b17735b9f4c..772b2f8acd2e 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3vf/hclgevf_mbx.c @@ -13,6 +13,7 @@ static int hclgevf_resp_to_errno(u16 resp_code) return resp_code ? 
-resp_code : 0; } +#define HCLGEVF_MBX_MATCH_ID_START 1 static void hclgevf_reset_mbx_resp_status(struct hclgevf_dev *hdev) { /* this function should be called with mbx_resp.mbx_mutex held @@ -21,6 +22,10 @@ static void hclgevf_reset_mbx_resp_status(struct hclgevf_dev *hdev) hdev->mbx_resp.received_resp = false; hdev->mbx_resp.origin_mbx_msg = 0; hdev->mbx_resp.resp_status = 0; + hdev->mbx_resp.match_id++; + /* Update match_id and ensure the value of match_id is not zero */ + if (hdev->mbx_resp.match_id == 0) + hdev->mbx_resp.match_id = HCLGEVF_MBX_MATCH_ID_START; memset(hdev->mbx_resp.additional_info, 0, HCLGE_MBX_MAX_RESP_DATA_SIZE); } @@ -115,6 +120,7 @@ int hclgevf_send_mbx_msg(struct hclgevf_dev *hdev, if (need_resp) { mutex_lock(&hdev->mbx_resp.mbx_mutex); hclgevf_reset_mbx_resp_status(hdev); + req->match_id = hdev->mbx_resp.match_id; status = hclgevf_cmd_send(&hdev->hw, &desc, 1); if (status) { dev_err(&hdev->pdev->dev, @@ -211,6 +217,19 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev) resp->additional_info[i] = *temp; temp++; } + + /* If match_id is not zero, it means PF support + * match_id. If the match_id is right, VF get the + * right response, otherwise ignore the response. + * Driver will clear hdev->mbx_resp when send + * next message which need response. + */ + if (req->match_id) { + if (req->match_id == resp->match_id) + resp->received_resp = true; + } else { + resp->received_resp = true; + } break; case HCLGE_MBX_LINK_STAT_CHANGE: case HCLGE_MBX_ASSERTING_RESET: diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index ed77191d19f4..a775c69e4fd7 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -1731,7 +1731,6 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev) tx_send_failed++; tx_dropped++; ret = NETDEV_TX_OK; - ibmvnic_tx_scrq_flush(adapter, tx_scrq); goto out; } @@ -1753,6 +1752,7 @@ static netdev_tx_t ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev) dev_kfree_skb_any(skb); tx_send_failed++; tx_dropped++; + ibmvnic_tx_scrq_flush(adapter, tx_scrq); ret = NETDEV_TX_OK; goto out; } diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 913253f8ecb4..14aea40da50f 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -1825,7 +1825,8 @@ static void ixgbe_dma_sync_frag(struct ixgbe_ring *rx_ring, struct sk_buff *skb) { if (ring_uses_build_skb(rx_ring)) { - unsigned long offset = (unsigned long)(skb->data) & ~PAGE_MASK; + unsigned long mask = (unsigned long)ixgbe_rx_pg_size(rx_ring) - 1; + unsigned long offset = (unsigned long)(skb->data) & mask; dma_sync_single_range_for_cpu(rx_ring->dev, IXGBE_CB(skb)->dma, diff --git a/drivers/net/ethernet/marvell/octeontx2/af/Makefile b/drivers/net/ethernet/marvell/octeontx2/af/Makefile index 1a3455620b38..cc8ac36cf687 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/Makefile +++ b/drivers/net/ethernet/marvell/octeontx2/af/Makefile @@ -10,4 +10,4 @@ obj-$(CONFIG_OCTEONTX2_AF) += rvu_af.o rvu_mbox-y := mbox.o rvu_trace.o rvu_af-y := cgx.o rvu.o rvu_cgx.o rvu_npa.o rvu_nix.o \ rvu_reg.o rvu_npc.o rvu_debugfs.o ptp.o rvu_npc_fs.o \ - rvu_cpt.o rvu_devlink.o rpm.o rvu_cn10k.o + rvu_cpt.o rvu_devlink.o rpm.o rvu_cn10k.o rvu_switch.o diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c index 10cddf1ac7b9..017163fb3cd5 100644 --- 
a/drivers/net/ethernet/marvell/octeontx2/af/rvu.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.c @@ -1314,7 +1314,7 @@ int rvu_mbox_handler_detach_resources(struct rvu *rvu, return rvu_detach_rsrcs(rvu, detach, detach->hdr.pcifunc); } -static int rvu_get_nix_blkaddr(struct rvu *rvu, u16 pcifunc) +int rvu_get_nix_blkaddr(struct rvu *rvu, u16 pcifunc) { struct rvu_pfvf *pfvf = rvu_get_pfvf(rvu, pcifunc); int blkaddr = BLKADDR_NIX0, vf; @@ -2859,6 +2859,12 @@ static int rvu_enable_sriov(struct rvu *rvu) if (!vfs) return 0; + /* LBK channel number 63 is used for switching packets between + * CGX mapped VFs. Hence limit LBK pairs till 62 only. + */ + if (vfs > 62) + vfs = 62; + /* Save VFs number for reference in VF interrupts handlers. * Since interrupts might start arriving during SRIOV enablement * ordinary API cannot be used to get number of enabled VFs. @@ -3001,6 +3007,8 @@ static int rvu_probe(struct pci_dev *pdev, const struct pci_device_id *id) /* Initialize debugfs */ rvu_dbg_init(rvu); + mutex_init(&rvu->rswitch.switch_lock); + return 0; err_dl: rvu_unregister_dl(rvu); diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h index 10e58a5d5861..91503fb2762c 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu.h +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu.h @@ -415,6 +415,16 @@ struct npc_kpu_profile_adapter { size_t kpus; }; +#define RVU_SWITCH_LBK_CHAN 63 + +struct rvu_switch { + struct mutex switch_lock; /* Serialize flow installation */ + u32 used_entries; + u16 *entry2pcifunc; + u16 mode; + u16 start_entry; +}; + struct rvu { void __iomem *afreg_base; void __iomem *pfreg_base; @@ -445,6 +455,7 @@ struct rvu { /* CGX */ #define PF_CGXMAP_BASE 1 /* PF 0 is reserved for RVU PF */ + u16 cgx_mapped_vfs; /* maximum CGX mapped VFs */ u8 cgx_mapped_pfs; u8 cgx_cnt_max; /* CGX port count max */ u8 *pf2cgxlmac_map; /* pf to cgx_lmac map */ @@ -477,6 +488,9 @@ struct rvu { struct rvu_debugfs rvu_dbg; #endif struct rvu_devlink *rvu_dl; + + /* RVU switch implementation over NPC with DMAC rules */ + struct rvu_switch rswitch; }; static inline void rvu_write64(struct rvu *rvu, u64 block, u64 offset, u64 val) @@ -691,6 +705,7 @@ int nix_aq_context_read(struct rvu *rvu, struct nix_hw *nix_hw, struct nix_cn10k_aq_enq_req *aq_req, struct nix_cn10k_aq_enq_rsp *aq_rsp, u16 pcifunc, u8 ctype, u32 qidx); +int rvu_get_nix_blkaddr(struct rvu *rvu, u16 pcifunc); /* NPC APIs */ int rvu_npc_init(struct rvu *rvu); @@ -768,4 +783,10 @@ void rvu_dbg_exit(struct rvu *rvu); static inline void rvu_dbg_init(struct rvu *rvu) {} static inline void rvu_dbg_exit(struct rvu *rvu) {} #endif + +/* RVU Switch */ +void rvu_switch_enable(struct rvu *rvu); +void rvu_switch_disable(struct rvu *rvu); +void rvu_switch_update_rules(struct rvu *rvu, u16 pcifunc); + #endif /* RVU_H */ diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c index 6cc8fbb7190c..fe99ac4a4dd8 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_cgx.c @@ -126,6 +126,7 @@ static int rvu_map_cgx_lmac_pf(struct rvu *rvu) unsigned long lmac_bmap; int size, free_pkind; int cgx, lmac, iter; + int numvfs, hwvfs; if (!cgx_cnt_max) return 0; @@ -166,6 +167,8 @@ static int rvu_map_cgx_lmac_pf(struct rvu *rvu) pkind->pfchan_map[free_pkind] = ((pf) & 0x3F) << 16; rvu_map_cgx_nix_block(rvu, pf, cgx, lmac); rvu->cgx_mapped_pfs++; + rvu_get_pf_numvfs(rvu, pf, &numvfs, 
&hwvfs); + rvu->cgx_mapped_vfs += numvfs; pf++; } } diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c index 370d4ca1e5ed..9b2dfbf90e51 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_debugfs.c @@ -2113,9 +2113,6 @@ static void rvu_print_npc_mcam_info(struct seq_file *s, int entry_acnt, entry_ecnt; int cntr_acnt, cntr_ecnt; - /* Skip PF0 */ - if (!pcifunc) - return; rvu_npc_get_mcam_entry_alloc_info(rvu, pcifunc, blkaddr, &entry_acnt, &entry_ecnt); rvu_npc_get_mcam_counter_alloc_info(rvu, pcifunc, blkaddr, @@ -2298,7 +2295,7 @@ static void rvu_dbg_npc_mcam_show_flows(struct seq_file *s, static void rvu_dbg_npc_mcam_show_action(struct seq_file *s, struct rvu_npc_mcam_rule *rule) { - if (rule->intf == NIX_INTF_TX) { + if (is_npc_intf_tx(rule->intf)) { switch (rule->tx_action.op) { case NIX_TX_ACTIONOP_DROP: seq_puts(s, "\taction: Drop\n"); diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c index 10a98bcb7c54..2688186066d9 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_devlink.c @@ -1364,6 +1364,44 @@ static void rvu_health_reporters_destroy(struct rvu *rvu) rvu_nix_health_reporters_destroy(rvu_dl); } +static int rvu_devlink_eswitch_mode_get(struct devlink *devlink, u16 *mode) +{ + struct rvu_devlink *rvu_dl = devlink_priv(devlink); + struct rvu *rvu = rvu_dl->rvu; + struct rvu_switch *rswitch; + + rswitch = &rvu->rswitch; + *mode = rswitch->mode; + + return 0; +} + +static int rvu_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode, + struct netlink_ext_ack *extack) +{ + struct rvu_devlink *rvu_dl = devlink_priv(devlink); + struct rvu *rvu = rvu_dl->rvu; + struct rvu_switch *rswitch; + + rswitch = &rvu->rswitch; + switch (mode) { + case DEVLINK_ESWITCH_MODE_LEGACY: + case DEVLINK_ESWITCH_MODE_SWITCHDEV: + if (rswitch->mode == mode) + return 0; + rswitch->mode = mode; + if (mode == DEVLINK_ESWITCH_MODE_SWITCHDEV) + rvu_switch_enable(rvu); + else + rvu_switch_disable(rvu); + break; + default: + return -EINVAL; + } + + return 0; +} + static int rvu_devlink_info_get(struct devlink *devlink, struct devlink_info_req *req, struct netlink_ext_ack *extack) { @@ -1372,6 +1410,8 @@ static int rvu_devlink_info_get(struct devlink *devlink, struct devlink_info_req static const struct devlink_ops rvu_devlink_ops = { .info_get = rvu_devlink_info_get, + .eswitch_mode_get = rvu_devlink_eswitch_mode_get, + .eswitch_mode_set = rvu_devlink_eswitch_mode_set, }; int rvu_register_dl(struct rvu *rvu) @@ -1380,14 +1420,9 @@ int rvu_register_dl(struct rvu *rvu) struct devlink *dl; int err; - rvu_dl = kzalloc(sizeof(*rvu_dl), GFP_KERNEL); - if (!rvu_dl) - return -ENOMEM; - dl = devlink_alloc(&rvu_devlink_ops, sizeof(struct rvu_devlink)); if (!dl) { dev_warn(rvu->dev, "devlink_alloc failed\n"); - kfree(rvu_dl); return -ENOMEM; } @@ -1395,10 +1430,10 @@ int rvu_register_dl(struct rvu *rvu) if (err) { dev_err(rvu->dev, "devlink register failed with error %d\n", err); devlink_free(dl); - kfree(rvu_dl); return err; } + rvu_dl = devlink_priv(dl); rvu_dl->dl = dl; rvu_dl->rvu = rvu; rvu->rvu_dl = rvu_dl; @@ -1417,5 +1452,4 @@ void rvu_unregister_dl(struct rvu *rvu) rvu_health_reporters_destroy(rvu); devlink_unregister(dl); devlink_free(dl); - kfree(rvu_dl); } diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c 
b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c index aeae37704428..0933699a0d2d 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_nix.c @@ -1952,6 +1952,35 @@ static void nix_tl1_default_cfg(struct rvu *rvu, struct nix_hw *nix_hw, pfvf_map[schq] = TXSCH_SET_FLAG(pfvf_map[schq], NIX_TXSCHQ_CFG_DONE); } +static void rvu_nix_tx_tl2_cfg(struct rvu *rvu, int blkaddr, + u16 pcifunc, struct nix_txsch *txsch) +{ + struct rvu_hwinfo *hw = rvu->hw; + int lbk_link_start, lbk_links; + u8 pf = rvu_get_pf(pcifunc); + int schq; + + if (!is_pf_cgxmapped(rvu, pf)) + return; + + lbk_link_start = hw->cgx_links; + + for (schq = 0; schq < txsch->schq.max; schq++) { + if (TXSCH_MAP_FUNC(txsch->pfvf_map[schq]) != pcifunc) + continue; + /* Enable all LBK links with channel 63 by default so that + * packets can be sent to LBK with a NPC TX MCAM rule + */ + lbk_links = hw->lbk_links; + while (lbk_links--) + rvu_write64(rvu, blkaddr, + NIX_AF_TL3_TL2X_LINKX_CFG(schq, + lbk_link_start + + lbk_links), + BIT_ULL(12) | RVU_SWITCH_LBK_CHAN); + } +} + int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu, struct nix_txschq_config *req, struct msg_rsp *rsp) @@ -2040,6 +2069,9 @@ int rvu_mbox_handler_nix_txschq_cfg(struct rvu *rvu, rvu_write64(rvu, blkaddr, reg, regval); } + rvu_nix_tx_tl2_cfg(rvu, blkaddr, pcifunc, + &nix_hw->txsch[NIX_TXSCH_LVL_TL2]); + return 0; } @@ -3180,6 +3212,8 @@ int rvu_mbox_handler_nix_set_mac_addr(struct rvu *rvu, if (test_bit(PF_SET_VF_TRUSTED, &pfvf->flags) && from_vf) ether_addr_copy(pfvf->default_mac, req->mac_addr); + rvu_switch_update_rules(rvu, pcifunc); + return 0; } @@ -3849,6 +3883,8 @@ int rvu_mbox_handler_nix_lf_start_rx(struct rvu *rvu, struct msg_req *req, pfvf = rvu_get_pfvf(rvu, pcifunc); set_bit(NIXLF_INITIALIZED, &pfvf->flags); + rvu_switch_update_rules(rvu, pcifunc); + return rvu_cgx_start_stop_io(rvu, pcifunc, true); } diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c index 3612e0a2cab3..1097291aaa45 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc.c @@ -442,7 +442,8 @@ static void npc_fixup_vf_rule(struct rvu *rvu, struct npc_mcam *mcam, owner = mcam->entry2pfvf_map[index]; target_func = (entry->action >> 4) & 0xffff; /* do nothing when target is LBK/PF or owner is not PF */ - if (is_afvf(target_func) || (owner & RVU_PFVF_FUNC_MASK) || + if (is_pffunc_af(owner) || is_afvf(target_func) || + (owner & RVU_PFVF_FUNC_MASK) || !(target_func & RVU_PFVF_FUNC_MASK)) return; @@ -468,6 +469,8 @@ static void npc_config_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam, { int bank = npc_get_bank(mcam, index); int kw = 0, actbank, actindex; + u8 tx_intf_mask = ~intf & 0x3; + u8 tx_intf = intf; u64 cam0, cam1; actbank = bank; /* Save bank id, to set action later on */ @@ -488,12 +491,21 @@ static void npc_config_mcam_entry(struct rvu *rvu, struct npc_mcam *mcam, */ for (; bank < (actbank + mcam->banks_per_entry); bank++, kw = kw + 2) { /* Interface should be set in all banks */ + if (is_npc_intf_tx(intf)) { + /* Last bit must be set and rest don't care + * for TX interfaces + */ + tx_intf_mask = 0x1; + tx_intf = intf & tx_intf_mask; + tx_intf_mask = ~tx_intf & tx_intf_mask; + } + rvu_write64(rvu, blkaddr, NPC_AF_MCAMEX_BANKX_CAMX_INTF(index, bank, 1), - intf); + tx_intf); rvu_write64(rvu, blkaddr, NPC_AF_MCAMEX_BANKX_CAMX_INTF(index, bank, 0), - ~intf & 0x3); + tx_intf_mask); /* 
Set the match key */ npc_get_keyword(entry, kw, &cam0, &cam1); @@ -650,6 +662,7 @@ void rvu_npc_install_ucast_entry(struct rvu *rvu, u16 pcifunc, eth_broadcast_addr((u8 *)&req.mask.dmac); req.features = BIT_ULL(NPC_DMAC); req.channel = chan; + req.chan_mask = 0xFFFU; req.intf = pfvf->nix_rx_intf; req.op = action.op; req.hdr.pcifunc = 0; /* AF is requester */ @@ -799,6 +812,7 @@ void rvu_npc_install_bcast_match_entry(struct rvu *rvu, u16 pcifunc, eth_broadcast_addr((u8 *)&req.mask.dmac); req.features = BIT_ULL(NPC_DMAC); req.channel = chan; + req.chan_mask = 0xFFFU; req.intf = pfvf->nix_rx_intf; req.entry = index; req.hdr.pcifunc = 0; /* AF is requester */ @@ -1745,6 +1759,8 @@ static int npc_mcam_rsrcs_init(struct rvu *rvu, int blkaddr) int nixlf_count = rvu_get_nixlf_count(rvu); struct npc_mcam *mcam = &rvu->hw->mcam; int rsvd, err; + u16 index; + int cntr; u64 cfg; /* Actual number of MCAM entries vary by entry size */ @@ -1845,6 +1861,14 @@ static int npc_mcam_rsrcs_init(struct rvu *rvu, int blkaddr) if (!mcam->entry2target_pffunc) goto free_mem; + for (index = 0; index < mcam->bmap_entries; index++) { + mcam->entry2pfvf_map[index] = NPC_MCAM_INVALID_MAP; + mcam->entry2cntr_map[index] = NPC_MCAM_INVALID_MAP; + } + + for (cntr = 0; cntr < mcam->counters.max; cntr++) + mcam->cntr2pfvf_map[cntr] = NPC_MCAM_INVALID_MAP; + mutex_init(&mcam->lock); return 0; @@ -2562,7 +2586,7 @@ int rvu_mbox_handler_npc_mcam_alloc_entry(struct rvu *rvu, } /* Alloc request from PFFUNC with no NIXLF attached should be denied */ - if (!is_nixlf_attached(rvu, pcifunc)) + if (!is_pffunc_af(pcifunc) && !is_nixlf_attached(rvu, pcifunc)) return NPC_MCAM_ALLOC_DENIED; return npc_mcam_alloc_entries(mcam, pcifunc, req, rsp); @@ -2582,7 +2606,7 @@ int rvu_mbox_handler_npc_mcam_free_entry(struct rvu *rvu, return NPC_MCAM_INVALID_REQ; /* Free request from PFFUNC with no NIXLF attached, ignore */ - if (!is_nixlf_attached(rvu, pcifunc)) + if (!is_pffunc_af(pcifunc) && !is_nixlf_attached(rvu, pcifunc)) return NPC_MCAM_INVALID_REQ; mutex_lock(&mcam->lock); @@ -2594,7 +2618,7 @@ int rvu_mbox_handler_npc_mcam_free_entry(struct rvu *rvu, if (rc) goto exit; - mcam->entry2pfvf_map[req->entry] = 0; + mcam->entry2pfvf_map[req->entry] = NPC_MCAM_INVALID_MAP; mcam->entry2target_pffunc[req->entry] = 0x0; npc_mcam_clear_bit(mcam, req->entry); npc_enable_mcam_entry(rvu, mcam, blkaddr, req->entry, false); @@ -2679,13 +2703,14 @@ int rvu_mbox_handler_npc_mcam_write_entry(struct rvu *rvu, else nix_intf = pfvf->nix_rx_intf; - if (npc_mcam_verify_channel(rvu, pcifunc, req->intf, channel)) { + if (!is_pffunc_af(pcifunc) && + npc_mcam_verify_channel(rvu, pcifunc, req->intf, channel)) { rc = NPC_MCAM_INVALID_REQ; goto exit; } - if (npc_mcam_verify_pf_func(rvu, &req->entry_data, req->intf, - pcifunc)) { + if (!is_pffunc_af(pcifunc) && + npc_mcam_verify_pf_func(rvu, &req->entry_data, req->intf, pcifunc)) { rc = NPC_MCAM_INVALID_REQ; goto exit; } @@ -2836,7 +2861,7 @@ int rvu_mbox_handler_npc_mcam_alloc_counter(struct rvu *rvu, return NPC_MCAM_INVALID_REQ; /* If the request is from a PFFUNC with no NIXLF attached, ignore */ - if (!is_nixlf_attached(rvu, pcifunc)) + if (!is_pffunc_af(pcifunc) && !is_nixlf_attached(rvu, pcifunc)) return NPC_MCAM_INVALID_REQ; /* Since list of allocated counter IDs needs to be sent to requester, @@ -3081,7 +3106,7 @@ int rvu_mbox_handler_npc_mcam_alloc_and_write_entry(struct rvu *rvu, if (rc) { /* Free allocated MCAM entry */ mutex_lock(&mcam->lock); - mcam->entry2pfvf_map[entry] = 0; + mcam->entry2pfvf_map[entry] = 
NPC_MCAM_INVALID_MAP; npc_mcam_clear_bit(mcam, entry); mutex_unlock(&mcam->lock); return rc; diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c index 68633145a8b8..5c01cf4a9c5b 100644 --- a/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_npc_fs.c @@ -910,14 +910,17 @@ static void rvu_mcam_add_counter_to_rule(struct rvu *rvu, u16 pcifunc, static void npc_update_rx_entry(struct rvu *rvu, struct rvu_pfvf *pfvf, struct mcam_entry *entry, - struct npc_install_flow_req *req, u16 target) + struct npc_install_flow_req *req, + u16 target, bool pf_set_vfs_mac) { + struct rvu_switch *rswitch = &rvu->rswitch; struct nix_rx_action action; - u64 chan_mask; - chan_mask = req->chan_mask ? req->chan_mask : ~0ULL; - npc_update_entry(rvu, NPC_CHAN, entry, req->channel, 0, chan_mask, 0, - NIX_INTF_RX); + if (rswitch->mode == DEVLINK_ESWITCH_MODE_SWITCHDEV && pf_set_vfs_mac) + req->chan_mask = 0x0; /* Do not care channel */ + + npc_update_entry(rvu, NPC_CHAN, entry, req->channel, 0, req->chan_mask, + 0, NIX_INTF_RX); *(u64 *)&action = 0x00; action.pf_func = target; @@ -949,9 +952,16 @@ static void npc_update_tx_entry(struct rvu *rvu, struct rvu_pfvf *pfvf, struct npc_install_flow_req *req, u16 target) { struct nix_tx_action action; + u64 mask = ~0ULL; + + /* If AF is installing then do not care about + * PF_FUNC in Send Descriptor + */ + if (is_pffunc_af(req->hdr.pcifunc)) + mask = 0; npc_update_entry(rvu, NPC_PF_FUNC, entry, (__force u16)htons(target), - 0, ~0ULL, 0, NIX_INTF_TX); + 0, mask, 0, NIX_INTF_TX); *(u64 *)&action = 0x00; action.op = req->op; @@ -1002,7 +1012,7 @@ static int npc_install_flow(struct rvu *rvu, int blkaddr, u16 target, req->intf); if (is_npc_intf_rx(req->intf)) - npc_update_rx_entry(rvu, pfvf, entry, req, target); + npc_update_rx_entry(rvu, pfvf, entry, req, target, pf_set_vfs_mac); else npc_update_tx_entry(rvu, pfvf, entry, req, target); @@ -1164,7 +1174,9 @@ int rvu_mbox_handler_npc_install_flow(struct rvu *rvu, if (err) return err; - if (npc_mcam_verify_channel(rvu, target, req->intf, req->channel)) + /* Skip channel validation if AF is installing */ + if (!is_pffunc_af(req->hdr.pcifunc) && + npc_mcam_verify_channel(rvu, target, req->intf, req->channel)) return -EINVAL; pfvf = rvu_get_pfvf(rvu, target); @@ -1180,6 +1192,7 @@ int rvu_mbox_handler_npc_install_flow(struct rvu *rvu, eth_broadcast_addr((u8 *)&req->mask.dmac); } + /* Proceed if NIXLF is attached or not for TX rules */ err = nix_get_nixlf(rvu, target, &nixlf, NULL); if (err && is_npc_intf_rx(req->intf) && !pf_set_vfs_mac) return -EINVAL; diff --git a/drivers/net/ethernet/marvell/octeontx2/af/rvu_switch.c b/drivers/net/ethernet/marvell/octeontx2/af/rvu_switch.c new file mode 100644 index 000000000000..2e5379710aa5 --- /dev/null +++ b/drivers/net/ethernet/marvell/octeontx2/af/rvu_switch.c @@ -0,0 +1,258 @@ +// SPDX-License-Identifier: GPL-2.0 +/* Marvell OcteonTx2 RVU Admin Function driver + * + * Copyright (C) 2021 Marvell. + */ + +#include <linux/bitfield.h> +#include "rvu.h" + +static int rvu_switch_install_rx_rule(struct rvu *rvu, u16 pcifunc, + u16 chan_mask) +{ + struct npc_install_flow_req req = { 0 }; + struct npc_install_flow_rsp rsp = { 0 }; + struct rvu_pfvf *pfvf; + + pfvf = rvu_get_pfvf(rvu, pcifunc); + /* If the pcifunc is not initialized then nothing to do. + * This same function will be called again via rvu_switch_update_rules + * after pcifunc is initialized. 
+ */ + if (!test_bit(NIXLF_INITIALIZED, &pfvf->flags)) + return 0; + + ether_addr_copy(req.packet.dmac, pfvf->mac_addr); + eth_broadcast_addr((u8 *)&req.mask.dmac); + req.hdr.pcifunc = 0; /* AF is requester */ + req.vf = pcifunc; + req.features = BIT_ULL(NPC_DMAC); + req.channel = pfvf->rx_chan_base; + req.chan_mask = chan_mask; + req.intf = pfvf->nix_rx_intf; + req.op = NIX_RX_ACTION_DEFAULT; + req.default_rule = 1; + + return rvu_mbox_handler_npc_install_flow(rvu, &req, &rsp); +} + +static int rvu_switch_install_tx_rule(struct rvu *rvu, u16 pcifunc, u16 entry) +{ + struct npc_install_flow_req req = { 0 }; + struct npc_install_flow_rsp rsp = { 0 }; + struct rvu_pfvf *pfvf; + u8 lbkid; + + pfvf = rvu_get_pfvf(rvu, pcifunc); + /* If the pcifunc is not initialized then nothing to do. + * This same function will be called again via rvu_switch_update_rules + * after pcifunc is initialized. + */ + if (!test_bit(NIXLF_INITIALIZED, &pfvf->flags)) + return 0; + + lbkid = pfvf->nix_blkaddr == BLKADDR_NIX0 ? 0 : 1; + ether_addr_copy(req.packet.dmac, pfvf->mac_addr); + eth_broadcast_addr((u8 *)&req.mask.dmac); + req.hdr.pcifunc = 0; /* AF is requester */ + req.vf = pcifunc; + req.entry = entry; + req.features = BIT_ULL(NPC_DMAC); + req.intf = pfvf->nix_tx_intf; + req.op = NIX_TX_ACTIONOP_UCAST_CHAN; + req.index = (lbkid << 8) | RVU_SWITCH_LBK_CHAN; + req.set_cntr = 1; + + return rvu_mbox_handler_npc_install_flow(rvu, &req, &rsp); +} + +static int rvu_switch_install_rules(struct rvu *rvu) +{ + struct rvu_switch *rswitch = &rvu->rswitch; + u16 start = rswitch->start_entry; + struct rvu_hwinfo *hw = rvu->hw; + int pf, vf, numvfs, hwvf; + u16 pcifunc, entry = 0; + int err; + + for (pf = 1; pf < hw->total_pfs; pf++) { + if (!is_pf_cgxmapped(rvu, pf)) + continue; + + pcifunc = pf << 10; + /* rvu_get_nix_blkaddr sets up the corresponding NIX block + * address and NIX RX and TX interfaces for a pcifunc. + * Generally it is called during attach call of a pcifunc but it + * is called here since we are pre-installing rules before + * nixlfs are attached + */ + rvu_get_nix_blkaddr(rvu, pcifunc); + + /* MCAM RX rule for a PF/VF already exists as default unicast + * rules installed by AF. Hence change the channel in those + * rules to ignore channel so that packets with the required + * DMAC received from LBK(by other PF/VFs in system) or from + * external world (from wire) are accepted. 
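The comment just above explains why rvu_switch_install_rx_rule() is called with a channel mask of 0x0 when switching is enabled and 0xFFF when reverting: the MCAM is a key/mask (TCAM-style) match, so an all-zero mask turns the channel into a don't-care field while 0xFFF requires an exact channel. A conceptual model of that match (the real NPC key layout is more involved than this):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Only bits set in chan_mask take part in the comparison. */
static bool chan_matches(uint16_t pkt_chan, uint16_t rule_chan, uint16_t chan_mask)
{
    return ((pkt_chan ^ rule_chan) & chan_mask) == 0;
}

int main(void)
{
    printf("%d\n", chan_matches(0x3F, 0x10, 0x000)); /* 1: channel is don't-care */
    printf("%d\n", chan_matches(0x3F, 0x10, 0xFFF)); /* 0: exact channel required */
    return 0;
}

With the mask cleared, the same DMAC rule accepts traffic arriving from LBK or from the wire, which is what switchdev mode needs.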
+ */ + err = rvu_switch_install_rx_rule(rvu, pcifunc, 0x0); + if (err) { + dev_err(rvu->dev, "RX rule for PF%d failed(%d)\n", + pf, err); + return err; + } + + err = rvu_switch_install_tx_rule(rvu, pcifunc, start + entry); + if (err) { + dev_err(rvu->dev, "TX rule for PF%d failed(%d)\n", + pf, err); + return err; + } + + rswitch->entry2pcifunc[entry++] = pcifunc; + + rvu_get_pf_numvfs(rvu, pf, &numvfs, &hwvf); + for (vf = 0; vf < numvfs; vf++, hwvf++) { + pcifunc = pf << 10 | ((vf + 1) & 0x3FF); + rvu_get_nix_blkaddr(rvu, pcifunc); + + err = rvu_switch_install_rx_rule(rvu, pcifunc, 0x0); + if (err) { + dev_err(rvu->dev, + "RX rule for PF%dVF%d failed(%d)\n", + pf, vf, err); + return err; + } + + err = rvu_switch_install_tx_rule(rvu, pcifunc, + start + entry); + if (err) { + dev_err(rvu->dev, + "TX rule for PF%dVF%d failed(%d)\n", + pf, vf, err); + return err; + } + + rswitch->entry2pcifunc[entry++] = pcifunc; + } + } + + return 0; +} + +void rvu_switch_enable(struct rvu *rvu) +{ + struct npc_mcam_alloc_entry_req alloc_req = { 0 }; + struct npc_mcam_alloc_entry_rsp alloc_rsp = { 0 }; + struct npc_delete_flow_req uninstall_req = { 0 }; + struct npc_mcam_free_entry_req free_req = { 0 }; + struct rvu_switch *rswitch = &rvu->rswitch; + struct msg_rsp rsp; + int ret; + + alloc_req.contig = true; + alloc_req.count = rvu->cgx_mapped_pfs + rvu->cgx_mapped_vfs; + ret = rvu_mbox_handler_npc_mcam_alloc_entry(rvu, &alloc_req, + &alloc_rsp); + if (ret) { + dev_err(rvu->dev, + "Unable to allocate MCAM entries\n"); + goto exit; + } + + if (alloc_rsp.count != alloc_req.count) { + dev_err(rvu->dev, + "Unable to allocate %d MCAM entries, got %d\n", + alloc_req.count, alloc_rsp.count); + goto free_entries; + } + + rswitch->entry2pcifunc = kcalloc(alloc_req.count, sizeof(u16), + GFP_KERNEL); + if (!rswitch->entry2pcifunc) + goto free_entries; + + rswitch->used_entries = alloc_rsp.count; + rswitch->start_entry = alloc_rsp.entry; + + ret = rvu_switch_install_rules(rvu); + if (ret) + goto uninstall_rules; + + return; + +uninstall_rules: + uninstall_req.start = rswitch->start_entry; + uninstall_req.end = rswitch->start_entry + rswitch->used_entries - 1; + rvu_mbox_handler_npc_delete_flow(rvu, &uninstall_req, &rsp); + kfree(rswitch->entry2pcifunc); +free_entries: + free_req.all = 1; + rvu_mbox_handler_npc_mcam_free_entry(rvu, &free_req, &rsp); +exit: + return; +} + +void rvu_switch_disable(struct rvu *rvu) +{ + struct npc_delete_flow_req uninstall_req = { 0 }; + struct npc_mcam_free_entry_req free_req = { 0 }; + struct rvu_switch *rswitch = &rvu->rswitch; + struct rvu_hwinfo *hw = rvu->hw; + int pf, vf, numvfs, hwvf; + struct msg_rsp rsp; + u16 pcifunc; + int err; + + if (!rswitch->used_entries) + return; + + for (pf = 1; pf < hw->total_pfs; pf++) { + if (!is_pf_cgxmapped(rvu, pf)) + continue; + + pcifunc = pf << 10; + err = rvu_switch_install_rx_rule(rvu, pcifunc, 0xFFF); + if (err) + dev_err(rvu->dev, + "Reverting RX rule for PF%d failed(%d)\n", + pf, err); + + for (vf = 0; vf < numvfs; vf++, hwvf++) { + pcifunc = pf << 10 | ((vf + 1) & 0x3FF); + err = rvu_switch_install_rx_rule(rvu, pcifunc, 0xFFF); + if (err) + dev_err(rvu->dev, + "Reverting RX rule for PF%dVF%d failed(%d)\n", + pf, vf, err); + } + } + + uninstall_req.start = rswitch->start_entry; + uninstall_req.end = rswitch->start_entry + rswitch->used_entries - 1; + free_req.all = 1; + rvu_mbox_handler_npc_delete_flow(rvu, &uninstall_req, &rsp); + rvu_mbox_handler_npc_mcam_free_entry(rvu, &free_req, &rsp); + rswitch->used_entries = 0; + 
kfree(rswitch->entry2pcifunc); +} + +void rvu_switch_update_rules(struct rvu *rvu, u16 pcifunc) +{ + struct rvu_switch *rswitch = &rvu->rswitch; + u32 max = rswitch->used_entries; + u16 entry; + + if (!rswitch->used_entries) + return; + + for (entry = 0; entry < max; entry++) { + if (rswitch->entry2pcifunc[entry] == pcifunc) + break; + } + + if (entry >= max) + return; + + rvu_switch_install_tx_rule(rvu, pcifunc, rswitch->start_entry + entry); + rvu_switch_install_rx_rule(rvu, pcifunc, 0x0); +} diff --git a/drivers/net/ethernet/microchip/sparx5/Kconfig b/drivers/net/ethernet/microchip/sparx5/Kconfig index ac403d43c74c..7bdbb2d09a14 100644 --- a/drivers/net/ethernet/microchip/sparx5/Kconfig +++ b/drivers/net/ethernet/microchip/sparx5/Kconfig @@ -3,6 +3,7 @@ config SPARX5_SWITCH depends on NET_SWITCHDEV depends on HAS_IOMEM depends on OF + depends on ARCH_SPARX5 || COMPILE_TEST select PHYLINK select PHY_SPARX5_SERDES select RESET_CONTROLLER diff --git a/drivers/net/ethernet/netronome/nfp/flower/conntrack.c b/drivers/net/ethernet/netronome/nfp/flower/conntrack.c index 2abf02eed7fb..1ac3b65df600 100644 --- a/drivers/net/ethernet/netronome/nfp/flower/conntrack.c +++ b/drivers/net/ethernet/netronome/nfp/flower/conntrack.c @@ -1769,6 +1769,7 @@ int nfp_fl_ct_del_flow(struct nfp_fl_ct_map_entry *ct_map_ent) nfp_ct_map_params); nfp_fl_ct_clean_flow_entry(ct_map_ent->ct_entry); kfree(ct_map_ent); + break; default: break; } diff --git a/drivers/net/ethernet/realtek/r8169_main.c b/drivers/net/ethernet/realtek/r8169_main.c index f744557c33a3..c7af5bc3b8af 100644 --- a/drivers/net/ethernet/realtek/r8169_main.c +++ b/drivers/net/ethernet/realtek/r8169_main.c @@ -5084,7 +5084,8 @@ static int r8169_mdio_register(struct rtl8169_private *tp) new_bus->priv = tp; new_bus->parent = &pdev->dev; new_bus->irq[0] = PHY_MAC_INTERRUPT; - snprintf(new_bus->id, MII_BUS_ID_SIZE, "r8169-%x", pci_dev_id(pdev)); + snprintf(new_bus->id, MII_BUS_ID_SIZE, "r8169-%x-%x", + pci_domain_nr(pdev->bus), pci_dev_id(pdev)); new_bus->read = r8169_mdio_read_reg; new_bus->write = r8169_mdio_write_reg; diff --git a/drivers/net/ethernet/renesas/ravb.h b/drivers/net/ethernet/renesas/ravb.h index 86a1eb0634e8..80e62ca2e3d3 100644 --- a/drivers/net/ethernet/renesas/ravb.h +++ b/drivers/net/ethernet/renesas/ravb.h @@ -864,7 +864,7 @@ enum GECMR_BIT { /* The Ethernet AVB descriptor definitions. 
*/ struct ravb_desc { - __le16 ds; /* Descriptor size */ + __le16 ds; /* Descriptor size */ u8 cc; /* Content control MSBs (reserved) */ u8 die_dt; /* Descriptor interrupt enable and type */ __le32 dptr; /* Descriptor pointer */ diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c index 69c50f81e1cb..805397088850 100644 --- a/drivers/net/ethernet/renesas/ravb_main.c +++ b/drivers/net/ethernet/renesas/ravb_main.c @@ -920,7 +920,7 @@ static int ravb_poll(struct napi_struct *napi, int budget) if (ravb_rx(ndev, "a, q)) goto out; - /* Processing RX Descriptor Ring */ + /* Processing TX Descriptor Ring */ spin_lock_irqsave(&priv->lock, flags); /* Clear TX interrupt */ ravb_write(ndev, ~(mask | TIS_RESERVED), TIS); diff --git a/drivers/net/ethernet/xscale/ptp_ixp46x.c b/drivers/net/ethernet/xscale/ptp_ixp46x.c index 99d4d9439d05..a6fb88fd42f7 100644 --- a/drivers/net/ethernet/xscale/ptp_ixp46x.c +++ b/drivers/net/ethernet/xscale/ptp_ixp46x.c @@ -14,6 +14,8 @@ #include <linux/kernel.h> #include <linux/ptp_clock_kernel.h> #include <linux/soc/ixp4xx/cpu.h> +#include <linux/module.h> +#include <mach/ixp4xx-regs.h> #include "ixp46x_ts.h" diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index 1692d3b1b6e1..e09b107b5c99 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c @@ -1552,7 +1552,8 @@ static int rtl8152_set_speed(struct r8152 *tp, u8 autoneg, u32 speed, u8 duplex, u32 advertising); -static int rtl8152_set_mac_address(struct net_device *netdev, void *p) +static int __rtl8152_set_mac_address(struct net_device *netdev, void *p, + bool in_resume) { struct r8152 *tp = netdev_priv(netdev); struct sockaddr *addr = p; @@ -1561,9 +1562,11 @@ static int rtl8152_set_mac_address(struct net_device *netdev, void *p) if (!is_valid_ether_addr(addr->sa_data)) goto out1; - ret = usb_autopm_get_interface(tp->intf); - if (ret < 0) - goto out1; + if (!in_resume) { + ret = usb_autopm_get_interface(tp->intf); + if (ret < 0) + goto out1; + } mutex_lock(&tp->control); @@ -1575,11 +1578,17 @@ static int rtl8152_set_mac_address(struct net_device *netdev, void *p) mutex_unlock(&tp->control); - usb_autopm_put_interface(tp->intf); + if (!in_resume) + usb_autopm_put_interface(tp->intf); out1: return ret; } +static int rtl8152_set_mac_address(struct net_device *netdev, void *p) +{ + return __rtl8152_set_mac_address(netdev, p, false); +} + /* Devices containing proper chips can support a persistent * host system provided MAC address. 
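The in_resume plumbing introduced in the r8152 hunks above appears to follow a familiar pattern: a helper that normally brackets its work with a runtime-PM get/put skips that bookkeeping when it is invoked from the resume path itself. A minimal userspace sketch of just that control flow, with fake_pm_get()/fake_pm_put() standing in for the real usb_autopm_get_interface()/usb_autopm_put_interface() calls:

/* Sketch of the "skip the PM get/put when called from resume" flow.  The
 * helpers and refcount are invented; only the branching mirrors the driver
 * change.
 */
#include <stdbool.h>
#include <stdio.h>

static int pm_refcount;

static int fake_pm_get(void)
{
	pm_refcount++;
	return 0;
}

static void fake_pm_put(void)
{
	pm_refcount--;
}

static int set_mac_address(const char *mac, bool in_resume)
{
	int ret = 0;

	if (!in_resume) {		/* normal ndo path: take a PM reference */
		ret = fake_pm_get();
		if (ret < 0)
			return ret;
	}

	printf("programming MAC %s (pm refs held: %d)\n", mac, pm_refcount);

	if (!in_resume)
		fake_pm_put();
	return ret;
}

int main(void)
{
	set_mac_address("00:11:22:33:44:55", false);	/* regular set_mac_address */
	set_mac_address("00:11:22:33:44:55", true);	/* reset_resume path */
	return 0;
}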
* Examples of this are Dell TB15 and Dell WD15 docks @@ -1698,7 +1707,7 @@ static int determine_ethernet_addr(struct r8152 *tp, struct sockaddr *sa) return ret; } -static int set_ethernet_addr(struct r8152 *tp) +static int set_ethernet_addr(struct r8152 *tp, bool in_resume) { struct net_device *dev = tp->netdev; struct sockaddr sa; @@ -1711,7 +1720,7 @@ static int set_ethernet_addr(struct r8152 *tp) if (tp->version == RTL_VER_01) ether_addr_copy(dev->dev_addr, sa.sa_data); else - ret = rtl8152_set_mac_address(dev, &sa); + ret = __rtl8152_set_mac_address(dev, &sa, in_resume); return ret; } @@ -6763,9 +6772,10 @@ static int rtl8152_close(struct net_device *netdev) tp->rtl_ops.down(tp); mutex_unlock(&tp->control); + } + if (!res) usb_autopm_put_interface(tp->intf); - } free_all_mem(tp); @@ -8443,7 +8453,7 @@ static int rtl8152_reset_resume(struct usb_interface *intf) clear_bit(SELECTIVE_SUSPEND, &tp->flags); tp->rtl_ops.init(tp); queue_delayed_work(system_long_wq, &tp->hw_phy_work, 0); - set_ethernet_addr(tp); + set_ethernet_addr(tp, true); return rtl8152_resume(intf); } @@ -9644,7 +9654,7 @@ static int rtl8152_probe(struct usb_interface *intf, tp->rtl_fw.retry = true; #endif queue_delayed_work(system_long_wq, &tp->hw_phy_work, 0); - set_ethernet_addr(tp); + set_ethernet_addr(tp, false); usb_set_intfdata(intf, tp); diff --git a/drivers/net/wireless/mediatek/mt76/mt7921/main.c b/drivers/net/wireless/mediatek/mt76/mt7921/main.c index 7fd21049ff5a..63ec140c9c37 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7921/main.c +++ b/drivers/net/wireless/mediatek/mt76/mt7921/main.c @@ -389,6 +389,7 @@ static int mt7921_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, case WLAN_CIPHER_SUITE_WEP104: if (!mvif->wep_sta) return -EOPNOTSUPP; + break; case WLAN_CIPHER_SUITE_TKIP: case WLAN_CIPHER_SUITE_CCMP: case WLAN_CIPHER_SUITE_CCMP_256: diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index d3c5086673bc..320051f5a3dd 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -1554,6 +1554,28 @@ static void nvme_init_queue(struct nvme_queue *nvmeq, u16 qid) wmb(); /* ensure the first interrupt sees the initialization */ } +/* + * Try getting shutdown_lock while setting up IO queues. + */ +static int nvme_setup_io_queues_trylock(struct nvme_dev *dev) +{ + /* + * Give up if the lock is being held by nvme_dev_disable. + */ + if (!mutex_trylock(&dev->shutdown_lock)) + return -ENODEV; + + /* + * Controller is in wrong state, fail early. 
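For readers following the locking change, nvme_setup_io_queues_trylock() above encodes a simple guard: give up immediately if the teardown path already owns shutdown_lock, and re-check the controller state once the lock is held. A rough pthread analogue of that guard, with invented names and a plain state variable, just to show the shape of the pattern:

/* Rough userspace analogue of the trylock guard above: bail out if a
 * concurrent teardown already owns the lock, then re-check state under it.
 * The state enum and names are invented for illustration only.
 */
#include <errno.h>
#include <pthread.h>
#include <stdio.h>

enum ctrl_state { CTRL_CONNECTING, CTRL_DELETING };

static pthread_mutex_t shutdown_lock = PTHREAD_MUTEX_INITIALIZER;
static enum ctrl_state state = CTRL_CONNECTING;

static int setup_trylock(void)
{
	if (pthread_mutex_trylock(&shutdown_lock))
		return -ENODEV;		/* teardown is running, give up */

	if (state != CTRL_CONNECTING) {
		pthread_mutex_unlock(&shutdown_lock);
		return -ENODEV;		/* wrong state, fail early */
	}
	return 0;			/* caller now owns shutdown_lock */
}

int main(void)
{
	if (setup_trylock() == 0) {
		printf("lock taken, safe to touch IRQ resources\n");
		pthread_mutex_unlock(&shutdown_lock);
	} else {
		printf("teardown in progress, abort setup\n");
	}
	return 0;
}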
+ */ + if (dev->ctrl.state != NVME_CTRL_CONNECTING) { + mutex_unlock(&dev->shutdown_lock); + return -ENODEV; + } + + return 0; +} + static int nvme_create_queue(struct nvme_queue *nvmeq, int qid, bool polled) { struct nvme_dev *dev = nvmeq->dev; @@ -1582,8 +1604,11 @@ static int nvme_create_queue(struct nvme_queue *nvmeq, int qid, bool polled) goto release_cq; nvmeq->cq_vector = vector; - nvme_init_queue(nvmeq, qid); + result = nvme_setup_io_queues_trylock(dev); + if (result) + return result; + nvme_init_queue(nvmeq, qid); if (!polled) { result = queue_request_irq(nvmeq); if (result < 0) @@ -1591,10 +1616,12 @@ static int nvme_create_queue(struct nvme_queue *nvmeq, int qid, bool polled) } set_bit(NVMEQ_ENABLED, &nvmeq->flags); + mutex_unlock(&dev->shutdown_lock); return result; release_sq: dev->online_queues--; + mutex_unlock(&dev->shutdown_lock); adapter_delete_sq(dev, qid); release_cq: adapter_delete_cq(dev, qid); @@ -2167,7 +2194,18 @@ static int nvme_setup_io_queues(struct nvme_dev *dev) if (nr_io_queues == 0) return 0; - clear_bit(NVMEQ_ENABLED, &adminq->flags); + /* + * Free IRQ resources as soon as NVMEQ_ENABLED bit transitions + * from set to unset. If there is a window to it is truely freed, + * pci_free_irq_vectors() jumping into this window will crash. + * And take lock to avoid racing with pci_free_irq_vectors() in + * nvme_dev_disable() path. + */ + result = nvme_setup_io_queues_trylock(dev); + if (result) + return result; + if (test_and_clear_bit(NVMEQ_ENABLED, &adminq->flags)) + pci_free_irq(pdev, 0, adminq); if (dev->cmb_use_sqes) { result = nvme_cmb_qdepth(dev, nr_io_queues, @@ -2183,14 +2221,17 @@ static int nvme_setup_io_queues(struct nvme_dev *dev) result = nvme_remap_bar(dev, size); if (!result) break; - if (!--nr_io_queues) - return -ENOMEM; + if (!--nr_io_queues) { + result = -ENOMEM; + goto out_unlock; + } } while (1); adminq->q_db = dev->dbs; retry: /* Deregister the admin queue's interrupt */ - pci_free_irq(pdev, 0, adminq); + if (test_and_clear_bit(NVMEQ_ENABLED, &adminq->flags)) + pci_free_irq(pdev, 0, adminq); /* * If we enable msix early due to not intx, disable it again before @@ -2199,8 +2240,10 @@ static int nvme_setup_io_queues(struct nvme_dev *dev) pci_free_irq_vectors(pdev); result = nvme_setup_irqs(dev, nr_io_queues); - if (result <= 0) - return -EIO; + if (result <= 0) { + result = -EIO; + goto out_unlock; + } dev->num_vecs = result; result = max(result - 1, 1); @@ -2214,8 +2257,9 @@ static int nvme_setup_io_queues(struct nvme_dev *dev) */ result = queue_request_irq(adminq); if (result) - return result; + goto out_unlock; set_bit(NVMEQ_ENABLED, &adminq->flags); + mutex_unlock(&dev->shutdown_lock); result = nvme_create_io_queues(dev); if (result || dev->online_queues < 2) @@ -2224,6 +2268,9 @@ static int nvme_setup_io_queues(struct nvme_dev *dev) if (dev->online_queues - 1 < dev->max_qid) { nr_io_queues = dev->online_queues - 1; nvme_disable_io_queues(dev); + result = nvme_setup_io_queues_trylock(dev); + if (result) + return result; nvme_suspend_io_queues(dev); goto retry; } @@ -2232,6 +2279,9 @@ static int nvme_setup_io_queues(struct nvme_dev *dev) dev->io_queues[HCTX_TYPE_READ], dev->io_queues[HCTX_TYPE_POLL]); return 0; +out_unlock: + mutex_unlock(&dev->shutdown_lock); + return result; } static void nvme_del_queue_end(struct request *req, blk_status_t error) @@ -2962,7 +3012,6 @@ static void nvme_remove(struct pci_dev *pdev) if (!pci_device_is_present(pdev)) { nvme_change_ctrl_state(&dev->ctrl, NVME_CTRL_DEAD); nvme_dev_disable(dev, true); - 
nvme_dev_remove_admin(dev); } flush_work(&dev->ctrl.reset_work); diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c index 12acfe05cd68..8cb15ee5b249 100644 --- a/drivers/nvme/host/tcp.c +++ b/drivers/nvme/host/tcp.c @@ -123,7 +123,6 @@ struct nvme_tcp_ctrl { struct blk_mq_tag_set admin_tag_set; struct sockaddr_storage addr; struct sockaddr_storage src_addr; - struct net_device *ndev; struct nvme_ctrl ctrl; struct work_struct err_work; @@ -2533,8 +2532,7 @@ static struct nvme_ctrl *nvme_tcp_create_ctrl(struct device *dev, } if (opts->mask & NVMF_OPT_HOST_IFACE) { - ctrl->ndev = dev_get_by_name(&init_net, opts->host_iface); - if (!ctrl->ndev) { + if (!__dev_get_by_name(&init_net, opts->host_iface)) { pr_err("invalid interface passed: %s\n", opts->host_iface); ret = -ENODEV; diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c index 9bab07302bbf..d32fbfc93ea9 100644 --- a/drivers/pci/proc.c +++ b/drivers/pci/proc.c @@ -230,8 +230,8 @@ static long proc_bus_pci_ioctl(struct file *file, unsigned int cmd, break; } /* If arch decided it can't, fall through... */ -#endif /* HAVE_PCI_MMAP */ fallthrough; +#endif /* HAVE_PCI_MMAP */ default: ret = -EINVAL; break; diff --git a/drivers/power/supply/ab8500_fg.c b/drivers/power/supply/ab8500_fg.c index 3d45ed0157c6..a6ebdb269fdd 100644 --- a/drivers/power/supply/ab8500_fg.c +++ b/drivers/power/supply/ab8500_fg.c @@ -1728,6 +1728,7 @@ static void ab8500_fg_algorithm_calibrate(struct ab8500_fg *di) break; case AB8500_FG_CALIB_WAIT: dev_dbg(di->dev, "Calibration WFI\n"); + break; default: break; } @@ -2224,6 +2225,7 @@ static int ab8500_fg_get_ext_psy_data(struct device *dev, void *data) queue_work(di->fg_wq, &di->fg_work); break; } + break; default: break; } diff --git a/drivers/power/supply/abx500_chargalg.c b/drivers/power/supply/abx500_chargalg.c index a17849bfacbf..b72826cf6794 100644 --- a/drivers/power/supply/abx500_chargalg.c +++ b/drivers/power/supply/abx500_chargalg.c @@ -1150,6 +1150,7 @@ static int abx500_chargalg_get_ext_psy_data(struct device *dev, void *data) default: break; } + break; default: break; } diff --git a/drivers/pwm/pwm-berlin.c b/drivers/pwm/pwm-berlin.c index 5537b5f6dd5d..e157273fd2f7 100644 --- a/drivers/pwm/pwm-berlin.c +++ b/drivers/pwm/pwm-berlin.c @@ -190,12 +190,9 @@ static int berlin_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm, return 0; } - if (state->period != pwm->state.period || - state->duty_cycle != pwm->state.duty_cycle) { - err = berlin_pwm_config(chip, pwm, state->duty_cycle, state->period); - if (err) - return err; - } + err = berlin_pwm_config(chip, pwm, state->duty_cycle, state->period); + if (err) + return err; if (!enabled) return berlin_pwm_enable(chip, pwm); diff --git a/drivers/pwm/pwm-ep93xx.c b/drivers/pwm/pwm-ep93xx.c index 8a3d781e6514..fc3cb7d669c6 100644 --- a/drivers/pwm/pwm-ep93xx.c +++ b/drivers/pwm/pwm-ep93xx.c @@ -64,6 +64,11 @@ static int ep93xx_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm, int ret; struct ep93xx_pwm *ep93xx_pwm = to_ep93xx_pwm(chip); bool enabled = state->enabled; + void __iomem *base = ep93xx_pwm->base; + unsigned long long c; + unsigned long period_cycles; + unsigned long duty_cycles; + unsigned long term; if (state->polarity != pwm->state.polarity) { if (enabled) { @@ -97,57 +102,47 @@ static int ep93xx_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm, return 0; } - if (state->period != pwm->state.period || - state->duty_cycle != pwm->state.duty_cycle) { - struct ep93xx_pwm *ep93xx_pwm = to_ep93xx_pwm(chip); - void __iomem *base = 
ep93xx_pwm->base; - unsigned long long c; - unsigned long period_cycles; - unsigned long duty_cycles; - unsigned long term; + /* + * The clock needs to be enabled to access the PWM registers. + * Configuration can be changed at any time. + */ + if (!pwm_is_enabled(pwm)) { + ret = clk_prepare_enable(ep93xx_pwm->clk); + if (ret) + return ret; + } - /* - * The clock needs to be enabled to access the PWM registers. - * Configuration can be changed at any time. - */ - if (!pwm_is_enabled(pwm)) { - ret = clk_prepare_enable(ep93xx_pwm->clk); - if (ret) - return ret; - } + c = clk_get_rate(ep93xx_pwm->clk); + c *= state->period; + do_div(c, 1000000000); + period_cycles = c; + + c = period_cycles; + c *= state->duty_cycle; + do_div(c, state->period); + duty_cycles = c; - c = clk_get_rate(ep93xx_pwm->clk); - c *= state->period; - do_div(c, 1000000000); - period_cycles = c; - - c = period_cycles; - c *= state->duty_cycle; - do_div(c, state->period); - duty_cycles = c; - - if (period_cycles < 0x10000 && duty_cycles < 0x10000) { - term = readw(base + EP93XX_PWMx_TERM_COUNT); - - /* Order is important if PWM is running */ - if (period_cycles > term) { - writew(period_cycles, base + EP93XX_PWMx_TERM_COUNT); - writew(duty_cycles, base + EP93XX_PWMx_DUTY_CYCLE); - } else { - writew(duty_cycles, base + EP93XX_PWMx_DUTY_CYCLE); - writew(period_cycles, base + EP93XX_PWMx_TERM_COUNT); - } - ret = 0; + if (period_cycles < 0x10000 && duty_cycles < 0x10000) { + term = readw(base + EP93XX_PWMx_TERM_COUNT); + + /* Order is important if PWM is running */ + if (period_cycles > term) { + writew(period_cycles, base + EP93XX_PWMx_TERM_COUNT); + writew(duty_cycles, base + EP93XX_PWMx_DUTY_CYCLE); } else { - ret = -EINVAL; + writew(duty_cycles, base + EP93XX_PWMx_DUTY_CYCLE); + writew(period_cycles, base + EP93XX_PWMx_TERM_COUNT); } + ret = 0; + } else { + ret = -EINVAL; + } - if (!pwm_is_enabled(pwm)) - clk_disable_unprepare(ep93xx_pwm->clk); + if (!pwm_is_enabled(pwm)) + clk_disable_unprepare(ep93xx_pwm->clk); - if (ret) - return ret; - } + if (ret) + return ret; if (!enabled) { ret = clk_prepare_enable(ep93xx_pwm->clk); diff --git a/drivers/pwm/pwm-spear.c b/drivers/pwm/pwm-spear.c index 48c31dac2f32..54c7990967dd 100644 --- a/drivers/pwm/pwm-spear.c +++ b/drivers/pwm/pwm-spear.c @@ -177,12 +177,9 @@ static int spear_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm, return 0; } - if (state->period != pwm->state.period || - state->duty_cycle != pwm->state.duty_cycle) { - err = spear_pwm_config(chip, pwm, state->duty_cycle, state->period); - if (err) - return err; - } + err = spear_pwm_config(chip, pwm, state->duty_cycle, state->period); + if (err) + return err; if (!pwm->state.enabled) return spear_pwm_enable(chip, pwm); diff --git a/drivers/pwm/pwm-sprd.c b/drivers/pwm/pwm-sprd.c index f2a85e8dd941..7004f55bbf11 100644 --- a/drivers/pwm/pwm-sprd.c +++ b/drivers/pwm/pwm-sprd.c @@ -183,13 +183,10 @@ static int sprd_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm, } } - if (state->period != cstate->period || - state->duty_cycle != cstate->duty_cycle) { - ret = sprd_pwm_config(spc, pwm, state->duty_cycle, - state->period); - if (ret) - return ret; - } + ret = sprd_pwm_config(spc, pwm, state->duty_cycle, + state->period); + if (ret) + return ret; sprd_pwm_write(spc, pwm->hwpwm, SPRD_PWM_ENABLE, 1); } else if (cstate->enabled) { diff --git a/drivers/pwm/pwm-tiecap.c b/drivers/pwm/pwm-tiecap.c index dec3f1fb150c..35eb19a5a0d1 100644 --- a/drivers/pwm/pwm-tiecap.c +++ b/drivers/pwm/pwm-tiecap.c @@ -189,16 
+189,13 @@ static int ecap_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm, return 0; } - if (state->period != pwm->state.period || - state->duty_cycle != pwm->state.duty_cycle) { - if (state->period > NSEC_PER_SEC) - return -ERANGE; + if (state->period > NSEC_PER_SEC) + return -ERANGE; - err = ecap_pwm_config(chip, pwm, state->duty_cycle, - state->period, enabled); - if (err) - return err; - } + err = ecap_pwm_config(chip, pwm, state->duty_cycle, + state->period, enabled); + if (err) + return err; if (!enabled) return ecap_pwm_enable(chip, pwm); diff --git a/drivers/regulator/bd9576-regulator.c b/drivers/regulator/bd9576-regulator.c index e16c3727db7a..aa42da4d141e 100644 --- a/drivers/regulator/bd9576-regulator.c +++ b/drivers/regulator/bd9576-regulator.c @@ -294,9 +294,9 @@ static bool check_temp_flag_mismatch(struct regulator_dev *rdev, int severity, struct bd957x_regulator_data *r) { if ((severity == REGULATOR_SEVERITY_ERR && - r->ovd_notif != REGULATOR_EVENT_OVER_TEMP) || + r->temp_notif != REGULATOR_EVENT_OVER_TEMP) || (severity == REGULATOR_SEVERITY_WARN && - r->ovd_notif != REGULATOR_EVENT_OVER_TEMP_WARN)) { + r->temp_notif != REGULATOR_EVENT_OVER_TEMP_WARN)) { dev_warn(rdev_get_dev(rdev), "Can't support both thermal WARN and ERR\n"); if (severity == REGULATOR_SEVERITY_WARN) diff --git a/drivers/regulator/hi6421-regulator.c b/drivers/regulator/hi6421-regulator.c index bff8c515dcde..d144a4bdb76d 100644 --- a/drivers/regulator/hi6421-regulator.c +++ b/drivers/regulator/hi6421-regulator.c @@ -366,9 +366,8 @@ static struct hi6421_regulator_info static int hi6421_regulator_enable(struct regulator_dev *rdev) { - struct hi6421_regulator_pdata *pdata; + struct hi6421_regulator_pdata *pdata = rdev_get_drvdata(rdev); - pdata = dev_get_drvdata(rdev->dev.parent); /* hi6421 spec requires regulator enablement must be serialized: * - Because when BUCK, LDO switching from off to on, it will have * a huge instantaneous current; so you can not turn on two or @@ -385,9 +384,10 @@ static int hi6421_regulator_enable(struct regulator_dev *rdev) static unsigned int hi6421_regulator_ldo_get_mode(struct regulator_dev *rdev) { - struct hi6421_regulator_info *info = rdev_get_drvdata(rdev); + struct hi6421_regulator_info *info; unsigned int reg_val; + info = container_of(rdev->desc, struct hi6421_regulator_info, desc); regmap_read(rdev->regmap, rdev->desc->enable_reg, ®_val); if (reg_val & info->mode_mask) return REGULATOR_MODE_IDLE; @@ -397,9 +397,10 @@ static unsigned int hi6421_regulator_ldo_get_mode(struct regulator_dev *rdev) static unsigned int hi6421_regulator_buck_get_mode(struct regulator_dev *rdev) { - struct hi6421_regulator_info *info = rdev_get_drvdata(rdev); + struct hi6421_regulator_info *info; unsigned int reg_val; + info = container_of(rdev->desc, struct hi6421_regulator_info, desc); regmap_read(rdev->regmap, rdev->desc->enable_reg, ®_val); if (reg_val & info->mode_mask) return REGULATOR_MODE_STANDBY; @@ -410,9 +411,10 @@ static unsigned int hi6421_regulator_buck_get_mode(struct regulator_dev *rdev) static int hi6421_regulator_ldo_set_mode(struct regulator_dev *rdev, unsigned int mode) { - struct hi6421_regulator_info *info = rdev_get_drvdata(rdev); + struct hi6421_regulator_info *info; unsigned int new_mode; + info = container_of(rdev->desc, struct hi6421_regulator_info, desc); switch (mode) { case REGULATOR_MODE_NORMAL: new_mode = 0; @@ -434,9 +436,10 @@ static int hi6421_regulator_ldo_set_mode(struct regulator_dev *rdev, static int hi6421_regulator_buck_set_mode(struct 
regulator_dev *rdev, unsigned int mode) { - struct hi6421_regulator_info *info = rdev_get_drvdata(rdev); + struct hi6421_regulator_info *info; unsigned int new_mode; + info = container_of(rdev->desc, struct hi6421_regulator_info, desc); switch (mode) { case REGULATOR_MODE_NORMAL: new_mode = 0; @@ -459,7 +462,9 @@ static unsigned int hi6421_regulator_ldo_get_optimum_mode(struct regulator_dev *rdev, int input_uV, int output_uV, int load_uA) { - struct hi6421_regulator_info *info = rdev_get_drvdata(rdev); + struct hi6421_regulator_info *info; + + info = container_of(rdev->desc, struct hi6421_regulator_info, desc); if (load_uA > info->eco_microamp) return REGULATOR_MODE_NORMAL; @@ -543,14 +548,13 @@ static int hi6421_regulator_probe(struct platform_device *pdev) if (!pdata) return -ENOMEM; mutex_init(&pdata->lock); - platform_set_drvdata(pdev, pdata); for (i = 0; i < ARRAY_SIZE(hi6421_regulator_info); i++) { /* assign per-regulator data */ info = &hi6421_regulator_info[i]; config.dev = pdev->dev.parent; - config.driver_data = info; + config.driver_data = pdata; config.regmap = pmic->regmap; rdev = devm_regulator_register(&pdev->dev, &info->desc, diff --git a/drivers/regulator/hi6421v600-regulator.c b/drivers/regulator/hi6421v600-regulator.c index 9b162c0555c3..845bc3b4026d 100644 --- a/drivers/regulator/hi6421v600-regulator.c +++ b/drivers/regulator/hi6421v600-regulator.c @@ -98,10 +98,9 @@ static const unsigned int ldo34_voltages[] = { static int hi6421_spmi_regulator_enable(struct regulator_dev *rdev) { - struct hi6421_spmi_reg_priv *priv; + struct hi6421_spmi_reg_priv *priv = rdev_get_drvdata(rdev); int ret; - priv = dev_get_drvdata(rdev->dev.parent); /* cannot enable more than one regulator at one time */ mutex_lock(&priv->enable_mutex); @@ -119,9 +118,10 @@ static int hi6421_spmi_regulator_enable(struct regulator_dev *rdev) static unsigned int hi6421_spmi_regulator_get_mode(struct regulator_dev *rdev) { - struct hi6421_spmi_reg_info *sreg = rdev_get_drvdata(rdev); + struct hi6421_spmi_reg_info *sreg; unsigned int reg_val; + sreg = container_of(rdev->desc, struct hi6421_spmi_reg_info, desc); regmap_read(rdev->regmap, rdev->desc->enable_reg, ®_val); if (reg_val & sreg->eco_mode_mask) @@ -133,9 +133,10 @@ static unsigned int hi6421_spmi_regulator_get_mode(struct regulator_dev *rdev) static int hi6421_spmi_regulator_set_mode(struct regulator_dev *rdev, unsigned int mode) { - struct hi6421_spmi_reg_info *sreg = rdev_get_drvdata(rdev); + struct hi6421_spmi_reg_info *sreg; unsigned int val; + sreg = container_of(rdev->desc, struct hi6421_spmi_reg_info, desc); switch (mode) { case REGULATOR_MODE_NORMAL: val = 0; @@ -159,7 +160,9 @@ hi6421_spmi_regulator_get_optimum_mode(struct regulator_dev *rdev, int input_uV, int output_uV, int load_uA) { - struct hi6421_spmi_reg_info *sreg = rdev_get_drvdata(rdev); + struct hi6421_spmi_reg_info *sreg; + + sreg = container_of(rdev->desc, struct hi6421_spmi_reg_info, desc); if (!sreg->eco_uA || ((unsigned int)load_uA > sreg->eco_uA)) return REGULATOR_MODE_NORMAL; @@ -252,13 +255,12 @@ static int hi6421_spmi_regulator_probe(struct platform_device *pdev) return -ENOMEM; mutex_init(&priv->enable_mutex); - platform_set_drvdata(pdev, priv); for (i = 0; i < ARRAY_SIZE(regulator_info); i++) { info = ®ulator_info[i]; config.dev = pdev->dev.parent; - config.driver_data = info; + config.driver_data = priv; config.regmap = pmic->regmap; rdev = devm_regulator_register(dev, &info->desc, &config); diff --git a/drivers/regulator/mtk-dvfsrc-regulator.c 
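A side note on the hi6421 changes above: with driver_data now carrying the shared pdata/priv structure, the regulator ops recover their per-regulator info via container_of() on the regulator_desc embedded in each info structure. A small self-contained sketch of that recovery pattern, with invented structure names:

/* container_of() recovery as used above: given a pointer to an embedded
 * member (the desc), step back to the structure that contains it.  The
 * structure names here are invented; only the pattern matches the driver.
 */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct regulator_desc {
	const char *name;
};

struct my_regulator_info {
	struct regulator_desc desc;	/* embedded member */
	unsigned int eco_microamp;	/* per-regulator data */
};

static struct my_regulator_info ldo1_info = {
	.desc		= { .name = "ldo1" },
	.eco_microamp	= 10000,
};

int main(void)
{
	/* The framework typically hands back only the desc pointer. */
	struct regulator_desc *desc = &ldo1_info.desc;
	struct my_regulator_info *info =
		container_of(desc, struct my_regulator_info, desc);

	printf("%s: eco mode limit %u uA\n", info->desc.name, info->eco_microamp);
	return 0;
}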
b/drivers/regulator/mtk-dvfsrc-regulator.c index d3d876198d6e..234af3a66c77 100644 --- a/drivers/regulator/mtk-dvfsrc-regulator.c +++ b/drivers/regulator/mtk-dvfsrc-regulator.c @@ -179,8 +179,7 @@ static int dvfsrc_vcore_regulator_probe(struct platform_device *pdev) for (i = 0; i < regulator_init_data->size; i++) { config.dev = dev->parent; config.driver_data = (mt_regulators + i); - rdev = devm_regulator_register(dev->parent, - &(mt_regulators + i)->desc, + rdev = devm_regulator_register(dev, &(mt_regulators + i)->desc, &config); if (IS_ERR(rdev)) { dev_err(dev, "failed to register %s\n", diff --git a/drivers/regulator/rtmv20-regulator.c b/drivers/regulator/rtmv20-regulator.c index 4bca64de0f67..2ee334174e2b 100644 --- a/drivers/regulator/rtmv20-regulator.c +++ b/drivers/regulator/rtmv20-regulator.c @@ -37,7 +37,7 @@ #define RTMV20_WIDTH2_MASK GENMASK(7, 0) #define RTMV20_LBPLVL_MASK GENMASK(3, 0) #define RTMV20_LBPEN_MASK BIT(7) -#define RTMV20_STROBEPOL_MASK BIT(1) +#define RTMV20_STROBEPOL_MASK BIT(0) #define RTMV20_VSYNPOL_MASK BIT(1) #define RTMV20_FSINEN_MASK BIT(7) #define RTMV20_ESEN_MASK BIT(6) diff --git a/drivers/s390/char/tape_char.c b/drivers/s390/char/tape_char.c index 8abb42923307..cc8237afeffa 100644 --- a/drivers/s390/char/tape_char.c +++ b/drivers/s390/char/tape_char.c @@ -371,8 +371,6 @@ __tapechar_ioctl(struct tape_device *device, case MTSEEK: if (device->required_tapemarks) tape_std_terminate_write(device); - default: - ; } rc = tape_mtop(device, op.mt_op, op.mt_count); diff --git a/drivers/s390/net/ctcm_fsms.c b/drivers/s390/net/ctcm_fsms.c index b341075397d9..377e3689d1d4 100644 --- a/drivers/s390/net/ctcm_fsms.c +++ b/drivers/s390/net/ctcm_fsms.c @@ -1454,6 +1454,7 @@ again: get_ccwdev_lock(ch->cdev), saveflags); if (rc != 0) ctcm_ccw_check_rc(ch, rc, "normal RX"); + break; default: break; } diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c index 959ba62ccbb7..7cc59f4f046c 100644 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c @@ -434,6 +434,7 @@ static int qeth_l3_correct_routing_type(struct qeth_card *card, if (qeth_is_ipafunc_supported(card, prot, IPA_OSA_MC_ROUTER)) return 0; + goto out_inval; default: goto out_inval; } diff --git a/drivers/s390/scsi/zfcp_sysfs.c b/drivers/s390/scsi/zfcp_sysfs.c index 544efd4c42f0..b8cd75a872ee 100644 --- a/drivers/s390/scsi/zfcp_sysfs.c +++ b/drivers/s390/scsi/zfcp_sysfs.c @@ -487,6 +487,7 @@ static ssize_t zfcp_sysfs_port_fc_security_show(struct device *dev, if (0 == (status & ZFCP_STATUS_COMMON_OPEN) || 0 == (status & ZFCP_STATUS_COMMON_UNBLOCKED) || 0 == (status & ZFCP_STATUS_PORT_PHYS_OPEN) || + 0 != (status & ZFCP_STATUS_PORT_LINK_TEST) || 0 != (status & ZFCP_STATUS_COMMON_ERP_FAILED) || 0 != (status & ZFCP_STATUS_COMMON_ACCESS_BOXED)) i = sprintf(buf, "unknown\n"); diff --git a/drivers/scsi/arm/fas216.c b/drivers/scsi/arm/fas216.c index 30ed3d23635a..6baa9b36367d 100644 --- a/drivers/scsi/arm/fas216.c +++ b/drivers/scsi/arm/fas216.c @@ -2010,7 +2010,7 @@ static void fas216_rq_sns_done(FAS216_Info *info, struct scsi_cmnd *SCpnt, "request sense complete, result=0x%04x%02x%02x", result, SCpnt->SCp.Message, SCpnt->SCp.Status); - if (result != DID_OK || SCpnt->SCp.Status != GOOD) + if (result != DID_OK || SCpnt->SCp.Status != SAM_STAT_GOOD) /* * Something went wrong. 
Make sure that we don't * have valid data in the sense buffer that could diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c index 929a3b043ad7..3f6f14f0cafb 100644 --- a/drivers/scsi/hosts.c +++ b/drivers/scsi/hosts.c @@ -488,6 +488,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize) shost_printk(KERN_WARNING, shost, "error handler thread failed to spawn, error = %ld\n", PTR_ERR(shost->ehandler)); + shost->ehandler = NULL; goto fail; } diff --git a/drivers/scsi/libsas/sas_discover.c b/drivers/scsi/libsas/sas_discover.c index 9f5068f3bcfb..dd205414e505 100644 --- a/drivers/scsi/libsas/sas_discover.c +++ b/drivers/scsi/libsas/sas_discover.c @@ -461,7 +461,7 @@ static void sas_discover_domain(struct work_struct *work) break; #else pr_notice("ATA device seen but CONFIG_SCSI_SAS_ATA=N so cannot attach\n"); - /* Fall through */ + fallthrough; #endif /* Fall through - only for the #else condition above. */ default: diff --git a/drivers/scsi/mpi3mr/mpi3mr_fw.c b/drivers/scsi/mpi3mr/mpi3mr_fw.c index 9eceafca59bc..2dba2b0af166 100644 --- a/drivers/scsi/mpi3mr/mpi3mr_fw.c +++ b/drivers/scsi/mpi3mr/mpi3mr_fw.c @@ -2607,14 +2607,13 @@ static int mpi3mr_issue_iocinit(struct mpi3mr_ioc *mrioc) goto out; } drv_info->information_length = cpu_to_le32(data_len); - strncpy(drv_info->driver_signature, "Broadcom", sizeof(drv_info->driver_signature)); - strncpy(drv_info->os_name, utsname()->sysname, sizeof(drv_info->os_name)); - drv_info->os_name[sizeof(drv_info->os_name) - 1] = 0; - strncpy(drv_info->os_version, utsname()->release, sizeof(drv_info->os_version)); - drv_info->os_version[sizeof(drv_info->os_version) - 1] = 0; - strncpy(drv_info->driver_name, MPI3MR_DRIVER_NAME, sizeof(drv_info->driver_name)); - strncpy(drv_info->driver_version, MPI3MR_DRIVER_VERSION, sizeof(drv_info->driver_version)); - strncpy(drv_info->driver_release_date, MPI3MR_DRIVER_RELDATE, sizeof(drv_info->driver_release_date)); + strscpy(drv_info->driver_signature, "Broadcom", sizeof(drv_info->driver_signature)); + strscpy(drv_info->os_name, utsname()->sysname, sizeof(drv_info->os_name)); + strscpy(drv_info->os_version, utsname()->release, sizeof(drv_info->os_version)); + strscpy(drv_info->driver_name, MPI3MR_DRIVER_NAME, sizeof(drv_info->driver_name)); + strscpy(drv_info->driver_version, MPI3MR_DRIVER_VERSION, sizeof(drv_info->driver_version)); + strscpy(drv_info->driver_release_date, MPI3MR_DRIVER_RELDATE, + sizeof(drv_info->driver_release_date)); drv_info->driver_capabilities = 0; memcpy((u8 *)&mrioc->driver_info, (u8 *)drv_info, sizeof(mrioc->driver_info)); diff --git a/drivers/scsi/pm8001/pm8001_ctl.c b/drivers/scsi/pm8001/pm8001_ctl.c index 0b8802beb7ce..ec05c42e8ee6 100644 --- a/drivers/scsi/pm8001/pm8001_ctl.c +++ b/drivers/scsi/pm8001/pm8001_ctl.c @@ -77,7 +77,7 @@ DEVICE_ATTR(interface_rev, S_IRUGO, pm8001_ctl_mpi_interface_rev_show, NULL); * @attr: device attribute (unused) * @buf: the buffer returned * - * A sysfs 'read only' shost attribute. + * A sysfs 'read-only' shost attribute. 
*/ static ssize_t controller_fatal_error_show(struct device *cdev, struct device_attribute *attr, char *buf) @@ -149,7 +149,7 @@ static ssize_t pm8001_ctl_ila_version_show(struct device *cdev, static DEVICE_ATTR(ila_version, 0444, pm8001_ctl_ila_version_show, NULL); /** - * pm8001_ctl_inactive_fw_version_show - Inacative firmware version number + * pm8001_ctl_inactive_fw_version_show - Inactive firmware version number * @cdev: pointer to embedded class device * @attr: device attribute (unused) * @buf: the buffer returned @@ -396,6 +396,7 @@ static DEVICE_ATTR(aap_log, S_IRUGO, pm8001_ctl_aap_log_show, NULL); * @cdev:pointer to embedded class device * @attr: device attribute (unused) * @buf: the buffer returned + * * A sysfs 'read-only' shost attribute. */ static ssize_t pm8001_ctl_ib_queue_log_show(struct device *cdev, @@ -430,6 +431,7 @@ static DEVICE_ATTR(ib_log, S_IRUGO, pm8001_ctl_ib_queue_log_show, NULL); * @cdev:pointer to embedded class device * @attr: device attribute (unused) * @buf: the buffer returned + * * A sysfs 'read-only' shost attribute. */ @@ -464,6 +466,7 @@ static DEVICE_ATTR(ob_log, S_IRUGO, pm8001_ctl_ob_queue_log_show, NULL); * @cdev:pointer to embedded class device * @attr: device attribute (unused) * @buf:the buffer returned + * * A sysfs 'read-only' shost attribute. */ static ssize_t pm8001_ctl_bios_version_show(struct device *cdev, @@ -555,13 +558,13 @@ static ssize_t pm8001_ctl_iop_log_show(struct device *cdev, static DEVICE_ATTR(iop_log, S_IRUGO, pm8001_ctl_iop_log_show, NULL); /** - ** pm8001_ctl_fatal_log_show - fatal error logging - ** @cdev:pointer to embedded class device - ** @attr: device attribute - ** @buf: the buffer returned - ** - ** A sysfs 'read-only' shost attribute. - **/ + * pm8001_ctl_fatal_log_show - fatal error logging + * @cdev:pointer to embedded class device + * @attr: device attribute + * @buf: the buffer returned + * + * A sysfs 'read-only' shost attribute. + */ static ssize_t pm8001_ctl_fatal_log_show(struct device *cdev, struct device_attribute *attr, char *buf) @@ -575,13 +578,13 @@ static ssize_t pm8001_ctl_fatal_log_show(struct device *cdev, static DEVICE_ATTR(fatal_log, S_IRUGO, pm8001_ctl_fatal_log_show, NULL); /** - ** non_fatal_log_show - non fatal error logging - ** @cdev:pointer to embedded class device - ** @attr: device attribute - ** @buf: the buffer returned - ** - ** A sysfs 'read-only' shost attribute. - **/ + * non_fatal_log_show - non fatal error logging + * @cdev:pointer to embedded class device + * @attr: device attribute + * @buf: the buffer returned + * + * A sysfs 'read-only' shost attribute. + */ static ssize_t non_fatal_log_show(struct device *cdev, struct device_attribute *attr, char *buf) { @@ -620,12 +623,13 @@ static ssize_t non_fatal_count_store(struct device *cdev, static DEVICE_ATTR_RW(non_fatal_count); /** - ** pm8001_ctl_gsm_log_show - gsm dump collection - ** @cdev:pointer to embedded class device - ** @attr: device attribute (unused) - ** @buf: the buffer returned - ** A sysfs 'read-only' shost attribute. - **/ + * pm8001_ctl_gsm_log_show - gsm dump collection + * @cdev:pointer to embedded class device + * @attr: device attribute (unused) + * @buf: the buffer returned + * + * A sysfs 'read-only' shost attribute. 
+ */ static ssize_t pm8001_ctl_gsm_log_show(struct device *cdev, struct device_attribute *attr, char *buf) { diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c index 33f8217577b1..17c0f26e683a 100644 --- a/drivers/scsi/pm8001/pm8001_hwi.c +++ b/drivers/scsi/pm8001/pm8001_hwi.c @@ -384,7 +384,7 @@ static void update_outbnd_queue_table(struct pm8001_hba_info *pm8001_ha, /** * pm8001_bar4_shift - function is called to shift BAR base address - * @pm8001_ha : our hba card infomation + * @pm8001_ha : our hba card information * @shiftValue : shifting value in memory bar. */ int pm8001_bar4_shift(struct pm8001_hba_info *pm8001_ha, u32 shiftValue) @@ -1151,7 +1151,7 @@ static void pm8001_hw_chip_rst(struct pm8001_hba_info *pm8001_ha) } /** - * pm8001_chip_iounmap - which maped when initialized. + * pm8001_chip_iounmap - which mapped when initialized. * @pm8001_ha: our hba card information */ void pm8001_chip_iounmap(struct pm8001_hba_info *pm8001_ha) @@ -1187,10 +1187,10 @@ pm8001_chip_intx_interrupt_enable(struct pm8001_hba_info *pm8001_ha) pm8001_cw32(pm8001_ha, 0, MSGU_ODCR, ODCR_CLEAR_ALL); } - /** - * pm8001_chip_intx_interrupt_disable- disable PM8001 chip interrupt - * @pm8001_ha: our hba card information - */ +/** + * pm8001_chip_intx_interrupt_disable - disable PM8001 chip interrupt + * @pm8001_ha: our hba card information + */ static void pm8001_chip_intx_interrupt_disable(struct pm8001_hba_info *pm8001_ha) { @@ -1876,8 +1876,8 @@ static void pm8001_send_read_log(struct pm8001_hba_info *pm8001_ha, * @piomb: the message contents of this outbound message. * * When FW has completed a ssp request for example a IO request, after it has - * filled the SG data with the data, it will trigger this event represent - * that he has finished the job,please check the coresponding buffer. + * filled the SG data with the data, it will trigger this event representing + * that he has finished the job; please check the corresponding buffer. * So we will tell the caller who maybe waiting the result to tell upper layer * that the task has been finished. */ @@ -3522,7 +3522,7 @@ hw_event_phy_down(struct pm8001_hba_info *pm8001_ha, void *piomb) * * when sas layer find a device it will notify LLDD, then the driver register * the domain device to FW, this event is the return device ID which the FW - * has assigned, from now,inter-communication with FW is no longer using the + * has assigned, from now, inter-communication with FW is no longer using the * SAS address, use device ID which FW assigned. */ int pm8001_mpi_reg_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c index 313248c7bab9..47db7e0beae6 100644 --- a/drivers/scsi/pm8001/pm8001_init.c +++ b/drivers/scsi/pm8001/pm8001_init.c @@ -233,7 +233,7 @@ static irqreturn_t pm8001_interrupt_handler_msix(int irq, void *opaque) /** * pm8001_interrupt_handler_intx - main INTx interrupt handler. * @irq: interrupt number - * @dev_id: sas_ha structure. The HBA is retrieved from sas_has structure. + * @dev_id: sas_ha structure. The HBA is retrieved from sas_ha structure. */ static irqreturn_t pm8001_interrupt_handler_intx(int irq, void *dev_id) @@ -439,9 +439,9 @@ err_out: } /** - * pm8001_ioremap - remap the pci high physical address to kernal virtual + * pm8001_ioremap - remap the pci high physical address to kernel virtual * address so that we can access them. - * @pm8001_ha:our hba structure. + * @pm8001_ha: our hba structure. 
*/ static int pm8001_ioremap(struct pm8001_hba_info *pm8001_ha) { @@ -652,7 +652,7 @@ static void pm8001_post_sas_ha_init(struct Scsi_Host *shost, * pm8001_init_sas_add - initialize sas address * @pm8001_ha: our ha struct. * - * Currently we just set the fixed SAS address to our HBA,for manufacture, + * Currently we just set the fixed SAS address to our HBA, for manufacture, * it should read from the EEPROM */ static void pm8001_init_sas_add(struct pm8001_hba_info *pm8001_ha) @@ -790,7 +790,7 @@ struct pm8001_mpi3_phy_pg_trx_config { }; /** - * pm8001_get_internal_phy_settings : Retrieves the internal PHY settings + * pm8001_get_internal_phy_settings - Retrieves the internal PHY settings * @pm8001_ha : our adapter * @phycfg : PHY config page to populate */ @@ -810,7 +810,7 @@ void pm8001_get_internal_phy_settings(struct pm8001_hba_info *pm8001_ha, } /** - * pm8001_get_external_phy_settings : Retrieves the external PHY settings + * pm8001_get_external_phy_settings - Retrieves the external PHY settings * @pm8001_ha : our adapter * @phycfg : PHY config page to populate */ @@ -830,7 +830,7 @@ void pm8001_get_external_phy_settings(struct pm8001_hba_info *pm8001_ha, } /** - * pm8001_get_phy_mask : Retrieves the mask that denotes if a PHY is int/ext + * pm8001_get_phy_mask - Retrieves the mask that denotes if a PHY is int/ext * @pm8001_ha : our adapter * @phymask : The PHY mask */ @@ -868,7 +868,7 @@ void pm8001_get_phy_mask(struct pm8001_hba_info *pm8001_ha, int *phymask) } /** - * pm8001_set_phy_settings_ven_117c_12G() : Configure ATTO 12Gb PHY settings + * pm8001_set_phy_settings_ven_117c_12G() - Configure ATTO 12Gb PHY settings * @pm8001_ha : our adapter */ static @@ -903,7 +903,7 @@ int pm8001_set_phy_settings_ven_117c_12G(struct pm8001_hba_info *pm8001_ha) } /** - * pm8001_configure_phy_settings : Configures PHY settings based on vendor ID. + * pm8001_configure_phy_settings - Configures PHY settings based on vendor ID. * @pm8001_ha : our hba. */ static int pm8001_configure_phy_settings(struct pm8001_hba_info *pm8001_ha) @@ -1053,8 +1053,8 @@ intx: * @ent: pci device id * * This function is the main initialization function, when register a new - * pci driver it is invoked, all struct an hardware initilization should be done - * here, also, register interrupt + * pci driver it is invoked, all struct and hardware initialization should be + * done here, also, register interrupt. */ static int pm8001_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) @@ -1172,10 +1172,11 @@ err_out_enable: return rc; } -/* +/** * pm8001_init_ccb_tag - allocate memory to CCB and tag. * @pm8001_ha: our hba card information. * @shost: scsi host which has been allocated outside. + * @pdev: pci device. */ static int pm8001_init_ccb_tag(struct pm8001_hba_info *pm8001_ha, struct Scsi_Host *shost, @@ -1270,7 +1271,7 @@ static void pm8001_pci_remove(struct pci_dev *pdev) * pm8001_pci_suspend - power management suspend main entry point * @dev: Device struct * - * Returns 0 success, anything else error. + * Return: 0 on success, anything else on error. */ static int __maybe_unused pm8001_pci_suspend(struct device *dev) { @@ -1315,7 +1316,7 @@ static int __maybe_unused pm8001_pci_suspend(struct device *dev) * pm8001_pci_resume - power management resume main entry point * @dev: Device struct * - * Returns 0 success, anything else error. + * Return: 0 on success, anything else on error. 
*/ static int __maybe_unused pm8001_pci_resume(struct device *dev) { diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c index 6f33d821e545..48548a95327b 100644 --- a/drivers/scsi/pm8001/pm8001_sas.c +++ b/drivers/scsi/pm8001/pm8001_sas.c @@ -98,14 +98,16 @@ void pm8001_tag_init(struct pm8001_hba_info *pm8001_ha) pm8001_tag_free(pm8001_ha, i); } - /** - * pm8001_mem_alloc - allocate memory for pm8001. - * @pdev: pci device. - * @virt_addr: the allocated virtual address - * @pphys_addr_hi: the physical address high byte address. - * @pphys_addr_lo: the physical address low byte address. - * @mem_size: memory size. - */ +/** + * pm8001_mem_alloc - allocate memory for pm8001. + * @pdev: pci device. + * @virt_addr: the allocated virtual address + * @pphys_addr: DMA address for this device + * @pphys_addr_hi: the physical address high byte address. + * @pphys_addr_lo: the physical address low byte address. + * @mem_size: memory size. + * @align: requested byte alignment + */ int pm8001_mem_alloc(struct pci_dev *pdev, void **virt_addr, dma_addr_t *pphys_addr, u32 *pphys_addr_hi, u32 *pphys_addr_lo, u32 mem_size, u32 align) @@ -339,7 +341,7 @@ static int pm8001_task_prep_ssp_tm(struct pm8001_hba_info *pm8001_ha, } /** - * pm8001_task_prep_ssp - the dispatcher function,prepare ssp data for ssp task + * pm8001_task_prep_ssp - the dispatcher function, prepare ssp data for ssp task * @pm8001_ha: our hba card information * @ccb: the ccb which attached to ssp task */ @@ -554,10 +556,10 @@ void pm8001_ccb_task_free(struct pm8001_hba_info *pm8001_ha, pm8001_tag_free(pm8001_ha, ccb_idx); } - /** - * pm8001_alloc_dev - find a empty pm8001_device - * @pm8001_ha: our hba card information - */ +/** + * pm8001_alloc_dev - find a empty pm8001_device + * @pm8001_ha: our hba card information + */ static struct pm8001_device *pm8001_alloc_dev(struct pm8001_hba_info *pm8001_ha) { u32 dev; @@ -705,7 +707,7 @@ static void pm8001_tmf_timedout(struct timer_list *t) * @parameter: ssp task parameter. * * when errors or exception happened, we may want to do something, for example - * abort the issued task which result in this execption, it is done by calling + * abort the issued task which result in this exception, it is done by calling * this function, note it is also with the task execute interface. */ static int pm8001_exec_internal_tmf_task(struct domain_device *dev, @@ -984,11 +986,12 @@ void pm8001_open_reject_retry( } /** - * pm8001_I_T_nexus_reset() - * Standard mandates link reset for ATA (type 0) and hard reset for - * SSP (type 1) , only for RECOVERY - * @dev: the device structure for the device to reset. - */ + * pm8001_I_T_nexus_reset() - reset the initiator/target connection + * @dev: the device structure for the device to reset. 
+ * + * Standard mandates link reset for ATA (type 0) and hard reset for + * SSP (type 1), only for RECOVERY + */ int pm8001_I_T_nexus_reset(struct domain_device *dev) { int rc = TMF_RESP_FUNC_FAILED; diff --git a/drivers/scsi/pm8001/pm80xx_hwi.c b/drivers/scsi/pm8001/pm80xx_hwi.c index 45ecd9639977..6ffe17b849ae 100644 --- a/drivers/scsi/pm8001/pm80xx_hwi.c +++ b/drivers/scsi/pm8001/pm80xx_hwi.c @@ -140,7 +140,7 @@ ssize_t pm80xx_get_fatal_dump(struct device *cdev, pm8001_ha->fatal_bar_loc = 0; } - /* Read until accum_len is retrived */ + /* Read until accum_len is retrieved */ accum_len = pm8001_mr32(fatal_table_address, MPI_FATAL_EDUMP_TABLE_ACCUM_LEN); /* Determine length of data between previously stored transfer length @@ -1011,7 +1011,7 @@ static int mpi_init_check(struct pm8001_hba_info *pm8001_ha) value); return -EBUSY; } - /* check the MPI-State for initialization upto 100ms*/ + /* check the MPI-State for initialization up to 100ms*/ max_wait_count = 5;/* 100 msec */ do { msleep(FW_READY_INTERVAL); @@ -1093,7 +1093,7 @@ static int init_pci_device_addresses(struct pm8001_hba_info *pm8001_ha) value = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_0); - /** + /* * lower 26 bits of SCRATCHPAD0 register describes offset within the * PCIe BAR where the MPI configuration table is present */ @@ -1101,7 +1101,7 @@ static int init_pci_device_addresses(struct pm8001_hba_info *pm8001_ha) pm8001_dbg(pm8001_ha, DEV, "Scratchpad 0 Offset: 0x%x value 0x%x\n", offset, value); - /** + /* * Upper 6 bits describe the offset within PCI config space where BAR * is located. */ @@ -1109,7 +1109,7 @@ static int init_pci_device_addresses(struct pm8001_hba_info *pm8001_ha) pcibar = get_pci_bar_index(pcilogic); pm8001_dbg(pm8001_ha, INIT, "Scratchpad 0 PCI BAR: %d\n", pcibar); - /** + /* * Make sure the offset falls inside the ioremapped PCI BAR */ if (offset > pm8001_ha->io_mem[pcibar].memsize) { @@ -1121,7 +1121,7 @@ static int init_pci_device_addresses(struct pm8001_hba_info *pm8001_ha) pm8001_ha->main_cfg_tbl_addr = base_addr = pm8001_ha->io_mem[pcibar].memvirtaddr + offset; - /** + /* * Validate main configuration table address: first DWord should read * "PMCS" */ @@ -1385,7 +1385,7 @@ pm80xx_get_encrypt_info(struct pm8001_hba_info *pm8001_ha) } /** - * pm80xx_encrypt_update - update flash with encryption informtion + * pm80xx_encrypt_update - update flash with encryption information * @pm8001_ha: our hba card information. */ static int pm80xx_encrypt_update(struct pm8001_hba_info *pm8001_ha) @@ -1422,7 +1422,7 @@ static int pm80xx_encrypt_update(struct pm8001_hba_info *pm8001_ha) } /** - * pm80xx_chip_init - the main init function that initialize whole PM8001 chip. + * pm80xx_chip_init - the main init function that initializes whole PM8001 chip. * @pm8001_ha: our hba card information */ static int pm80xx_chip_init(struct pm8001_hba_info *pm8001_ha) @@ -1541,7 +1541,7 @@ static int mpi_uninit_check(struct pm8001_hba_info *pm8001_ha) } /** - * pm80xx_fatal_errors - returns non zero *ONLY* when fatal errors + * pm80xx_fatal_errors - returns non-zero *ONLY* when fatal errors * @pm8001_ha: our hba card information * * Fatal errors are recoverable only after a host reboot. @@ -1576,8 +1576,8 @@ pm80xx_fatal_errors(struct pm8001_hba_info *pm8001_ha) } /** - * pm80xx_chip_soft_rst - soft reset the PM8001 chip, so that the clear all - * the FW register status to the originated status. + * pm80xx_chip_soft_rst - soft reset the PM8001 chip, so that all + * FW register status are reset to the originated status. 
* @pm8001_ha: our hba card information */ @@ -1895,13 +1895,13 @@ static void pm80xx_send_read_log(struct pm8001_hba_info *pm8001_ha, } /** - * mpi_ssp_completion- process the event that FW response to the SSP request. + * mpi_ssp_completion - process the event that FW response to the SSP request. * @pm8001_ha: our hba card information * @piomb: the message contents of this outbound message. * * When FW has completed a ssp request for example a IO request, after it has - * filled the SG data with the data, it will trigger this event represent - * that he has finished the job,please check the coresponding buffer. + * filled the SG data with the data, it will trigger this event representing + * that he has finished the job; please check the corresponding buffer. * So we will tell the caller who maybe waiting the result to tell upper layer * that the task has been finished. */ @@ -3217,7 +3217,7 @@ mpi_smp_completion(struct pm8001_hba_info *pm8001_ha, void *piomb) } /** - * pm80xx_hw_event_ack_req- For PM8001,some events need to acknowage to FW. + * pm80xx_hw_event_ack_req- For PM8001, some events need to acknowledge to FW. * @pm8001_ha: our hba card information * @Qnum: the outbound queue message number. * @SEA: source of event to ack @@ -3275,7 +3275,7 @@ static void hw_event_port_recover(struct pm8001_hba_info *pm8001_ha, } /** - * hw_event_sas_phy_up -FW tells me a SAS phy up event. + * hw_event_sas_phy_up - FW tells me a SAS phy up event. * @pm8001_ha: our hba card information * @piomb: IO message buffer */ @@ -3353,7 +3353,7 @@ hw_event_sas_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb) } /** - * hw_event_sata_phy_up -FW tells me a SATA phy up event. + * hw_event_sata_phy_up - FW tells me a SATA phy up event. * @pm8001_ha: our hba card information * @piomb: IO message buffer */ @@ -3400,7 +3400,7 @@ hw_event_sata_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb) } /** - * hw_event_phy_down -we should notify the libsas the phy is down. + * hw_event_phy_down - we should notify the libsas the phy is down. * @pm8001_ha: our hba card information * @piomb: IO message buffer */ @@ -3500,7 +3500,7 @@ static int mpi_phy_start_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) } /** - * mpi_thermal_hw_event -The hw event has come. + * mpi_thermal_hw_event - a thermal hw event has come. * @pm8001_ha: our hba card information * @piomb: IO message buffer */ @@ -3530,7 +3530,7 @@ static int mpi_thermal_hw_event(struct pm8001_hba_info *pm8001_ha, void *piomb) } /** - * mpi_hw_event -The hw event has come. + * mpi_hw_event - The hw event has come. * @pm8001_ha: our hba card information * @piomb: IO message buffer */ @@ -4025,7 +4025,7 @@ static void process_one_iomb(struct pm8001_hba_info *pm8001_ha, void *piomb) case OPC_OUB_SET_DEV_INFO: pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_SET_DEV_INFO\n"); break; - /* spcv specifc commands */ + /* spcv specific commands */ case OPC_OUB_PHY_START_RESP: pm8001_dbg(pm8001_ha, MSG, "OPC_OUB_PHY_START_RESP opcode:%x\n", opc); @@ -4186,7 +4186,7 @@ static void build_smp_cmd(u32 deviceID, __le32 hTag, } /** - * pm80xx_chip_smp_req - send a SMP task to FW + * pm80xx_chip_smp_req - send an SMP task to FW * @pm8001_ha: our hba card information. * @ccb: the ccb information this request used. */ @@ -4346,7 +4346,7 @@ static int check_enc_sat_cmd(struct sas_task *task) } /** - * pm80xx_chip_ssp_io_req - send a SSP task to FW + * pm80xx_chip_ssp_io_req - send an SSP task to FW * @pm8001_ha: our hba card information. * @ccb: the ccb information this request used. 
*/ @@ -4750,13 +4750,13 @@ pm80xx_chip_phy_start_req(struct pm8001_hba_info *pm8001_ha, u8 phy_id) payload.ase_sh_lm_slr_phyid = cpu_to_le32(SPINHOLD_DISABLE | LINKMODE_AUTO | pm8001_ha->link_rate | phy_id); /* SSC Disable and SAS Analog ST configuration */ - /** + /* payload.ase_sh_lm_slr_phyid = cpu_to_le32(SSC_DISABLE_30 | SAS_ASE | SPINHOLD_DISABLE | LINKMODE_AUTO | LINKRATE_15 | LINKRATE_30 | LINKRATE_60 | phy_id); Have to add "SAS PHY Analog Setup SPASTI 1 Byte" Based on need - **/ + */ payload.sas_identify.dev_type = SAS_END_DEVICE; payload.sas_identify.initiator_bits = SAS_PROTOCOL_ALL; diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index 8f9727e525aa..7456a26aef51 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -194,7 +194,7 @@ void scsi_queue_insert(struct scsi_cmnd *cmd, int reason) * @bufflen: len of buffer * @sense: optional sense buffer * @sshdr: optional decoded sense header - * @timeout: request timeout in seconds + * @timeout: request timeout in HZ * @retries: number of times to retry request * @flags: flags for ->cmd_flags * @rq_flags: flags for ->rq_flags diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h index c98d540ac044..194755c9ddfe 100644 --- a/drivers/scsi/ufs/ufshcd.h +++ b/drivers/scsi/ufs/ufshcd.h @@ -1229,8 +1229,13 @@ static inline int ufshcd_vops_pwr_change_notify(struct ufs_hba *hba, static inline void ufshcd_vops_setup_xfer_req(struct ufs_hba *hba, int tag, bool is_scsi_cmd) { - if (hba->vops && hba->vops->setup_xfer_req) - return hba->vops->setup_xfer_req(hba, tag, is_scsi_cmd); + if (hba->vops && hba->vops->setup_xfer_req) { + unsigned long flags; + + spin_lock_irqsave(hba->host->host_lock, flags); + hba->vops->setup_xfer_req(hba, tag, is_scsi_cmd); + spin_unlock_irqrestore(hba->host->host_lock, flags); + } } static inline void ufshcd_vops_setup_task_mgmt(struct ufs_hba *hba, diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c index 2ef74885ffa2..788dcdf25f00 100644 --- a/drivers/spi/spi-atmel.c +++ b/drivers/spi/spi-atmel.c @@ -352,8 +352,6 @@ static void cs_activate(struct atmel_spi *as, struct spi_device *spi) } mr = spi_readl(as, MR); - if (spi->cs_gpiod) - gpiod_set_value(spi->cs_gpiod, 1); } else { u32 cpol = (spi->mode & SPI_CPOL) ? 
			  SPI_BIT(CPOL) : 0;
 		int i;
@@ -369,8 +367,6 @@ static void cs_activate(struct atmel_spi *as, struct spi_device *spi)
 		mr = spi_readl(as, MR);
 		mr = SPI_BFINS(PCS, ~(1 << chip_select), mr);
-		if (spi->cs_gpiod)
-			gpiod_set_value(spi->cs_gpiod, 1);
 		spi_writel(as, MR, mr);
 	}
@@ -400,8 +396,6 @@ static void cs_deactivate(struct atmel_spi *as, struct spi_device *spi)
 	if (!spi->cs_gpiod)
 		spi_writel(as, CR, SPI_BIT(LASTXFER));
-	else
-		gpiod_set_value(spi->cs_gpiod, 0);
 }
 
 static void atmel_spi_lock(struct atmel_spi *as) __acquires(&as->lock)
@@ -1483,7 +1477,8 @@ static int atmel_spi_probe(struct platform_device *pdev)
 	master->bus_num = pdev->id;
 	master->num_chipselect = 4;
 	master->setup = atmel_spi_setup;
-	master->flags = (SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX);
+	master->flags = (SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX |
+			 SPI_MASTER_GPIO_SS);
 	master->transfer_one = atmel_spi_one_transfer;
 	master->set_cs = atmel_spi_set_cs;
 	master->cleanup = atmel_spi_cleanup;
diff --git a/drivers/spi/spi-bcm2835.c b/drivers/spi/spi-bcm2835.c
index 5f8771fe1a31..775c0bf2f923 100644
--- a/drivers/spi/spi-bcm2835.c
+++ b/drivers/spi/spi-bcm2835.c
@@ -83,6 +83,7 @@ MODULE_PARM_DESC(polling_limit_us,
  * struct bcm2835_spi - BCM2835 SPI controller
  * @regs: base address of register map
  * @clk: core clock, divided to calculate serial clock
+ * @clk_hz: core clock cached speed
  * @irq: interrupt, signals TX FIFO empty or RX FIFO ¾ full
  * @tfr: SPI transfer currently processed
  * @ctlr: SPI controller reverse lookup
@@ -116,6 +117,7 @@ MODULE_PARM_DESC(polling_limit_us,
 struct bcm2835_spi {
 	void __iomem *regs;
 	struct clk *clk;
+	unsigned long clk_hz;
 	int irq;
 	struct spi_transfer *tfr;
 	struct spi_controller *ctlr;
@@ -1045,19 +1047,18 @@ static int bcm2835_spi_transfer_one(struct spi_controller *ctlr,
 {
 	struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
 	struct bcm2835_spidev *slv = spi_get_ctldata(spi);
-	unsigned long spi_hz, clk_hz, cdiv;
+	unsigned long spi_hz, cdiv;
 	unsigned long hz_per_byte, byte_limit;
 	u32 cs = slv->prepare_cs;
 	/* set clock */
 	spi_hz = tfr->speed_hz;
-	clk_hz = clk_get_rate(bs->clk);
-	if (spi_hz >= clk_hz / 2) {
+	if (spi_hz >= bs->clk_hz / 2) {
 		cdiv = 2; /* clk_hz/2 is the fastest we can go */
 	} else if (spi_hz) {
 		/* CDIV must be a multiple of two */
-		cdiv = DIV_ROUND_UP(clk_hz, spi_hz);
+		cdiv = DIV_ROUND_UP(bs->clk_hz, spi_hz);
 		cdiv += (cdiv % 2);
 		if (cdiv >= 65536)
@@ -1065,7 +1066,7 @@ static int bcm2835_spi_transfer_one(struct spi_controller *ctlr,
 	} else {
 		cdiv = 0; /* 0 is the slowest we can go */
 	}
-	tfr->effective_speed_hz = cdiv ? (clk_hz / cdiv) : (clk_hz / 65536);
+	tfr->effective_speed_hz = cdiv ? (bs->clk_hz / cdiv) : (bs->clk_hz / 65536);
 	bcm2835_wr(bs, BCM2835_SPI_CLK, cdiv);
 	/* handle all the 3-wire mode */
@@ -1354,6 +1355,7 @@ static int bcm2835_spi_probe(struct platform_device *pdev)
 		return bs->irq ? bs->irq : -ENODEV;
 	clk_prepare_enable(bs->clk);
+	bs->clk_hz = clk_get_rate(bs->clk);
 	err = bcm2835_dma_init(ctlr, &pdev->dev, bs);
 	if (err)
diff --git a/drivers/spi/spi-cadence-quadspi.c b/drivers/spi/spi-cadence-quadspi.c
index 7a00346ff9b9..a2de23516553 100644
--- a/drivers/spi/spi-cadence-quadspi.c
+++ b/drivers/spi/spi-cadence-quadspi.c
@@ -309,6 +309,9 @@ static unsigned int cqspi_calc_dummy(const struct spi_mem_op *op, bool dtr)
 {
 	unsigned int dummy_clk;
+	if (!op->dummy.nbytes)
+		return 0;
+
 	dummy_clk = op->dummy.nbytes * (8 / op->dummy.buswidth);
 	if (dtr)
 		dummy_clk /= 2;
@@ -797,19 +800,20 @@ static int cqspi_write_setup(struct cqspi_flash_pdata *f_pdata,
 	reg = cqspi_calc_rdreg(f_pdata);
 	writel(reg, reg_base + CQSPI_REG_RD_INSTR);
-	if (f_pdata->dtr) {
-		/*
-		 * Some flashes like the cypress Semper flash expect a 4-byte
-		 * dummy address with the Read SR command in DTR mode, but this
-		 * controller does not support sending address with the Read SR
-		 * command. So, disable write completion polling on the
-		 * controller's side. spi-nor will take care of polling the
-		 * status register.
-		 */
-		reg = readl(reg_base + CQSPI_REG_WR_COMPLETION_CTRL);
-		reg |= CQSPI_REG_WR_DISABLE_AUTO_POLL;
-		writel(reg, reg_base + CQSPI_REG_WR_COMPLETION_CTRL);
-	}
+	/*
+	 * SPI NAND flashes require the address of the status register to be
+	 * passed in the Read SR command. Also, some SPI NOR flashes like the
+	 * cypress Semper flash expect a 4-byte dummy address in the Read SR
+	 * command in DTR mode.
+	 *
+	 * But this controller does not support address phase in the Read SR
+	 * command when doing auto-HW polling. So, disable write completion
+	 * polling on the controller's side. spinand and spi-nor will take
+	 * care of polling the status register.
+	 */
+	reg = readl(reg_base + CQSPI_REG_WR_COMPLETION_CTRL);
+	reg |= CQSPI_REG_WR_DISABLE_AUTO_POLL;
+	writel(reg, reg_base + CQSPI_REG_WR_COMPLETION_CTRL);
 	reg = readl(reg_base + CQSPI_REG_SIZE);
 	reg &= ~CQSPI_REG_SIZE_ADDRESS_MASK;
diff --git a/drivers/spi/spi-cadence.c b/drivers/spi/spi-cadence.c
index a3afd1b9ac56..ceb16e70d235 100644
--- a/drivers/spi/spi-cadence.c
+++ b/drivers/spi/spi-cadence.c
@@ -517,6 +517,12 @@ static int cdns_spi_probe(struct platform_device *pdev)
 		goto clk_dis_apb;
 	}
+	pm_runtime_use_autosuspend(&pdev->dev);
+	pm_runtime_set_autosuspend_delay(&pdev->dev, SPI_AUTOSUSPEND_TIMEOUT);
+	pm_runtime_get_noresume(&pdev->dev);
+	pm_runtime_set_active(&pdev->dev);
+	pm_runtime_enable(&pdev->dev);
+
 	ret = of_property_read_u32(pdev->dev.of_node, "num-cs", &num_cs);
 	if (ret < 0)
 		master->num_chipselect = CDNS_SPI_DEFAULT_NUM_CS;
@@ -531,11 +537,6 @@ static int cdns_spi_probe(struct platform_device *pdev)
 	/* SPI controller initializations */
 	cdns_spi_init_hw(xspi);
-	pm_runtime_set_active(&pdev->dev);
-	pm_runtime_enable(&pdev->dev);
-	pm_runtime_use_autosuspend(&pdev->dev);
-	pm_runtime_set_autosuspend_delay(&pdev->dev, SPI_AUTOSUSPEND_TIMEOUT);
-
 	irq = platform_get_irq(pdev, 0);
 	if (irq <= 0) {
 		ret = -ENXIO;
@@ -566,6 +567,9 @@ static int cdns_spi_probe(struct platform_device *pdev)
 	master->bits_per_word_mask = SPI_BPW_MASK(8);
+	pm_runtime_mark_last_busy(&pdev->dev);
+	pm_runtime_put_autosuspend(&pdev->dev);
+
 	ret = spi_register_master(master);
 	if (ret) {
 		dev_err(&pdev->dev, "spi_register_master failed\n");
diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
index 39dc02e366f4..4aee3db6d6df 100644
--- a/drivers/spi/spi-imx.c
+++ b/drivers/spi/spi-imx.c
@@ -506,7 +506,7 @@ static int mx51_ecspi_prepare_message(struct spi_imx_data *spi_imx,
 {
 	struct spi_device *spi = msg->spi;
 	u32 ctrl = MX51_ECSPI_CTRL_ENABLE;
-	u32 testreg;
+	u32 testreg, delay;
 	u32 cfg = readl(spi_imx->base + MX51_ECSPI_CONFIG);
 	/* set Master or Slave mode */
@@ -567,6 +567,23 @@ static int mx51_ecspi_prepare_message(struct spi_imx_data *spi_imx,
 	writel(cfg, spi_imx->base + MX51_ECSPI_CONFIG);
+	/*
+	 * Wait until the changes in the configuration register CONFIGREG
+	 * propagate into the hardware. It takes exactly one tick of the
+	 * SCLK clock, but we will wait two SCLK clock just to be sure. The
+	 * effect of the delay it takes for the hardware to apply changes
+	 * is noticable if the SCLK clock run very slow. In such a case, if
+	 * the polarity of SCLK should be inverted, the GPIO ChipSelect might
+	 * be asserted before the SCLK polarity changes, which would disrupt
+	 * the SPI communication as the device on the other end would consider
+	 * the change of SCLK polarity as a clock tick already.
+	 */
+	delay = (2 * 1000000) / spi_imx->spi_bus_clk;
+	if (likely(delay < 10)) /* SCLK is faster than 100 kHz */
+		udelay(delay);
+	else /* SCLK is _very_ slow */
+		usleep_range(delay, delay + 10);
+
 	return 0;
 }
@@ -574,7 +591,7 @@ static int mx51_ecspi_prepare_transfer(struct spi_imx_data *spi_imx,
 				       struct spi_device *spi)
 {
 	u32 ctrl = readl(spi_imx->base + MX51_ECSPI_CTRL);
-	u32 clk, delay;
+	u32 clk;
 	/* Clear BL field and set the right value */
 	ctrl &= ~MX51_ECSPI_CTRL_BL_MASK;
@@ -596,23 +613,6 @@ static int mx51_ecspi_prepare_transfer(struct spi_imx_data *spi_imx,
 	writel(ctrl, spi_imx->base + MX51_ECSPI_CTRL);
-	/*
-	 * Wait until the changes in the configuration register CONFIGREG
-	 * propagate into the hardware. It takes exactly one tick of the
-	 * SCLK clock, but we will wait two SCLK clock just to be sure. The
-	 * effect of the delay it takes for the hardware to apply changes
-	 * is noticable if the SCLK clock run very slow. In such a case, if
-	 * the polarity of SCLK should be inverted, the GPIO ChipSelect might
-	 * be asserted before the SCLK polarity changes, which would disrupt
-	 * the SPI communication as the device on the other end would consider
-	 * the change of SCLK polarity as a clock tick already.
-	 */
-	delay = (2 * 1000000) / clk;
-	if (likely(delay < 10)) /* SCLK is faster than 100 kHz */
-		udelay(delay);
-	else /* SCLK is _very_ slow */
-		usleep_range(delay, delay + 10);
-
 	return 0;
 }
diff --git a/drivers/spi/spi-mt65xx.c b/drivers/spi/spi-mt65xx.c
index 976f73b9e299..68dca8ceb3ad 100644
--- a/drivers/spi/spi-mt65xx.c
+++ b/drivers/spi/spi-mt65xx.c
@@ -427,13 +427,23 @@ static int mtk_spi_fifo_transfer(struct spi_master *master,
 	mtk_spi_setup_packet(master);
 	cnt = xfer->len / 4;
-	iowrite32_rep(mdata->base + SPI_TX_DATA_REG, xfer->tx_buf, cnt);
+	if (xfer->tx_buf)
+		iowrite32_rep(mdata->base + SPI_TX_DATA_REG, xfer->tx_buf, cnt);
+
+	if (xfer->rx_buf)
+		ioread32_rep(mdata->base + SPI_RX_DATA_REG, xfer->rx_buf, cnt);
 	remainder = xfer->len % 4;
 	if (remainder > 0) {
 		reg_val = 0;
-		memcpy(&reg_val, xfer->tx_buf + (cnt * 4), remainder);
-		writel(reg_val, mdata->base + SPI_TX_DATA_REG);
+		if (xfer->tx_buf) {
+			memcpy(&reg_val, xfer->tx_buf + (cnt * 4), remainder);
+			writel(reg_val, mdata->base + SPI_TX_DATA_REG);
+		}
+		if (xfer->rx_buf) {
+			reg_val = readl(mdata->base + SPI_RX_DATA_REG);
+			memcpy(xfer->rx_buf + (cnt * 4), &reg_val, remainder);
+		}
 	}
 	mtk_spi_enable_transfer(master);
@@ -793,12 +803,6 @@ static int mtk_spi_probe(struct platform_device *pdev)
 	pm_runtime_enable(&pdev->dev);
-	ret = devm_spi_register_master(&pdev->dev, master);
-	if (ret) {
-		dev_err(&pdev->dev, "failed to register master (%d)\n", ret);
-		goto err_disable_runtime_pm;
-	}
-
 	if (mdata->dev_comp->need_pad_sel) {
 		if (mdata->pad_num != master->num_chipselect) {
 			dev_err(&pdev->dev,
@@ -838,6 +842,12 @@ static int mtk_spi_probe(struct platform_device *pdev)
 			dev_notice(&pdev->dev, "SPI dma_set_mask(%d) failed, ret:%d\n",
 				   addr_bits, ret);
+	ret = devm_spi_register_master(&pdev->dev, master);
+	if (ret) {
+		dev_err(&pdev->dev, "failed to register master (%d)\n", ret);
+		goto err_disable_runtime_pm;
+	}
+
 	return 0;
 err_disable_runtime_pm:
diff --git a/drivers/spi/spi-stm32.c b/drivers/spi/spi-stm32.c
index 8ffcffbb8157..05618a618939 100644
--- a/drivers/spi/spi-stm32.c
+++ b/drivers/spi/spi-stm32.c
@@ -884,15 +884,18 @@ static irqreturn_t stm32h7_spi_irq_thread(int irq, void *dev_id)
 	ier = readl_relaxed(spi->base + STM32H7_SPI_IER);
 	mask = ier;
-	/* EOTIE is triggered on EOT, SUSP and TXC events. */
+	/*
+	 * EOTIE enables irq from EOT, SUSP and TXC events. We need to set
+	 * SUSP to acknowledge it later. TXC is automatically cleared
+	 */
+	mask |= STM32H7_SPI_SR_SUSP;
 	/*
-	 * When TXTF is set, DXPIE and TXPIE are cleared. So in case of
-	 * Full-Duplex, need to poll RXP event to know if there are remaining
-	 * data, before disabling SPI.
+	 * DXPIE is set in Full-Duplex, one IT will be raised if TXP and RXP
+	 * are set. So in case of Full-Duplex, need to poll TXP and RXP event.
 	 */
-	if (spi->rx_buf && !spi->cur_usedma)
-		mask |= STM32H7_SPI_SR_RXP;
+	if ((spi->cur_comm == SPI_FULL_DUPLEX) && !spi->cur_usedma)
+		mask |= STM32H7_SPI_SR_TXP | STM32H7_SPI_SR_RXP;
 	if (!(sr & mask)) {
 		dev_warn(spi->dev, "spurious IT (sr=0x%08x, ier=0x%08x)\n",
@@ -1925,6 +1928,7 @@ static int stm32_spi_probe(struct platform_device *pdev)
 	master->can_dma = stm32_spi_can_dma;
 	pm_runtime_set_active(&pdev->dev);
+	pm_runtime_get_noresume(&pdev->dev);
 	pm_runtime_enable(&pdev->dev);
 	ret = spi_register_master(master);
@@ -1940,6 +1944,8 @@ static int stm32_spi_probe(struct platform_device *pdev)
 err_pm_disable:
 	pm_runtime_disable(&pdev->dev);
+	pm_runtime_put_noidle(&pdev->dev);
+	pm_runtime_set_suspended(&pdev->dev);
 err_dma_release:
 	if (spi->dma_tx)
 		dma_release_channel(spi->dma_tx);
@@ -1956,9 +1962,14 @@ static int stm32_spi_remove(struct platform_device *pdev)
 	struct spi_master *master = platform_get_drvdata(pdev);
 	struct stm32_spi *spi = spi_master_get_devdata(master);
+	pm_runtime_get_sync(&pdev->dev);
+
 	spi_unregister_master(master);
 	spi->cfg->disable(spi);
+	pm_runtime_disable(&pdev->dev);
+	pm_runtime_put_noidle(&pdev->dev);
+	pm_runtime_set_suspended(&pdev->dev);
 	if (master->dma_tx)
 		dma_release_channel(master->dma_tx);
 	if (master->dma_rx)
@@ -1966,7 +1977,6 @@ static int stm32_spi_remove(struct platform_device *pdev)
 	clk_disable_unprepare(spi->clk);
-	pm_runtime_disable(&pdev->dev);
 	pinctrl_pm_select_sleep_state(&pdev->dev);
diff --git a/drivers/usb/gadget/udc/fsl_qe_udc.c b/drivers/usb/gadget/udc/fsl_qe_udc.c
index 8e8588933628..15db7a3868fe 100644
--- a/drivers/usb/gadget/udc/fsl_qe_udc.c
+++ b/drivers/usb/gadget/udc/fsl_qe_udc.c
@@ -586,6 +586,7 @@ static int qe_ep_init(struct qe_udc *udc,
 	case USB_SPEED_FULL:
 		if (max <= 1023)
 			break;
+		fallthrough;
 	default:
 		goto en_done;
 	}
diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c
index 98f193078c05..1c855145711b 100644
--- a/drivers/video/fbdev/core/fbmem.c
+++ b/drivers/video/fbdev/core/fbmem.c
@@ -970,13 +970,11 @@ fb_set_var(struct fb_info *info, struct fb_var_screeninfo *var)
 		fb_var_to_videomode(&mode2, &info->var);
 		/* make sure we don't delete the videomode of current var */
 		ret = fb_mode_is_equal(&mode1, &mode2);
-
-		if (!ret)
-			fbcon_mode_deleted(info, &mode1);
-
-		if (!ret)
-			fb_delete_videomode(&mode1, &info->modelist);
-
+		if (!ret) {
+			ret = fbcon_mode_deleted(info, &mode1);
+			if (!ret)
+				fb_delete_videomode(&mode1, &info->modelist);
+		}
 		return ret ? -EINVAL : 0;
 	}
diff --git a/drivers/video/fbdev/xilinxfb.c b/drivers/video/fbdev/xilinxfb.c
index ffbf900648d9..438e2c78142f 100644
--- a/drivers/video/fbdev/xilinxfb.c
+++ b/drivers/video/fbdev/xilinxfb.c
@@ -241,6 +241,8 @@ xilinx_fb_blank(int blank_mode, struct fb_info *fbi)
 	case FB_BLANK_POWERDOWN:
 		/* turn off panel */
 		xilinx_fb_out32(drvdata, REG_CTRL, 0);
+		break;
+
 	default:
 		break;
 	}
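A note on the spi-bcm2835.c hunks above: the probe path now caches the core clock rate in bs->clk_hz so that the transfer path no longer calls clk_get_rate() for every transfer, while the divider arithmetic itself is unchanged. The stand-alone sketch below only illustrates that divider math; it is not driver code. The names bcm2835_style_cdiv, core_hz and spi_hz are invented for the example, and DIV_ROUND_UP is written out by hand.

#include <stdio.h>

/*
 * Mirror of the divider selection visible in the hunk: CDIV is rounded
 * up, forced even, clamped to 0 (slowest) past 65536, and the effective
 * speed follows from the chosen divider.
 */
static unsigned long bcm2835_style_cdiv(unsigned long core_hz,
					unsigned long spi_hz,
					unsigned long *effective_hz)
{
	unsigned long cdiv;

	if (spi_hz >= core_hz / 2) {
		cdiv = 2;	/* core_hz / 2 is the fastest reachable rate */
	} else if (spi_hz) {
		/* round up, then force CDIV to a multiple of two */
		cdiv = (core_hz + spi_hz - 1) / spi_hz;
		cdiv += (cdiv % 2);
		if (cdiv >= 65536)
			cdiv = 0;	/* 0 selects the slowest rate */
	} else {
		cdiv = 0;
	}

	*effective_hz = cdiv ? core_hz / cdiv : core_hz / 65536;
	return cdiv;
}

int main(void)
{
	unsigned long eff;
	/* example: 250 MHz core clock, 10 MHz requested transfer speed */
	unsigned long cdiv = bcm2835_style_cdiv(250000000UL, 10000000UL, &eff);

	printf("cdiv=%lu effective=%lu Hz\n", cdiv, eff);
	return 0;
}

Caching the rate once at probe time is safe here only because the divider depends on a clock whose rate does not change at runtime; a controller with a dynamically scaled parent clock would still need to query the rate per transfer.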