Diffstat (limited to 'drivers')
117 files changed, 2688 insertions, 1825 deletions
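Among the hunks below, drivers/clk/clk.c adds a new exported helper, clk_is_match(), for checking whether two per-user clk handles refer to the same hardware clock. A minimal usage sketch follows; this is hypothetical driver code, not from the patch: example_probe() is an invented name, and the "baudclk" con_id is borrowed from the acpi_lpss hunk purely for illustration.

	#include <linux/clk.h>
	#include <linux/device.h>
	#include <linux/err.h>

	/* Hypothetical probe: the two lookups may resolve to one hardware clock. */
	static int example_probe(struct device *dev)
	{
		struct clk *a = devm_clk_get(dev, "baudclk");
		struct clk *b = devm_clk_get(dev, NULL);

		if (IS_ERR(a) || IS_ERR(b))
			return -ENODEV;

		/*
		 * With per-user struct clk handles, a == b no longer identifies
		 * the underlying hardware clock; clk_is_match() compares the
		 * shared struct clk_core instead (two NULL clks also match).
		 */
		if (clk_is_match(a, b))
			dev_info(dev, "both lookups resolved to the same clock\n");

		return 0;
	}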
diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c index 657964e8ab7e..37fb19047603 100644 --- a/drivers/acpi/acpi_lpss.c +++ b/drivers/acpi/acpi_lpss.c @@ -65,6 +65,7 @@ struct lpss_private_data; struct lpss_device_desc { unsigned int flags; + const char *clk_con_id; unsigned int prv_offset; size_t prv_size_override; void (*setup)(struct lpss_private_data *pdata); @@ -140,6 +141,7 @@ static struct lpss_device_desc lpt_i2c_dev_desc = { static struct lpss_device_desc lpt_uart_dev_desc = { .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_LTR, + .clk_con_id = "baudclk", .prv_offset = 0x800, .setup = lpss_uart_setup, }; @@ -156,6 +158,7 @@ static struct lpss_device_desc byt_pwm_dev_desc = { static struct lpss_device_desc byt_uart_dev_desc = { .flags = LPSS_CLK | LPSS_CLK_GATE | LPSS_CLK_DIVIDER | LPSS_SAVE_CTX, + .clk_con_id = "baudclk", .prv_offset = 0x800, .setup = lpss_uart_setup, }; @@ -313,7 +316,7 @@ out: return PTR_ERR(clk); pdata->clk = clk; - clk_register_clkdev(clk, NULL, devname); + clk_register_clkdev(clk, dev_desc->clk_con_id, devname); return 0; } diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c index f9054cd36a72..5389579c5120 100644 --- a/drivers/ata/sata_fsl.c +++ b/drivers/ata/sata_fsl.c @@ -869,6 +869,8 @@ try_offline_again: */ ata_msleep(ap, 1); + sata_set_spd(link); + /* * Now, bring the host controller online again, this can take time * as PHY reset and communication establishment, 1st D2H FIS and diff --git a/drivers/char/tpm/tpm-chip.c b/drivers/char/tpm/tpm-chip.c index 1d278ccd751f..e096e9cddb40 100644 --- a/drivers/char/tpm/tpm-chip.c +++ b/drivers/char/tpm/tpm-chip.c @@ -140,24 +140,24 @@ static int tpm_dev_add_device(struct tpm_chip *chip) { int rc; - rc = device_add(&chip->dev); + rc = cdev_add(&chip->cdev, chip->dev.devt, 1); if (rc) { dev_err(&chip->dev, - "unable to device_register() %s, major %d, minor %d, err=%d\n", + "unable to cdev_add() %s, major %d, minor %d, err=%d\n", chip->devname, MAJOR(chip->dev.devt), MINOR(chip->dev.devt), rc); + device_unregister(&chip->dev); return rc; } - rc = cdev_add(&chip->cdev, chip->dev.devt, 1); + rc = device_add(&chip->dev); if (rc) { dev_err(&chip->dev, - "unable to cdev_add() %s, major %d, minor %d, err=%d\n", + "unable to device_register() %s, major %d, minor %d, err=%d\n", chip->devname, MAJOR(chip->dev.devt), MINOR(chip->dev.devt), rc); - device_unregister(&chip->dev); return rc; } @@ -174,27 +174,17 @@ static void tpm_dev_del_device(struct tpm_chip *chip) * tpm_chip_register() - create a character device for the TPM chip * @chip: TPM chip to use. * - * Creates a character device for the TPM chip and adds sysfs interfaces for - * the device, PPI and TCPA. As the last step this function adds the - * chip to the list of TPM chips available for use. + * Creates a character device for the TPM chip and adds sysfs attributes for + * the device. As the last step this function adds the chip to the list of TPM + * chips available for in-kernel use. * - * NOTE: This function should be only called after the chip initialization - * is complete. - * - * Called from tpm_<specific>.c probe function only for devices - * the driver has determined it should claim. Prior to calling - * this function the specific probe function has called pci_enable_device - * upon errant exit from this function specific probe function should call - * pci_disable_device + * This function should be only called after the chip initialization is + * complete. 
*/ int tpm_chip_register(struct tpm_chip *chip) { int rc; - rc = tpm_dev_add_device(chip); - if (rc) - return rc; - /* Populate sysfs for TPM1 devices. */ if (!(chip->flags & TPM_CHIP_FLAG_TPM2)) { rc = tpm_sysfs_add_device(chip); @@ -208,6 +198,10 @@ int tpm_chip_register(struct tpm_chip *chip) chip->bios_dir = tpm_bios_log_setup(chip->devname); } + rc = tpm_dev_add_device(chip); + if (rc) + return rc; + /* Make the chip available. */ spin_lock(&driver_lock); list_add_rcu(&chip->list, &tpm_chip_list); diff --git a/drivers/char/tpm/tpm_ibmvtpm.c b/drivers/char/tpm/tpm_ibmvtpm.c index b1e53e3aece5..42ffa5e7a1e0 100644 --- a/drivers/char/tpm/tpm_ibmvtpm.c +++ b/drivers/char/tpm/tpm_ibmvtpm.c @@ -124,7 +124,7 @@ static int tpm_ibmvtpm_send(struct tpm_chip *chip, u8 *buf, size_t count) { struct ibmvtpm_dev *ibmvtpm; struct ibmvtpm_crq crq; - u64 *word = (u64 *) &crq; + __be64 *word = (__be64 *)&crq; int rc; ibmvtpm = (struct ibmvtpm_dev *)TPM_VPRIV(chip); @@ -145,11 +145,11 @@ static int tpm_ibmvtpm_send(struct tpm_chip *chip, u8 *buf, size_t count) memcpy((void *)ibmvtpm->rtce_buf, (void *)buf, count); crq.valid = (u8)IBMVTPM_VALID_CMD; crq.msg = (u8)VTPM_TPM_COMMAND; - crq.len = (u16)count; - crq.data = ibmvtpm->rtce_dma_handle; + crq.len = cpu_to_be16(count); + crq.data = cpu_to_be32(ibmvtpm->rtce_dma_handle); - rc = ibmvtpm_send_crq(ibmvtpm->vdev, cpu_to_be64(word[0]), - cpu_to_be64(word[1])); + rc = ibmvtpm_send_crq(ibmvtpm->vdev, be64_to_cpu(word[0]), + be64_to_cpu(word[1])); if (rc != H_SUCCESS) { dev_err(ibmvtpm->dev, "tpm_ibmvtpm_send failed rc=%d\n", rc); rc = 0; diff --git a/drivers/char/tpm/tpm_ibmvtpm.h b/drivers/char/tpm/tpm_ibmvtpm.h index f595f14426bf..6af92890518f 100644 --- a/drivers/char/tpm/tpm_ibmvtpm.h +++ b/drivers/char/tpm/tpm_ibmvtpm.h @@ -22,9 +22,9 @@ struct ibmvtpm_crq { u8 valid; u8 msg; - u16 len; - u32 data; - u64 reserved; + __be16 len; + __be32 data; + __be64 reserved; } __attribute__((packed, aligned(8))); struct ibmvtpm_crq_queue { diff --git a/drivers/clk/clk-divider.c b/drivers/clk/clk-divider.c index db7f8bce7467..25006a8bb8e6 100644 --- a/drivers/clk/clk-divider.c +++ b/drivers/clk/clk-divider.c @@ -144,12 +144,6 @@ static unsigned long clk_divider_recalc_rate(struct clk_hw *hw, divider->flags); } -/* - * The reverse of DIV_ROUND_UP: The maximum number which - * divided by m is r - */ -#define MULT_ROUND_UP(r, m) ((r) * (m) + (m) - 1) - static bool _is_valid_table_div(const struct clk_div_table *table, unsigned int div) { @@ -225,19 +219,24 @@ static int _div_round_closest(const struct clk_div_table *table, unsigned long parent_rate, unsigned long rate, unsigned long flags) { - int up, down, div; + int up, down; + unsigned long up_rate, down_rate; - up = down = div = DIV_ROUND_CLOSEST(parent_rate, rate); + up = DIV_ROUND_UP(parent_rate, rate); + down = parent_rate / rate; if (flags & CLK_DIVIDER_POWER_OF_TWO) { - up = __roundup_pow_of_two(div); - down = __rounddown_pow_of_two(div); + up = __roundup_pow_of_two(up); + down = __rounddown_pow_of_two(down); } else if (table) { - up = _round_up_table(table, div); - down = _round_down_table(table, div); + up = _round_up_table(table, up); + down = _round_down_table(table, down); } - return (up - div) <= (div - down) ? up : down; + up_rate = DIV_ROUND_UP(parent_rate, up); + down_rate = DIV_ROUND_UP(parent_rate, down); + + return (rate - up_rate) <= (down_rate - rate) ? 
up : down; } static int _div_round(const struct clk_div_table *table, @@ -313,7 +312,7 @@ static int clk_divider_bestdiv(struct clk_hw *hw, unsigned long rate, return i; } parent_rate = __clk_round_rate(__clk_get_parent(hw->clk), - MULT_ROUND_UP(rate, i)); + rate * i); now = DIV_ROUND_UP(parent_rate, i); if (_is_best_div(rate, now, best, flags)) { bestdiv = i; @@ -353,7 +352,7 @@ static long clk_divider_round_rate(struct clk_hw *hw, unsigned long rate, bestdiv = readl(divider->reg) >> divider->shift; bestdiv &= div_mask(divider->width); bestdiv = _get_div(divider->table, bestdiv, divider->flags); - return bestdiv; + return DIV_ROUND_UP(*prate, bestdiv); } return divider_round_rate(hw, rate, prate, divider->table, diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c index eb0152961d3c..237f23f68bfc 100644 --- a/drivers/clk/clk.c +++ b/drivers/clk/clk.c @@ -1350,7 +1350,6 @@ static unsigned long clk_core_get_rate(struct clk_core *clk) return rate; } -EXPORT_SYMBOL_GPL(clk_core_get_rate); /** * clk_get_rate - return the rate of clk @@ -2171,6 +2170,32 @@ int clk_get_phase(struct clk *clk) } /** + * clk_is_match - check if two clk's point to the same hardware clock + * @p: clk compared against q + * @q: clk compared against p + * + * Returns true if the two struct clk pointers both point to the same hardware + * clock node. Put differently, returns true if struct clk *p and struct clk *q + * share the same struct clk_core object. + * + * Returns false otherwise. Note that two NULL clks are treated as matching. + */ +bool clk_is_match(const struct clk *p, const struct clk *q) +{ + /* trivial case: identical struct clk's or both NULL */ + if (p == q) + return true; + + /* true if clk->core pointers match. Avoid derefing garbage */ + if (!IS_ERR_OR_NULL(p) && !IS_ERR_OR_NULL(q)) + if (p->core == q->core) + return true; + + return false; +} +EXPORT_SYMBOL_GPL(clk_is_match); + +/** * __clk_init - initialize the data structures in a struct clk * @dev: device initializing this clk, placeholder for now * @clk: clk being initialized diff --git a/drivers/clk/qcom/gcc-msm8960.c b/drivers/clk/qcom/gcc-msm8960.c index b0b562b9ce0e..e60feffc10a1 100644 --- a/drivers/clk/qcom/gcc-msm8960.c +++ b/drivers/clk/qcom/gcc-msm8960.c @@ -48,6 +48,17 @@ static struct clk_pll pll3 = { }, }; +static struct clk_regmap pll4_vote = { + .enable_reg = 0x34c0, + .enable_mask = BIT(4), + .hw.init = &(struct clk_init_data){ + .name = "pll4_vote", + .parent_names = (const char *[]){ "pll4" }, + .num_parents = 1, + .ops = &clk_pll_vote_ops, + }, +}; + static struct clk_pll pll8 = { .l_reg = 0x3144, .m_reg = 0x3148, @@ -3023,6 +3034,7 @@ static struct clk_branch rpm_msg_ram_h_clk = { static struct clk_regmap *gcc_msm8960_clks[] = { [PLL3] = &pll3.clkr, + [PLL4_VOTE] = &pll4_vote, [PLL8] = &pll8.clkr, [PLL8_VOTE] = &pll8_vote, [PLL14] = &pll14.clkr, @@ -3247,6 +3259,7 @@ static const struct qcom_reset_map gcc_msm8960_resets[] = { static struct clk_regmap *gcc_apq8064_clks[] = { [PLL3] = &pll3.clkr, + [PLL4_VOTE] = &pll4_vote, [PLL8] = &pll8.clkr, [PLL8_VOTE] = &pll8_vote, [PLL14] = &pll14.clkr, diff --git a/drivers/clk/qcom/lcc-ipq806x.c b/drivers/clk/qcom/lcc-ipq806x.c index 121ffde25dc3..c9ff27b4648b 100644 --- a/drivers/clk/qcom/lcc-ipq806x.c +++ b/drivers/clk/qcom/lcc-ipq806x.c @@ -462,7 +462,6 @@ static struct platform_driver lcc_ipq806x_driver = { .remove = lcc_ipq806x_remove, .driver = { .name = "lcc-ipq806x", - .owner = THIS_MODULE, .of_match_table = lcc_ipq806x_match_table, }, }; diff --git a/drivers/clk/qcom/lcc-msm8960.c 
b/drivers/clk/qcom/lcc-msm8960.c index a75a408cfccd..e2c863295f00 100644 --- a/drivers/clk/qcom/lcc-msm8960.c +++ b/drivers/clk/qcom/lcc-msm8960.c @@ -417,8 +417,8 @@ static struct clk_rcg slimbus_src = { .mnctr_en_bit = 8, .mnctr_reset_bit = 7, .mnctr_mode_shift = 5, - .n_val_shift = 16, - .m_val_shift = 16, + .n_val_shift = 24, + .m_val_shift = 8, .width = 8, }, .p = { @@ -547,7 +547,7 @@ static int lcc_msm8960_probe(struct platform_device *pdev) return PTR_ERR(regmap); /* Use the correct frequency plan depending on speed of PLL4 */ - val = regmap_read(regmap, 0x4, &val); + regmap_read(regmap, 0x4, &val); if (val == 0x12) { slimbus_src.freq_tbl = clk_tbl_aif_osr_492; mi2s_osr_src.freq_tbl = clk_tbl_aif_osr_492; @@ -574,7 +574,6 @@ static struct platform_driver lcc_msm8960_driver = { .remove = lcc_msm8960_remove, .driver = { .name = "lcc-msm8960", - .owner = THIS_MODULE, .of_match_table = lcc_msm8960_match_table, }, }; diff --git a/drivers/clk/ti/fapll.c b/drivers/clk/ti/fapll.c index 6ef89639a9f6..d21640634adf 100644 --- a/drivers/clk/ti/fapll.c +++ b/drivers/clk/ti/fapll.c @@ -84,7 +84,7 @@ static int ti_fapll_enable(struct clk_hw *hw) struct fapll_data *fd = to_fapll(hw); u32 v = readl_relaxed(fd->base); - v |= (1 << FAPLL_MAIN_PLLEN); + v |= FAPLL_MAIN_PLLEN; writel_relaxed(v, fd->base); return 0; @@ -95,7 +95,7 @@ static void ti_fapll_disable(struct clk_hw *hw) struct fapll_data *fd = to_fapll(hw); u32 v = readl_relaxed(fd->base); - v &= ~(1 << FAPLL_MAIN_PLLEN); + v &= ~FAPLL_MAIN_PLLEN; writel_relaxed(v, fd->base); } @@ -104,7 +104,7 @@ static int ti_fapll_is_enabled(struct clk_hw *hw) struct fapll_data *fd = to_fapll(hw); u32 v = readl_relaxed(fd->base); - return v & (1 << FAPLL_MAIN_PLLEN); + return v & FAPLL_MAIN_PLLEN; } static unsigned long ti_fapll_recalc_rate(struct clk_hw *hw, diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c index 111849c4c8c2..d576a4dea64f 100644 --- a/drivers/gpu/drm/drm_crtc.c +++ b/drivers/gpu/drm/drm_crtc.c @@ -43,9 +43,10 @@ #include "drm_crtc_internal.h" #include "drm_internal.h" -static struct drm_framebuffer *add_framebuffer_internal(struct drm_device *dev, - struct drm_mode_fb_cmd2 *r, - struct drm_file *file_priv); +static struct drm_framebuffer * +internal_framebuffer_create(struct drm_device *dev, + struct drm_mode_fb_cmd2 *r, + struct drm_file *file_priv); /* Avoid boilerplate. I'm tired of typing. 
*/ #define DRM_ENUM_NAME_FN(fnname, list) \ @@ -2943,13 +2944,11 @@ static int drm_mode_cursor_universal(struct drm_crtc *crtc, */ if (req->flags & DRM_MODE_CURSOR_BO) { if (req->handle) { - fb = add_framebuffer_internal(dev, &fbreq, file_priv); + fb = internal_framebuffer_create(dev, &fbreq, file_priv); if (IS_ERR(fb)) { DRM_DEBUG_KMS("failed to wrap cursor buffer in drm framebuffer\n"); return PTR_ERR(fb); } - - drm_framebuffer_reference(fb); } else { fb = NULL; } @@ -3308,9 +3307,10 @@ static int framebuffer_check(const struct drm_mode_fb_cmd2 *r) return 0; } -static struct drm_framebuffer *add_framebuffer_internal(struct drm_device *dev, - struct drm_mode_fb_cmd2 *r, - struct drm_file *file_priv) +static struct drm_framebuffer * +internal_framebuffer_create(struct drm_device *dev, + struct drm_mode_fb_cmd2 *r, + struct drm_file *file_priv) { struct drm_mode_config *config = &dev->mode_config; struct drm_framebuffer *fb; @@ -3348,12 +3348,6 @@ static struct drm_framebuffer *add_framebuffer_internal(struct drm_device *dev, return fb; } - mutex_lock(&file_priv->fbs_lock); - r->fb_id = fb->base.id; - list_add(&fb->filp_head, &file_priv->fbs); - DRM_DEBUG_KMS("[FB:%d]\n", fb->base.id); - mutex_unlock(&file_priv->fbs_lock); - return fb; } @@ -3375,15 +3369,24 @@ static struct drm_framebuffer *add_framebuffer_internal(struct drm_device *dev, int drm_mode_addfb2(struct drm_device *dev, void *data, struct drm_file *file_priv) { + struct drm_mode_fb_cmd2 *r = data; struct drm_framebuffer *fb; if (!drm_core_check_feature(dev, DRIVER_MODESET)) return -EINVAL; - fb = add_framebuffer_internal(dev, data, file_priv); + fb = internal_framebuffer_create(dev, r, file_priv); if (IS_ERR(fb)) return PTR_ERR(fb); + /* Transfer ownership to the filp for reaping on close */ + + DRM_DEBUG_KMS("[FB:%d]\n", fb->base.id); + mutex_lock(&file_priv->fbs_lock); + r->fb_id = fb->base.id; + list_add(&fb->filp_head, &file_priv->fbs); + mutex_unlock(&file_priv->fbs_lock); + return 0; } diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c index 0d15e6e30732..132581ca4ad8 100644 --- a/drivers/gpu/drm/drm_dp_mst_topology.c +++ b/drivers/gpu/drm/drm_dp_mst_topology.c @@ -733,10 +733,14 @@ static bool check_txmsg_state(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_sideband_msg_tx *txmsg) { bool ret; - mutex_lock(&mgr->qlock); + + /* + * All updates to txmsg->state are protected by mgr->qlock, and the two + * cases we check here are terminal states. For those the barriers + * provided by the wake_up/wait_event pair are enough. 
+ */ ret = (txmsg->state == DRM_DP_SIDEBAND_TX_RX || txmsg->state == DRM_DP_SIDEBAND_TX_TIMEOUT); - mutex_unlock(&mgr->qlock); return ret; } @@ -1363,12 +1367,13 @@ static int process_single_tx_qlock(struct drm_dp_mst_topology_mgr *mgr, return 0; } -/* must be called holding qlock */ static void process_single_down_tx_qlock(struct drm_dp_mst_topology_mgr *mgr) { struct drm_dp_sideband_msg_tx *txmsg; int ret; + WARN_ON(!mutex_is_locked(&mgr->qlock)); + /* construct a chunk from the first msg in the tx_msg queue */ if (list_empty(&mgr->tx_msg_downq)) { mgr->tx_down_in_progress = false; diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c index 7fc6f8bd4821..1134526286c8 100644 --- a/drivers/gpu/drm/drm_mm.c +++ b/drivers/gpu/drm/drm_mm.c @@ -403,7 +403,7 @@ static int check_free_hole(u64 start, u64 end, u64 size, unsigned alignment) unsigned rem; rem = do_div(tmp, alignment); - if (tmp) + if (rem) start += alignment - rem; } diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c index 9a6da3536ae5..61ae8ff4eaed 100644 --- a/drivers/gpu/drm/i915/i915_cmd_parser.c +++ b/drivers/gpu/drm/i915/i915_cmd_parser.c @@ -836,8 +836,11 @@ static u32 *vmap_batch(struct drm_i915_gem_object *obj, } i = 0; - for_each_sg_page(obj->pages->sgl, &sg_iter, npages, first_page) + for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, first_page) { pages[i++] = sg_page_iter_page(&sg_iter); + if (i == npages) + break; + } addr = vmap(pages, i, 0, PAGE_KERNEL); if (addr == NULL) { diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index e38f45374d55..1a52d6ab0f80 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -1090,7 +1090,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused) seq_printf(m, "Current P-state: %d\n", (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT); } else if (IS_GEN6(dev) || (IS_GEN7(dev) && !IS_VALLEYVIEW(dev)) || - IS_BROADWELL(dev)) { + IS_BROADWELL(dev) || IS_GEN9(dev)) { u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS); u32 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS); u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP); @@ -1109,11 +1109,15 @@ static int i915_frequency_info(struct seq_file *m, void *unused) intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); reqf = I915_READ(GEN6_RPNSWREQ); - reqf &= ~GEN6_TURBO_DISABLE; - if (IS_HASWELL(dev) || IS_BROADWELL(dev)) - reqf >>= 24; - else - reqf >>= 25; + if (IS_GEN9(dev)) + reqf >>= 23; + else { + reqf &= ~GEN6_TURBO_DISABLE; + if (IS_HASWELL(dev) || IS_BROADWELL(dev)) + reqf >>= 24; + else + reqf >>= 25; + } reqf = intel_gpu_freq(dev_priv, reqf); rpmodectl = I915_READ(GEN6_RP_CONTROL); @@ -1127,7 +1131,9 @@ static int i915_frequency_info(struct seq_file *m, void *unused) rpdownei = I915_READ(GEN6_RP_CUR_DOWN_EI); rpcurdown = I915_READ(GEN6_RP_CUR_DOWN); rpprevdown = I915_READ(GEN6_RP_PREV_DOWN); - if (IS_HASWELL(dev) || IS_BROADWELL(dev)) + if (IS_GEN9(dev)) + cagf = (rpstat & GEN9_CAGF_MASK) >> GEN9_CAGF_SHIFT; + else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) cagf = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT; else cagf = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT; @@ -1153,7 +1159,7 @@ static int i915_frequency_info(struct seq_file *m, void *unused) pm_ier, pm_imr, pm_isr, pm_iir, pm_mask); seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status); seq_printf(m, "Render p-state ratio: %d\n", - (gt_perf_status & 0xff00) >> 8); + (gt_perf_status & (IS_GEN9(dev) ? 
0x1ff00 : 0xff00)) >> 8); seq_printf(m, "Render p-state VID: %d\n", gt_perf_status & 0xff); seq_printf(m, "Render p-state limit: %d\n", @@ -1178,14 +1184,17 @@ static int i915_frequency_info(struct seq_file *m, void *unused) GEN6_CURBSYTAVG_MASK); max_freq = (rp_state_cap & 0xff0000) >> 16; + max_freq *= (IS_SKYLAKE(dev) ? GEN9_FREQ_SCALER : 1); seq_printf(m, "Lowest (RPN) frequency: %dMHz\n", intel_gpu_freq(dev_priv, max_freq)); max_freq = (rp_state_cap & 0xff00) >> 8; + max_freq *= (IS_SKYLAKE(dev) ? GEN9_FREQ_SCALER : 1); seq_printf(m, "Nominal (RP1) frequency: %dMHz\n", intel_gpu_freq(dev_priv, max_freq)); max_freq = rp_state_cap & 0xff; + max_freq *= (IS_SKYLAKE(dev) ? GEN9_FREQ_SCALER : 1); seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n", intel_gpu_freq(dev_priv, max_freq)); @@ -1831,18 +1840,6 @@ static int i915_context_status(struct seq_file *m, void *unused) if (ret) return ret; - if (dev_priv->ips.pwrctx) { - seq_puts(m, "power context "); - describe_obj(m, dev_priv->ips.pwrctx); - seq_putc(m, '\n'); - } - - if (dev_priv->ips.renderctx) { - seq_puts(m, "render context "); - describe_obj(m, dev_priv->ips.renderctx); - seq_putc(m, '\n'); - } - list_for_each_entry(ctx, &dev_priv->context_list, link) { if (!i915.enable_execlists && ctx->legacy_hw_ctx.rcs_state == NULL) @@ -2246,6 +2243,11 @@ static int i915_edp_psr_status(struct seq_file *m, void *data) enum pipe pipe; bool enabled = false; + if (!HAS_PSR(dev)) { + seq_puts(m, "PSR not supported\n"); + return 0; + } + intel_runtime_pm_get(dev_priv); mutex_lock(&dev_priv->psr.lock); @@ -2258,17 +2260,15 @@ static int i915_edp_psr_status(struct seq_file *m, void *data) seq_printf(m, "Re-enable work scheduled: %s\n", yesno(work_busy(&dev_priv->psr.work.work))); - if (HAS_PSR(dev)) { - if (HAS_DDI(dev)) - enabled = I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE; - else { - for_each_pipe(dev_priv, pipe) { - stat[pipe] = I915_READ(VLV_PSRSTAT(pipe)) & - VLV_EDP_PSR_CURR_STATE_MASK; - if ((stat[pipe] == VLV_EDP_PSR_ACTIVE_NORFB_UP) || - (stat[pipe] == VLV_EDP_PSR_ACTIVE_SF_UPDATE)) - enabled = true; - } + if (HAS_DDI(dev)) + enabled = I915_READ(EDP_PSR_CTL(dev)) & EDP_PSR_ENABLE; + else { + for_each_pipe(dev_priv, pipe) { + stat[pipe] = I915_READ(VLV_PSRSTAT(pipe)) & + VLV_EDP_PSR_CURR_STATE_MASK; + if ((stat[pipe] == VLV_EDP_PSR_ACTIVE_NORFB_UP) || + (stat[pipe] == VLV_EDP_PSR_ACTIVE_SF_UPDATE)) + enabled = true; } } seq_printf(m, "HW Enabled & Active bit: %s", yesno(enabled)); @@ -2285,7 +2285,7 @@ static int i915_edp_psr_status(struct seq_file *m, void *data) yesno((bool)dev_priv->psr.link_standby)); /* CHV PSR has no kind of performance counter */ - if (HAS_PSR(dev) && HAS_DDI(dev)) { + if (HAS_DDI(dev)) { psrperf = I915_READ(EDP_PSR_PERF_CNT(dev)) & EDP_PSR_PERF_CNT_MASK; @@ -2308,8 +2308,7 @@ static int i915_sink_crc(struct seq_file *m, void *data) u8 crc[6]; drm_modeset_lock_all(dev); - list_for_each_entry(connector, &dev->mode_config.connector_list, - base.head) { + for_each_intel_encoder(dev, connector) { if (connector->base.dpms != DRM_MODE_DPMS_ON) continue; @@ -2677,7 +2676,8 @@ static int i915_display_info(struct seq_file *m, void *unused) active = cursor_position(dev, crtc->pipe, &x, &y); seq_printf(m, "\tcursor visible? %s, position (%d, %d), size %dx%d, addr 0x%08x, active? 
%s\n", yesno(crtc->cursor_base), - x, y, crtc->cursor_width, crtc->cursor_height, + x, y, crtc->base.cursor->state->crtc_w, + crtc->base.cursor->state->crtc_h, crtc->cursor_addr, yesno(active)); } @@ -2853,7 +2853,7 @@ static int i915_ddb_info(struct seq_file *m, void *unused) for_each_pipe(dev_priv, pipe) { seq_printf(m, "Pipe %c\n", pipe_name(pipe)); - for_each_plane(pipe, plane) { + for_each_plane(dev_priv, pipe, plane) { entry = &ddb->plane[pipe][plane]; seq_printf(m, " Plane%-8d%8u%8u%8u\n", plane + 1, entry->start, entry->end, @@ -2870,6 +2870,115 @@ static int i915_ddb_info(struct seq_file *m, void *unused) return 0; } +static void drrs_status_per_crtc(struct seq_file *m, + struct drm_device *dev, struct intel_crtc *intel_crtc) +{ + struct intel_encoder *intel_encoder; + struct drm_i915_private *dev_priv = dev->dev_private; + struct i915_drrs *drrs = &dev_priv->drrs; + int vrefresh = 0; + + for_each_encoder_on_crtc(dev, &intel_crtc->base, intel_encoder) { + /* Encoder connected on this CRTC */ + switch (intel_encoder->type) { + case INTEL_OUTPUT_EDP: + seq_puts(m, "eDP:\n"); + break; + case INTEL_OUTPUT_DSI: + seq_puts(m, "DSI:\n"); + break; + case INTEL_OUTPUT_HDMI: + seq_puts(m, "HDMI:\n"); + break; + case INTEL_OUTPUT_DISPLAYPORT: + seq_puts(m, "DP:\n"); + break; + default: + seq_printf(m, "Other encoder (id=%d).\n", + intel_encoder->type); + return; + } + } + + if (dev_priv->vbt.drrs_type == STATIC_DRRS_SUPPORT) + seq_puts(m, "\tVBT: DRRS_type: Static"); + else if (dev_priv->vbt.drrs_type == SEAMLESS_DRRS_SUPPORT) + seq_puts(m, "\tVBT: DRRS_type: Seamless"); + else if (dev_priv->vbt.drrs_type == DRRS_NOT_SUPPORTED) + seq_puts(m, "\tVBT: DRRS_type: None"); + else + seq_puts(m, "\tVBT: DRRS_type: FIXME: Unrecognized Value"); + + seq_puts(m, "\n\n"); + + if (intel_crtc->config->has_drrs) { + struct intel_panel *panel; + + mutex_lock(&drrs->mutex); + /* DRRS Supported */ + seq_puts(m, "\tDRRS Supported: Yes\n"); + + /* disable_drrs() will make drrs->dp NULL */ + if (!drrs->dp) { + seq_puts(m, "Idleness DRRS: Disabled"); + mutex_unlock(&drrs->mutex); + return; + } + + panel = &drrs->dp->attached_connector->panel; + seq_printf(m, "\t\tBusy_frontbuffer_bits: 0x%X", + drrs->busy_frontbuffer_bits); + + seq_puts(m, "\n\t\t"); + if (drrs->refresh_rate_type == DRRS_HIGH_RR) { + seq_puts(m, "DRRS_State: DRRS_HIGH_RR\n"); + vrefresh = panel->fixed_mode->vrefresh; + } else if (drrs->refresh_rate_type == DRRS_LOW_RR) { + seq_puts(m, "DRRS_State: DRRS_LOW_RR\n"); + vrefresh = panel->downclock_mode->vrefresh; + } else { + seq_printf(m, "DRRS_State: Unknown(%d)\n", + drrs->refresh_rate_type); + mutex_unlock(&drrs->mutex); + return; + } + seq_printf(m, "\t\tVrefresh: %d", vrefresh); + + seq_puts(m, "\n\t\t"); + mutex_unlock(&drrs->mutex); + } else { + /* DRRS not supported. 
Print the VBT parameter*/ + seq_puts(m, "\tDRRS Supported : No"); + } + seq_puts(m, "\n"); +} + +static int i915_drrs_status(struct seq_file *m, void *unused) +{ + struct drm_info_node *node = m->private; + struct drm_device *dev = node->minor->dev; + struct intel_crtc *intel_crtc; + int active_crtc_cnt = 0; + + for_each_intel_crtc(dev, intel_crtc) { + drm_modeset_lock(&intel_crtc->base.mutex, NULL); + + if (intel_crtc->active) { + active_crtc_cnt++; + seq_printf(m, "\nCRTC %d: ", active_crtc_cnt); + + drrs_status_per_crtc(m, dev, intel_crtc); + } + + drm_modeset_unlock(&intel_crtc->base.mutex); + } + + if (!active_crtc_cnt) + seq_puts(m, "No active crtc found\n"); + + return 0; +} + struct pipe_crc_info { const char *name; struct drm_device *dev; @@ -4362,7 +4471,7 @@ static int i915_sseu_status(struct seq_file *m, void *unused) struct drm_i915_private *dev_priv = dev->dev_private; unsigned int s_tot = 0, ss_tot = 0, ss_per = 0, eu_tot = 0, eu_per = 0; - if (INTEL_INFO(dev)->gen < 9) + if ((INTEL_INFO(dev)->gen < 8) || IS_BROADWELL(dev)) return -ENODEV; seq_puts(m, "SSEU Device Info\n"); @@ -4384,7 +4493,34 @@ static int i915_sseu_status(struct seq_file *m, void *unused) yesno(INTEL_INFO(dev)->has_eu_pg)); seq_puts(m, "SSEU Device Status\n"); - if (IS_SKYLAKE(dev)) { + if (IS_CHERRYVIEW(dev)) { + const int ss_max = 2; + int ss; + u32 sig1[ss_max], sig2[ss_max]; + + sig1[0] = I915_READ(CHV_POWER_SS0_SIG1); + sig1[1] = I915_READ(CHV_POWER_SS1_SIG1); + sig2[0] = I915_READ(CHV_POWER_SS0_SIG2); + sig2[1] = I915_READ(CHV_POWER_SS1_SIG2); + + for (ss = 0; ss < ss_max; ss++) { + unsigned int eu_cnt; + + if (sig1[ss] & CHV_SS_PG_ENABLE) + /* skip disabled subslice */ + continue; + + s_tot = 1; + ss_per++; + eu_cnt = ((sig1[ss] & CHV_EU08_PG_ENABLE) ? 0 : 2) + + ((sig1[ss] & CHV_EU19_PG_ENABLE) ? 0 : 2) + + ((sig1[ss] & CHV_EU210_PG_ENABLE) ? 0 : 2) + + ((sig2[ss] & CHV_EU311_PG_ENABLE) ? 
0 : 2); + eu_tot += eu_cnt; + eu_per = max(eu_per, eu_cnt); + } + ss_tot = ss_per; + } else if (IS_SKYLAKE(dev)) { const int s_max = 3, ss_max = 4; int s, ss; u32 s_reg[s_max], eu_reg[2*s_max], eu_mask[2]; @@ -4548,6 +4684,7 @@ static const struct drm_info_list i915_debugfs_list[] = { {"i915_wa_registers", i915_wa_registers, 0}, {"i915_ddb_info", i915_ddb_info, 0}, {"i915_sseu_status", i915_sseu_status, 0}, + {"i915_drrs_status", i915_drrs_status, 0}, }; #define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list) diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index 053e1788f578..d49ed68f041e 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c @@ -68,6 +68,9 @@ static int i915_getparam(struct drm_device *dev, void *data, case I915_PARAM_CHIPSET_ID: value = dev->pdev->device; break; + case I915_PARAM_REVISION: + value = dev->pdev->revision; + break; case I915_PARAM_HAS_GEM: value = 1; break; @@ -150,6 +153,16 @@ static int i915_getparam(struct drm_device *dev, void *data, case I915_PARAM_MMAP_VERSION: value = 1; break; + case I915_PARAM_SUBSLICE_TOTAL: + value = INTEL_INFO(dev)->subslice_total; + if (!value) + return -ENODEV; + break; + case I915_PARAM_EU_TOTAL: + value = INTEL_INFO(dev)->eu_total; + if (!value) + return -ENODEV; + break; default: DRM_DEBUG("Unknown parameter %d\n", param->param); return -EINVAL; @@ -608,14 +621,42 @@ static void intel_device_info_runtime_init(struct drm_device *dev) /* Initialize slice/subslice/EU info */ if (IS_CHERRYVIEW(dev)) { - u32 fuse, mask_eu; + u32 fuse, eu_dis; fuse = I915_READ(CHV_FUSE_GT); - mask_eu = fuse & (CHV_FGT_EU_DIS_SS0_R0_MASK | - CHV_FGT_EU_DIS_SS0_R1_MASK | - CHV_FGT_EU_DIS_SS1_R0_MASK | - CHV_FGT_EU_DIS_SS1_R1_MASK); - info->eu_total = 16 - hweight32(mask_eu); + + info->slice_total = 1; + + if (!(fuse & CHV_FGT_DISABLE_SS0)) { + info->subslice_per_slice++; + eu_dis = fuse & (CHV_FGT_EU_DIS_SS0_R0_MASK | + CHV_FGT_EU_DIS_SS0_R1_MASK); + info->eu_total += 8 - hweight32(eu_dis); + } + + if (!(fuse & CHV_FGT_DISABLE_SS1)) { + info->subslice_per_slice++; + eu_dis = fuse & (CHV_FGT_EU_DIS_SS1_R0_MASK | + CHV_FGT_EU_DIS_SS1_R1_MASK); + info->eu_total += 8 - hweight32(eu_dis); + } + + info->subslice_total = info->subslice_per_slice; + /* + * CHV expected to always have a uniform distribution of EU + * across subslices. + */ + info->eu_per_subslice = info->subslice_total ? + info->eu_total / info->subslice_total : + 0; + /* + * CHV supports subslice power gating on devices with more than + * one subslice, and supports EU power gating on devices with + * more than one EU pair per subslice. + */ + info->has_slice_pg = 0; + info->has_subslice_pg = (info->subslice_total > 1); + info->has_eu_pg = (info->eu_per_subslice > 2); } else if (IS_SKYLAKE(dev)) { const int s_max = 3, ss_max = 4, eu_max = 8; int s, ss; diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 0001642c38b4..82f8be4b6745 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -346,7 +346,6 @@ static const struct intel_device_info intel_broadwell_gt3m_info = { }; static const struct intel_device_info intel_cherryview_info = { - .is_preliminary = 1, .gen = 8, .num_pipes = 3, .need_gfx_hws = 1, .has_hotplug = 1, .ring_mask = RENDER_RING | BSD_RING | BLT_RING | VEBOX_RING, @@ -882,12 +881,6 @@ int i915_reset(struct drm_device *dev) } /* - * FIXME: This races pretty badly against concurrent holders of - * ring interrupts. 
This is possible since we've started to drop - * dev->struct_mutex in select places when waiting for the gpu. - */ - - /* * rps/rc6 re-init is necessary to restore state lost after the * reset and the re-install of gt irqs. Skip for ironlake per * previous concerns that it doesn't respond well to some forms diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index ee5bc43dfc0b..8ba7e1b7b733 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -56,7 +56,7 @@ #define DRIVER_NAME "i915" #define DRIVER_DESC "Intel Graphics" -#define DRIVER_DATE "20150227" +#define DRIVER_DATE "20150313" #undef WARN_ON /* Many gcc seem to no see through this and fall over :( */ @@ -70,6 +70,9 @@ #define WARN_ON(x) WARN((x), "WARN_ON(" #x ")") #endif +#undef WARN_ON_ONCE +#define WARN_ON_ONCE(x) WARN_ONCE((x), "WARN_ON_ONCE(" #x ")") + #define MISSING_CASE(x) WARN(1, "Missing switch case (%lu) in %s\n", \ (long) (x), __func__); @@ -223,9 +226,14 @@ enum hpd_pin { #define for_each_pipe(__dev_priv, __p) \ for ((__p) = 0; (__p) < INTEL_INFO(__dev_priv)->num_pipes; (__p)++) -#define for_each_plane(pipe, p) \ - for ((p) = 0; (p) < INTEL_INFO(dev)->num_sprites[(pipe)] + 1; (p)++) -#define for_each_sprite(p, s) for ((s) = 0; (s) < INTEL_INFO(dev)->num_sprites[(p)]; (s)++) +#define for_each_plane(__dev_priv, __pipe, __p) \ + for ((__p) = 0; \ + (__p) < INTEL_INFO(__dev_priv)->num_sprites[(__pipe)] + 1; \ + (__p)++) +#define for_each_sprite(__dev_priv, __p, __s) \ + for ((__s) = 0; \ + (__s) < INTEL_INFO(__dev_priv)->num_sprites[(__p)]; \ + (__s)++) #define for_each_crtc(dev, crtc) \ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) @@ -238,6 +246,12 @@ enum hpd_pin { &(dev)->mode_config.encoder_list, \ base.head) +#define for_each_intel_connector(dev, intel_connector) \ + list_for_each_entry(intel_connector, \ + &dev->mode_config.connector_list, \ + base.head) + + #define for_each_encoder_on_crtc(dev, __crtc, intel_encoder) \ list_for_each_entry((intel_encoder), &(dev)->mode_config.encoder_list, base.head) \ if ((intel_encoder)->base.crtc == (__crtc)) @@ -783,10 +797,19 @@ struct intel_context { struct list_head link; }; +enum fb_op_origin { + ORIGIN_GTT, + ORIGIN_CPU, + ORIGIN_CS, + ORIGIN_FLIP, +}; + struct i915_fbc { unsigned long uncompressed_size; unsigned threshold; unsigned int fb_id; + unsigned int possible_framebuffer_bits; + unsigned int busy_bits; struct intel_crtc *crtc; int y; @@ -799,14 +822,6 @@ struct i915_fbc { * possible. */ bool enabled; - /* On gen8 some rings cannont perform fbc clean operation so for now - * we are doing this on SW with mmio. - * This variable works in the opposite information direction - * of ring->fbc_dirty telling software on frontbuffer tracking - * to perform the cache clean on sw side. 
- */ - bool need_sw_cache_clean; - struct intel_fbc_work { struct delayed_work work; struct drm_crtc *crtc; @@ -1053,9 +1068,6 @@ struct intel_ilk_power_mgmt { int c_m; int r_t; - - struct drm_i915_gem_object *pwrctx; - struct drm_i915_gem_object *renderctx; }; struct drm_i915_private; @@ -1398,6 +1410,25 @@ struct ilk_wm_values { enum intel_ddb_partitioning partitioning; }; +struct vlv_wm_values { + struct { + uint16_t primary; + uint16_t sprite[2]; + uint8_t cursor; + } pipe[3]; + + struct { + uint16_t plane; + uint8_t cursor; + } sr; + + struct { + uint8_t cursor; + uint8_t sprite[2]; + uint8_t primary; + } ddl[3]; +}; + struct skl_ddb_entry { uint16_t start, end; /* in number of blocks, 'end' is exclusive */ }; @@ -1760,6 +1791,7 @@ struct drm_i915_private { union { struct ilk_wm_values hw; struct skl_wm_values skl_hw; + struct vlv_wm_values vlv; }; } wm; @@ -2396,6 +2428,7 @@ struct drm_i915_cmd_table { #define NUM_L3_SLICES(dev) (IS_HSW_GT3(dev) ? 2 : HAS_L3_DPF(dev)) #define GT_FREQUENCY_MULTIPLIER 50 +#define GEN9_FREQ_SCALER 3 #include "i915_trace.h" @@ -2433,7 +2466,7 @@ struct i915_params { bool disable_display; bool disable_vtd_wa; int use_mmio_flip; - bool mmio_debug; + int mmio_debug; bool verbose_state_checks; bool nuclear_pageflip; }; diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 0107c2ae77d0..0fe313d0f609 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -351,7 +351,7 @@ i915_gem_phys_pwrite(struct drm_i915_gem_object *obj, struct drm_device *dev = obj->base.dev; void *vaddr = obj->phys_handle->vaddr + args->offset; char __user *user_data = to_user_ptr(args->data_ptr); - int ret; + int ret = 0; /* We manually control the domain here and pretend that it * remains coherent i.e. in the GTT domain, like shmem_pwrite. 
@@ -360,6 +360,7 @@ i915_gem_phys_pwrite(struct drm_i915_gem_object *obj, if (ret) return ret; + intel_fb_obj_invalidate(obj, NULL, ORIGIN_CPU); if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) { unsigned long unwritten; @@ -370,13 +371,18 @@ i915_gem_phys_pwrite(struct drm_i915_gem_object *obj, mutex_unlock(&dev->struct_mutex); unwritten = copy_from_user(vaddr, user_data, args->size); mutex_lock(&dev->struct_mutex); - if (unwritten) - return -EFAULT; + if (unwritten) { + ret = -EFAULT; + goto out; + } } drm_clflush_virt_range(vaddr, args->size); i915_gem_chipset_flush(dev); - return 0; + +out: + intel_fb_obj_flush(obj, false); + return ret; } void *i915_gem_object_alloc(struct drm_device *dev) @@ -810,6 +816,8 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev, offset = i915_gem_obj_ggtt_offset(obj) + args->offset; + intel_fb_obj_invalidate(obj, NULL, ORIGIN_GTT); + while (remain > 0) { /* Operation in this page * @@ -830,7 +838,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev, if (fast_user_write(dev_priv->gtt.mappable, page_base, page_offset, user_data, page_length)) { ret = -EFAULT; - goto out_unpin; + goto out_flush; } remain -= page_length; @@ -838,6 +846,8 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev, offset += page_length; } +out_flush: + intel_fb_obj_flush(obj, false); out_unpin: i915_gem_object_ggtt_unpin(obj); out: @@ -952,6 +962,8 @@ i915_gem_shmem_pwrite(struct drm_device *dev, if (ret) return ret; + intel_fb_obj_invalidate(obj, NULL, ORIGIN_CPU); + i915_gem_object_pin_pages(obj); offset = args->offset; @@ -1030,6 +1042,7 @@ out: if (needs_clflush_after) i915_gem_chipset_flush(dev); + intel_fb_obj_flush(obj, false); return ret; } @@ -2929,9 +2942,9 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file) req = obj->last_read_req; /* Do this after OLR check to make sure we make forward progress polling - * on this IOCTL with a timeout <=0 (like busy ioctl) + * on this IOCTL with a timeout == 0 (like busy ioctl) */ - if (args->timeout_ns <= 0) { + if (args->timeout_ns == 0) { ret = -ETIME; goto out; } @@ -2941,7 +2954,8 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file) i915_gem_request_reference(req); mutex_unlock(&dev->struct_mutex); - ret = __i915_wait_request(req, reset_counter, true, &args->timeout_ns, + ret = __i915_wait_request(req, reset_counter, true, + args->timeout_ns > 0 ? 
&args->timeout_ns : NULL, file->driver_priv); mutex_lock(&dev->struct_mutex); i915_gem_request_unreference(req); @@ -3756,7 +3770,7 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write) } if (write) - intel_fb_obj_invalidate(obj, NULL); + intel_fb_obj_invalidate(obj, NULL, ORIGIN_GTT); trace_i915_gem_object_change_domain(obj, old_read_domains, @@ -4071,7 +4085,7 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write) } if (write) - intel_fb_obj_invalidate(obj, NULL); + intel_fb_obj_invalidate(obj, NULL, ORIGIN_CPU); trace_i915_gem_object_change_domain(obj, old_read_domains, @@ -4781,6 +4795,9 @@ i915_gem_init_hw(struct drm_device *dev) if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt()) return -EIO; + /* Double layer security blanket, see i915_gem_init() */ + intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); + if (dev_priv->ellc_size) I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf)); @@ -4813,7 +4830,7 @@ i915_gem_init_hw(struct drm_device *dev) for_each_ring(ring, dev_priv, i) { ret = ring->init_hw(ring); if (ret) - return ret; + goto out; } for (i = 0; i < NUM_L3_SLICES(dev); i++) @@ -4830,9 +4847,11 @@ i915_gem_init_hw(struct drm_device *dev) DRM_ERROR("Context enable failed %d\n", ret); i915_gem_cleanup_ringbuffer(dev); - return ret; + goto out; } +out: + intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); return ret; } @@ -4866,6 +4885,14 @@ int i915_gem_init(struct drm_device *dev) dev_priv->gt.stop_ring = intel_logical_ring_stop; } + /* This is just a security blanket to placate dragons. + * On some systems, we very sporadically observe that the first TLBs + * used by the CS may be stale, despite us poking the TLB reset. If + * we hold the forcewake during initialisation these problems + * just magically go away. + */ + intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); + ret = i915_gem_init_userptr(dev); if (ret) goto out_unlock; @@ -4892,6 +4919,7 @@ int i915_gem_init(struct drm_device *dev) } out_unlock: + intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); mutex_unlock(&dev->struct_mutex); return ret; diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index 85a6adaba258..dc10bc43864e 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c @@ -971,7 +971,7 @@ i915_gem_execbuffer_move_to_active(struct list_head *vmas, obj->dirty = 1; i915_gem_request_assign(&obj->last_write_req, req); - intel_fb_obj_invalidate(obj, ring); + intel_fb_obj_invalidate(obj, ring, ORIGIN_CS); /* update for the implicit flush after a batch */ obj->base.write_domain &= ~I915_GEM_GPU_DOMAINS; @@ -1518,7 +1518,7 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, * - The batch is already pinned into the relevant ppgtt, so we * already have the backing storage fully allocated. * - No other BO uses the global gtt (well contexts, but meh), - * so we don't really have issues with mutliple objects not + * so we don't really have issues with multiple objects not * fitting due to fragmentation. * So this is actually safe. */ diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 74df3d1581dd..2034f7cf238b 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -716,15 +716,19 @@ static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt, uint64_t size) if (size % (1<<30)) DRM_INFO("Pages will be wasted unless GTT size (%llu) is divisible by 1GB\n", size); - /* 1. 
Do all our allocations for page directories and page tables. */ - ret = gen8_ppgtt_alloc(ppgtt, max_pdp); + /* 1. Do all our allocations for page directories and page tables. + * We allocate more than was asked so that we can point the unused parts + * to valid entries that point to scratch page. Dynamic page tables + * will fix this eventually. + */ + ret = gen8_ppgtt_alloc(ppgtt, GEN8_LEGACY_PDPES); if (ret) return ret; /* * 2. Create DMA mappings for the page directories and page tables. */ - for (i = 0; i < max_pdp; i++) { + for (i = 0; i < GEN8_LEGACY_PDPES; i++) { ret = gen8_ppgtt_setup_page_directories(ppgtt, i); if (ret) goto bail; @@ -744,7 +748,7 @@ static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt, uint64_t size) * plugged in correctly. So we do that now/here. For aliasing PPGTT, we * will never need to touch the PDEs again. */ - for (i = 0; i < max_pdp; i++) { + for (i = 0; i < GEN8_LEGACY_PDPES; i++) { struct i915_page_directory_entry *pd = ppgtt->pdp.page_directory[i]; gen8_ppgtt_pde_t *pd_vaddr; pd_vaddr = kmap_atomic(ppgtt->pdp.page_directory[i]->page); @@ -764,9 +768,14 @@ static int gen8_ppgtt_init(struct i915_hw_ppgtt *ppgtt, uint64_t size) ppgtt->base.insert_entries = gen8_ppgtt_insert_entries; ppgtt->base.cleanup = gen8_ppgtt_cleanup; ppgtt->base.start = 0; - ppgtt->base.total = ppgtt->num_pd_entries * GEN8_PTES_PER_PAGE * PAGE_SIZE; - ppgtt->base.clear_range(&ppgtt->base, 0, ppgtt->base.total, true); + /* This is the area that we advertise as usable for the caller */ + ppgtt->base.total = max_pdp * GEN8_PDES_PER_PAGE * GEN8_PTES_PER_PAGE * PAGE_SIZE; + + /* Set all ptes to a valid scratch page. Also above requested space */ + ppgtt->base.clear_range(&ppgtt->base, 0, + ppgtt->num_pd_pages * GEN8_PTES_PER_PAGE * PAGE_SIZE, + true); DRM_DEBUG_DRIVER("Allocated %d pages for page directories (%d wasted)\n", ppgtt->num_pd_pages, ppgtt->num_pd_pages - max_pdp); diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 9baecb79de8c..49ad5fb82ace 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -1696,11 +1696,6 @@ static void i9xx_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe) * the work queue. */ static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir) { - /* TODO: RPS on GEN9+ is not supported yet. 
*/ - if (WARN_ONCE(INTEL_INFO(dev_priv)->gen >= 9, - "GEN9+: unexpected RPS IRQ\n")) - return; - if (pm_iir & dev_priv->pm_rps_events) { spin_lock(&dev_priv->irq_lock); gen6_disable_pm_irq(dev_priv, pm_iir & dev_priv->pm_rps_events); @@ -3169,15 +3164,24 @@ static void gen8_irq_reset(struct drm_device *dev) ibx_irq_reset(dev); } -void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv) +void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv, + unsigned int pipe_mask) { uint32_t extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN; spin_lock_irq(&dev_priv->irq_lock); - GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_B, dev_priv->de_irq_mask[PIPE_B], - ~dev_priv->de_irq_mask[PIPE_B] | extra_ier); - GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_C, dev_priv->de_irq_mask[PIPE_C], - ~dev_priv->de_irq_mask[PIPE_C] | extra_ier); + if (pipe_mask & 1 << PIPE_A) + GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_A, + dev_priv->de_irq_mask[PIPE_A], + ~dev_priv->de_irq_mask[PIPE_A] | extra_ier); + if (pipe_mask & 1 << PIPE_B) + GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_B, + dev_priv->de_irq_mask[PIPE_B], + ~dev_priv->de_irq_mask[PIPE_B] | extra_ier); + if (pipe_mask & 1 << PIPE_C) + GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_C, + dev_priv->de_irq_mask[PIPE_C], + ~dev_priv->de_irq_mask[PIPE_C] | extra_ier); spin_unlock_irq(&dev_priv->irq_lock); } diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c index 44f2262a5553..e2d20ffe6586 100644 --- a/drivers/gpu/drm/i915/i915_params.c +++ b/drivers/gpu/drm/i915/i915_params.c @@ -171,10 +171,10 @@ module_param_named(use_mmio_flip, i915.use_mmio_flip, int, 0600); MODULE_PARM_DESC(use_mmio_flip, "use MMIO flips (-1=never, 0=driver discretion [default], 1=always)"); -module_param_named(mmio_debug, i915.mmio_debug, bool, 0600); +module_param_named(mmio_debug, i915.mmio_debug, int, 0600); MODULE_PARM_DESC(mmio_debug, - "Enable the MMIO debug code (default: false). This may negatively " - "affect performance."); + "Enable the MMIO debug code for the first N failures (default: off). 
" + "This may negatively affect performance."); module_param_named(verbose_state_checks, i915.verbose_state_checks, bool, 0600); MODULE_PARM_DESC(verbose_state_checks, diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 55143cb36e74..cc8ebabc488d 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -566,6 +566,9 @@ #define DSPFREQSTAT_MASK (0x3 << DSPFREQSTAT_SHIFT) #define DSPFREQGUAR_SHIFT 14 #define DSPFREQGUAR_MASK (0x3 << DSPFREQGUAR_SHIFT) +#define DSP_MAXFIFO_PM5_STATUS (1 << 22) /* chv */ +#define DSP_AUTO_CDCLK_GATE_DISABLE (1 << 7) /* chv */ +#define DSP_MAXFIFO_PM5_ENABLE (1 << 6) /* chv */ #define _DP_SSC(val, pipe) ((val) << (2 * (pipe))) #define DP_SSC_MASK(pipe) _DP_SSC(0x3, (pipe)) #define DP_SSC_PWR_ON(pipe) _DP_SSC(0x0, (pipe)) @@ -641,6 +644,11 @@ enum skl_disp_power_wells { #define FB_GFX_FMIN_AT_VMIN_FUSE 0x137 #define FB_GFX_FMIN_AT_VMIN_FUSE_SHIFT 8 +#define PUNIT_REG_DDR_SETUP2 0x139 +#define FORCE_DDR_FREQ_REQ_ACK (1 << 8) +#define FORCE_DDR_LOW_FREQ (1 << 1) +#define FORCE_DDR_HIGH_FREQ (1 << 0) + #define PUNIT_GPU_STATUS_REG 0xdb #define PUNIT_GPU_STATUS_MAX_FREQ_SHIFT 16 #define PUNIT_GPU_STATUS_MAX_FREQ_MASK 0xff @@ -1029,6 +1037,7 @@ enum skl_disp_power_wells { #define DPIO_CHV_FIRST_MOD (0 << 8) #define DPIO_CHV_SECOND_MOD (1 << 8) #define DPIO_CHV_FEEDFWD_GAIN_SHIFT 0 +#define DPIO_CHV_FEEDFWD_GAIN_MASK (0xF << 0) #define CHV_PLL_DW3(ch) _PIPE(ch, _CHV_PLL_DW3_CH0, _CHV_PLL_DW3_CH1) #define _CHV_PLL_DW6_CH0 0x8018 @@ -1040,11 +1049,14 @@ enum skl_disp_power_wells { #define _CHV_PLL_DW8_CH0 0x8020 #define _CHV_PLL_DW8_CH1 0x81A0 +#define DPIO_CHV_TDC_TARGET_CNT_SHIFT 0 +#define DPIO_CHV_TDC_TARGET_CNT_MASK (0x3FF << 0) #define CHV_PLL_DW8(ch) _PIPE(ch, _CHV_PLL_DW8_CH0, _CHV_PLL_DW8_CH1) #define _CHV_PLL_DW9_CH0 0x8024 #define _CHV_PLL_DW9_CH1 0x81A4 #define DPIO_CHV_INT_LOCK_THRESHOLD_SHIFT 1 /* 3 bits */ +#define DPIO_CHV_INT_LOCK_THRESHOLD_MASK (7 << 1) #define DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE 1 /* 1: coarse & 0 : fine */ #define CHV_PLL_DW9(ch) _PIPE(ch, _CHV_PLL_DW9_CH0, _CHV_PLL_DW9_CH1) @@ -1522,6 +1534,8 @@ enum skl_disp_power_wells { /* Fuse readout registers for GT */ #define CHV_FUSE_GT (VLV_DISPLAY_BASE + 0x2168) +#define CHV_FGT_DISABLE_SS0 (1 << 10) +#define CHV_FGT_DISABLE_SS1 (1 << 11) #define CHV_FGT_EU_DIS_SS0_R0_SHIFT 16 #define CHV_FGT_EU_DIS_SS0_R0_MASK (0xf << CHV_FGT_EU_DIS_SS0_R0_SHIFT) #define CHV_FGT_EU_DIS_SS0_R1_SHIFT 20 @@ -2099,6 +2113,14 @@ enum skl_disp_power_wells { #define CDCLK_FREQ_SHIFT 4 #define CDCLK_FREQ_MASK (0x1f << CDCLK_FREQ_SHIFT) #define CZCLK_FREQ_MASK 0xf + +#define GCI_CONTROL (VLV_DISPLAY_BASE + 0x650C) +#define PFI_CREDIT_63 (9 << 28) /* chv only */ +#define PFI_CREDIT_31 (8 << 28) /* chv only */ +#define PFI_CREDIT(x) (((x) - 8) << 28) /* 8-15 */ +#define PFI_CREDIT_RESEND (1 << 27) +#define VGA_FAST_MODE_DISABLE (1 << 14) + #define GMBUSFREQ_VLV (VLV_DISPLAY_BASE + 0x6510) /* @@ -2427,6 +2449,12 @@ enum skl_disp_power_wells { #define GEN6_RP_STATE_LIMITS (MCHBAR_MIRROR_BASE_SNB + 0x5994) #define GEN6_RP_STATE_CAP (MCHBAR_MIRROR_BASE_SNB + 0x5998) +#define INTERVAL_1_28_US(us) (((us) * 100) >> 7) +#define INTERVAL_1_33_US(us) (((us) * 3) >> 2) +#define GT_INTERVAL_FROM_US(dev_priv, us) (IS_GEN9(dev_priv) ? 
\ + INTERVAL_1_33_US(us) : \ + INTERVAL_1_28_US(us)) + /* * Logical Context regs */ @@ -3019,7 +3047,7 @@ enum skl_disp_power_wells { /* Video Data Island Packet control */ #define VIDEO_DIP_DATA 0x61178 -/* Read the description of VIDEO_DIP_DATA (before Haswel) or VIDEO_DIP_ECC +/* Read the description of VIDEO_DIP_DATA (before Haswell) or VIDEO_DIP_ECC * (Haswell and newer) to see which VIDEO_DIP_DATA byte corresponds to each byte * of the infoframe structure specified by CEA-861. */ #define VIDEO_DIP_DATA_SIZE 32 @@ -4065,7 +4093,7 @@ enum skl_disp_power_wells { #define DPINVGTT_STATUS_MASK 0xff #define DPINVGTT_STATUS_MASK_CHV 0xfff -#define DSPARB 0x70030 +#define DSPARB (dev_priv->info.display_mmio_offset + 0x70030) #define DSPARB_CSTART_MASK (0x7f << 7) #define DSPARB_CSTART_SHIFT 7 #define DSPARB_BSTART_MASK (0x7f) @@ -4073,6 +4101,9 @@ enum skl_disp_power_wells { #define DSPARB_BEND_SHIFT 9 /* on 855 */ #define DSPARB_AEND_SHIFT 0 +#define DSPARB2 (VLV_DISPLAY_BASE + 0x70060) /* vlv/chv */ +#define DSPARB3 (VLV_DISPLAY_BASE + 0x7006c) /* chv */ + /* pnv/gen4/g4x/vlv/chv */ #define DSPFW1 (dev_priv->info.display_mmio_offset + 0x70034) #define DSPFW_SR_SHIFT 23 @@ -4096,8 +4127,8 @@ enum skl_disp_power_wells { #define DSPFW_SPRITEB_MASK_VLV (0xff<<16) /* vlv/chv */ #define DSPFW_CURSORA_SHIFT 8 #define DSPFW_CURSORA_MASK (0x3f<<8) -#define DSPFW_PLANEC_SHIFT_OLD 0 -#define DSPFW_PLANEC_MASK_OLD (0x7f<<0) /* pre-gen4 sprite C */ +#define DSPFW_PLANEC_OLD_SHIFT 0 +#define DSPFW_PLANEC_OLD_MASK (0x7f<<0) /* pre-gen4 sprite C */ #define DSPFW_SPRITEA_SHIFT 0 #define DSPFW_SPRITEA_MASK (0x7f<<0) /* g4x */ #define DSPFW_SPRITEA_MASK_VLV (0xff<<0) /* vlv/chv */ @@ -4136,25 +4167,25 @@ enum skl_disp_power_wells { #define DSPFW_SPRITED_WM1_SHIFT 24 #define DSPFW_SPRITED_WM1_MASK (0xff<<24) #define DSPFW_SPRITED_SHIFT 16 -#define DSPFW_SPRITED_MASK (0xff<<16) +#define DSPFW_SPRITED_MASK_VLV (0xff<<16) #define DSPFW_SPRITEC_WM1_SHIFT 8 #define DSPFW_SPRITEC_WM1_MASK (0xff<<8) #define DSPFW_SPRITEC_SHIFT 0 -#define DSPFW_SPRITEC_MASK (0xff<<0) +#define DSPFW_SPRITEC_MASK_VLV (0xff<<0) #define DSPFW8_CHV (VLV_DISPLAY_BASE + 0x700b8) #define DSPFW_SPRITEF_WM1_SHIFT 24 #define DSPFW_SPRITEF_WM1_MASK (0xff<<24) #define DSPFW_SPRITEF_SHIFT 16 -#define DSPFW_SPRITEF_MASK (0xff<<16) +#define DSPFW_SPRITEF_MASK_VLV (0xff<<16) #define DSPFW_SPRITEE_WM1_SHIFT 8 #define DSPFW_SPRITEE_WM1_MASK (0xff<<8) #define DSPFW_SPRITEE_SHIFT 0 -#define DSPFW_SPRITEE_MASK (0xff<<0) +#define DSPFW_SPRITEE_MASK_VLV (0xff<<0) #define DSPFW9_CHV (VLV_DISPLAY_BASE + 0x7007c) /* wtf #2? 
*/ #define DSPFW_PLANEC_WM1_SHIFT 24 #define DSPFW_PLANEC_WM1_MASK (0xff<<24) #define DSPFW_PLANEC_SHIFT 16 -#define DSPFW_PLANEC_MASK (0xff<<16) +#define DSPFW_PLANEC_MASK_VLV (0xff<<16) #define DSPFW_CURSORC_WM1_SHIFT 8 #define DSPFW_CURSORC_WM1_MASK (0x3f<<16) #define DSPFW_CURSORC_SHIFT 0 @@ -4163,7 +4194,7 @@ enum skl_disp_power_wells { /* vlv/chv high order bits */ #define DSPHOWM (VLV_DISPLAY_BASE + 0x70064) #define DSPFW_SR_HI_SHIFT 24 -#define DSPFW_SR_HI_MASK (1<<24) +#define DSPFW_SR_HI_MASK (3<<24) /* 2 bits for chv, 1 for vlv */ #define DSPFW_SPRITEF_HI_SHIFT 23 #define DSPFW_SPRITEF_HI_MASK (1<<23) #define DSPFW_SPRITEE_HI_SHIFT 22 @@ -4184,7 +4215,7 @@ enum skl_disp_power_wells { #define DSPFW_PLANEA_HI_MASK (1<<0) #define DSPHOWM1 (VLV_DISPLAY_BASE + 0x70068) #define DSPFW_SR_WM1_HI_SHIFT 24 -#define DSPFW_SR_WM1_HI_MASK (1<<24) +#define DSPFW_SR_WM1_HI_MASK (3<<24) /* 2 bits for chv, 1 for vlv */ #define DSPFW_SPRITEF_WM1_HI_SHIFT 23 #define DSPFW_SPRITEF_WM1_HI_MASK (1<<23) #define DSPFW_SPRITEE_WM1_HI_SHIFT 22 @@ -4205,21 +4236,17 @@ enum skl_disp_power_wells { #define DSPFW_PLANEA_WM1_HI_MASK (1<<0) /* drain latency register values*/ -#define DRAIN_LATENCY_PRECISION_16 16 -#define DRAIN_LATENCY_PRECISION_32 32 -#define DRAIN_LATENCY_PRECISION_64 64 #define VLV_DDL(pipe) (VLV_DISPLAY_BASE + 0x70050 + 4 * (pipe)) -#define DDL_CURSOR_PRECISION_HIGH (1<<31) -#define DDL_CURSOR_PRECISION_LOW (0<<31) #define DDL_CURSOR_SHIFT 24 -#define DDL_SPRITE_PRECISION_HIGH(sprite) (1<<(15+8*(sprite))) -#define DDL_SPRITE_PRECISION_LOW(sprite) (0<<(15+8*(sprite))) #define DDL_SPRITE_SHIFT(sprite) (8+8*(sprite)) -#define DDL_PLANE_PRECISION_HIGH (1<<7) -#define DDL_PLANE_PRECISION_LOW (0<<7) #define DDL_PLANE_SHIFT 0 +#define DDL_PRECISION_HIGH (1<<7) +#define DDL_PRECISION_LOW (0<<7) #define DRAIN_LATENCY_MASK 0x7f +#define CBR1_VLV (VLV_DISPLAY_BASE + 0x70400) +#define CBR_PND_DEADLINE_DISABLE (1<<31) + /* FIFO watermark sizes etc */ #define G4X_FIFO_LINE_SIZE 64 #define I915_FIFO_LINE_SIZE 64 @@ -6080,6 +6107,7 @@ enum skl_disp_power_wells { #define GEN6_TURBO_DISABLE (1<<31) #define GEN6_FREQUENCY(x) ((x)<<25) #define HSW_FREQUENCY(x) ((x)<<24) +#define GEN9_FREQUENCY(x) ((x)<<23) #define GEN6_OFFSET(x) ((x)<<19) #define GEN6_AGGRESSIVE_TURBO (0<<15) #define GEN6_RC_VIDEO_FREQ 0xA00C @@ -6098,8 +6126,10 @@ enum skl_disp_power_wells { #define GEN6_RPSTAT1 0xA01C #define GEN6_CAGF_SHIFT 8 #define HSW_CAGF_SHIFT 7 +#define GEN9_CAGF_SHIFT 23 #define GEN6_CAGF_MASK (0x7f << GEN6_CAGF_SHIFT) #define HSW_CAGF_MASK (0x7f << HSW_CAGF_SHIFT) +#define GEN9_CAGF_MASK (0x1ff << GEN9_CAGF_SHIFT) #define GEN6_RP_CONTROL 0xA024 #define GEN6_RP_MEDIA_TURBO (1<<11) #define GEN6_RP_MEDIA_MODE_MASK (3<<9) @@ -6225,6 +6255,17 @@ enum skl_disp_power_wells { #define GEN6_RC6 3 #define GEN6_RC7 4 +#define CHV_POWER_SS0_SIG1 0xa720 +#define CHV_POWER_SS1_SIG1 0xa728 +#define CHV_SS_PG_ENABLE (1<<1) +#define CHV_EU08_PG_ENABLE (1<<9) +#define CHV_EU19_PG_ENABLE (1<<17) +#define CHV_EU210_PG_ENABLE (1<<25) + +#define CHV_POWER_SS0_SIG2 0xa724 +#define CHV_POWER_SS1_SIG2 0xa72c +#define CHV_EU311_PG_ENABLE (1<<1) + #define GEN9_SLICE0_PGCTL_ACK 0x804c #define GEN9_SLICE1_PGCTL_ACK 0x8050 #define GEN9_SLICE2_PGCTL_ACK 0x8054 diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c index 67bd07edcbb0..247626885f49 100644 --- a/drivers/gpu/drm/i915/i915_sysfs.c +++ b/drivers/gpu/drm/i915/i915_sysfs.c @@ -319,7 +319,9 @@ static ssize_t gt_act_freq_mhz_show(struct device *kdev, ret = 
intel_gpu_freq(dev_priv, (freq >> 8) & 0xff); } else { u32 rpstat = I915_READ(GEN6_RPSTAT1); - if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) + if (IS_GEN9(dev_priv)) + ret = (rpstat & GEN9_CAGF_MASK) >> GEN9_CAGF_SHIFT; + else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) ret = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT; else ret = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT; diff --git a/drivers/gpu/drm/i915/i915_vgpu.h b/drivers/gpu/drm/i915/i915_vgpu.h index 0db9ccf32605..97a88b5f6a26 100644 --- a/drivers/gpu/drm/i915/i915_vgpu.h +++ b/drivers/gpu/drm/i915/i915_vgpu.h @@ -32,7 +32,7 @@ * The following structure pages are defined in GEN MMIO space * for virtualization. (One page for now) */ -#define VGT_MAGIC 0x4776544776544776 /* 'vGTvGTvG' */ +#define VGT_MAGIC 0x4776544776544776ULL /* 'vGTvGTvG' */ #define VGT_VERSION_MAJOR 1 #define VGT_VERSION_MINOR 0 diff --git a/drivers/gpu/drm/i915/intel_atomic.c b/drivers/gpu/drm/i915/intel_atomic.c index 011b8960fd75..3903b90fb64e 100644 --- a/drivers/gpu/drm/i915/intel_atomic.c +++ b/drivers/gpu/drm/i915/intel_atomic.c @@ -214,12 +214,18 @@ struct drm_crtc_state * intel_crtc_duplicate_state(struct drm_crtc *crtc) { struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + struct intel_crtc_state *crtc_state; if (WARN_ON(!intel_crtc->config)) - return kzalloc(sizeof(*intel_crtc->config), GFP_KERNEL); + crtc_state = kzalloc(sizeof(*crtc_state), GFP_KERNEL); + else + crtc_state = kmemdup(intel_crtc->config, + sizeof(*intel_crtc->config), GFP_KERNEL); - return kmemdup(intel_crtc->config, sizeof(*intel_crtc->config), - GFP_KERNEL); + if (crtc_state) + crtc_state->base.crtc = crtc; + + return &crtc_state->base; } /** diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c index 985d531aaf9e..8aee7d77ce9d 100644 --- a/drivers/gpu/drm/i915/intel_ddi.c +++ b/drivers/gpu/drm/i915/intel_ddi.c @@ -156,16 +156,7 @@ static const struct ddi_buf_trans skl_ddi_translations_edp[] = { static const struct ddi_buf_trans skl_ddi_translations_hdmi[] = { /* Idx NT mV T mV db */ - { 0x00000018, 0x000000a0 }, /* 0: 400 400 0 */ - { 0x00004014, 0x00000098 }, /* 1: 400 600 3.5 */ - { 0x00006012, 0x00000088 }, /* 2: 400 800 6 */ - { 0x00000018, 0x0000003c }, /* 3: 450 450 0 */ - { 0x00000018, 0x00000098 }, /* 4: 600 600 0 */ - { 0x00003015, 0x00000088 }, /* 5: 600 800 2.5 */ - { 0x00005013, 0x00000080 }, /* 6: 600 1000 4.5 */ - { 0x00000018, 0x00000088 }, /* 7: 800 800 0 */ - { 0x00000096, 0x00000080 }, /* 8: 800 1000 2 */ - { 0x00000018, 0x00000080 }, /* 9: 1200 1200 0 */ + { 0x00004014, 0x00000087 }, /* 0: 800 1000 2 */ }; enum port intel_ddi_get_encoder_port(struct intel_encoder *intel_encoder) @@ -202,7 +193,7 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port) { struct drm_i915_private *dev_priv = dev->dev_private; u32 reg; - int i, n_hdmi_entries, n_dp_entries, n_edp_entries, hdmi_800mV_0dB, + int i, n_hdmi_entries, n_dp_entries, n_edp_entries, hdmi_default_entry, size; int hdmi_level = dev_priv->vbt.ddi_port_info[port].hdmi_level_shift; const struct ddi_buf_trans *ddi_translations_fdi; @@ -223,9 +214,16 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port) n_edp_entries = ARRAY_SIZE(skl_ddi_translations_dp); } + /* + * On SKL, the recommendation from the hw team is to always use + * a certain type of level shifter (and thus the corresponding + * 800mV+2dB entry). Given that's the only validated entry, we + * override what is in the VBT, at least until further notice. 
+ */ + hdmi_level = 0; ddi_translations_hdmi = skl_ddi_translations_hdmi; n_hdmi_entries = ARRAY_SIZE(skl_ddi_translations_hdmi); - hdmi_800mV_0dB = 7; + hdmi_default_entry = 0; } else if (IS_BROADWELL(dev)) { ddi_translations_fdi = bdw_ddi_translations_fdi; ddi_translations_dp = bdw_ddi_translations_dp; @@ -234,7 +232,7 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port) n_edp_entries = ARRAY_SIZE(bdw_ddi_translations_edp); n_dp_entries = ARRAY_SIZE(bdw_ddi_translations_dp); n_hdmi_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi); - hdmi_800mV_0dB = 7; + hdmi_default_entry = 7; } else if (IS_HASWELL(dev)) { ddi_translations_fdi = hsw_ddi_translations_fdi; ddi_translations_dp = hsw_ddi_translations_dp; @@ -242,7 +240,7 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port) ddi_translations_hdmi = hsw_ddi_translations_hdmi; n_dp_entries = n_edp_entries = ARRAY_SIZE(hsw_ddi_translations_dp); n_hdmi_entries = ARRAY_SIZE(hsw_ddi_translations_hdmi); - hdmi_800mV_0dB = 6; + hdmi_default_entry = 6; } else { WARN(1, "ddi translation table missing\n"); ddi_translations_edp = bdw_ddi_translations_dp; @@ -252,7 +250,7 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port) n_edp_entries = ARRAY_SIZE(bdw_ddi_translations_edp); n_dp_entries = ARRAY_SIZE(bdw_ddi_translations_dp); n_hdmi_entries = ARRAY_SIZE(bdw_ddi_translations_hdmi); - hdmi_800mV_0dB = 7; + hdmi_default_entry = 7; } switch (port) { @@ -295,7 +293,7 @@ static void intel_prepare_ddi_buffers(struct drm_device *dev, enum port port) /* Choose a good default if VBT is badly populated */ if (hdmi_level == HDMI_LEVEL_SHIFT_UNKNOWN || hdmi_level >= n_hdmi_entries) - hdmi_level = hdmi_800mV_0dB; + hdmi_level = hdmi_default_entry; /* Entry 9 is for HDMI: */ I915_WRITE(reg, ddi_translations_hdmi[hdmi_level].trans1); @@ -786,9 +784,18 @@ static void skl_ddi_clock_get(struct intel_encoder *encoder, case DPLL_CRTL1_LINK_RATE_810: link_clock = 81000; break; + case DPLL_CRTL1_LINK_RATE_1080: + link_clock = 108000; + break; case DPLL_CRTL1_LINK_RATE_1350: link_clock = 135000; break; + case DPLL_CRTL1_LINK_RATE_1620: + link_clock = 162000; + break; + case DPLL_CRTL1_LINK_RATE_2160: + link_clock = 216000; + break; case DPLL_CRTL1_LINK_RATE_2700: link_clock = 270000; break; diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 1aa1cbd16c19..90b460cf2b57 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -37,6 +37,7 @@ #include <drm/i915_drm.h> #include "i915_drv.h" #include "i915_trace.h" +#include <drm/drm_atomic.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_dp_helper.h> #include <drm/drm_crtc_helper.h> @@ -390,7 +391,7 @@ static const intel_limit_t intel_limits_chv = { * them would make no difference. */ .dot = { .min = 25000 * 5, .max = 540000 * 5}, - .vco = { .min = 4860000, .max = 6480000 }, + .vco = { .min = 4800000, .max = 6480000 }, .n = { .min = 1, .max = 1 }, .m1 = { .min = 2, .max = 2 }, .m2 = { .min = 24 << 22, .max = 175 << 22 }, @@ -896,8 +897,12 @@ bool intel_crtc_active(struct drm_crtc *crtc) * * We can ditch the crtc->primary->fb check as soon as we can * properly reconstruct framebuffers. + * + * FIXME: The intel_crtc->active here should be switched to + * crtc->state->active once we have proper CRTC states wired up + * for atomic. 
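For reference, the link-rate decode that skl_ddi_clock_get() above now covers, collected into one hypothetical lookup (clocks in kHz; register names as spelled in this diff, CRTL1 spelling included):

    static int skl_link_rate_to_clock(u32 rate) /* hypothetical helper */
    {
            switch (rate) {
            case DPLL_CRTL1_LINK_RATE_810:  return  81000;
            case DPLL_CRTL1_LINK_RATE_1080: return 108000;
            case DPLL_CRTL1_LINK_RATE_1350: return 135000;
            case DPLL_CRTL1_LINK_RATE_1620: return 162000;
            case DPLL_CRTL1_LINK_RATE_2160: return 216000;
            case DPLL_CRTL1_LINK_RATE_2700: return 270000;
            default:                        return 0;
            }
    }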
*/ - return intel_crtc->active && crtc->primary->fb && + return intel_crtc->active && crtc->primary->state->fb && intel_crtc->config->base.adjusted_mode.crtc_clock; } @@ -1300,14 +1305,14 @@ static void assert_sprites_disabled(struct drm_i915_private *dev_priv, u32 val; if (INTEL_INFO(dev)->gen >= 9) { - for_each_sprite(pipe, sprite) { + for_each_sprite(dev_priv, pipe, sprite) { val = I915_READ(PLANE_CTL(pipe, sprite)); I915_STATE_WARN(val & PLANE_CTL_ENABLE, "plane %d assertion failure, should be off on pipe %c but is still active\n", sprite, pipe_name(pipe)); } } else if (IS_VALLEYVIEW(dev)) { - for_each_sprite(pipe, sprite) { + for_each_sprite(dev_priv, pipe, sprite) { reg = SPCNTR(pipe, sprite); val = I915_READ(reg); I915_STATE_WARN(val & SP_ENABLE, @@ -2533,7 +2538,6 @@ intel_find_plane_obj(struct intel_crtc *intel_crtc, break; } } - } static void i9xx_update_primary_plane(struct drm_crtc *crtc, @@ -2654,9 +2658,6 @@ static void i9xx_update_primary_plane(struct drm_crtc *crtc, I915_WRITE(reg, dspcntr); - DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n", - i915_gem_obj_ggtt_offset(obj), linear_offset, x, y, - fb->pitches[0]); I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]); if (INTEL_INFO(dev)->gen >= 4) { I915_WRITE(DSPSURF(plane), @@ -2758,9 +2759,6 @@ static void ironlake_update_primary_plane(struct drm_crtc *crtc, I915_WRITE(reg, dspcntr); - DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n", - i915_gem_obj_ggtt_offset(obj), linear_offset, x, y, - fb->pitches[0]); I915_WRITE(DSPSTRIDE(plane), fb->pitches[0]); I915_WRITE(DSPSURF(plane), i915_gem_obj_ggtt_offset(obj) + intel_crtc->dspaddr_offset); @@ -2886,11 +2884,6 @@ static void skylake_update_primary_plane(struct drm_crtc *crtc, I915_WRITE(PLANE_CTL(pipe, 0), plane_ctl); - DRM_DEBUG_KMS("Writing base %08lX %d,%d,%d,%d pitch=%d\n", - i915_gem_obj_ggtt_offset(obj), - x, y, fb->width, fb->height, - fb->pitches[0]); - I915_WRITE(PLANE_POS(pipe, 0), 0); I915_WRITE(PLANE_OFFSET(pipe, 0), (y << 16) | x); I915_WRITE(PLANE_SIZE(pipe, 0), @@ -3148,38 +3141,6 @@ static void intel_fdi_normal_train(struct drm_crtc *crtc) FDI_FE_ERRC_ENABLE); } -static bool pipe_has_enabled_pch(struct intel_crtc *crtc) -{ - return crtc->base.state->enable && crtc->active && - crtc->config->has_pch_encoder; -} - -static void ivb_modeset_global_resources(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_crtc *pipe_B_crtc = - to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_B]); - struct intel_crtc *pipe_C_crtc = - to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_C]); - uint32_t temp; - - /* - * When everything is off disable fdi C so that we could enable fdi B - * with all lanes. Note that we don't care about enabled pipes without - * an enabled pch encoder. - */ - if (!pipe_has_enabled_pch(pipe_B_crtc) && - !pipe_has_enabled_pch(pipe_C_crtc)) { - WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE); - WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE); - - temp = I915_READ(SOUTH_CHICKEN1); - temp &= ~FDI_BC_BIFURCATION_SELECT; - DRM_DEBUG_KMS("disabling fdi C rx\n"); - I915_WRITE(SOUTH_CHICKEN1, temp); - } -} - /* The FDI link training functions for ILK/Ibexpeak. 
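The for_each_sprite() conversions in these hunks add a dev_priv argument; the presumed shape of the updated iterator, with the sprite count coming from per-device info rather than a global:

    /* presumed definition; num_sprites[] per-pipe count is an assumption */
    #define for_each_sprite(__dev_priv, __p, __s)                          \
            for ((__s) = 0;                                                \
                 (__s) < INTEL_INFO(__dev_priv)->num_sprites[(__p)];       \
                 (__s)++)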
*/ static void ironlake_fdi_link_train(struct drm_crtc *crtc) { @@ -3835,20 +3796,23 @@ static void ironlake_pch_transcoder_set_timings(struct intel_crtc *crtc, I915_READ(VSYNCSHIFT(cpu_transcoder))); } -static void cpt_enable_fdi_bc_bifurcation(struct drm_device *dev) +static void cpt_set_fdi_bc_bifurcation(struct drm_device *dev, bool enable) { struct drm_i915_private *dev_priv = dev->dev_private; uint32_t temp; temp = I915_READ(SOUTH_CHICKEN1); - if (temp & FDI_BC_BIFURCATION_SELECT) + if (!!(temp & FDI_BC_BIFURCATION_SELECT) == enable) return; WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE); WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE); - temp |= FDI_BC_BIFURCATION_SELECT; - DRM_DEBUG_KMS("enabling fdi C rx\n"); + temp &= ~FDI_BC_BIFURCATION_SELECT; + if (enable) + temp |= FDI_BC_BIFURCATION_SELECT; + + DRM_DEBUG_KMS("%sabling fdi C rx\n", enable ? "en" : "dis"); I915_WRITE(SOUTH_CHICKEN1, temp); POSTING_READ(SOUTH_CHICKEN1); } @@ -3856,20 +3820,19 @@ static void cpt_enable_fdi_bc_bifurcation(struct drm_device *dev) static void ivybridge_update_fdi_bc_bifurcation(struct intel_crtc *intel_crtc) { struct drm_device *dev = intel_crtc->base.dev; - struct drm_i915_private *dev_priv = dev->dev_private; switch (intel_crtc->pipe) { case PIPE_A: break; case PIPE_B: if (intel_crtc->config->fdi_lanes > 2) - WARN_ON(I915_READ(SOUTH_CHICKEN1) & FDI_BC_BIFURCATION_SELECT); + cpt_set_fdi_bc_bifurcation(dev, false); else - cpt_enable_fdi_bc_bifurcation(dev); + cpt_set_fdi_bc_bifurcation(dev, true); break; case PIPE_C: - cpt_enable_fdi_bc_bifurcation(dev); + cpt_set_fdi_bc_bifurcation(dev, true); break; default: @@ -4204,6 +4167,24 @@ static void intel_enable_sprite_planes(struct drm_crtc *crtc) } } +/* + * Disable a plane internally without actually modifying the plane's state. + * This will allow us to easily restore the plane later by just reprogramming + * its state. + */ +static void disable_plane_internal(struct drm_plane *plane) +{ + struct intel_plane *intel_plane = to_intel_plane(plane); + struct drm_plane_state *state = + plane->funcs->atomic_duplicate_state(plane); + struct intel_plane_state *intel_state = to_intel_plane_state(state); + + intel_state->visible = false; + intel_plane->commit_plane(plane, intel_state); + + intel_plane_destroy_state(plane, state); +} + static void intel_disable_sprite_planes(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; @@ -4213,8 +4194,8 @@ static void intel_disable_sprite_planes(struct drm_crtc *crtc) drm_for_each_legacy_plane(plane, &dev->mode_config.plane_list) { intel_plane = to_intel_plane(plane); - if (intel_plane->pipe == pipe) - plane->funcs->disable_plane(plane); + if (plane->fb && intel_plane->pipe == pipe) + disable_plane_internal(plane); } } @@ -4983,24 +4964,23 @@ static void cherryview_set_cdclk(struct drm_device *dev, int cdclk) WARN_ON(dev_priv->display.get_display_clock_speed(dev) != dev_priv->vlv_cdclk_freq); switch (cdclk) { - case 400000: - cmd = 3; - break; case 333333: case 320000: - cmd = 2; - break; case 266667: - cmd = 1; - break; case 200000: - cmd = 0; break; default: MISSING_CASE(cdclk); return; } + /* + * Specs are full of misinformation, but testing on actual + * hardware has shown that we just need to write the desired + * CCK divider into the Punit register. 
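The divider in question is DIV_ROUND_CLOSEST(hpll_freq << 1, cdclk) - 1, as the line just below computes; worked through for an assumed 1600 MHz HPLL (hpll_freq = 1600000 kHz):

    /* cdclk = 266667 kHz:
     *   cmd = DIV_ROUND_CLOSEST(1600000 << 1, 266667) - 1
     *       = DIV_ROUND_CLOSEST(3200000, 266667) - 1
     *       = 12 - 1 = 11
     * cdclk = 320000 kHz:
     *   cmd = 3200000 / 320000 - 1 = 10 - 1 = 9
     */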
+ */ + cmd = DIV_ROUND_CLOSEST(dev_priv->hpll_freq << 1, cdclk) - 1; + mutex_lock(&dev_priv->rps.hw_lock); val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ); val &= ~DSPFREQGUAR_MASK_CHV; @@ -5020,27 +5000,25 @@ static int valleyview_calc_cdclk(struct drm_i915_private *dev_priv, int max_pixclk) { int freq_320 = (dev_priv->hpll_freq << 1) % 320000 != 0 ? 333333 : 320000; - - /* FIXME: Punit isn't quite ready yet */ - if (IS_CHERRYVIEW(dev_priv->dev)) - return 400000; + int limit = IS_CHERRYVIEW(dev_priv) ? 95 : 90; /* * Really only a few cases to deal with, as only 4 CDclks are supported: * 200MHz * 267MHz * 320/333MHz (depends on HPLL freq) - * 400MHz - * So we check to see whether we're above 90% of the lower bin and - * adjust if needed. + * 400MHz (VLV only) + * So we check to see whether we're above 90% (VLV) or 95% (CHV) + * of the lower bin and adjust if needed. * * We seem to get an unstable or solid color picture at 200MHz. * Not sure what's wrong. For now use 200MHz only when all pipes * are off. */ - if (max_pixclk > freq_320*9/10) + if (!IS_CHERRYVIEW(dev_priv) && + max_pixclk > freq_320*limit/100) return 400000; - else if (max_pixclk > 266667*9/10) + else if (max_pixclk > 266667*limit/100) return freq_320; else if (max_pixclk > 0) return 266667; @@ -5081,6 +5059,42 @@ static void valleyview_modeset_global_pipes(struct drm_device *dev, *prepare_pipes |= (1 << intel_crtc->pipe); } +static void vlv_program_pfi_credits(struct drm_i915_private *dev_priv) +{ + unsigned int credits, default_credits; + + if (IS_CHERRYVIEW(dev_priv)) + default_credits = PFI_CREDIT(12); + else + default_credits = PFI_CREDIT(8); + + if (DIV_ROUND_CLOSEST(dev_priv->vlv_cdclk_freq, 1000) >= dev_priv->rps.cz_freq) { + /* CHV suggested value is 31 or 63 */ + if (IS_CHERRYVIEW(dev_priv)) + credits = PFI_CREDIT_31; + else + credits = PFI_CREDIT(15); + } else { + credits = default_credits; + } + + /* + * WA - write default credits before re-programming + * FIXME: should we also set the resend bit here? + */ + I915_WRITE(GCI_CONTROL, VGA_FAST_MODE_DISABLE | + default_credits); + + I915_WRITE(GCI_CONTROL, VGA_FAST_MODE_DISABLE | + credits | PFI_CREDIT_RESEND); + + /* + * FIXME is this guaranteed to clear + * immediately or should we poll for it? 
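If the answer to that question turned out to be that polling is needed, a sketch using the driver's wait_for() helper (seen elsewhere in this diff; the 100 ms bound is an assumption) might be:

    if (wait_for((I915_READ(GCI_CONTROL) & PFI_CREDIT_RESEND) == 0, 100))
            DRM_ERROR("PFI credit resend did not clear\n");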
+ */ + WARN_ON(I915_READ(GCI_CONTROL) & PFI_CREDIT_RESEND); +} + static void valleyview_modeset_global_resources(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; @@ -5104,6 +5118,8 @@ static void valleyview_modeset_global_resources(struct drm_device *dev) else valleyview_set_cdclk(dev, req_cdclk); + vlv_program_pfi_credits(dev_priv); + intel_display_power_put(dev_priv, POWER_DOMAIN_PIPE_A); } } @@ -5517,13 +5533,21 @@ bool intel_connector_get_hw_state(struct intel_connector *connector) return encoder->get_hw_state(encoder, &pipe); } +static int pipe_required_fdi_lanes(struct drm_device *dev, enum pipe pipe) +{ + struct intel_crtc *crtc = + to_intel_crtc(intel_get_crtc_for_pipe(dev, pipe)); + + if (crtc->base.state->enable && + crtc->config->has_pch_encoder) + return crtc->config->fdi_lanes; + + return 0; +} + static bool ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe, struct intel_crtc_state *pipe_config) { - struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_crtc *pipe_B_crtc = - to_intel_crtc(dev_priv->pipe_to_crtc_mapping[PIPE_B]); - DRM_DEBUG_KMS("checking fdi config on pipe %c, lanes %i\n", pipe_name(pipe), pipe_config->fdi_lanes); if (pipe_config->fdi_lanes > 4) { @@ -5550,22 +5574,20 @@ static bool ironlake_check_fdi_lanes(struct drm_device *dev, enum pipe pipe, case PIPE_A: return true; case PIPE_B: - if (dev_priv->pipe_to_crtc_mapping[PIPE_C]->enabled && - pipe_config->fdi_lanes > 2) { + if (pipe_config->fdi_lanes > 2 && + pipe_required_fdi_lanes(dev, PIPE_C) > 0) { DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %c: %i lanes\n", pipe_name(pipe), pipe_config->fdi_lanes); return false; } return true; case PIPE_C: - if (!pipe_has_enabled_pch(pipe_B_crtc) || - pipe_B_crtc->config->fdi_lanes <= 2) { - if (pipe_config->fdi_lanes > 2) { - DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %c: %i lanes\n", - pipe_name(pipe), pipe_config->fdi_lanes); - return false; - } - } else { + if (pipe_config->fdi_lanes > 2) { + DRM_DEBUG_KMS("only 2 lanes on pipe %c: required %i lanes\n", + pipe_name(pipe), pipe_config->fdi_lanes); + return false; + } + if (pipe_required_fdi_lanes(dev, PIPE_B) > 2) { DRM_DEBUG_KMS("fdi link B uses too many lanes to enable link C\n"); return false; } @@ -5699,10 +5721,6 @@ static int valleyview_get_display_clock_speed(struct drm_device *dev) u32 val; int divider; - /* FIXME: Punit isn't quite ready yet */ - if (IS_CHERRYVIEW(dev)) - return 400000; - if (dev_priv->hpll_freq == 0) dev_priv->hpll_freq = valleyview_get_vco(dev_priv); @@ -6144,9 +6162,10 @@ static void chv_prepare_pll(struct intel_crtc *crtc, int pipe = crtc->pipe; int dpll_reg = DPLL(crtc->pipe); enum dpio_channel port = vlv_pipe_to_channel(pipe); - u32 loopfilter, intcoeff; + u32 loopfilter, tribuf_calcntr; u32 bestn, bestm1, bestm2, bestp1, bestp2, bestm2_frac; - int refclk; + u32 dpio_val; + int vco; bestn = pipe_config->dpll.n; bestm2_frac = pipe_config->dpll.m2 & 0x3fffff; @@ -6154,6 +6173,9 @@ static void chv_prepare_pll(struct intel_crtc *crtc, bestm2 = pipe_config->dpll.m2 >> 22; bestp1 = pipe_config->dpll.p1; bestp2 = pipe_config->dpll.p2; + vco = pipe_config->dpll.vco; + dpio_val = 0; + loopfilter = 0; /* * Enable Refclk and SSC @@ -6179,26 +6201,56 @@ static void chv_prepare_pll(struct intel_crtc *crtc, 1 << DPIO_CHV_N_DIV_SHIFT); /* M2 fraction division */ - vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW2(port), bestm2_frac); + if (bestm2_frac) + vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW2(port), bestm2_frac); /* M2 
fraction division enable */ - vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW3(port), - DPIO_CHV_FRAC_DIV_EN | - (2 << DPIO_CHV_FEEDFWD_GAIN_SHIFT)); + dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW3(port)); + dpio_val &= ~(DPIO_CHV_FEEDFWD_GAIN_MASK | DPIO_CHV_FRAC_DIV_EN); + dpio_val |= (2 << DPIO_CHV_FEEDFWD_GAIN_SHIFT); + if (bestm2_frac) + dpio_val |= DPIO_CHV_FRAC_DIV_EN; + vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW3(port), dpio_val); + + /* Program digital lock detect threshold */ + dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW9(port)); + dpio_val &= ~(DPIO_CHV_INT_LOCK_THRESHOLD_MASK | + DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE); + dpio_val |= (0x5 << DPIO_CHV_INT_LOCK_THRESHOLD_SHIFT); + if (!bestm2_frac) + dpio_val |= DPIO_CHV_INT_LOCK_THRESHOLD_SEL_COARSE; + vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW9(port), dpio_val); /* Loop filter */ - refclk = i9xx_get_refclk(crtc, 0); - loopfilter = 5 << DPIO_CHV_PROP_COEFF_SHIFT | - 2 << DPIO_CHV_GAIN_CTRL_SHIFT; - if (refclk == 100000) - intcoeff = 11; - else if (refclk == 38400) - intcoeff = 10; - else - intcoeff = 9; - loopfilter |= intcoeff << DPIO_CHV_INT_COEFF_SHIFT; + if (vco == 5400000) { + loopfilter |= (0x3 << DPIO_CHV_PROP_COEFF_SHIFT); + loopfilter |= (0x8 << DPIO_CHV_INT_COEFF_SHIFT); + loopfilter |= (0x1 << DPIO_CHV_GAIN_CTRL_SHIFT); + tribuf_calcntr = 0x9; + } else if (vco <= 6200000) { + loopfilter |= (0x5 << DPIO_CHV_PROP_COEFF_SHIFT); + loopfilter |= (0xB << DPIO_CHV_INT_COEFF_SHIFT); + loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT); + tribuf_calcntr = 0x9; + } else if (vco <= 6480000) { + loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT); + loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT); + loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT); + tribuf_calcntr = 0x8; + } else { + /* Not supported. 
Apply the same limits as in the max case */ + loopfilter |= (0x4 << DPIO_CHV_PROP_COEFF_SHIFT); + loopfilter |= (0x9 << DPIO_CHV_INT_COEFF_SHIFT); + loopfilter |= (0x3 << DPIO_CHV_GAIN_CTRL_SHIFT); + tribuf_calcntr = 0; + } vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW6(port), loopfilter); + dpio_val = vlv_dpio_read(dev_priv, pipe, CHV_PLL_DW8(port)); + dpio_val &= ~DPIO_CHV_TDC_TARGET_CNT_MASK; + dpio_val |= (tribuf_calcntr << DPIO_CHV_TDC_TARGET_CNT_SHIFT); + vlv_dpio_write(dev_priv, pipe, CHV_PLL_DW8(port), dpio_val); + /* AFC Recal */ vlv_dpio_write(dev_priv, pipe, CHV_CMN_DW14(port), vlv_dpio_read(dev_priv, pipe, CHV_CMN_DW14(port)) | @@ -8409,8 +8461,8 @@ static void i845_update_cursor(struct drm_crtc *crtc, u32 base) uint32_t cntl = 0, size = 0; if (base) { - unsigned int width = intel_crtc->cursor_width; - unsigned int height = intel_crtc->cursor_height; + unsigned int width = intel_crtc->base.cursor->state->crtc_w; + unsigned int height = intel_crtc->base.cursor->state->crtc_h; unsigned int stride = roundup_pow_of_two(width) * 4; switch (stride) { @@ -8474,7 +8526,7 @@ static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base) cntl = 0; if (base) { cntl = MCURSOR_GAMMA_ENABLE; - switch (intel_crtc->cursor_width) { + switch (intel_crtc->base.cursor->state->crtc_w) { case 64: cntl |= CURSOR_MODE_64_ARGB_AX; break; @@ -8485,7 +8537,7 @@ static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base) cntl |= CURSOR_MODE_256_ARGB_AX; break; default: - MISSING_CASE(intel_crtc->cursor_width); + MISSING_CASE(intel_crtc->base.cursor->state->crtc_w); return; } cntl |= pipe << 28; /* Connect to correct pipe */ @@ -8532,7 +8584,7 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc, base = 0; if (x < 0) { - if (x + intel_crtc->cursor_width <= 0) + if (x + intel_crtc->base.cursor->state->crtc_w <= 0) base = 0; pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT; @@ -8541,7 +8593,7 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc, pos |= x << CURSOR_X_SHIFT; if (y < 0) { - if (y + intel_crtc->cursor_height <= 0) + if (y + intel_crtc->base.cursor->state->crtc_h <= 0) base = 0; pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT; @@ -8557,8 +8609,8 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc, /* ILK+ do this automagically */ if (HAS_GMCH_DISPLAY(dev) && crtc->cursor->state->rotation == BIT(DRM_ROTATE_180)) { - base += (intel_crtc->cursor_height * - intel_crtc->cursor_width - 1) * 4; + base += (intel_crtc->base.cursor->state->crtc_h * + intel_crtc->base.cursor->state->crtc_w - 1) * 4; } if (IS_845G(dev) || IS_I865G(dev)) @@ -9219,7 +9271,6 @@ static void intel_unpin_work_fn(struct work_struct *__work) mutex_lock(&dev->struct_mutex); intel_unpin_fb_obj(intel_fb_obj(work->old_fb)); drm_gem_object_unreference(&work->pending_flip_obj->base); - drm_framebuffer_unreference(work->old_fb); intel_fbc_update(dev); @@ -9228,6 +9279,7 @@ static void intel_unpin_work_fn(struct work_struct *__work) mutex_unlock(&dev->struct_mutex); intel_frontbuffer_flip_complete(dev, INTEL_FRONTBUFFER_PRIMARY(pipe)); + drm_framebuffer_unreference(work->old_fb); BUG_ON(atomic_read(&to_intel_crtc(work->crtc)->unpin_work_count) == 0); atomic_dec(&to_intel_crtc(work->crtc)->unpin_work_count); @@ -9799,7 +9851,7 @@ void intel_check_page_flip(struct drm_device *dev, int pipe) struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe]; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - WARN_ON(!in_irq()); + WARN_ON(!in_interrupt()); if (crtc == NULL) return; @@ -9891,10 +9943,6 @@ static int 
intel_crtc_page_flip(struct drm_crtc *crtc, if (atomic_read(&intel_crtc->unpin_work_count) >= 2) flush_workqueue(dev_priv->wq); - ret = i915_mutex_lock_interruptible(dev); - if (ret) - goto cleanup; - /* Reference the objects for the scheduled work. */ drm_framebuffer_reference(work->old_fb); drm_gem_object_reference(&obj->base); @@ -9904,6 +9952,10 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, work->pending_flip_obj = obj; + ret = i915_mutex_lock_interruptible(dev); + if (ret) + goto cleanup; + atomic_inc(&intel_crtc->unpin_work_count); intel_crtc->reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter); @@ -9968,13 +10020,14 @@ cleanup_unpin: intel_unpin_fb_obj(obj); cleanup_pending: atomic_dec(&intel_crtc->unpin_work_count); + mutex_unlock(&dev->struct_mutex); +cleanup: crtc->primary->fb = old_fb; update_state_fb(crtc->primary); + + drm_gem_object_unreference_unlocked(&obj->base); drm_framebuffer_unreference(work->old_fb); - drm_gem_object_unreference(&obj->base); - mutex_unlock(&dev->struct_mutex); -cleanup: spin_lock_irq(&dev->event_lock); intel_crtc->unpin_work = NULL; spin_unlock_irq(&dev->event_lock); @@ -10014,8 +10067,7 @@ static void intel_modeset_update_staged_output_state(struct drm_device *dev) struct intel_encoder *encoder; struct intel_connector *connector; - list_for_each_entry(connector, &dev->mode_config.connector_list, - base.head) { + for_each_intel_connector(dev, connector) { connector->new_encoder = to_intel_encoder(connector->base.encoder); } @@ -10046,8 +10098,7 @@ static void intel_modeset_commit_output_state(struct drm_device *dev) struct intel_encoder *encoder; struct intel_connector *connector; - list_for_each_entry(connector, &dev->mode_config.connector_list, - base.head) { + for_each_intel_connector(dev, connector) { connector->base.encoder = &connector->new_encoder->base; } @@ -10135,8 +10186,7 @@ compute_baseline_pipe_bpp(struct intel_crtc *crtc, pipe_config->pipe_bpp = bpp; /* Clamp display bpp to EDID value */ - list_for_each_entry(connector, &dev->mode_config.connector_list, - base.head) { + for_each_intel_connector(dev, connector) { if (!connector->new_encoder || connector->new_encoder->new_crtc != crtc) continue; @@ -10263,8 +10313,7 @@ static bool check_digital_port_conflicts(struct drm_device *dev) * list to detect the problem on ddi platforms * where there's just one encoder per digital port. */ - list_for_each_entry(connector, - &dev->mode_config.connector_list, base.head) { + for_each_intel_connector(dev, connector) { struct intel_encoder *encoder = connector->new_encoder; if (!encoder) @@ -10437,8 +10486,7 @@ intel_modeset_affected_pipes(struct drm_crtc *crtc, unsigned *modeset_pipes, * to be part of the prepare_pipes mask. We don't (yet) support global * modeset across multiple crtcs, so modeset_pipes will only have one * bit set at most. 
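The many list_for_each_entry() conversions in these hunks rely on a new connector iterator; given the open-coded form on the removed lines, its definition is presumably a thin wrapper:

    /* presumed definition, wrapping the traversal the "-" lines spell out */
    #define for_each_intel_connector(dev, intel_connector)                 \
            list_for_each_entry(intel_connector,                           \
                                &(dev)->mode_config.connector_list,        \
                                base.head)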
*/ - list_for_each_entry(connector, &dev->mode_config.connector_list, - base.head) { + for_each_intel_connector(dev, connector) { if (connector->base.encoder == &connector->new_encoder->base) continue; @@ -10807,7 +10855,7 @@ static void check_wm_state(struct drm_device *dev) continue; /* planes */ - for_each_plane(pipe, plane) { + for_each_plane(dev_priv, pipe, plane) { hw_entry = &hw_ddb.plane[pipe][plane]; sw_entry = &sw_ddb->plane[pipe][plane]; @@ -10841,8 +10889,7 @@ check_connector_state(struct drm_device *dev) { struct intel_connector *connector; - list_for_each_entry(connector, &dev->mode_config.connector_list, - base.head) { + for_each_intel_connector(dev, connector) { /* This also checks the encoder/connector hw state with the * ->get_hw_state callbacks. */ intel_connector_check_state(connector); @@ -10872,8 +10919,7 @@ check_encoder_state(struct drm_device *dev) I915_STATE_WARN(encoder->connectors_active && !encoder->base.crtc, "encoder's active_connectors set, but no crtc\n"); - list_for_each_entry(connector, &dev->mode_config.connector_list, - base.head) { + for_each_intel_connector(dev, connector) { if (connector->base.encoder != &encoder->base) continue; enabled = true; @@ -11394,7 +11440,7 @@ static void intel_set_config_restore_state(struct drm_device *dev, } count = 0; - list_for_each_entry(connector, &dev->mode_config.connector_list, base.head) { + for_each_intel_connector(dev, connector) { connector->new_encoder = to_intel_encoder(config->save_connector_encoders[count++]); } @@ -11486,8 +11532,7 @@ intel_modeset_stage_output_state(struct drm_device *dev, WARN_ON(!set->fb && (set->num_connectors != 0)); WARN_ON(set->fb && (set->num_connectors == 0)); - list_for_each_entry(connector, &dev->mode_config.connector_list, - base.head) { + for_each_intel_connector(dev, connector) { /* Otherwise traverse passed in connector list and get encoders * for them. */ for (ro = 0; ro < set->num_connectors; ro++) { @@ -11512,15 +11557,16 @@ intel_modeset_stage_output_state(struct drm_device *dev, if (&connector->new_encoder->base != connector->base.encoder) { - DRM_DEBUG_KMS("encoder changed, full mode switch\n"); + DRM_DEBUG_KMS("[CONNECTOR:%d:%s] encoder changed, full mode switch\n", + connector->base.base.id, + connector->base.name); config->mode_changed = true; } } /* connector->new_encoder is now updated for all connectors. */ /* Update crtc of enabled connectors. */ - list_for_each_entry(connector, &dev->mode_config.connector_list, - base.head) { + for_each_intel_connector(dev, connector) { struct drm_crtc *new_crtc; if (!connector->new_encoder) @@ -11549,9 +11595,7 @@ intel_modeset_stage_output_state(struct drm_device *dev, /* Check for any encoders that needs to be disabled. */ for_each_intel_encoder(dev, encoder) { int num_connectors = 0; - list_for_each_entry(connector, - &dev->mode_config.connector_list, - base.head) { + for_each_intel_connector(dev, connector) { if (connector->new_encoder == encoder) { WARN_ON(!connector->new_encoder->new_crtc); num_connectors++; @@ -11566,13 +11610,14 @@ intel_modeset_stage_output_state(struct drm_device *dev, /* Only now check for crtc changes so we don't miss encoders * that will be disabled. */ if (&encoder->new_crtc->base != encoder->base.crtc) { - DRM_DEBUG_KMS("crtc changed, full mode switch\n"); + DRM_DEBUG_KMS("[ENCODER:%d:%s] crtc changed, full mode switch\n", + encoder->base.base.id, + encoder->base.name); config->mode_changed = true; } } /* Now we've also updated encoder->new_crtc for all encoders. 
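The reordered unwind in intel_crtc_page_flip() a few hunks up follows a take-refs-then-lock discipline; condensed sketch of the resulting error paths (labels as in the hunk):

    /* references are taken before the interruptible lock... */
    drm_framebuffer_reference(work->old_fb);
    drm_gem_object_reference(&obj->base);

    ret = i915_mutex_lock_interruptible(dev);
    if (ret)
            goto cleanup;                     /* unlocked unwind */
    /* ... */
    cleanup_pending:
            mutex_unlock(&dev->struct_mutex); /* drop the lock first... */
    cleanup:
            /* ...so the shared tail must use the _unlocked variant */
            drm_gem_object_unreference_unlocked(&obj->base);
            drm_framebuffer_unreference(work->old_fb);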
*/ - list_for_each_entry(connector, &dev->mode_config.connector_list, - base.head) { + for_each_intel_connector(dev, connector) { if (connector->new_encoder) if (connector->new_encoder != connector->encoder) connector->encoder = connector->new_encoder; @@ -11588,7 +11633,8 @@ intel_modeset_stage_output_state(struct drm_device *dev, } if (crtc->new_enabled != crtc->base.state->enable) { - DRM_DEBUG_KMS("crtc %sabled, full mode switch\n", + DRM_DEBUG_KMS("[CRTC:%d] %sabled, full mode switch\n", + crtc->base.base.id, crtc->new_enabled ? "en" : "dis"); config->mode_changed = true; } @@ -11611,7 +11657,7 @@ static void disable_crtc_nofb(struct intel_crtc *crtc) DRM_DEBUG_KMS("Trying to restore without FB -> disabling pipe %c\n", pipe_name(crtc->pipe)); - list_for_each_entry(connector, &dev->mode_config.connector_list, base.head) { + for_each_intel_connector(dev, connector) { if (connector->new_encoder && connector->new_encoder->new_crtc == crtc) connector->new_encoder = NULL; @@ -12182,8 +12228,8 @@ void intel_plane_destroy(struct drm_plane *plane) } const struct drm_plane_funcs intel_plane_funcs = { - .update_plane = drm_atomic_helper_update_plane, - .disable_plane = drm_atomic_helper_disable_plane, + .update_plane = drm_plane_helper_update, + .disable_plane = drm_plane_helper_disable, .destroy = intel_plane_destroy, .set_property = drm_atomic_helper_plane_set_property, .atomic_get_property = intel_plane_atomic_get_property, @@ -12302,7 +12348,7 @@ intel_check_cursor_plane(struct drm_plane *plane, finish: if (intel_crtc->active) { - if (intel_crtc->cursor_width != state->base.crtc_w) + if (plane->state->crtc_w != state->base.crtc_w) intel_crtc->atomic.update_wm = true; intel_crtc->atomic.fb_bits |= @@ -12345,8 +12391,6 @@ intel_commit_cursor_plane(struct drm_plane *plane, intel_crtc->cursor_addr = addr; intel_crtc->cursor_bo = obj; update: - intel_crtc->cursor_width = state->base.crtc_w; - intel_crtc->cursor_height = state->base.crtc_h; if (intel_crtc->active) intel_crtc_update_cursor(crtc, state->visible); @@ -12574,10 +12618,15 @@ static void intel_setup_outputs(struct drm_device *dev) if (HAS_DDI(dev)) { int found; - /* Haswell uses DDI functions to detect digital outputs */ + /* + * Haswell uses DDI functions to detect digital outputs. + * On SKL pre-D0 the strap isn't connected, so we assume + * it's there. 
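The workaround applied just below gates on a stepping cutoff; SKL_REVID_D0 is presumably one of the usual stepping-to-PCI-revision defines in i915_drv.h, along the lines of:

    /* assumed values, following the usual A0=0x0, B0=0x1, ... convention */
    #define SKL_REVID_A0    (0x0)
    #define SKL_REVID_B0    (0x1)
    #define SKL_REVID_C0    (0x2)
    #define SKL_REVID_D0    (0x3)

    if (found || (IS_SKYLAKE(dev) && INTEL_REVID(dev) < SKL_REVID_D0))
            intel_ddi_init(dev, PORT_A);   /* treat DDI A as present */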
+ */ found = I915_READ(DDI_BUF_CTL_A) & DDI_INIT_DISPLAY_DETECTED; - /* DDI A only supports eDP */ - if (found) + /* WaIgnoreDDIAStrap: skl */ + if (found || + (IS_SKYLAKE(dev) && INTEL_REVID(dev) < SKL_REVID_D0)) intel_ddi_init(dev, PORT_A); /* DDI B, C and D detection is indicated by the SFUSE_STRAP @@ -13068,8 +13117,6 @@ static void intel_init_display(struct drm_device *dev) } else if (IS_IVYBRIDGE(dev)) { /* FIXME: detect B0+ stepping and use auto training */ dev_priv->display.fdi_link_train = ivb_manual_fdi_link_train; - dev_priv->display.modeset_global_resources = - ivb_modeset_global_resources; } else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) { dev_priv->display.fdi_link_train = hsw_fdi_link_train; } else if (IS_VALLEYVIEW(dev)) { @@ -13365,7 +13412,7 @@ void intel_modeset_init(struct drm_device *dev) for_each_pipe(dev_priv, pipe) { intel_crtc_init(dev, pipe); - for_each_sprite(pipe, sprite) { + for_each_sprite(dev_priv, pipe, sprite) { ret = intel_plane_init(dev, pipe, sprite); if (ret) DRM_DEBUG_KMS("pipe %c sprite %c init failed: %d\n", @@ -13421,9 +13468,7 @@ static void intel_enable_pipe_a(struct drm_device *dev) /* We can't just switch on the pipe A, we need to set things up with a * proper mode and output configuration. As a gross hack, enable pipe A * by enabling the load detect pipe once. */ - list_for_each_entry(connector, - &dev->mode_config.connector_list, - base.head) { + for_each_intel_connector(dev, connector) { if (connector->encoder->type == INTEL_OUTPUT_ANALOG) { crt = &connector->base; break; @@ -13494,8 +13539,7 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc) crtc->plane = plane; /* ... and break all links. */ - list_for_each_entry(connector, &dev->mode_config.connector_list, - base.head) { + for_each_intel_connector(dev, connector) { if (connector->encoder->base.crtc != &crtc->base) continue; @@ -13504,8 +13548,7 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc) } /* multiple connectors may have the same encoder: * handle them and break crtc link separately */ - list_for_each_entry(connector, &dev->mode_config.connector_list, - base.head) + for_each_intel_connector(dev, connector) if (connector->encoder->base.crtc == &crtc->base) { connector->encoder->base.crtc = NULL; connector->encoder->connectors_active = false; @@ -13609,9 +13652,7 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder) * a bug in one of the get_hw_state functions. Or someplace else * in our code, like the register restore mess on resume. Clamp * things to off as a safer default. 
*/ - list_for_each_entry(connector, - &dev->mode_config.connector_list, - base.head) { + for_each_intel_connector(dev, connector) { if (connector->encoder != encoder) continue; connector->base.dpms = DRM_MODE_DPMS_OFF; @@ -13726,8 +13767,7 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev) pipe_name(pipe)); } - list_for_each_entry(connector, &dev->mode_config.connector_list, - base.head) { + for_each_intel_connector(dev, connector) { if (connector->get_hw_state(connector)) { connector->base.dpms = DRM_MODE_DPMS_ON; connector->encoder->connectors_active = true; @@ -13907,8 +13947,6 @@ void intel_modeset_cleanup(struct drm_device *dev) intel_fbc_disable(dev); - ironlake_teardown_rc6(dev); - mutex_unlock(&dev->struct_mutex); /* flush any delayed tasks or pending work */ diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index d1141d37e205..ca60060710d2 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -84,6 +84,11 @@ static const struct dp_link_dpll chv_dpll[] = { { DP_LINK_BW_5_4, /* m2_int = 27, m2_fraction = 0 */ { .p1 = 2, .p2 = 1, .n = 1, .m1 = 2, .m2 = 0x6c00000 } } }; +/* Skylake supports following rates */ +static const uint32_t gen9_rates[] = { 162000, 216000, 270000, 324000, + 432000, 540000 }; + +static const uint32_t default_rates[] = { 162000, 270000, 540000 }; /** * is_edp - is the given port attached to an eDP panel (either CPU or PCH) @@ -129,7 +134,10 @@ intel_dp_max_link_bw(struct intel_dp *intel_dp) case DP_LINK_BW_2_7: break; case DP_LINK_BW_5_4: /* 1.2 capable displays may advertise higher bw */ - if (((IS_HASWELL(dev) && !IS_HSW_ULX(dev)) || + if (IS_SKYLAKE(dev) && INTEL_REVID(dev) <= SKL_REVID_B0) + /* WaDisableHBR2:skl */ + max_link_bw = DP_LINK_BW_2_7; + else if (((IS_HASWELL(dev) && !IS_HSW_ULX(dev)) || INTEL_INFO(dev)->gen >= 8) && intel_dp->dpcd[DP_DPCD_REV] >= 0x12) max_link_bw = DP_LINK_BW_5_4; @@ -1075,7 +1083,7 @@ intel_dp_connector_unregister(struct intel_connector *intel_connector) } static void -skl_edp_set_pll_config(struct intel_crtc_state *pipe_config, int link_bw) +skl_edp_set_pll_config(struct intel_crtc_state *pipe_config, int link_clock) { u32 ctrl1; @@ -1084,19 +1092,35 @@ skl_edp_set_pll_config(struct intel_crtc_state *pipe_config, int link_bw) pipe_config->dpll_hw_state.cfgcr2 = 0; ctrl1 = DPLL_CTRL1_OVERRIDE(SKL_DPLL0); - switch (link_bw) { - case DP_LINK_BW_1_62: + switch (link_clock / 2) { + case 81000: ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_810, SKL_DPLL0); break; - case DP_LINK_BW_2_7: + case 135000: ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_1350, SKL_DPLL0); break; - case DP_LINK_BW_5_4: + case 270000: ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_2700, SKL_DPLL0); break; + case 162000: + ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_1620, + SKL_DPLL0); + break; + /* TBD: For DP link rates 2.16 GHz and 4.32 GHz, VCO is 8640 which + results in CDCLK change. 
Need to handle the change of CDCLK by + disabling pipes and re-enabling them */ + case 108000: + ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_1080, + SKL_DPLL0); + break; + case 216000: + ctrl1 |= DPLL_CRTL1_LINK_RATE(DPLL_CRTL1_LINK_RATE_2160, + SKL_DPLL0); + break; + } pipe_config->dpll_hw_state.ctrl1 = ctrl1; } @@ -1117,6 +1141,52 @@ hsw_dp_set_ddi_pll_sel(struct intel_crtc_state *pipe_config, int link_bw) } } +static int +intel_read_sink_rates(struct intel_dp *intel_dp, uint32_t *sink_rates) +{ + struct drm_device *dev = intel_dp_to_dev(intel_dp); + int i = 0; + uint16_t val; + + if (INTEL_INFO(dev)->gen >= 9 && intel_dp->supported_rates[0]) { + /* + * Receiver supports only main-link rate selection by + * link rate table method, so read link rates from + * supported_link_rates + */ + for (i = 0; i < DP_MAX_SUPPORTED_RATES; ++i) { + val = le16_to_cpu(intel_dp->supported_rates[i]); + if (val == 0) + break; + + sink_rates[i] = val * 200; + } + + if (i <= 0) + DRM_ERROR("No rates in SUPPORTED_LINK_RATES"); + } + return i; +} + +static int +intel_read_source_rates(struct intel_dp *intel_dp, uint32_t *source_rates) +{ + struct drm_device *dev = intel_dp_to_dev(intel_dp); + int i; + int max_default_rate; + + if (INTEL_INFO(dev)->gen >= 9 && intel_dp->supported_rates[0]) { + for (i = 0; i < ARRAY_SIZE(gen9_rates); ++i) + source_rates[i] = gen9_rates[i]; + } else { + /* Index of the max_link_bw supported + 1 */ + max_default_rate = (intel_dp_max_link_bw(intel_dp) >> 3) + 1; + for (i = 0; i < max_default_rate; ++i) + source_rates[i] = default_rates[i]; + } + return i; +} + static void intel_dp_set_clock(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config, int link_bw) @@ -1150,6 +1220,45 @@ intel_dp_set_clock(struct intel_encoder *encoder, } } +static int intel_supported_rates(const uint32_t *source_rates, int source_len, +const uint32_t *sink_rates, int sink_len, uint32_t *supported_rates) +{ + int i = 0, j = 0, k = 0; + + /* For panels with edp version less than 1.4 */ + if (sink_len == 0) { + for (i = 0; i < source_len; ++i) + supported_rates[i] = source_rates[i]; + return source_len; + } + + /* For edp1.4 panels, find the common rates between source and sink */ + while (i < source_len && j < sink_len) { + if (source_rates[i] == sink_rates[j]) { + supported_rates[k] = source_rates[i]; + ++k; + ++i; + ++j; + } else if (source_rates[i] < sink_rates[j]) { + ++i; + } else { + ++j; + } + } + return k; +} + +static int rate_to_index(uint32_t find, const uint32_t *rates) +{ + int i = 0; + + for (i = 0; i < DP_MAX_SUPPORTED_RATES; ++i) + if (find == rates[i]) + break; + + return i; +} + bool intel_dp_compute_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config) @@ -1166,10 +1275,25 @@ intel_dp_compute_config(struct intel_encoder *encoder, int max_lane_count = intel_dp_max_lane_count(intel_dp); /* Conveniently, the link BW constants become indices with a shift...*/ int min_clock = 0; - int max_clock = intel_dp_max_link_bw(intel_dp) >> 3; + int max_clock; int bpp, mode_rate; - static int bws[] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7, DP_LINK_BW_5_4 }; int link_avail, link_clock; + uint32_t sink_rates[8]; + uint32_t supported_rates[8] = {0}; + uint32_t source_rates[8]; + int source_len, sink_len, supported_len; + + sink_len = intel_read_sink_rates(intel_dp, sink_rates); + + source_len = intel_read_source_rates(intel_dp, source_rates); + + supported_len = intel_supported_rates(source_rates, source_len, + sink_rates, sink_len, supported_rates); + + /* No common 
link rates between source and sink */ + WARN_ON(supported_len <= 0); + + max_clock = supported_len - 1; if (HAS_PCH_SPLIT(dev) && !HAS_DDI(dev) && port != PORT_A) pipe_config->has_pch_encoder = true; @@ -1193,8 +1317,8 @@ intel_dp_compute_config(struct intel_encoder *encoder, return false; DRM_DEBUG_KMS("DP link computation with max lane count %i " - "max bw %02x pixel clock %iKHz\n", - max_lane_count, bws[max_clock], + "max bw %d pixel clock %iKHz\n", + max_lane_count, supported_rates[max_clock], adjusted_mode->crtc_clock); /* Walk through all bpp values. Luckily they're all nicely spaced with 2 @@ -1223,8 +1347,11 @@ intel_dp_compute_config(struct intel_encoder *encoder, bpp); for (clock = min_clock; clock <= max_clock; clock++) { - for (lane_count = min_lane_count; lane_count <= max_lane_count; lane_count <<= 1) { - link_clock = drm_dp_bw_code_to_link_rate(bws[clock]); + for (lane_count = min_lane_count; + lane_count <= max_lane_count; + lane_count <<= 1) { + + link_clock = supported_rates[clock]; link_avail = intel_dp_max_data_rate(link_clock, lane_count); @@ -1253,10 +1380,19 @@ found: if (intel_dp->color_range) pipe_config->limited_color_range = true; - intel_dp->link_bw = bws[clock]; intel_dp->lane_count = lane_count; + + intel_dp->link_bw = + drm_dp_link_rate_to_bw_code(supported_rates[clock]); + + if (INTEL_INFO(dev)->gen >= 9 && intel_dp->supported_rates[0]) { + intel_dp->rate_select = + rate_to_index(supported_rates[clock], sink_rates); + intel_dp->link_bw = 0; + } + pipe_config->pipe_bpp = bpp; - pipe_config->port_clock = drm_dp_bw_code_to_link_rate(intel_dp->link_bw); + pipe_config->port_clock = supported_rates[clock]; DRM_DEBUG_KMS("DP link bw %02x lane count %d clock %d bpp %d\n", intel_dp->link_bw, intel_dp->lane_count, @@ -1279,7 +1415,7 @@ found: } if (IS_SKYLAKE(dev) && is_edp(intel_dp)) - skl_edp_set_pll_config(pipe_config, intel_dp->link_bw); + skl_edp_set_pll_config(pipe_config, supported_rates[clock]); else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) hsw_dp_set_ddi_pll_sel(pipe_config, intel_dp->link_bw); else @@ -3366,6 +3502,9 @@ intel_dp_start_link_train(struct intel_dp *intel_dp) if (drm_dp_enhanced_frame_cap(intel_dp->dpcd)) link_config[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN; drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_BW_SET, link_config, 2); + if (INTEL_INFO(dev)->gen >= 9 && intel_dp->supported_rates[0]) + drm_dp_dpcd_write(&intel_dp->aux, DP_LINK_RATE_SET, + &intel_dp->rate_select, 1); link_config[0] = 0; link_config[1] = DP_SET_ANSI_8B10B; @@ -3578,6 +3717,7 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp) struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); struct drm_device *dev = dig_port->base.base.dev; struct drm_i915_private *dev_priv = dev->dev_private; + uint8_t rev; if (intel_dp_dpcd_read_wake(&intel_dp->aux, 0x000, intel_dp->dpcd, sizeof(intel_dp->dpcd)) < 0) @@ -3609,6 +3749,16 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp) } else intel_dp->use_tps3 = false; + /* Intermediate frequency support */ + if (is_edp(intel_dp) && + (intel_dp->dpcd[DP_EDP_CONFIGURATION_CAP] & DP_DPCD_DISPLAY_CONTROL_CAPABLE) && + (intel_dp_dpcd_read_wake(&intel_dp->aux, DP_EDP_DPCD_REV, &rev, 1) == 1) && + (rev >= 0x03)) { /* eDp v1.4 or higher */ + intel_dp_dpcd_read_wake(&intel_dp->aux, + DP_SUPPORTED_LINK_RATES, + intel_dp->supported_rates, + sizeof(intel_dp->supported_rates)); + } if (!(intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT)) return true; /* native DP sink */ @@ -4966,12 +5116,13 @@ void intel_edp_drrs_invalidate(struct drm_device 
*dev, if (!dev_priv->drrs.dp) return; + cancel_delayed_work_sync(&dev_priv->drrs.work); + mutex_lock(&dev_priv->drrs.mutex); crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc; pipe = to_intel_crtc(crtc)->pipe; if (dev_priv->drrs.refresh_rate_type == DRRS_LOW_RR) { - cancel_delayed_work_sync(&dev_priv->drrs.work); intel_dp_set_drrs_state(dev_priv->dev, dev_priv->drrs.dp->attached_connector->panel. fixed_mode->vrefresh); @@ -5004,13 +5155,13 @@ void intel_edp_drrs_flush(struct drm_device *dev, if (!dev_priv->drrs.dp) return; + cancel_delayed_work_sync(&dev_priv->drrs.work); + mutex_lock(&dev_priv->drrs.mutex); crtc = dp_to_dig_port(dev_priv->drrs.dp)->base.base.crtc; pipe = to_intel_crtc(crtc)->pipe; dev_priv->drrs.busy_frontbuffer_bits &= ~frontbuffer_bits; - cancel_delayed_work_sync(&dev_priv->drrs.work); - if (dev_priv->drrs.refresh_rate_type != DRRS_LOW_RR && !dev_priv->drrs.busy_frontbuffer_bits) schedule_delayed_work(&dev_priv->drrs.work, diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c index 9f67a379a9a5..be124928ca14 100644 --- a/drivers/gpu/drm/i915/intel_dp_mst.c +++ b/drivers/gpu/drm/i915/intel_dp_mst.c @@ -58,7 +58,7 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder, pipe_config->pipe_bpp = 24; pipe_config->port_clock = drm_dp_bw_code_to_link_rate(intel_dp->link_bw); - list_for_each_entry(intel_connector, &dev->mode_config.connector_list, base.head) { + for_each_intel_connector(dev, intel_connector) { if (intel_connector->new_encoder == encoder) { found = intel_connector; break; @@ -140,7 +140,7 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder) struct drm_crtc *crtc = encoder->base.crtc; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - list_for_each_entry(intel_connector, &dev->mode_config.connector_list, base.head) { + for_each_intel_connector(dev, intel_connector) { if (intel_connector->new_encoder == encoder) { found = intel_connector; break; diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index f4aa849b243e..c77128c67cf8 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -464,7 +464,6 @@ struct intel_crtc { struct drm_i915_gem_object *cursor_bo; uint32_t cursor_addr; - int16_t cursor_width, cursor_height; uint32_t cursor_cntl; uint32_t cursor_size; uint32_t cursor_base; @@ -623,10 +622,12 @@ struct intel_dp { uint32_t color_range; bool color_range_auto; uint8_t link_bw; + uint8_t rate_select; uint8_t lane_count; uint8_t dpcd[DP_RECEIVER_CAP_SIZE]; uint8_t psr_dpcd[EDP_PSR_RECEIVER_CAP_SIZE]; uint8_t downstream_ports[DP_MAX_DOWNSTREAM_PORTS]; + __le16 supported_rates[DP_MAX_SUPPORTED_RATES]; struct drm_dp_aux aux; uint8_t train_set[4]; int panel_power_up_delay; @@ -839,7 +840,8 @@ static inline bool intel_irqs_enabled(struct drm_i915_private *dev_priv) } int intel_get_crtc_scanline(struct intel_crtc *crtc); -void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv); +void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv, + unsigned int pipe_mask); /* intel_crt.c */ void intel_crt_init(struct drm_device *dev); @@ -874,7 +876,8 @@ void intel_ddi_set_vc_payload_alloc(struct drm_crtc *crtc, bool state); /* intel_frontbuffer.c */ void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj, - struct intel_engine_cs *ring); + struct intel_engine_cs *ring, + enum fb_op_origin origin); void intel_frontbuffer_flip_prepare(struct drm_device *dev, unsigned frontbuffer_bits); void 
intel_frontbuffer_flip_complete(struct drm_device *dev, @@ -1115,7 +1118,11 @@ bool intel_fbc_enabled(struct drm_device *dev); void intel_fbc_update(struct drm_device *dev); void intel_fbc_init(struct drm_i915_private *dev_priv); void intel_fbc_disable(struct drm_device *dev); -void bdw_fbc_sw_flush(struct drm_device *dev, u32 value); +void intel_fbc_invalidate(struct drm_i915_private *dev_priv, + unsigned int frontbuffer_bits, + enum fb_op_origin origin); +void intel_fbc_flush(struct drm_i915_private *dev_priv, + unsigned int frontbuffer_bits); /* intel_hdmi.c */ void intel_hdmi_init(struct drm_device *dev, int hdmi_reg, enum port port); @@ -1231,7 +1238,6 @@ void intel_enable_gt_powersave(struct drm_device *dev); void intel_disable_gt_powersave(struct drm_device *dev); void intel_suspend_gt_powersave(struct drm_device *dev); void intel_reset_gt_powersave(struct drm_device *dev); -void ironlake_teardown_rc6(struct drm_device *dev); void gen6_update_ring_freq(struct drm_device *dev); void gen6_rps_idle(struct drm_i915_private *dev_priv); void gen6_rps_boost(struct drm_i915_private *dev_priv); diff --git a/drivers/gpu/drm/i915/intel_fbc.c b/drivers/gpu/drm/i915/intel_fbc.c index 618f7bdab0ba..9fcf446e95f5 100644 --- a/drivers/gpu/drm/i915/intel_fbc.c +++ b/drivers/gpu/drm/i915/intel_fbc.c @@ -174,29 +174,10 @@ static bool g4x_fbc_enabled(struct drm_device *dev) return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN; } -static void snb_fbc_blit_update(struct drm_device *dev) +static void intel_fbc_nuke(struct drm_i915_private *dev_priv) { - struct drm_i915_private *dev_priv = dev->dev_private; - u32 blt_ecoskpd; - - /* Make sure blitter notifies FBC of writes */ - - /* Blitter is part of Media powerwell on VLV. No impact of - * his param in other platforms for now */ - intel_uncore_forcewake_get(dev_priv, FORCEWAKE_MEDIA); - - blt_ecoskpd = I915_READ(GEN6_BLITTER_ECOSKPD); - blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY << - GEN6_BLITTER_LOCK_SHIFT; - I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd); - blt_ecoskpd |= GEN6_BLITTER_FBC_NOTIFY; - I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd); - blt_ecoskpd &= ~(GEN6_BLITTER_FBC_NOTIFY << - GEN6_BLITTER_LOCK_SHIFT); - I915_WRITE(GEN6_BLITTER_ECOSKPD, blt_ecoskpd); - POSTING_READ(GEN6_BLITTER_ECOSKPD); - - intel_uncore_forcewake_put(dev_priv, FORCEWAKE_MEDIA); + I915_WRITE(MSG_FBC_REND_STATE, FBC_REND_NUKE); + POSTING_READ(MSG_FBC_REND_STATE); } static void ilk_fbc_enable(struct drm_crtc *crtc) @@ -239,9 +220,10 @@ static void ilk_fbc_enable(struct drm_crtc *crtc) I915_WRITE(SNB_DPFC_CTL_SA, SNB_CPU_FENCE_ENABLE | obj->fence_reg); I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y); - snb_fbc_blit_update(dev); } + intel_fbc_nuke(dev_priv); + DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane)); } @@ -320,7 +302,7 @@ static void gen7_fbc_enable(struct drm_crtc *crtc) SNB_CPU_FENCE_ENABLE | obj->fence_reg); I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y); - snb_fbc_blit_update(dev); + intel_fbc_nuke(dev_priv); DRM_DEBUG_KMS("enabled fbc on plane %c\n", plane_name(intel_crtc->plane)); } @@ -340,19 +322,6 @@ bool intel_fbc_enabled(struct drm_device *dev) return dev_priv->fbc.enabled; } -void bdw_fbc_sw_flush(struct drm_device *dev, u32 value) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - - if (!IS_GEN8(dev)) - return; - - if (!intel_fbc_enabled(dev)) - return; - - I915_WRITE(MSG_FBC_REND_STATE, value); -} - static void intel_fbc_work_fn(struct work_struct *__work) { struct intel_fbc_work *work = @@ -685,6 +654,44 @@ out_disable: 
i915_gem_stolen_cleanup_compression(dev); } +void intel_fbc_invalidate(struct drm_i915_private *dev_priv, + unsigned int frontbuffer_bits, + enum fb_op_origin origin) +{ + struct drm_device *dev = dev_priv->dev; + unsigned int fbc_bits; + + if (origin == ORIGIN_GTT) + return; + + if (dev_priv->fbc.enabled) + fbc_bits = INTEL_FRONTBUFFER_PRIMARY(dev_priv->fbc.crtc->pipe); + else if (dev_priv->fbc.fbc_work) + fbc_bits = INTEL_FRONTBUFFER_PRIMARY( + to_intel_crtc(dev_priv->fbc.fbc_work->crtc)->pipe); + else + fbc_bits = dev_priv->fbc.possible_framebuffer_bits; + + dev_priv->fbc.busy_bits |= (fbc_bits & frontbuffer_bits); + + if (dev_priv->fbc.busy_bits) + intel_fbc_disable(dev); +} + +void intel_fbc_flush(struct drm_i915_private *dev_priv, + unsigned int frontbuffer_bits) +{ + struct drm_device *dev = dev_priv->dev; + + if (!dev_priv->fbc.busy_bits) + return; + + dev_priv->fbc.busy_bits &= ~frontbuffer_bits; + + if (!dev_priv->fbc.busy_bits) + intel_fbc_update(dev); +} + /** * intel_fbc_init - Initialize FBC * @dev_priv: the i915 device @@ -693,12 +700,22 @@ out_disable: */ void intel_fbc_init(struct drm_i915_private *dev_priv) { + enum pipe pipe; + if (!HAS_FBC(dev_priv)) { dev_priv->fbc.enabled = false; dev_priv->fbc.no_fbc_reason = FBC_UNSUPPORTED; return; } + for_each_pipe(dev_priv, pipe) { + dev_priv->fbc.possible_framebuffer_bits |= + INTEL_FRONTBUFFER_PRIMARY(pipe); + + if (IS_HASWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 8) + break; + } + if (INTEL_INFO(dev_priv)->gen >= 7) { dev_priv->display.fbc_enabled = ilk_fbc_enabled; dev_priv->display.enable_fbc = gen7_fbc_enable; diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c index 234a699b8219..757c0d216f80 100644 --- a/drivers/gpu/drm/i915/intel_fbdev.c +++ b/drivers/gpu/drm/i915/intel_fbdev.c @@ -71,6 +71,31 @@ static int intel_fbdev_set_par(struct fb_info *info) return ret; } +static int intel_fbdev_blank(int blank, struct fb_info *info) +{ + struct drm_fb_helper *fb_helper = info->par; + struct intel_fbdev *ifbdev = + container_of(fb_helper, struct intel_fbdev, helper); + int ret; + + ret = drm_fb_helper_blank(blank, info); + + if (ret == 0) { + /* + * FIXME: fbdev presumes that all callbacks also work from + * atomic contexts and relies on that for emergency oops + * printing. KMS totally doesn't do that and the locking here is + * by far not the only place this goes wrong. Ignore this for + * now until we solve this for real. 
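Both intel_fbc_invalidate() above and the invalidate call just below pass an fb_op_origin tag; the enum itself is not part of this diff, but a plausible shape consistent with the ORIGIN_GTT early return would be:

    enum fb_op_origin {
            ORIGIN_GTT,     /* GTT-mmap writes; intel_fbc_invalidate() skips these */
            ORIGIN_CPU,     /* CPU-mmap / pwrite paths */
            ORIGIN_CS,      /* rendering through the command streamer */
            ORIGIN_FLIP,    /* page flips */
    };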
+ */ + mutex_lock(&fb_helper->dev->struct_mutex); + intel_fb_obj_invalidate(ifbdev->fb->obj, NULL, ORIGIN_GTT); + mutex_unlock(&fb_helper->dev->struct_mutex); + } + + return ret; +} + static struct fb_ops intelfb_ops = { .owner = THIS_MODULE, .fb_check_var = drm_fb_helper_check_var, @@ -79,7 +104,7 @@ static struct fb_ops intelfb_ops = { .fb_copyarea = cfb_copyarea, .fb_imageblit = cfb_imageblit, .fb_pan_display = drm_fb_helper_pan_display, - .fb_blank = drm_fb_helper_blank, + .fb_blank = intel_fbdev_blank, .fb_setcmap = drm_fb_helper_setcmap, .fb_debug_enter = drm_fb_helper_debug_enter, .fb_debug_leave = drm_fb_helper_debug_leave, diff --git a/drivers/gpu/drm/i915/intel_frontbuffer.c b/drivers/gpu/drm/i915/intel_frontbuffer.c index 73cb6e036445..0a1bac8ac72b 100644 --- a/drivers/gpu/drm/i915/intel_frontbuffer.c +++ b/drivers/gpu/drm/i915/intel_frontbuffer.c @@ -118,8 +118,6 @@ static void intel_mark_fb_busy(struct drm_device *dev, continue; intel_increase_pllclock(dev, pipe); - if (ring && intel_fbc_enabled(dev)) - ring->fbc_dirty = true; } } @@ -127,6 +125,7 @@ static void intel_mark_fb_busy(struct drm_device *dev, * intel_fb_obj_invalidate - invalidate frontbuffer object * @obj: GEM object to invalidate * @ring: set for asynchronous rendering + * @origin: which operation caused the invalidation * * This function gets called every time rendering on the given object starts and * frontbuffer caching (fbc, low refresh rate for DRRS, panel self refresh) must @@ -135,7 +134,8 @@ static void intel_mark_fb_busy(struct drm_device *dev, * scheduled. */ void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj, - struct intel_engine_cs *ring) + struct intel_engine_cs *ring, + enum fb_op_origin origin) { struct drm_device *dev = obj->base.dev; struct drm_i915_private *dev_priv = dev->dev_private; @@ -158,6 +158,7 @@ void intel_fb_obj_invalidate(struct drm_i915_gem_object *obj, intel_psr_invalidate(dev, obj->frontbuffer_bits); intel_edp_drrs_invalidate(dev, obj->frontbuffer_bits); + intel_fbc_invalidate(dev_priv, obj->frontbuffer_bits, origin); } /** @@ -185,16 +186,7 @@ void intel_frontbuffer_flush(struct drm_device *dev, intel_edp_drrs_flush(dev, frontbuffer_bits); intel_psr_flush(dev, frontbuffer_bits); - - /* - * FIXME: Unconditional fbc flushing here is a rather gross hack and - * needs to be reworked into a proper frontbuffer tracking scheme like - * psr employs. 
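That removed FIXME is resolved by the busy-bit scheme added to intel_fbc.c earlier in this diff; its life cycle, condensed from those hunks:

    /* invalidate: latch the FBC-relevant dirty bits, stop compressing */
    dev_priv->fbc.busy_bits |= (fbc_bits & frontbuffer_bits);
    if (dev_priv->fbc.busy_bits)
            intel_fbc_disable(dev);

    /* flush: drain the bits, re-enable once nothing is pending */
    dev_priv->fbc.busy_bits &= ~frontbuffer_bits;
    if (!dev_priv->fbc.busy_bits)
            intel_fbc_update(dev);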
- */ - if (dev_priv->fbc.need_sw_cache_clean) { - dev_priv->fbc.need_sw_cache_clean = false; - bdw_fbc_sw_flush(dev, FBC_REND_CACHE_CLEAN); - } + intel_fbc_flush(dev_priv, frontbuffer_bits); } /** diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 542cf6844dc3..288c9d24098e 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -263,6 +263,47 @@ static const struct cxsr_latency *intel_get_cxsr_latency(int is_desktop, return NULL; } +static void chv_set_memory_dvfs(struct drm_i915_private *dev_priv, bool enable) +{ + u32 val; + + mutex_lock(&dev_priv->rps.hw_lock); + + val = vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2); + if (enable) + val &= ~FORCE_DDR_HIGH_FREQ; + else + val |= FORCE_DDR_HIGH_FREQ; + val &= ~FORCE_DDR_LOW_FREQ; + val |= FORCE_DDR_FREQ_REQ_ACK; + vlv_punit_write(dev_priv, PUNIT_REG_DDR_SETUP2, val); + + if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DDR_SETUP2) & + FORCE_DDR_FREQ_REQ_ACK) == 0, 3)) + DRM_ERROR("timed out waiting for Punit DDR DVFS request\n"); + + mutex_unlock(&dev_priv->rps.hw_lock); +} + +static void chv_set_memory_pm5(struct drm_i915_private *dev_priv, bool enable) +{ + u32 val; + + mutex_lock(&dev_priv->rps.hw_lock); + + val = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ); + if (enable) + val |= DSP_MAXFIFO_PM5_ENABLE; + else + val &= ~DSP_MAXFIFO_PM5_ENABLE; + vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, val); + + mutex_unlock(&dev_priv->rps.hw_lock); +} + +#define FW_WM(value, plane) \ + (((value) << DSPFW_ ## plane ## _SHIFT) & DSPFW_ ## plane ## _MASK) + void intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable) { struct drm_device *dev = dev_priv->dev; @@ -270,6 +311,8 @@ void intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable) if (IS_VALLEYVIEW(dev)) { I915_WRITE(FW_BLC_SELF_VLV, enable ? FW_CSPWRDWNEN : 0); + if (IS_CHERRYVIEW(dev)) + chv_set_memory_pm5(dev_priv, enable); } else if (IS_G4X(dev) || IS_CRESTLINE(dev)) { I915_WRITE(FW_BLC_SELF, enable ? FW_BLC_SELF_EN : 0); } else if (IS_PINEVIEW(dev)) { @@ -292,6 +335,7 @@ void intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable) enable ? 
"enabled" : "disabled"); } + /* * Latency for FIFO fetches is dependent on several factors: * - memory configuration (speed, channels) @@ -308,6 +352,61 @@ void intel_set_memory_cxsr(struct drm_i915_private *dev_priv, bool enable) */ static const int pessimal_latency_ns = 5000; +#define VLV_FIFO_START(dsparb, dsparb2, lo_shift, hi_shift) \ + ((((dsparb) >> (lo_shift)) & 0xff) | ((((dsparb2) >> (hi_shift)) & 0x1) << 8)) + +static int vlv_get_fifo_size(struct drm_device *dev, + enum pipe pipe, int plane) +{ + struct drm_i915_private *dev_priv = dev->dev_private; + int sprite0_start, sprite1_start, size; + + switch (pipe) { + uint32_t dsparb, dsparb2, dsparb3; + case PIPE_A: + dsparb = I915_READ(DSPARB); + dsparb2 = I915_READ(DSPARB2); + sprite0_start = VLV_FIFO_START(dsparb, dsparb2, 0, 0); + sprite1_start = VLV_FIFO_START(dsparb, dsparb2, 8, 4); + break; + case PIPE_B: + dsparb = I915_READ(DSPARB); + dsparb2 = I915_READ(DSPARB2); + sprite0_start = VLV_FIFO_START(dsparb, dsparb2, 16, 8); + sprite1_start = VLV_FIFO_START(dsparb, dsparb2, 24, 12); + break; + case PIPE_C: + dsparb2 = I915_READ(DSPARB2); + dsparb3 = I915_READ(DSPARB3); + sprite0_start = VLV_FIFO_START(dsparb3, dsparb2, 0, 16); + sprite1_start = VLV_FIFO_START(dsparb3, dsparb2, 8, 20); + break; + default: + return 0; + } + + switch (plane) { + case 0: + size = sprite0_start; + break; + case 1: + size = sprite1_start - sprite0_start; + break; + case 2: + size = 512 - 1 - sprite1_start; + break; + default: + return 0; + } + + DRM_DEBUG_KMS("Pipe %c %s %c FIFO size: %d\n", + pipe_name(pipe), plane == 0 ? "primary" : "sprite", + plane == 0 ? plane_name(pipe) : sprite_name(pipe, plane - 1), + size); + + return size; +} + static int i9xx_get_fifo_size(struct drm_device *dev, int plane) { struct drm_i915_private *dev_priv = dev->dev_private; @@ -553,7 +652,7 @@ static void pineview_update_wm(struct drm_crtc *unused_crtc) crtc = single_enabled_crtc(dev); if (crtc) { const struct drm_display_mode *adjusted_mode; - int pixel_size = crtc->primary->fb->bits_per_pixel / 8; + int pixel_size = crtc->primary->state->fb->bits_per_pixel / 8; int clock; adjusted_mode = &to_intel_crtc(crtc)->config->base.adjusted_mode; @@ -565,7 +664,7 @@ static void pineview_update_wm(struct drm_crtc *unused_crtc) pixel_size, latency->display_sr); reg = I915_READ(DSPFW1); reg &= ~DSPFW_SR_MASK; - reg |= wm << DSPFW_SR_SHIFT; + reg |= FW_WM(wm, SR); I915_WRITE(DSPFW1, reg); DRM_DEBUG_KMS("DSPFW1 register is %x\n", reg); @@ -575,7 +674,7 @@ static void pineview_update_wm(struct drm_crtc *unused_crtc) pixel_size, latency->cursor_sr); reg = I915_READ(DSPFW3); reg &= ~DSPFW_CURSOR_SR_MASK; - reg |= (wm & 0x3f) << DSPFW_CURSOR_SR_SHIFT; + reg |= FW_WM(wm, CURSOR_SR); I915_WRITE(DSPFW3, reg); /* Display HPLL off SR */ @@ -584,7 +683,7 @@ static void pineview_update_wm(struct drm_crtc *unused_crtc) pixel_size, latency->display_hpll_disable); reg = I915_READ(DSPFW3); reg &= ~DSPFW_HPLL_SR_MASK; - reg |= wm & DSPFW_HPLL_SR_MASK; + reg |= FW_WM(wm, HPLL_SR); I915_WRITE(DSPFW3, reg); /* cursor HPLL off SR */ @@ -593,7 +692,7 @@ static void pineview_update_wm(struct drm_crtc *unused_crtc) pixel_size, latency->cursor_hpll_disable); reg = I915_READ(DSPFW3); reg &= ~DSPFW_HPLL_CURSOR_MASK; - reg |= (wm & 0x3f) << DSPFW_HPLL_CURSOR_SHIFT; + reg |= FW_WM(wm, HPLL_CURSOR); I915_WRITE(DSPFW3, reg); DRM_DEBUG_KMS("DSPFW3 register is %x\n", reg); @@ -629,7 +728,7 @@ static bool g4x_compute_wm0(struct drm_device *dev, clock = adjusted_mode->crtc_clock; htotal = 
adjusted_mode->crtc_htotal; hdisplay = to_intel_crtc(crtc)->config->pipe_src_w; - pixel_size = crtc->primary->fb->bits_per_pixel / 8; + pixel_size = crtc->primary->state->fb->bits_per_pixel / 8; /* Use the small buffer method to calculate plane watermark */ entries = ((clock * pixel_size / 1000) * display_latency_ns) / 1000; @@ -644,7 +743,7 @@ static bool g4x_compute_wm0(struct drm_device *dev, /* Use the large buffer method to calculate cursor watermark */ line_time_us = max(htotal * 1000 / clock, 1); line_count = (cursor_latency_ns / line_time_us + 1000) / 1000; - entries = line_count * to_intel_crtc(crtc)->cursor_width * pixel_size; + entries = line_count * crtc->cursor->state->crtc_w * pixel_size; tlb_miss = cursor->fifo_size*cursor->cacheline_size - hdisplay * 8; if (tlb_miss > 0) entries += tlb_miss; @@ -716,7 +815,7 @@ static bool g4x_compute_srwm(struct drm_device *dev, clock = adjusted_mode->crtc_clock; htotal = adjusted_mode->crtc_htotal; hdisplay = to_intel_crtc(crtc)->config->pipe_src_w; - pixel_size = crtc->primary->fb->bits_per_pixel / 8; + pixel_size = crtc->primary->state->fb->bits_per_pixel / 8; line_time_us = max(htotal * 1000 / clock, 1); line_count = (latency_ns / line_time_us + 1000) / 1000; @@ -730,7 +829,7 @@ static bool g4x_compute_srwm(struct drm_device *dev, *display_wm = entries + display->guard_size; /* calculate the self-refresh watermark for display cursor */ - entries = line_count * pixel_size * to_intel_crtc(crtc)->cursor_width; + entries = line_count * pixel_size * crtc->cursor->state->crtc_w; entries = DIV_ROUND_UP(entries, cursor->cacheline_size); *cursor_wm = entries + cursor->guard_size; @@ -739,232 +838,234 @@ static bool g4x_compute_srwm(struct drm_device *dev, display, cursor); } -static bool vlv_compute_drain_latency(struct drm_crtc *crtc, - int pixel_size, - int *prec_mult, - int *drain_latency) -{ - struct drm_device *dev = crtc->dev; - int entries; - int clock = to_intel_crtc(crtc)->config->base.adjusted_mode.crtc_clock; +#define FW_WM_VLV(value, plane) \ + (((value) << DSPFW_ ## plane ## _SHIFT) & DSPFW_ ## plane ## _MASK_VLV) - if (WARN(clock == 0, "Pixel clock is zero!\n")) - return false; +static void vlv_write_wm_values(struct intel_crtc *crtc, + const struct vlv_wm_values *wm) +{ + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + enum pipe pipe = crtc->pipe; - if (WARN(pixel_size == 0, "Pixel size is zero!\n")) - return false; + I915_WRITE(VLV_DDL(pipe), + (wm->ddl[pipe].cursor << DDL_CURSOR_SHIFT) | + (wm->ddl[pipe].sprite[1] << DDL_SPRITE_SHIFT(1)) | + (wm->ddl[pipe].sprite[0] << DDL_SPRITE_SHIFT(0)) | + (wm->ddl[pipe].primary << DDL_PLANE_SHIFT)); - entries = DIV_ROUND_UP(clock, 1000) * pixel_size; - if (IS_CHERRYVIEW(dev)) - *prec_mult = (entries > 128) ? DRAIN_LATENCY_PRECISION_32 : - DRAIN_LATENCY_PRECISION_16; - else - *prec_mult = (entries > 128) ? 
DRAIN_LATENCY_PRECISION_64 : - DRAIN_LATENCY_PRECISION_32; - *drain_latency = (64 * (*prec_mult) * 4) / entries; + I915_WRITE(DSPFW1, + FW_WM(wm->sr.plane, SR) | + FW_WM(wm->pipe[PIPE_B].cursor, CURSORB) | + FW_WM_VLV(wm->pipe[PIPE_B].primary, PLANEB) | + FW_WM_VLV(wm->pipe[PIPE_A].primary, PLANEA)); + I915_WRITE(DSPFW2, + FW_WM_VLV(wm->pipe[PIPE_A].sprite[1], SPRITEB) | + FW_WM(wm->pipe[PIPE_A].cursor, CURSORA) | + FW_WM_VLV(wm->pipe[PIPE_A].sprite[0], SPRITEA)); + I915_WRITE(DSPFW3, + FW_WM(wm->sr.cursor, CURSOR_SR)); + + if (IS_CHERRYVIEW(dev_priv)) { + I915_WRITE(DSPFW7_CHV, + FW_WM_VLV(wm->pipe[PIPE_B].sprite[1], SPRITED) | + FW_WM_VLV(wm->pipe[PIPE_B].sprite[0], SPRITEC)); + I915_WRITE(DSPFW8_CHV, + FW_WM_VLV(wm->pipe[PIPE_C].sprite[1], SPRITEF) | + FW_WM_VLV(wm->pipe[PIPE_C].sprite[0], SPRITEE)); + I915_WRITE(DSPFW9_CHV, + FW_WM_VLV(wm->pipe[PIPE_C].primary, PLANEC) | + FW_WM(wm->pipe[PIPE_C].cursor, CURSORC)); + I915_WRITE(DSPHOWM, + FW_WM(wm->sr.plane >> 9, SR_HI) | + FW_WM(wm->pipe[PIPE_C].sprite[1] >> 8, SPRITEF_HI) | + FW_WM(wm->pipe[PIPE_C].sprite[0] >> 8, SPRITEE_HI) | + FW_WM(wm->pipe[PIPE_C].primary >> 8, PLANEC_HI) | + FW_WM(wm->pipe[PIPE_B].sprite[1] >> 8, SPRITED_HI) | + FW_WM(wm->pipe[PIPE_B].sprite[0] >> 8, SPRITEC_HI) | + FW_WM(wm->pipe[PIPE_B].primary >> 8, PLANEB_HI) | + FW_WM(wm->pipe[PIPE_A].sprite[1] >> 8, SPRITEB_HI) | + FW_WM(wm->pipe[PIPE_A].sprite[0] >> 8, SPRITEA_HI) | + FW_WM(wm->pipe[PIPE_A].primary >> 8, PLANEA_HI)); + } else { + I915_WRITE(DSPFW7, + FW_WM_VLV(wm->pipe[PIPE_B].sprite[1], SPRITED) | + FW_WM_VLV(wm->pipe[PIPE_B].sprite[0], SPRITEC)); + I915_WRITE(DSPHOWM, + FW_WM(wm->sr.plane >> 9, SR_HI) | + FW_WM(wm->pipe[PIPE_B].sprite[1] >> 8, SPRITED_HI) | + FW_WM(wm->pipe[PIPE_B].sprite[0] >> 8, SPRITEC_HI) | + FW_WM(wm->pipe[PIPE_B].primary >> 8, PLANEB_HI) | + FW_WM(wm->pipe[PIPE_A].sprite[1] >> 8, SPRITEB_HI) | + FW_WM(wm->pipe[PIPE_A].sprite[0] >> 8, SPRITEA_HI) | + FW_WM(wm->pipe[PIPE_A].primary >> 8, PLANEA_HI)); + } - if (*drain_latency > DRAIN_LATENCY_MASK) - *drain_latency = DRAIN_LATENCY_MASK; + POSTING_READ(DSPFW1); - return true; + dev_priv->wm.vlv = *wm; } -/* - * Update drain latency registers of memory arbiter - * - * Valleyview SoC has a new memory arbiter and needs drain latency registers - * to be programmed. Each plane has a drain latency multiplier and a drain - * latency value. - */ +#undef FW_WM_VLV -static void vlv_update_drain_latency(struct drm_crtc *crtc) +static uint8_t vlv_compute_drain_latency(struct drm_crtc *crtc, + struct drm_plane *plane) { struct drm_device *dev = crtc->dev; - struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - int pixel_size; - int drain_latency; - enum pipe pipe = intel_crtc->pipe; - int plane_prec, prec_mult, plane_dl; - const int high_precision = IS_CHERRYVIEW(dev) ? - DRAIN_LATENCY_PRECISION_32 : DRAIN_LATENCY_PRECISION_64; + int entries, prec_mult, drain_latency, pixel_size; + int clock = intel_crtc->config->base.adjusted_mode.crtc_clock; + const int high_precision = IS_CHERRYVIEW(dev) ? 16 : 64; - plane_dl = I915_READ(VLV_DDL(pipe)) & ~(DDL_PLANE_PRECISION_HIGH | - DRAIN_LATENCY_MASK | DDL_CURSOR_PRECISION_HIGH | - (DRAIN_LATENCY_MASK << DDL_CURSOR_SHIFT)); + /* + * FIXME the plane might have an fb + * but be invisible (eg. 
due to clipping) + */ + if (!intel_crtc->active || !plane->state->fb) + return 0; - if (!intel_crtc_active(crtc)) { - I915_WRITE(VLV_DDL(pipe), plane_dl); - return; - } + if (WARN(clock == 0, "Pixel clock is zero!\n")) + return 0; - /* Primary plane Drain Latency */ - pixel_size = crtc->primary->fb->bits_per_pixel / 8; /* BPP */ - if (vlv_compute_drain_latency(crtc, pixel_size, &prec_mult, &drain_latency)) { - plane_prec = (prec_mult == high_precision) ? - DDL_PLANE_PRECISION_HIGH : - DDL_PLANE_PRECISION_LOW; - plane_dl |= plane_prec | drain_latency; - } + pixel_size = drm_format_plane_cpp(plane->state->fb->pixel_format, 0); - /* Cursor Drain Latency - * BPP is always 4 for cursor - */ - pixel_size = 4; + if (WARN(pixel_size == 0, "Pixel size is zero!\n")) + return 0; + + entries = DIV_ROUND_UP(clock, 1000) * pixel_size; + + prec_mult = high_precision; + drain_latency = 64 * prec_mult * 4 / entries; - /* Program cursor DL only if it is enabled */ - if (intel_crtc->cursor_base && - vlv_compute_drain_latency(crtc, pixel_size, &prec_mult, &drain_latency)) { - plane_prec = (prec_mult == high_precision) ? - DDL_CURSOR_PRECISION_HIGH : - DDL_CURSOR_PRECISION_LOW; - plane_dl |= plane_prec | (drain_latency << DDL_CURSOR_SHIFT); + if (drain_latency > DRAIN_LATENCY_MASK) { + prec_mult /= 2; + drain_latency = 64 * prec_mult * 4 / entries; } - I915_WRITE(VLV_DDL(pipe), plane_dl); -} + if (drain_latency > DRAIN_LATENCY_MASK) + drain_latency = DRAIN_LATENCY_MASK; -#define single_plane_enabled(mask) is_power_of_2(mask) + return drain_latency | (prec_mult == high_precision ? + DDL_PRECISION_HIGH : DDL_PRECISION_LOW); +} -static void valleyview_update_wm(struct drm_crtc *crtc) +static int vlv_compute_wm(struct intel_crtc *crtc, + struct intel_plane *plane, + int fifo_size) { - struct drm_device *dev = crtc->dev; - static const int sr_latency_ns = 12000; - struct drm_i915_private *dev_priv = dev->dev_private; - int planea_wm, planeb_wm, cursora_wm, cursorb_wm; - int plane_sr, cursor_sr; - int ignore_plane_sr, ignore_cursor_sr; - unsigned int enabled = 0; - bool cxsr_enabled; + int clock, entries, pixel_size; - vlv_update_drain_latency(crtc); + /* + * FIXME the plane might have an fb + * but be invisible (eg. due to clipping) + */ + if (!crtc->active || !plane->base.state->fb) + return 0; - if (g4x_compute_wm0(dev, PIPE_A, - &valleyview_wm_info, pessimal_latency_ns, - &valleyview_cursor_wm_info, pessimal_latency_ns, - &planea_wm, &cursora_wm)) - enabled |= 1 << PIPE_A; + pixel_size = drm_format_plane_cpp(plane->base.state->fb->pixel_format, 0); + clock = crtc->config->base.adjusted_mode.crtc_clock; - if (g4x_compute_wm0(dev, PIPE_B, - &valleyview_wm_info, pessimal_latency_ns, - &valleyview_cursor_wm_info, pessimal_latency_ns, - &planeb_wm, &cursorb_wm)) - enabled |= 1 << PIPE_B; + entries = DIV_ROUND_UP(clock, 1000) * pixel_size; - if (single_plane_enabled(enabled) && - g4x_compute_srwm(dev, ffs(enabled) - 1, - sr_latency_ns, - &valleyview_wm_info, - &valleyview_cursor_wm_info, - &plane_sr, &ignore_cursor_sr) && - g4x_compute_srwm(dev, ffs(enabled) - 1, - 2*sr_latency_ns, - &valleyview_wm_info, - &valleyview_cursor_wm_info, - &ignore_plane_sr, &cursor_sr)) { - cxsr_enabled = true; - } else { - cxsr_enabled = false; - intel_set_memory_cxsr(dev_priv, false); - plane_sr = cursor_sr = 0; + /* + * Set up the watermark such that we don't start issuing memory + * requests until we are within PND's max deadline value (256us). 
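A minimal userspace sketch of the deadline rule this comment describes (the `return fifo_size - clamp(DIV_ROUND_UP(256 * entries, 64), 0, fifo_size - 8)` line just below); the 511-cacheline FIFO size and the sample pixel clocks are assumptions for illustration, not values taken from the patch:

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

static int clamp(int v, int lo, int hi)
{
	return v < lo ? lo : (v > hi ? hi : v);
}

/* mirrors vlv_compute_wm(): entries is bytes of data consumed per usec */
static int deadline_wm(int clock_khz, int cpp, int fifo_size)
{
	int entries = DIV_ROUND_UP(clock_khz, 1000) * cpp;

	return fifo_size - clamp(DIV_ROUND_UP(256 * entries, 64), 0,
				 fifo_size - 8);
}

int main(void)
{
	/* 1080p60, 32bpp: the FIFO drains in ~55us < 256us, so the
	 * 8-cacheline floor applies and the watermark comes out as 8 */
	printf("%d\n", deadline_wm(148500, 4, 511));
	/* a slow 10 MHz plane: 256us of data is 160 cachelines -> 351 */
	printf("%d\n", deadline_wm(10000, 4, 511));
	return 0;
}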
+ * Idea being to be idle as long as possible while still taking + * advantage of PND's deadline scheduling. The limit of 8 + * cachelines (used when the FIFO will anyway drain in less time + * than 256us) should match what would be done if trickle + * feed were enabled. + */ + return fifo_size - clamp(DIV_ROUND_UP(256 * entries, 64), 0, fifo_size - 8); +} + +static bool vlv_compute_sr_wm(struct drm_device *dev, + struct vlv_wm_values *wm) +{ + struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_crtc *crtc; + enum pipe pipe = INVALID_PIPE; + int num_planes = 0; + int fifo_size = 0; + struct intel_plane *plane; + + wm->sr.cursor = wm->sr.plane = 0; + + crtc = single_enabled_crtc(dev); + /* maxfifo not supported on pipe C */ + if (crtc && to_intel_crtc(crtc)->pipe != PIPE_C) { + pipe = to_intel_crtc(crtc)->pipe; + num_planes = !!wm->pipe[pipe].primary + + !!wm->pipe[pipe].sprite[0] + + !!wm->pipe[pipe].sprite[1]; + fifo_size = INTEL_INFO(dev_priv)->num_pipes * 512 - 1; } - DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, " - "B: plane=%d, cursor=%d, SR: plane=%d, cursor=%d\n", - planea_wm, cursora_wm, - planeb_wm, cursorb_wm, - plane_sr, cursor_sr); + if (fifo_size == 0 || num_planes > 1) + return false; - I915_WRITE(DSPFW1, - (plane_sr << DSPFW_SR_SHIFT) | - (cursorb_wm << DSPFW_CURSORB_SHIFT) | - (planeb_wm << DSPFW_PLANEB_SHIFT) | - (planea_wm << DSPFW_PLANEA_SHIFT)); - I915_WRITE(DSPFW2, - (I915_READ(DSPFW2) & ~DSPFW_CURSORA_MASK) | - (cursora_wm << DSPFW_CURSORA_SHIFT)); - I915_WRITE(DSPFW3, - (I915_READ(DSPFW3) & ~DSPFW_CURSOR_SR_MASK) | - (cursor_sr << DSPFW_CURSOR_SR_SHIFT)); + wm->sr.cursor = vlv_compute_wm(to_intel_crtc(crtc), + to_intel_plane(crtc->cursor), 0x3f); - if (cxsr_enabled) - intel_set_memory_cxsr(dev_priv, true); + list_for_each_entry(plane, &dev->mode_config.plane_list, base.head) { + if (plane->base.type == DRM_PLANE_TYPE_CURSOR) + continue; + + if (plane->pipe != pipe) + continue; + + wm->sr.plane = vlv_compute_wm(to_intel_crtc(crtc), + plane, fifo_size); + if (wm->sr.plane != 0) + break; + } + + return true; } -static void cherryview_update_wm(struct drm_crtc *crtc) +static void valleyview_update_wm(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; - static const int sr_latency_ns = 12000; struct drm_i915_private *dev_priv = dev->dev_private; - int planea_wm, planeb_wm, planec_wm; - int cursora_wm, cursorb_wm, cursorc_wm; - int plane_sr, cursor_sr; - int ignore_plane_sr, ignore_cursor_sr; - unsigned int enabled = 0; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + enum pipe pipe = intel_crtc->pipe; bool cxsr_enabled; + struct vlv_wm_values wm = dev_priv->wm.vlv; - vlv_update_drain_latency(crtc); + wm.ddl[pipe].primary = vlv_compute_drain_latency(crtc, crtc->primary); + wm.pipe[pipe].primary = vlv_compute_wm(intel_crtc, + to_intel_plane(crtc->primary), + vlv_get_fifo_size(dev, pipe, 0)); - if (g4x_compute_wm0(dev, PIPE_A, - &valleyview_wm_info, pessimal_latency_ns, - &valleyview_cursor_wm_info, pessimal_latency_ns, - &planea_wm, &cursora_wm)) - enabled |= 1 << PIPE_A; + wm.ddl[pipe].cursor = vlv_compute_drain_latency(crtc, crtc->cursor); + wm.pipe[pipe].cursor = vlv_compute_wm(intel_crtc, + to_intel_plane(crtc->cursor), + 0x3f); - if (g4x_compute_wm0(dev, PIPE_B, - &valleyview_wm_info, pessimal_latency_ns, - &valleyview_cursor_wm_info, pessimal_latency_ns, - &planeb_wm, &cursorb_wm)) - enabled |= 1 << PIPE_B; + cxsr_enabled = vlv_compute_sr_wm(dev, &wm); - if (g4x_compute_wm0(dev, PIPE_C, - &valleyview_wm_info, 
pessimal_latency_ns, - &valleyview_cursor_wm_info, pessimal_latency_ns, - &planec_wm, &cursorc_wm)) - enabled |= 1 << PIPE_C; + if (memcmp(&wm, &dev_priv->wm.vlv, sizeof(wm)) == 0) + return; - if (single_plane_enabled(enabled) && - g4x_compute_srwm(dev, ffs(enabled) - 1, - sr_latency_ns, - &valleyview_wm_info, - &valleyview_cursor_wm_info, - &plane_sr, &ignore_cursor_sr) && - g4x_compute_srwm(dev, ffs(enabled) - 1, - 2*sr_latency_ns, - &valleyview_wm_info, - &valleyview_cursor_wm_info, - &ignore_plane_sr, &cursor_sr)) { - cxsr_enabled = true; - } else { - cxsr_enabled = false; - intel_set_memory_cxsr(dev_priv, false); - plane_sr = cursor_sr = 0; - } + DRM_DEBUG_KMS("Setting FIFO watermarks - %c: plane=%d, cursor=%d, " + "SR: plane=%d, cursor=%d\n", pipe_name(pipe), + wm.pipe[pipe].primary, wm.pipe[pipe].cursor, + wm.sr.plane, wm.sr.cursor); - DRM_DEBUG_KMS("Setting FIFO watermarks - A: plane=%d, cursor=%d, " - "B: plane=%d, cursor=%d, C: plane=%d, cursor=%d, " - "SR: plane=%d, cursor=%d\n", - planea_wm, cursora_wm, - planeb_wm, cursorb_wm, - planec_wm, cursorc_wm, - plane_sr, cursor_sr); + /* + * FIXME DDR DVFS introduces massive memory latencies which + * are not known to system agent so any deadline specified + * by the display may not be respected. To support DDR DVFS + * the watermark code needs to be rewritten to essentially + * bypass deadline mechanism and rely solely on the + * watermarks. For now disable DDR DVFS. + */ + if (IS_CHERRYVIEW(dev_priv)) + chv_set_memory_dvfs(dev_priv, false); - I915_WRITE(DSPFW1, - (plane_sr << DSPFW_SR_SHIFT) | - (cursorb_wm << DSPFW_CURSORB_SHIFT) | - (planeb_wm << DSPFW_PLANEB_SHIFT) | - (planea_wm << DSPFW_PLANEA_SHIFT)); - I915_WRITE(DSPFW2, - (I915_READ(DSPFW2) & ~DSPFW_CURSORA_MASK) | - (cursora_wm << DSPFW_CURSORA_SHIFT)); - I915_WRITE(DSPFW3, - (I915_READ(DSPFW3) & ~DSPFW_CURSOR_SR_MASK) | - (cursor_sr << DSPFW_CURSOR_SR_SHIFT)); - I915_WRITE(DSPFW9_CHV, - (I915_READ(DSPFW9_CHV) & ~(DSPFW_PLANEC_MASK | - DSPFW_CURSORC_MASK)) | - (planec_wm << DSPFW_PLANEC_SHIFT) | - (cursorc_wm << DSPFW_CURSORC_SHIFT)); + if (!cxsr_enabled) + intel_set_memory_cxsr(dev_priv, false); + + vlv_write_wm_values(intel_crtc, &wm); if (cxsr_enabled) intel_set_memory_cxsr(dev_priv, true); @@ -979,30 +1080,47 @@ static void valleyview_update_sprite_wm(struct drm_plane *plane, { struct drm_device *dev = crtc->dev; struct drm_i915_private *dev_priv = dev->dev_private; - int pipe = to_intel_plane(plane)->pipe; + struct intel_crtc *intel_crtc = to_intel_crtc(crtc); + enum pipe pipe = intel_crtc->pipe; int sprite = to_intel_plane(plane)->plane; - int drain_latency; - int plane_prec; - int sprite_dl; - int prec_mult; - const int high_precision = IS_CHERRYVIEW(dev) ? - DRAIN_LATENCY_PRECISION_32 : DRAIN_LATENCY_PRECISION_64; + bool cxsr_enabled; + struct vlv_wm_values wm = dev_priv->wm.vlv; - sprite_dl = I915_READ(VLV_DDL(pipe)) & ~(DDL_SPRITE_PRECISION_HIGH(sprite) | - (DRAIN_LATENCY_MASK << DDL_SPRITE_SHIFT(sprite))); + if (enabled) { + wm.ddl[pipe].sprite[sprite] = + vlv_compute_drain_latency(crtc, plane); - if (enabled && vlv_compute_drain_latency(crtc, pixel_size, &prec_mult, - &drain_latency)) { - plane_prec = (prec_mult == high_precision) ? 
- DDL_SPRITE_PRECISION_HIGH(sprite) : - DDL_SPRITE_PRECISION_LOW(sprite); - sprite_dl |= plane_prec | - (drain_latency << DDL_SPRITE_SHIFT(sprite)); + wm.pipe[pipe].sprite[sprite] = + vlv_compute_wm(intel_crtc, + to_intel_plane(plane), + vlv_get_fifo_size(dev, pipe, sprite+1)); + } else { + wm.ddl[pipe].sprite[sprite] = 0; + wm.pipe[pipe].sprite[sprite] = 0; } - I915_WRITE(VLV_DDL(pipe), sprite_dl); + cxsr_enabled = vlv_compute_sr_wm(dev, &wm); + + if (memcmp(&wm, &dev_priv->wm.vlv, sizeof(wm)) == 0) + return; + + DRM_DEBUG_KMS("Setting FIFO watermarks - %c: sprite %c=%d, " + "SR: plane=%d, cursor=%d\n", pipe_name(pipe), + sprite_name(pipe, sprite), + wm.pipe[pipe].sprite[sprite], + wm.sr.plane, wm.sr.cursor); + + if (!cxsr_enabled) + intel_set_memory_cxsr(dev_priv, false); + + vlv_write_wm_values(intel_crtc, &wm); + + if (cxsr_enabled) + intel_set_memory_cxsr(dev_priv, true); } +#define single_plane_enabled(mask) is_power_of_2(mask) + static void g4x_update_wm(struct drm_crtc *crtc) { struct drm_device *dev = crtc->dev; @@ -1045,17 +1163,17 @@ static void g4x_update_wm(struct drm_crtc *crtc) plane_sr, cursor_sr); I915_WRITE(DSPFW1, - (plane_sr << DSPFW_SR_SHIFT) | - (cursorb_wm << DSPFW_CURSORB_SHIFT) | - (planeb_wm << DSPFW_PLANEB_SHIFT) | - (planea_wm << DSPFW_PLANEA_SHIFT)); + FW_WM(plane_sr, SR) | + FW_WM(cursorb_wm, CURSORB) | + FW_WM(planeb_wm, PLANEB) | + FW_WM(planea_wm, PLANEA)); I915_WRITE(DSPFW2, (I915_READ(DSPFW2) & ~DSPFW_CURSORA_MASK) | - (cursora_wm << DSPFW_CURSORA_SHIFT)); + FW_WM(cursora_wm, CURSORA)); /* HPLL off in SR has some issues on G4x... disable it */ I915_WRITE(DSPFW3, (I915_READ(DSPFW3) & ~(DSPFW_HPLL_SR_EN | DSPFW_CURSOR_SR_MASK)) | - (cursor_sr << DSPFW_CURSOR_SR_SHIFT)); + FW_WM(cursor_sr, CURSOR_SR)); if (cxsr_enabled) intel_set_memory_cxsr(dev_priv, true); @@ -1080,7 +1198,7 @@ static void i965_update_wm(struct drm_crtc *unused_crtc) int clock = adjusted_mode->crtc_clock; int htotal = adjusted_mode->crtc_htotal; int hdisplay = to_intel_crtc(crtc)->config->pipe_src_w; - int pixel_size = crtc->primary->fb->bits_per_pixel / 8; + int pixel_size = crtc->primary->state->fb->bits_per_pixel / 8; unsigned long line_time_us; int entries; @@ -1098,7 +1216,7 @@ static void i965_update_wm(struct drm_crtc *unused_crtc) entries, srwm); entries = (((sr_latency_ns / line_time_us) + 1000) / 1000) * - pixel_size * to_intel_crtc(crtc)->cursor_width; + pixel_size * crtc->cursor->state->crtc_w; entries = DIV_ROUND_UP(entries, i965_cursor_wm_info.cacheline_size); cursor_sr = i965_cursor_wm_info.fifo_size - @@ -1121,19 +1239,21 @@ static void i965_update_wm(struct drm_crtc *unused_crtc) srwm); /* 965 has limitations... 
*/ - I915_WRITE(DSPFW1, (srwm << DSPFW_SR_SHIFT) | - (8 << DSPFW_CURSORB_SHIFT) | - (8 << DSPFW_PLANEB_SHIFT) | - (8 << DSPFW_PLANEA_SHIFT)); - I915_WRITE(DSPFW2, (8 << DSPFW_CURSORA_SHIFT) | - (8 << DSPFW_PLANEC_SHIFT_OLD)); + I915_WRITE(DSPFW1, FW_WM(srwm, SR) | + FW_WM(8, CURSORB) | + FW_WM(8, PLANEB) | + FW_WM(8, PLANEA)); + I915_WRITE(DSPFW2, FW_WM(8, CURSORA) | + FW_WM(8, PLANEC_OLD)); /* update cursor SR watermark */ - I915_WRITE(DSPFW3, (cursor_sr << DSPFW_CURSOR_SR_SHIFT)); + I915_WRITE(DSPFW3, FW_WM(cursor_sr, CURSOR_SR)); if (cxsr_enabled) intel_set_memory_cxsr(dev_priv, true); } +#undef FW_WM + static void i9xx_update_wm(struct drm_crtc *unused_crtc) { struct drm_device *dev = unused_crtc->dev; @@ -1157,7 +1277,7 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc) crtc = intel_get_crtc_for_plane(dev, 0); if (intel_crtc_active(crtc)) { const struct drm_display_mode *adjusted_mode; - int cpp = crtc->primary->fb->bits_per_pixel / 8; + int cpp = crtc->primary->state->fb->bits_per_pixel / 8; if (IS_GEN2(dev)) cpp = 4; @@ -1179,7 +1299,7 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc) crtc = intel_get_crtc_for_plane(dev, 1); if (intel_crtc_active(crtc)) { const struct drm_display_mode *adjusted_mode; - int cpp = crtc->primary->fb->bits_per_pixel / 8; + int cpp = crtc->primary->state->fb->bits_per_pixel / 8; if (IS_GEN2(dev)) cpp = 4; @@ -1202,7 +1322,7 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc) if (IS_I915GM(dev) && enabled) { struct drm_i915_gem_object *obj; - obj = intel_fb_obj(enabled->primary->fb); + obj = intel_fb_obj(enabled->primary->state->fb); /* self-refresh seems busted with untiled */ if (obj->tiling_mode == I915_TILING_NONE) @@ -1226,7 +1346,7 @@ static void i9xx_update_wm(struct drm_crtc *unused_crtc) int clock = adjusted_mode->crtc_clock; int htotal = adjusted_mode->crtc_htotal; int hdisplay = to_intel_crtc(enabled)->config->pipe_src_w; - int pixel_size = enabled->primary->fb->bits_per_pixel / 8; + int pixel_size = enabled->primary->state->fb->bits_per_pixel / 8; unsigned long line_time_us; int entries; @@ -1663,7 +1783,7 @@ hsw_compute_linetime_wm(struct drm_device *dev, struct drm_crtc *crtc) struct drm_display_mode *mode = &intel_crtc->config->base.adjusted_mode; u32 linetime, ips_linetime; - if (!intel_crtc_active(crtc)) + if (!intel_crtc->active) return 0; /* The WM are computed with base on how long it takes to fill a single @@ -1918,19 +2038,31 @@ static void ilk_compute_wm_parameters(struct drm_crtc *crtc, enum pipe pipe = intel_crtc->pipe; struct drm_plane *plane; - if (!intel_crtc_active(crtc)) + if (!intel_crtc->active) return; p->active = true; p->pipe_htotal = intel_crtc->config->base.adjusted_mode.crtc_htotal; p->pixel_rate = ilk_pipe_pixel_rate(dev, crtc); - p->pri.bytes_per_pixel = crtc->primary->fb->bits_per_pixel / 8; - p->cur.bytes_per_pixel = 4; + + if (crtc->primary->state->fb) { + p->pri.enabled = true; + p->pri.bytes_per_pixel = + crtc->primary->state->fb->bits_per_pixel / 8; + } else { + p->pri.enabled = false; + p->pri.bytes_per_pixel = 0; + } + + if (crtc->cursor->state->fb) { + p->cur.enabled = true; + p->cur.bytes_per_pixel = 4; + } else { + p->cur.enabled = false; + p->cur.bytes_per_pixel = 0; + } p->pri.horiz_pixels = intel_crtc->config->pipe_src_w; - p->cur.horiz_pixels = intel_crtc->cursor_width; - /* TODO: for now, assume primary and cursor planes are always enabled. 
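The FW_WM() conversions in the watermark hunks above are plain token-pasting; a self-contained sketch of the expansion, where the SR shift and mask values are assumed stand-ins rather than the real i915_reg.h definitions:

#include <stdio.h>

/* assumed stand-ins for the real i915_reg.h values */
#define DSPFW_SR_SHIFT	23
#define DSPFW_SR_MASK	(0x1ffu << 23)

#define FW_WM(value, plane) \
	(((value) << DSPFW_ ## plane ## _SHIFT) & DSPFW_ ## plane ## _MASK)

int main(void)
{
	/* FW_WM(12, SR) pastes into ((12) << DSPFW_SR_SHIFT) & DSPFW_SR_MASK:
	 * the same shift the old open-coded form used, plus masking so an
	 * oversized value cannot spill into neighbouring fields */
	printf("0x%08x\n", FW_WM(12, SR)); /* 0x06000000 */
	return 0;
}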
*/ - p->pri.enabled = true; - p->cur.enabled = true; + p->cur.horiz_pixels = intel_crtc->base.cursor->state->crtc_w; drm_for_each_legacy_plane(plane, &dev->mode_config.plane_list) { struct intel_plane *intel_plane = to_intel_plane(plane); @@ -2430,7 +2562,7 @@ skl_ddb_get_pipe_allocation_limits(struct drm_device *dev, nth_active_pipe = 0; for_each_crtc(dev, crtc) { - if (!intel_crtc_active(crtc)) + if (!to_intel_crtc(crtc)->active) continue; if (crtc == for_crtc) @@ -2463,13 +2595,12 @@ static void skl_ddb_entry_init_from_hw(struct skl_ddb_entry *entry, u32 reg) void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv, struct skl_ddb_allocation *ddb /* out */) { - struct drm_device *dev = dev_priv->dev; enum pipe pipe; int plane; u32 val; for_each_pipe(dev_priv, pipe) { - for_each_plane(pipe, plane) { + for_each_plane(dev_priv, pipe, plane) { val = I915_READ(PLANE_BUF_CFG(pipe, plane)); skl_ddb_entry_init_from_hw(&ddb->plane[pipe][plane], val); @@ -2518,6 +2649,7 @@ skl_allocate_pipe_ddb(struct drm_crtc *crtc, struct skl_ddb_allocation *ddb /* out */) { struct drm_device *dev = crtc->dev; + struct drm_i915_private *dev_priv = dev->dev_private; struct intel_crtc *intel_crtc = to_intel_crtc(crtc); enum pipe pipe = intel_crtc->pipe; struct skl_ddb_entry *alloc = &ddb->pipe[pipe]; @@ -2542,7 +2674,7 @@ skl_allocate_pipe_ddb(struct drm_crtc *crtc, alloc->end -= cursor_blocks; /* 1. Allocate the mininum required blocks for each active plane */ - for_each_plane(pipe, plane) { + for_each_plane(dev_priv, pipe, plane) { const struct intel_plane_wm_parameters *p; p = ¶ms->plane[plane]; @@ -2670,7 +2802,7 @@ static void skl_compute_wm_global_parameters(struct drm_device *dev, struct drm_plane *plane; list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) - config->num_pipes_active += intel_crtc_active(crtc); + config->num_pipes_active += to_intel_crtc(crtc)->active; /* FIXME: I don't think we need those two global parameters on SKL */ list_for_each_entry(plane, &dev->mode_config.plane_list, head) { @@ -2691,32 +2823,36 @@ static void skl_compute_wm_pipe_parameters(struct drm_crtc *crtc, struct drm_framebuffer *fb; int i = 1; /* Index for sprite planes start */ - p->active = intel_crtc_active(crtc); + p->active = intel_crtc->active; if (p->active) { p->pipe_htotal = intel_crtc->config->base.adjusted_mode.crtc_htotal; p->pixel_rate = skl_pipe_pixel_rate(intel_crtc->config); - /* - * For now, assume primary and cursor planes are always enabled. - */ - p->plane[0].enabled = true; - p->plane[0].bytes_per_pixel = - crtc->primary->fb->bits_per_pixel / 8; - p->plane[0].horiz_pixels = intel_crtc->config->pipe_src_w; - p->plane[0].vert_pixels = intel_crtc->config->pipe_src_h; - p->plane[0].tiling = DRM_FORMAT_MOD_NONE; fb = crtc->primary->state->fb; - /* - * Framebuffer can be NULL on plane disable, but it does not - * matter for watermarks if we assume no tiling in that case. - */ - if (fb) + if (fb) { + p->plane[0].enabled = true; + p->plane[0].bytes_per_pixel = fb->bits_per_pixel / 8; p->plane[0].tiling = fb->modifier[0]; + } else { + p->plane[0].enabled = false; + p->plane[0].bytes_per_pixel = 0; + p->plane[0].tiling = DRM_FORMAT_MOD_NONE; + } + p->plane[0].horiz_pixels = intel_crtc->config->pipe_src_w; + p->plane[0].vert_pixels = intel_crtc->config->pipe_src_h; - p->cursor.enabled = true; - p->cursor.bytes_per_pixel = 4; - p->cursor.horiz_pixels = intel_crtc->cursor_width ? 
- intel_crtc->cursor_width : 64; + fb = crtc->cursor->state->fb; + if (fb) { + p->cursor.enabled = true; + p->cursor.bytes_per_pixel = fb->bits_per_pixel / 8; + p->cursor.horiz_pixels = crtc->cursor->state->crtc_w; + p->cursor.vert_pixels = crtc->cursor->state->crtc_h; + } else { + p->cursor.enabled = false; + p->cursor.bytes_per_pixel = 0; + p->cursor.horiz_pixels = 64; + p->cursor.vert_pixels = 64; + } } list_for_each_entry(plane, &dev->mode_config.plane_list, head) { @@ -2822,7 +2958,7 @@ static void skl_compute_wm_level(const struct drm_i915_private *dev_priv, static uint32_t skl_compute_linetime_wm(struct drm_crtc *crtc, struct skl_pipe_wm_parameters *p) { - if (!intel_crtc_active(crtc)) + if (!to_intel_crtc(crtc)->active) return 0; return DIV_ROUND_UP(8 * p->pipe_htotal * 1000, p->pixel_rate); @@ -2996,12 +3132,11 @@ static void skl_write_wm_values(struct drm_i915_private *dev_priv, static void skl_wm_flush_pipe(struct drm_i915_private *dev_priv, enum pipe pipe, int pass) { - struct drm_device *dev = dev_priv->dev; int plane; DRM_DEBUG_KMS("flush pipe %c (pass %d)\n", pipe_name(pipe), pass); - for_each_plane(pipe, plane) { + for_each_plane(dev_priv, pipe, plane) { I915_WRITE(PLANE_SURF(pipe, plane), I915_READ(PLANE_SURF(pipe, plane))); } @@ -3370,7 +3505,7 @@ static void skl_pipe_wm_get_hw_state(struct drm_crtc *crtc) hw->plane_trans[pipe][i] = I915_READ(PLANE_WM_TRANS(pipe, i)); hw->cursor_trans[pipe] = I915_READ(CUR_WM_TRANS(pipe)); - if (!intel_crtc_active(crtc)) + if (!intel_crtc->active) return; hw->dirty[pipe] = true; @@ -3425,7 +3560,7 @@ static void ilk_pipe_wm_get_hw_state(struct drm_crtc *crtc) if (IS_HASWELL(dev) || IS_BROADWELL(dev)) hw->wm_linetime[pipe] = I915_READ(PIPE_WM_LINETIME(pipe)); - active->pipe_enabled = intel_crtc_active(crtc); + active->pipe_enabled = intel_crtc->active; if (active->pipe_enabled) { u32 tmp = hw->wm_pipe[pipe]; @@ -3539,41 +3674,6 @@ void intel_update_sprite_watermarks(struct drm_plane *plane, pixel_size, enabled, scaled); } -static struct drm_i915_gem_object * -intel_alloc_context_page(struct drm_device *dev) -{ - struct drm_i915_gem_object *ctx; - int ret; - - WARN_ON(!mutex_is_locked(&dev->struct_mutex)); - - ctx = i915_gem_alloc_object(dev, 4096); - if (!ctx) { - DRM_DEBUG("failed to alloc power context, RC6 disabled\n"); - return NULL; - } - - ret = i915_gem_obj_ggtt_pin(ctx, 4096, 0); - if (ret) { - DRM_ERROR("failed to pin power context: %d\n", ret); - goto err_unref; - } - - ret = i915_gem_object_set_to_gtt_domain(ctx, 1); - if (ret) { - DRM_ERROR("failed to set-domain on power context: %d\n", ret); - goto err_unpin; - } - - return ctx; - -err_unpin: - i915_gem_object_ggtt_unpin(ctx); -err_unref: - drm_gem_object_unreference(&ctx->base); - return NULL; -} - /** * Lock protecting IPS related data structures */ @@ -3706,7 +3806,7 @@ static void ironlake_disable_drps(struct drm_device *dev) * ourselves, instead of doing a rmw cycle (which might result in us clearing * all limits and the gpu stuck at whatever frequency it is at atm). */ -static u32 gen6_rps_limits(struct drm_i915_private *dev_priv, u8 val) +static u32 intel_rps_limits(struct drm_i915_private *dev_priv, u8 val) { u32 limits; @@ -3716,9 +3816,15 @@ static u32 gen6_rps_limits(struct drm_i915_private *dev_priv, u8 val) * the hw runs at the minimal clock before selecting the desired * frequency, if the down threshold expires in that window we will not * receive a down interrupt. 
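The packing that the intel_rps_limits() hunk just below makes generation-dependent can be restated as a compact sketch; the field offsets are copied straight from the diff, while the sample frequency codes are invented:

#include <stdio.h>

static unsigned int pack_rps_limits(unsigned int max_soft,
				    unsigned int min_soft,
				    unsigned int val, int is_gen9)
{
	unsigned int limits;

	if (is_gen9) {		/* gen9 moved the fields to bits 23 and 14 */
		limits = max_soft << 23;
		if (val <= min_soft)
			limits |= min_soft << 14;
	} else {		/* older parts: bits 24 and 16 */
		limits = max_soft << 24;
		if (val <= min_soft)
			limits |= min_soft << 16;
	}
	return limits;
}

int main(void)
{
	/* at the floor (val == min) both fields get populated */
	printf("0x%08x\n", pack_rps_limits(0x12, 0x06, 0x06, 0)); /* 0x12060000 */
	printf("0x%08x\n", pack_rps_limits(0x12, 0x06, 0x06, 1)); /* 0x09018000 */
	return 0;
}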
*/ - limits = dev_priv->rps.max_freq_softlimit << 24; - if (val <= dev_priv->rps.min_freq_softlimit) - limits |= dev_priv->rps.min_freq_softlimit << 16; + if (IS_GEN9(dev_priv->dev)) { + limits = (dev_priv->rps.max_freq_softlimit) << 23; + if (val <= dev_priv->rps.min_freq_softlimit) + limits |= (dev_priv->rps.min_freq_softlimit) << 14; + } else { + limits = dev_priv->rps.max_freq_softlimit << 24; + if (val <= dev_priv->rps.min_freq_softlimit) + limits |= dev_priv->rps.min_freq_softlimit << 16; + } return limits; } @@ -3726,6 +3832,8 @@ static u32 gen6_rps_limits(struct drm_i915_private *dev_priv, u8 val) static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val) { int new_power; + u32 threshold_up = 0, threshold_down = 0; /* in % */ + u32 ei_up = 0, ei_down = 0; new_power = dev_priv->rps.power; switch (dev_priv->rps.power) { @@ -3758,59 +3866,53 @@ static void gen6_set_rps_thresholds(struct drm_i915_private *dev_priv, u8 val) switch (new_power) { case LOW_POWER: /* Upclock if more than 95% busy over 16ms */ - I915_WRITE(GEN6_RP_UP_EI, 12500); - I915_WRITE(GEN6_RP_UP_THRESHOLD, 11800); + ei_up = 16000; + threshold_up = 95; /* Downclock if less than 85% busy over 32ms */ - I915_WRITE(GEN6_RP_DOWN_EI, 25000); - I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 21250); - - I915_WRITE(GEN6_RP_CONTROL, - GEN6_RP_MEDIA_TURBO | - GEN6_RP_MEDIA_HW_NORMAL_MODE | - GEN6_RP_MEDIA_IS_GFX | - GEN6_RP_ENABLE | - GEN6_RP_UP_BUSY_AVG | - GEN6_RP_DOWN_IDLE_AVG); + ei_down = 32000; + threshold_down = 85; break; case BETWEEN: /* Upclock if more than 90% busy over 13ms */ - I915_WRITE(GEN6_RP_UP_EI, 10250); - I915_WRITE(GEN6_RP_UP_THRESHOLD, 9225); + ei_up = 13000; + threshold_up = 90; /* Downclock if less than 75% busy over 32ms */ - I915_WRITE(GEN6_RP_DOWN_EI, 25000); - I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 18750); - - I915_WRITE(GEN6_RP_CONTROL, - GEN6_RP_MEDIA_TURBO | - GEN6_RP_MEDIA_HW_NORMAL_MODE | - GEN6_RP_MEDIA_IS_GFX | - GEN6_RP_ENABLE | - GEN6_RP_UP_BUSY_AVG | - GEN6_RP_DOWN_IDLE_AVG); + ei_down = 32000; + threshold_down = 75; break; case HIGH_POWER: /* Upclock if more than 85% busy over 10ms */ - I915_WRITE(GEN6_RP_UP_EI, 8000); - I915_WRITE(GEN6_RP_UP_THRESHOLD, 6800); + ei_up = 10000; + threshold_up = 85; /* Downclock if less than 60% busy over 32ms */ - I915_WRITE(GEN6_RP_DOWN_EI, 25000); - I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 15000); - - I915_WRITE(GEN6_RP_CONTROL, - GEN6_RP_MEDIA_TURBO | - GEN6_RP_MEDIA_HW_NORMAL_MODE | - GEN6_RP_MEDIA_IS_GFX | - GEN6_RP_ENABLE | - GEN6_RP_UP_BUSY_AVG | - GEN6_RP_DOWN_IDLE_AVG); + ei_down = 32000; + threshold_down = 60; break; } + I915_WRITE(GEN6_RP_UP_EI, + GT_INTERVAL_FROM_US(dev_priv, ei_up)); + I915_WRITE(GEN6_RP_UP_THRESHOLD, + GT_INTERVAL_FROM_US(dev_priv, (ei_up * threshold_up / 100))); + + I915_WRITE(GEN6_RP_DOWN_EI, + GT_INTERVAL_FROM_US(dev_priv, ei_down)); + I915_WRITE(GEN6_RP_DOWN_THRESHOLD, + GT_INTERVAL_FROM_US(dev_priv, (ei_down * threshold_down / 100))); + + I915_WRITE(GEN6_RP_CONTROL, + GEN6_RP_MEDIA_TURBO | + GEN6_RP_MEDIA_HW_NORMAL_MODE | + GEN6_RP_MEDIA_IS_GFX | + GEN6_RP_ENABLE | + GEN6_RP_UP_BUSY_AVG | + GEN6_RP_DOWN_IDLE_AVG); + dev_priv->rps.power = new_power; dev_priv->rps.last_adj = 0; } @@ -3847,7 +3949,10 @@ static void gen6_set_rps(struct drm_device *dev, u8 val) if (val != dev_priv->rps.cur_freq) { gen6_set_rps_thresholds(dev_priv, val); - if (IS_HASWELL(dev) || IS_BROADWELL(dev)) + if (IS_GEN9(dev)) + I915_WRITE(GEN6_RPNSWREQ, + GEN9_FREQUENCY(val)); + else if (IS_HASWELL(dev) || IS_BROADWELL(dev)) I915_WRITE(GEN6_RPNSWREQ, 
HSW_FREQUENCY(val)); else @@ -3860,7 +3965,7 @@ static void gen6_set_rps(struct drm_device *dev, u8 val) /* Make sure we continue to get interrupts * until we hit the minimum or maximum frequencies. */ - I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, gen6_rps_limits(dev_priv, val)); + I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, intel_rps_limits(dev_priv, val)); I915_WRITE(GEN6_PMINTRMSK, gen6_rps_pm_mask(dev_priv, val)); POSTING_READ(GEN6_RPNSWREQ); @@ -4081,6 +4186,13 @@ static void gen6_init_rps_frequencies(struct drm_device *dev) dev_priv->rps.rp0_freq = (rp_state_cap >> 0) & 0xff; dev_priv->rps.rp1_freq = (rp_state_cap >> 8) & 0xff; dev_priv->rps.min_freq = (rp_state_cap >> 16) & 0xff; + if (IS_SKYLAKE(dev)) { + /* Store the frequency values in 16.66 MHz units, which is + the natural hardware unit for SKL */ + dev_priv->rps.rp0_freq *= GEN9_FREQ_SCALER; + dev_priv->rps.rp1_freq *= GEN9_FREQ_SCALER; + dev_priv->rps.min_freq *= GEN9_FREQ_SCALER; + } /* hw_max = RP0 until we check for overclocking */ dev_priv->rps.max_freq = dev_priv->rps.rp0_freq; @@ -4121,23 +4233,21 @@ static void gen9_enable_rps(struct drm_device *dev) gen6_init_rps_frequencies(dev); - I915_WRITE(GEN6_RPNSWREQ, 0xc800000); - I915_WRITE(GEN6_RC_VIDEO_FREQ, 0xc800000); + /* Program defaults and thresholds for RPS */ + I915_WRITE(GEN6_RC_VIDEO_FREQ, + GEN9_FREQUENCY(dev_priv->rps.rp1_freq)); + + /* 1 second timeout */ + I915_WRITE(GEN6_RP_DOWN_TIMEOUT, + GT_INTERVAL_FROM_US(dev_priv, 1000000)); - I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 0xf4240); - I915_WRITE(GEN6_RP_INTERRUPT_LIMITS, 0x12060000); - I915_WRITE(GEN6_RP_UP_THRESHOLD, 0xe808); - I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 0x3bd08); - I915_WRITE(GEN6_RP_UP_EI, 0x101d0); - I915_WRITE(GEN6_RP_DOWN_EI, 0x55730); I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 0xa); - I915_WRITE(GEN6_PMINTRMSK, 0x6); - I915_WRITE(GEN6_RP_CONTROL, GEN6_RP_MEDIA_TURBO | - GEN6_RP_MEDIA_HW_MODE | GEN6_RP_MEDIA_IS_GFX | - GEN6_RP_ENABLE | GEN6_RP_UP_BUSY_AVG | - GEN6_RP_DOWN_IDLE_AVG); - gen6_enable_rps_interrupts(dev); + /* Leaning on the below call to gen6_set_rps to program/setup the + * Up/Down EI & threshold registers, as well as the RP_CONTROL, + * RP_INTERRUPT_LIMITS & RPNSWREQ registers */ + dev_priv->rps.power = HIGH_POWER; /* force a reset */ + gen6_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit); intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); } @@ -4990,124 +5100,6 @@ static void valleyview_enable_rps(struct drm_device *dev) intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); } -void ironlake_teardown_rc6(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - - if (dev_priv->ips.renderctx) { - i915_gem_object_ggtt_unpin(dev_priv->ips.renderctx); - drm_gem_object_unreference(&dev_priv->ips.renderctx->base); - dev_priv->ips.renderctx = NULL; - } - - if (dev_priv->ips.pwrctx) { - i915_gem_object_ggtt_unpin(dev_priv->ips.pwrctx); - drm_gem_object_unreference(&dev_priv->ips.pwrctx->base); - dev_priv->ips.pwrctx = NULL; - } -} - -static void ironlake_disable_rc6(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - - if (I915_READ(PWRCTXA)) { - /* Wake the GPU, prevent RC6, then restore RSTDBYCTL */ - I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) | RCX_SW_EXIT); - wait_for(((I915_READ(RSTDBYCTL) & RSX_STATUS_MASK) == RSX_STATUS_ON), - 50); - - I915_WRITE(PWRCTXA, 0); - POSTING_READ(PWRCTXA); - - I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT); - POSTING_READ(RSTDBYCTL); - } -} - -static int ironlake_setup_rc6(struct drm_device *dev) -{ - 
struct drm_i915_private *dev_priv = dev->dev_private; - - if (dev_priv->ips.renderctx == NULL) - dev_priv->ips.renderctx = intel_alloc_context_page(dev); - if (!dev_priv->ips.renderctx) - return -ENOMEM; - - if (dev_priv->ips.pwrctx == NULL) - dev_priv->ips.pwrctx = intel_alloc_context_page(dev); - if (!dev_priv->ips.pwrctx) { - ironlake_teardown_rc6(dev); - return -ENOMEM; - } - - return 0; -} - -static void ironlake_enable_rc6(struct drm_device *dev) -{ - struct drm_i915_private *dev_priv = dev->dev_private; - struct intel_engine_cs *ring = &dev_priv->ring[RCS]; - bool was_interruptible; - int ret; - - /* rc6 disabled by default due to repeated reports of hanging during - * boot and resume. - */ - if (!intel_enable_rc6(dev)) - return; - - WARN_ON(!mutex_is_locked(&dev->struct_mutex)); - - ret = ironlake_setup_rc6(dev); - if (ret) - return; - - was_interruptible = dev_priv->mm.interruptible; - dev_priv->mm.interruptible = false; - - /* - * GPU can automatically power down the render unit if given a page - * to save state. - */ - ret = intel_ring_begin(ring, 6); - if (ret) { - ironlake_teardown_rc6(dev); - dev_priv->mm.interruptible = was_interruptible; - return; - } - - intel_ring_emit(ring, MI_SUSPEND_FLUSH | MI_SUSPEND_FLUSH_EN); - intel_ring_emit(ring, MI_SET_CONTEXT); - intel_ring_emit(ring, i915_gem_obj_ggtt_offset(dev_priv->ips.renderctx) | - MI_MM_SPACE_GTT | - MI_SAVE_EXT_STATE_EN | - MI_RESTORE_EXT_STATE_EN | - MI_RESTORE_INHIBIT); - intel_ring_emit(ring, MI_SUSPEND_FLUSH); - intel_ring_emit(ring, MI_NOOP); - intel_ring_emit(ring, MI_FLUSH); - intel_ring_advance(ring); - - /* - * Wait for the command parser to advance past MI_SET_CONTEXT. The HW - * does an implicit flush, combined with MI_FLUSH above, it should be - * safe to assume that renderctx is valid - */ - ret = intel_ring_idle(ring); - dev_priv->mm.interruptible = was_interruptible; - if (ret) { - DRM_ERROR("failed to enable ironlake power savings\n"); - ironlake_teardown_rc6(dev); - return; - } - - I915_WRITE(PWRCTXA, i915_gem_obj_ggtt_offset(dev_priv->ips.pwrctx) | PWRCTX_EN); - I915_WRITE(RSTDBYCTL, I915_READ(RSTDBYCTL) & ~RCX_SW_EXIT); - - intel_print_rc6_info(dev, GEN6_RC_CTL_RC6_ENABLE); -} - static unsigned long intel_pxfreq(u32 vidfreq) { unsigned long freq; @@ -5620,12 +5612,7 @@ static void gen6_suspend_rps(struct drm_device *dev) flush_delayed_work(&dev_priv->rps.delayed_resume_work); - /* - * TODO: disable RPS interrupts on GEN9+ too once RPS support - * is added for it. - */ - if (INTEL_INFO(dev)->gen < 9) - gen6_disable_rps_interrupts(dev); + gen6_disable_rps_interrupts(dev); } /** @@ -5655,7 +5642,6 @@ void intel_disable_gt_powersave(struct drm_device *dev) if (IS_IRONLAKE_M(dev)) { ironlake_disable_drps(dev); - ironlake_disable_rc6(dev); } else if (INTEL_INFO(dev)->gen >= 6) { intel_suspend_gt_powersave(dev); @@ -5683,12 +5669,7 @@ static void intel_gen6_powersave_work(struct work_struct *work) mutex_lock(&dev_priv->rps.hw_lock); - /* - * TODO: reset/enable RPS interrupts on GEN9+ too, once RPS support is - * added for it. 
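The percentage-based thresholds that the rewritten gen6_set_rps_thresholds() derives earlier in this patch reproduce the old magic numbers once converted to GT timestamp ticks; the 1.28us-per-tick conversion below is an assumption about what GT_INTERVAL_FROM_US() does on pre-gen9 parts, used only to check the arithmetic:

#include <stdio.h>

/* assumed pre-gen9 behaviour of GT_INTERVAL_FROM_US(): 1 tick = 1.28us */
static unsigned int gt_interval_from_us(unsigned int us)
{
	return us * 100 / 128;
}

int main(void)
{
	/* LOW_POWER tuning: upclock if more than 95% busy over 16ms */
	unsigned int ei_up = 16000, threshold_up = 95;

	printf("GEN6_RP_UP_EI        = %u\n",
	       gt_interval_from_us(ei_up));			/* 12500 */
	printf("GEN6_RP_UP_THRESHOLD = %u\n",
	       gt_interval_from_us(ei_up * threshold_up / 100));	/* 11875 */
	/* the old code hardcoded 12500 and 11800 for this state, so the
	 * computed values land within about 1% of the previous tuning */
	return 0;
}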
- */ - if (INTEL_INFO(dev)->gen < 9) - gen6_reset_rps_interrupts(dev); + gen6_reset_rps_interrupts(dev); if (IS_CHERRYVIEW(dev)) { cherryview_enable_rps(dev); @@ -5707,8 +5688,7 @@ static void intel_gen6_powersave_work(struct work_struct *work) } dev_priv->rps.enabled = true; - if (INTEL_INFO(dev)->gen < 9) - gen6_enable_rps_interrupts(dev); + gen6_enable_rps_interrupts(dev); mutex_unlock(&dev_priv->rps.hw_lock); @@ -5726,7 +5706,6 @@ void intel_enable_gt_powersave(struct drm_device *dev) if (IS_IRONLAKE_M(dev)) { mutex_lock(&dev->struct_mutex); ironlake_enable_drps(dev); - ironlake_enable_rc6(dev); intel_init_emon(dev); mutex_unlock(&dev->struct_mutex); } else if (INTEL_INFO(dev)->gen >= 6) { @@ -6259,11 +6238,22 @@ static void ivybridge_init_clock_gating(struct drm_device *dev) gen6_check_mch_setup(dev); } +static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv) +{ + I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE); + + /* + * Disable trickle feed and enable pnd deadline calculation + */ + I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE); + I915_WRITE(CBR1_VLV, 0); +} + static void valleyview_init_clock_gating(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE); + vlv_init_display_clock_gating(dev_priv); /* WaDisableEarlyCull:vlv */ I915_WRITE(_3D_CHICKEN3, @@ -6311,8 +6301,6 @@ static void valleyview_init_clock_gating(struct drm_device *dev) I915_WRITE(GEN7_UCGCTL4, I915_READ(GEN7_UCGCTL4) | GEN7_L3BANK2X_CLOCK_GATE_DISABLE); - I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE); - /* * BSpec says this must be set, even though * WaDisable4x2SubspanOptimization isn't listed for VLV. @@ -6349,9 +6337,7 @@ static void cherryview_init_clock_gating(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - I915_WRITE(DSPCLK_GATE_D, VRHUNIT_CLOCK_GATE_DISABLE); - - I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE); + vlv_init_display_clock_gating(dev_priv); /* WaVSRefCountFullforceMissDisable:chv */ /* WaDSRefCountFullforceMissDisable:chv */ @@ -6541,7 +6527,7 @@ void intel_init_pm(struct drm_device *dev) else if (INTEL_INFO(dev)->gen == 8) dev_priv->display.init_clock_gating = broadwell_init_clock_gating; } else if (IS_CHERRYVIEW(dev)) { - dev_priv->display.update_wm = cherryview_update_wm; + dev_priv->display.update_wm = valleyview_update_wm; dev_priv->display.update_sprite_wm = valleyview_update_sprite_wm; dev_priv->display.init_clock_gating = cherryview_init_clock_gating; @@ -6709,7 +6695,9 @@ static int chv_freq_opcode(struct drm_i915_private *dev_priv, int val) int intel_gpu_freq(struct drm_i915_private *dev_priv, int val) { - if (IS_CHERRYVIEW(dev_priv->dev)) + if (IS_GEN9(dev_priv->dev)) + return (val * GT_FREQUENCY_MULTIPLIER) / GEN9_FREQ_SCALER; + else if (IS_CHERRYVIEW(dev_priv->dev)) return chv_gpu_freq(dev_priv, val); else if (IS_VALLEYVIEW(dev_priv->dev)) return byt_gpu_freq(dev_priv, val); @@ -6719,7 +6707,9 @@ int intel_gpu_freq(struct drm_i915_private *dev_priv, int val) int intel_freq_opcode(struct drm_i915_private *dev_priv, int val) { - if (IS_CHERRYVIEW(dev_priv->dev)) + if (IS_GEN9(dev_priv->dev)) + return (val * GEN9_FREQ_SCALER) / GT_FREQUENCY_MULTIPLIER; + else if (IS_CHERRYVIEW(dev_priv->dev)) return chv_freq_opcode(dev_priv, val); else if (IS_VALLEYVIEW(dev_priv->dev)) return byt_freq_opcode(dev_priv, val); diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 
cd79c3843452..441e2502b889 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c @@ -317,29 +317,6 @@ gen7_render_ring_cs_stall_wa(struct intel_engine_cs *ring) return 0; } -static int gen7_ring_fbc_flush(struct intel_engine_cs *ring, u32 value) -{ - int ret; - - if (!ring->fbc_dirty) - return 0; - - ret = intel_ring_begin(ring, 6); - if (ret) - return ret; - /* WaFbcNukeOn3DBlt:ivb/hsw */ - intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1)); - intel_ring_emit(ring, MSG_FBC_REND_STATE); - intel_ring_emit(ring, value); - intel_ring_emit(ring, MI_STORE_REGISTER_MEM(1) | MI_SRM_LRM_GLOBAL_GTT); - intel_ring_emit(ring, MSG_FBC_REND_STATE); - intel_ring_emit(ring, ring->scratch.gtt_offset + 256); - intel_ring_advance(ring); - - ring->fbc_dirty = false; - return 0; -} - static int gen7_render_ring_flush(struct intel_engine_cs *ring, u32 invalidate_domains, u32 flush_domains) @@ -398,9 +375,6 @@ gen7_render_ring_flush(struct intel_engine_cs *ring, intel_ring_emit(ring, 0); intel_ring_advance(ring); - if (!invalidate_domains && flush_domains) - return gen7_ring_fbc_flush(ring, FBC_REND_NUKE); - return 0; } @@ -458,14 +432,7 @@ gen8_render_ring_flush(struct intel_engine_cs *ring, return ret; } - ret = gen8_emit_pipe_control(ring, flags, scratch_addr); - if (ret) - return ret; - - if (!invalidate_domains && flush_domains) - return gen7_ring_fbc_flush(ring, FBC_REND_NUKE); - - return 0; + return gen8_emit_pipe_control(ring, flags, scratch_addr); } static void ring_write_tail(struct intel_engine_cs *ring, @@ -2477,7 +2444,6 @@ static int gen6_ring_flush(struct intel_engine_cs *ring, u32 invalidate, u32 flush) { struct drm_device *dev = ring->dev; - struct drm_i915_private *dev_priv = dev->dev_private; uint32_t cmd; int ret; @@ -2486,7 +2452,7 @@ static int gen6_ring_flush(struct intel_engine_cs *ring, return ret; cmd = MI_FLUSH_DW; - if (INTEL_INFO(ring->dev)->gen >= 8) + if (INTEL_INFO(dev)->gen >= 8) cmd += 1; /* We always require a command barrier so that subsequent @@ -2506,7 +2472,7 @@ static int gen6_ring_flush(struct intel_engine_cs *ring, cmd |= MI_INVALIDATE_TLB; intel_ring_emit(ring, cmd); intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT); - if (INTEL_INFO(ring->dev)->gen >= 8) { + if (INTEL_INFO(dev)->gen >= 8) { intel_ring_emit(ring, 0); /* upper addr */ intel_ring_emit(ring, 0); /* value */ } else { @@ -2515,13 +2481,6 @@ static int gen6_ring_flush(struct intel_engine_cs *ring, } intel_ring_advance(ring); - if (!invalidate && flush) { - if (IS_GEN7(dev)) - return gen7_ring_fbc_flush(ring, FBC_REND_CACHE_CLEAN); - else if (IS_BROADWELL(dev)) - dev_priv->fbc.need_sw_cache_clean = true; - } - return 0; } diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index 8f3b49a23ccf..c761fe05ad6f 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h @@ -267,7 +267,6 @@ struct intel_engine_cs { */ struct drm_i915_gem_request *outstanding_lazy_request; bool gpu_caches_dirty; - bool fbc_dirty; wait_queue_head_t irq_queue; diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c index 6d8e29abbc33..ce00e6994eeb 100644 --- a/drivers/gpu/drm/i915/intel_runtime_pm.c +++ b/drivers/gpu/drm/i915/intel_runtime_pm.c @@ -194,8 +194,39 @@ static void hsw_power_well_post_enable(struct drm_i915_private *dev_priv) outb(inb(VGA_MSR_READ), VGA_MSR_WRITE); vga_put(dev->pdev, VGA_RSRC_LEGACY_IO); - if (IS_BROADWELL(dev) || 
(INTEL_INFO(dev)->gen >= 9)) - gen8_irq_power_well_post_enable(dev_priv); + if (IS_BROADWELL(dev)) + gen8_irq_power_well_post_enable(dev_priv, + 1 << PIPE_C | 1 << PIPE_B); +} + +static void skl_power_well_post_enable(struct drm_i915_private *dev_priv, + struct i915_power_well *power_well) +{ + struct drm_device *dev = dev_priv->dev; + + /* + * After we re-enable the power well, if we touch VGA register 0x3d5 + * we'll get unclaimed register interrupts. This stops after we write + * anything to the VGA MSR register. The vgacon module uses this + * register all the time, so if we unbind our driver and, as a + * consequence, bind vgacon, we'll get stuck in an infinite loop at + * console_unlock(). So here we touch the VGA MSR register, making + * sure vgacon can keep working normally without triggering interrupts + * and error messages. + */ + if (power_well->data == SKL_DISP_PW_2) { + vga_get_uninterruptible(dev->pdev, VGA_RSRC_LEGACY_IO); + outb(inb(VGA_MSR_READ), VGA_MSR_WRITE); + vga_put(dev->pdev, VGA_RSRC_LEGACY_IO); + + gen8_irq_power_well_post_enable(dev_priv, + 1 << PIPE_C | 1 << PIPE_B); + } + + if (power_well->data == SKL_DISP_PW_1) { + intel_prepare_ddi(dev); + gen8_irq_power_well_post_enable(dev_priv, 1 << PIPE_A); + } } static void hsw_set_power_well(struct drm_i915_private *dev_priv, @@ -293,7 +324,7 @@ static void skl_set_power_well(struct drm_i915_private *dev_priv, { uint32_t tmp, fuse_status; uint32_t req_mask, state_mask; - bool check_fuse_status = false; + bool is_enabled, enable_requested, check_fuse_status = false; tmp = I915_READ(HSW_PWR_WELL_DRIVER); fuse_status = I915_READ(SKL_FUSE_STATUS); @@ -324,15 +355,17 @@ static void skl_set_power_well(struct drm_i915_private *dev_priv, } req_mask = SKL_POWER_WELL_REQ(power_well->data); + enable_requested = tmp & req_mask; state_mask = SKL_POWER_WELL_STATE(power_well->data); + is_enabled = tmp & state_mask; if (enable) { - if (!(tmp & req_mask)) { + if (!enable_requested) { I915_WRITE(HSW_PWR_WELL_DRIVER, tmp | req_mask); - DRM_DEBUG_KMS("Enabling %s\n", power_well->name); } - if (!(tmp & state_mask)) { + if (!is_enabled) { + DRM_DEBUG_KMS("Enabling %s\n", power_well->name); if (wait_for((I915_READ(HSW_PWR_WELL_DRIVER) & state_mask), 1)) DRM_ERROR("%s enable timeout\n", @@ -340,7 +373,7 @@ static void skl_set_power_well(struct drm_i915_private *dev_priv, check_fuse_status = true; } } else { - if (tmp & req_mask) { + if (enable_requested) { I915_WRITE(HSW_PWR_WELL_DRIVER, tmp & ~req_mask); POSTING_READ(HSW_PWR_WELL_DRIVER); DRM_DEBUG_KMS("Disabling %s\n", power_well->name); @@ -358,6 +391,9 @@ static void skl_set_power_well(struct drm_i915_private *dev_priv, DRM_ERROR("PG2 distributing status timeout\n"); } } + + if (enable && !is_enabled) + skl_power_well_post_enable(dev_priv, power_well); } static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv, @@ -1420,7 +1456,7 @@ void intel_power_domains_init_hw(struct drm_i915_private *dev_priv) } /** - * intel_aux_display_runtime_get - grab an auxilliary power domain reference + * intel_aux_display_runtime_get - grab an auxiliary power domain reference * @dev_priv: i915 device instance * * This function grabs a power domain reference for the auxiliary power domain @@ -1437,10 +1473,10 @@ void intel_aux_display_runtime_get(struct drm_i915_private *dev_priv) } /** - * intel_aux_display_runtime_put - release an auxilliary power domain reference + * intel_aux_display_runtime_put - release an auxiliary power domain reference * @dev_priv: i915 device instance * - * This 
function drops the auxilliary power domain reference obtained by + * This function drops the auxiliary power domain reference obtained by * intel_aux_display_runtime_get() and might power down the corresponding * hardware block right away if this is the last reference. */ diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index 64ad2b40179f..9e554c2cfbb4 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c @@ -1247,7 +1247,7 @@ static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder) switch (crtc->config->pixel_multiplier) { default: - WARN(1, "unknown pixel mutlipler specified\n"); + WARN(1, "unknown pixel multiplier specified\n"); case 1: rate = SDVO_CLOCK_RATE_MULT_1X; break; case 2: rate = SDVO_CLOCK_RATE_MULT_2X; break; case 4: rate = SDVO_CLOCK_RATE_MULT_4X; break; diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c index 7051da7015d3..a82873631851 100644 --- a/drivers/gpu/drm/i915/intel_sprite.c +++ b/drivers/gpu/drm/i915/intel_sprite.c @@ -1361,10 +1361,10 @@ out_unlock: int intel_plane_restore(struct drm_plane *plane) { - if (!plane->crtc || !plane->fb) + if (!plane->crtc || !plane->state->fb) return 0; - return plane->funcs->update_plane(plane, plane->crtc, plane->fb, + return plane->funcs->update_plane(plane, plane->crtc, plane->state->fb, plane->state->crtc_x, plane->state->crtc_y, plane->state->crtc_w, plane->state->crtc_h, plane->state->src_x, plane->state->src_y, diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c index 8879f17770aa..ab5cc94588e1 100644 --- a/drivers/gpu/drm/i915/intel_uncore.c +++ b/drivers/gpu/drm/i915/intel_uncore.c @@ -557,18 +557,24 @@ hsw_unclaimed_reg_debug(struct drm_i915_private *dev_priv, u32 reg, bool read, WARN(1, "Unclaimed register detected %s %s register 0x%x\n", when, op, reg); __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM); + i915.mmio_debug--; /* Only report the first N failures */ } } static void hsw_unclaimed_reg_detect(struct drm_i915_private *dev_priv) { - if (i915.mmio_debug) + static bool mmio_debug_once = true; + + if (i915.mmio_debug || !mmio_debug_once) return; if (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM) { - DRM_ERROR("Unclaimed register detected. Please use the i915.mmio_debug=1 to debug this problem."); + DRM_DEBUG("Unclaimed register detected, " + "enabling oneshot unclaimed register reporting. " + "Please use i915.mmio_debug=N for more information.\n"); __raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM); + i915.mmio_debug = mmio_debug_once--; } } @@ -1082,8 +1088,14 @@ static void intel_uncore_fw_domains_init(struct drm_device *dev) /* We need to init first for ECOBUS access and then * determine later if we want to reinit, in case of MT access is - * not working + * not working. In this stage we don't know which flavour this + * ivb is, so it is better to reset also the gen6 fw registers + * before the ecobus check. 
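The one-shot escalation added to hsw_unclaimed_reg_detect() above reduces to a small self-disarming pattern; a sketch with invented names, not driver code:

#include <stdio.h>

static int debug_level;	/* stands in for the i915.mmio_debug modparam */

static void check_unclaimed(int fault_pending)
{
	static int oneshot = 1;	/* arms exactly one automatic escalation */

	/* skip if the user already asked for tracing or we already fired */
	if (debug_level || !oneshot)
		return;

	if (fault_pending) {
		/* debug_level becomes 1 and the trigger disarms itself;
		 * subsequent reports then decrement debug_level back to
		 * zero, giving the "only report the first N failures"
		 * behaviour seen in hsw_unclaimed_reg_debug() */
		debug_level = oneshot--;
		printf("unclaimed access: oneshot reporting enabled\n");
	}
}

int main(void)
{
	check_unclaimed(1);	/* fires once */
	check_unclaimed(1);	/* silent: escalation already used up */
	printf("debug_level=%d\n", debug_level);
	return 0;
}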
*/ + + __raw_i915_write32(dev_priv, FORCEWAKE, 0); + __raw_posting_read(dev_priv, ECOBUS); + fw_domain_init(dev_priv, FW_DOMAIN_ID_RENDER, FORCEWAKE_MT, FORCEWAKE_MT_ACK); diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c index d13d1b5a859f..df09ca7c4889 100644 --- a/drivers/gpu/drm/radeon/radeon_fence.c +++ b/drivers/gpu/drm/radeon/radeon_fence.c @@ -1030,37 +1030,59 @@ static inline bool radeon_test_signaled(struct radeon_fence *fence) return test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->base.flags); } +struct radeon_wait_cb { + struct fence_cb base; + struct task_struct *task; +}; + +static void +radeon_fence_wait_cb(struct fence *fence, struct fence_cb *cb) +{ + struct radeon_wait_cb *wait = + container_of(cb, struct radeon_wait_cb, base); + + wake_up_process(wait->task); +} + static signed long radeon_fence_default_wait(struct fence *f, bool intr, signed long t) { struct radeon_fence *fence = to_radeon_fence(f); struct radeon_device *rdev = fence->rdev; - bool signaled; + struct radeon_wait_cb cb; - fence_enable_sw_signaling(&fence->base); + cb.task = current; - /* - * This function has to return -EDEADLK, but cannot hold - * exclusive_lock during the wait because some callers - * may already hold it. This means checking needs_reset without - * lock, and not fiddling with any gpu internals. - * - * The callback installed with fence_enable_sw_signaling will - * run before our wait_event_*timeout call, so we will see - * both the signaled fence and the changes to needs_reset. - */ + if (fence_add_callback(f, &cb.base, radeon_fence_wait_cb)) + return t; + + while (t > 0) { + if (intr) + set_current_state(TASK_INTERRUPTIBLE); + else + set_current_state(TASK_UNINTERRUPTIBLE); + + /* + * radeon_test_signaled must be called after + * set_current_state to prevent a race with wake_up_process + */ + if (radeon_test_signaled(fence)) + break; + + if (rdev->needs_reset) { + t = -EDEADLK; + break; + } + + t = schedule_timeout(t); + + if (t > 0 && intr && signal_pending(current)) + t = -ERESTARTSYS; + } + + __set_current_state(TASK_RUNNING); + fence_remove_callback(f, &cb.base); - if (intr) - t = wait_event_interruptible_timeout(rdev->fence_queue, - ((signaled = radeon_test_signaled(fence)) || - rdev->needs_reset), t); - else - t = wait_event_timeout(rdev->fence_queue, - ((signaled = radeon_test_signaled(fence)) || - rdev->needs_reset), t); - - if (t > 0 && !signaled) - return -EDEADLK; return t; } diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c index 86e75798320f..b1d74bc375d8 100644 --- a/drivers/gpu/drm/radeon/si.c +++ b/drivers/gpu/drm/radeon/si.c @@ -7236,8 +7236,7 @@ int si_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk) WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_BYPASS_EN_MASK, ~UPLL_BYPASS_EN_MASK); if (!vclk || !dclk) { - /* keep the Bypass mode, put PLL to sleep */ - WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_SLEEP_MASK, ~UPLL_SLEEP_MASK); + /* keep the Bypass mode */ return 0; } @@ -7253,8 +7252,7 @@ int si_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk) /* set VCO_MODE to 1 */ WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_VCO_MODE_MASK, ~UPLL_VCO_MODE_MASK); - /* toggle UPLL_SLEEP to 1 then back to 0 */ - WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_SLEEP_MASK, ~UPLL_SLEEP_MASK); + /* disable sleep mode */ WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_SLEEP_MASK); /* deassert UPLL_RESET */ diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index 6c6b655defcf..e13b9cbc304e 100644 --- 
a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c @@ -725,32 +725,6 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) goto out_err1; } - ret = ttm_bo_init_mm(&dev_priv->bdev, TTM_PL_VRAM, - (dev_priv->vram_size >> PAGE_SHIFT)); - if (unlikely(ret != 0)) { - DRM_ERROR("Failed initializing memory manager for VRAM.\n"); - goto out_err2; - } - - dev_priv->has_gmr = true; - if (((dev_priv->capabilities & (SVGA_CAP_GMR | SVGA_CAP_GMR2)) == 0) || - refuse_dma || ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR, - VMW_PL_GMR) != 0) { - DRM_INFO("No GMR memory available. " - "Graphics memory resources are very limited.\n"); - dev_priv->has_gmr = false; - } - - if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) { - dev_priv->has_mob = true; - if (ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_MOB, - VMW_PL_MOB) != 0) { - DRM_INFO("No MOB memory available. " - "3D will be disabled.\n"); - dev_priv->has_mob = false; - } - } - dev_priv->mmio_mtrr = arch_phys_wc_add(dev_priv->mmio_start, dev_priv->mmio_size); @@ -813,6 +787,33 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset) goto out_no_fman; } + + ret = ttm_bo_init_mm(&dev_priv->bdev, TTM_PL_VRAM, + (dev_priv->vram_size >> PAGE_SHIFT)); + if (unlikely(ret != 0)) { + DRM_ERROR("Failed initializing memory manager for VRAM.\n"); + goto out_no_vram; + } + + dev_priv->has_gmr = true; + if (((dev_priv->capabilities & (SVGA_CAP_GMR | SVGA_CAP_GMR2)) == 0) || + refuse_dma || ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR, + VMW_PL_GMR) != 0) { + DRM_INFO("No GMR memory available. " + "Graphics memory resources are very limited.\n"); + dev_priv->has_gmr = false; + } + + if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) { + dev_priv->has_mob = true; + if (ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_MOB, + VMW_PL_MOB) != 0) { + DRM_INFO("No MOB memory available. " + "3D will be disabled.\n"); + dev_priv->has_mob = false; + } + } + vmw_kms_save_vga(dev_priv); /* Start kms and overlay systems, needs fifo. 
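The vmw_driver_load() reordering above keeps the usual kernel goto-ladder shape, in which each error label undoes only the steps that had already succeeded; a schematic sketch with invented names:

#include <stdio.h>

static int init_step(const char *what, int fail)
{
	printf("%s %s\n", fail ? "failed:" : "init:", what);
	return fail ? -1 : 0;
}

static void undo_step(const char *what)
{
	printf("undo: %s\n", what);
}

static int driver_load(int fail_kms)
{
	int ret;

	ret = init_step("fman", 0);	/* fence manager comes first now */
	if (ret)
		goto out_no_fman;
	ret = init_step("vram", 0);	/* ...then the TTM memory managers */
	if (ret)
		goto out_no_vram;
	ret = init_step("kms", fail_kms);
	if (ret)
		goto out_no_kms;
	return 0;

out_no_kms:
	undo_step("vram");	/* labels unwind in reverse init order */
out_no_vram:
	undo_step("fman");
out_no_fman:
	return ret;
}

int main(void)
{
	return driver_load(1) ? 1 : 0;
}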
*/ @@ -838,6 +839,12 @@ out_no_fifo: vmw_kms_close(dev_priv); out_no_kms: vmw_kms_restore_vga(dev_priv); + if (dev_priv->has_mob) + (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB); + if (dev_priv->has_gmr) + (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR); + (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM); +out_no_vram: vmw_fence_manager_takedown(dev_priv->fman); out_no_fman: if (dev_priv->capabilities & SVGA_CAP_IRQMASK) @@ -853,12 +860,6 @@ out_err4: iounmap(dev_priv->mmio_virt); out_err3: arch_phys_wc_del(dev_priv->mmio_mtrr); - if (dev_priv->has_mob) - (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB); - if (dev_priv->has_gmr) - (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR); - (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM); -out_err2: (void)ttm_bo_device_release(&dev_priv->bdev); out_err1: vmw_ttm_global_release(dev_priv); @@ -887,6 +888,13 @@ static int vmw_driver_unload(struct drm_device *dev) } vmw_kms_close(dev_priv); vmw_overlay_close(dev_priv); + + if (dev_priv->has_mob) + (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB); + if (dev_priv->has_gmr) + (void)ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR); + (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM); + vmw_fence_manager_takedown(dev_priv->fman); if (dev_priv->capabilities & SVGA_CAP_IRQMASK) drm_irq_uninstall(dev_priv->dev); @@ -898,11 +906,6 @@ static int vmw_driver_unload(struct drm_device *dev) ttm_object_device_release(&dev_priv->tdev); iounmap(dev_priv->mmio_virt); arch_phys_wc_del(dev_priv->mmio_mtrr); - if (dev_priv->has_mob) - (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB); - if (dev_priv->has_gmr) - (void)ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR); - (void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM); (void)ttm_bo_device_release(&dev_priv->bdev); vmw_ttm_global_release(dev_priv); @@ -1235,6 +1238,7 @@ static void vmw_remove(struct pci_dev *pdev) { struct drm_device *dev = pci_get_drvdata(pdev); + pci_disable_device(pdev); drm_put_dev(dev); } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c index 33176d05db35..654c8daeb5ab 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c @@ -890,7 +890,8 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv, ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo); if (unlikely(ret != 0)) { DRM_ERROR("Could not find or use MOB buffer.\n"); - return -EINVAL; + ret = -EINVAL; + goto out_no_reloc; } bo = &vmw_bo->base; @@ -914,7 +915,7 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv, out_no_reloc: vmw_dmabuf_unreference(&vmw_bo); - vmw_bo_p = NULL; + *vmw_bo_p = NULL; return ret; } @@ -951,7 +952,8 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv, ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo); if (unlikely(ret != 0)) { DRM_ERROR("Could not find or use GMR region.\n"); - return -EINVAL; + ret = -EINVAL; + goto out_no_reloc; } bo = &vmw_bo->base; @@ -974,7 +976,7 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv, out_no_reloc: vmw_dmabuf_unreference(&vmw_bo); - vmw_bo_p = NULL; + *vmw_bo_p = NULL; return ret; } @@ -2780,13 +2782,11 @@ int vmw_execbuf_ioctl(struct drm_device *dev, void *data, NULL, arg->command_size, arg->throttle_us, (void __user *)(unsigned long)arg->fence_rep, NULL); - + ttm_read_unlock(&dev_priv->reservation_sem); if (unlikely(ret != 0)) - goto out_unlock; + return ret; vmw_kms_cursor_post_execbuf(dev_priv); -out_unlock: - 
ttm_read_unlock(&dev_priv->reservation_sem); - return ret; + return 0; } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c index 8725b79e7847..07cda8cbbddb 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c @@ -2033,23 +2033,17 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data, int i; struct drm_mode_config *mode_config = &dev->mode_config; - ret = ttm_read_lock(&dev_priv->reservation_sem, true); - if (unlikely(ret != 0)) - return ret; - if (!arg->num_outputs) { struct drm_vmw_rect def_rect = {0, 0, 800, 600}; vmw_du_update_layout(dev_priv, 1, &def_rect); - goto out_unlock; + return 0; } rects_size = arg->num_outputs * sizeof(struct drm_vmw_rect); rects = kcalloc(arg->num_outputs, sizeof(struct drm_vmw_rect), GFP_KERNEL); - if (unlikely(!rects)) { - ret = -ENOMEM; - goto out_unlock; - } + if (unlikely(!rects)) + return -ENOMEM; user_rects = (void __user *)(unsigned long)arg->rects; ret = copy_from_user(rects, user_rects, rects_size); @@ -2074,7 +2068,5 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data, out_free: kfree(rects); -out_unlock: - ttm_read_unlock(&dev_priv->reservation_sem); return ret; } diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c index 210cf4874cb7..edf274cabe81 100644 --- a/drivers/i2c/i2c-core.c +++ b/drivers/i2c/i2c-core.c @@ -679,9 +679,6 @@ static int i2c_device_remove(struct device *dev) status = driver->remove(client); } - if (dev->of_node) - irq_dispose_mapping(client->irq); - dev_pm_domain_detach(&client->dev, true); return status; } diff --git a/drivers/input/keyboard/tc3589x-keypad.c b/drivers/input/keyboard/tc3589x-keypad.c index 8ff612d160b0..563932500ff1 100644 --- a/drivers/input/keyboard/tc3589x-keypad.c +++ b/drivers/input/keyboard/tc3589x-keypad.c @@ -411,9 +411,9 @@ static int tc3589x_keypad_probe(struct platform_device *pdev) input_set_drvdata(input, keypad); - error = request_threaded_irq(irq, NULL, - tc3589x_keypad_irq, plat->irqtype, - "tc3589x-keypad", keypad); + error = request_threaded_irq(irq, NULL, tc3589x_keypad_irq, + plat->irqtype | IRQF_ONESHOT, + "tc3589x-keypad", keypad); if (error < 0) { dev_err(&pdev->dev, "Could not allocate irq %d,error %d\n", diff --git a/drivers/input/misc/mma8450.c b/drivers/input/misc/mma8450.c index 59d4dcddf6de..98228773a111 100644 --- a/drivers/input/misc/mma8450.c +++ b/drivers/input/misc/mma8450.c @@ -187,6 +187,7 @@ static int mma8450_probe(struct i2c_client *c, idev->private = m; idev->input->name = MMA8450_DRV_NAME; idev->input->id.bustype = BUS_I2C; + idev->input->dev.parent = &c->dev; idev->poll = mma8450_poll; idev->poll_interval = POLL_INTERVAL; idev->poll_interval_max = POLL_INTERVAL_MAX; diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c index d28726a0ef85..1bd15ebc01f2 100644 --- a/drivers/input/mouse/alps.c +++ b/drivers/input/mouse/alps.c @@ -2605,8 +2605,10 @@ int alps_detect(struct psmouse *psmouse, bool set_properties) return -ENOMEM; error = alps_identify(psmouse, priv); - if (error) + if (error) { + kfree(priv); return error; + } if (set_properties) { psmouse->vendor = "ALPS"; diff --git a/drivers/input/mouse/cyapa_gen3.c b/drivers/input/mouse/cyapa_gen3.c index 77e9d70a986b..1e2291c378fe 100644 --- a/drivers/input/mouse/cyapa_gen3.c +++ b/drivers/input/mouse/cyapa_gen3.c @@ -20,7 +20,7 @@ #include <linux/input/mt.h> #include <linux/module.h> #include <linux/slab.h> -#include <linux/unaligned/access_ok.h> +#include <asm/unaligned.h> #include 
"cyapa.h" diff --git a/drivers/input/mouse/cyapa_gen5.c b/drivers/input/mouse/cyapa_gen5.c index ddf5393a1180..5b611dd71e79 100644 --- a/drivers/input/mouse/cyapa_gen5.c +++ b/drivers/input/mouse/cyapa_gen5.c @@ -17,7 +17,7 @@ #include <linux/mutex.h> #include <linux/completion.h> #include <linux/slab.h> -#include <linux/unaligned/access_ok.h> +#include <asm/unaligned.h> #include <linux/crc-itu-t.h> #include "cyapa.h" @@ -1926,7 +1926,7 @@ static int cyapa_gen5_read_idac_data(struct cyapa *cyapa, electrodes_tx = cyapa->electrodes_x; max_element_cnt = ((cyapa->aligned_electrodes_rx + 7) & ~7u) * electrodes_tx; - } else if (idac_data_type == GEN5_RETRIEVE_SELF_CAP_PWC_DATA) { + } else { offset = 2; max_element_cnt = cyapa->electrodes_x + cyapa->electrodes_y; diff --git a/drivers/input/mouse/focaltech.c b/drivers/input/mouse/focaltech.c index 757f78a94aec..23d259416f2f 100644 --- a/drivers/input/mouse/focaltech.c +++ b/drivers/input/mouse/focaltech.c @@ -67,9 +67,6 @@ static void focaltech_reset(struct psmouse *psmouse) #define FOC_MAX_FINGERS 5 -#define FOC_MAX_X 2431 -#define FOC_MAX_Y 1663 - /* * Current state of a single finger on the touchpad. */ @@ -129,9 +126,17 @@ static void focaltech_report_state(struct psmouse *psmouse) input_mt_slot(dev, i); input_mt_report_slot_state(dev, MT_TOOL_FINGER, active); if (active) { - input_report_abs(dev, ABS_MT_POSITION_X, finger->x); + unsigned int clamped_x, clamped_y; + /* + * The touchpad might report invalid data, so we clamp + * the resulting values so that we do not confuse + * userspace. + */ + clamped_x = clamp(finger->x, 0U, priv->x_max); + clamped_y = clamp(finger->y, 0U, priv->y_max); + input_report_abs(dev, ABS_MT_POSITION_X, clamped_x); input_report_abs(dev, ABS_MT_POSITION_Y, - FOC_MAX_Y - finger->y); + priv->y_max - clamped_y); } } input_mt_report_pointer_emulation(dev, true); @@ -180,16 +185,6 @@ static void focaltech_process_abs_packet(struct psmouse *psmouse, state->pressed = (packet[0] >> 4) & 1; - /* - * packet[5] contains some kind of tool size in the most - * significant nibble. 0xff is a special value (latching) that - * signals a large contact area. 
- */ - if (packet[5] == 0xff) { - state->fingers[finger].valid = false; - return; - } - state->fingers[finger].x = ((packet[1] & 0xf) << 8) | packet[2]; state->fingers[finger].y = (packet[3] << 8) | packet[4]; state->fingers[finger].valid = true; @@ -381,6 +376,23 @@ static int focaltech_read_size(struct psmouse *psmouse) return 0; } + +static void focaltech_set_resolution(struct psmouse *psmouse, unsigned int resolution) +{ + /* not supported yet */ +} + +static void focaltech_set_rate(struct psmouse *psmouse, unsigned int rate) +{ + /* not supported yet */ +} + +static void focaltech_set_scale(struct psmouse *psmouse, + enum psmouse_scale scale) +{ + /* not supported yet */ +} + int focaltech_init(struct psmouse *psmouse) { struct focaltech_data *priv; @@ -415,6 +427,14 @@ int focaltech_init(struct psmouse *psmouse) psmouse->cleanup = focaltech_reset; /* resync is not supported yet */ psmouse->resync_time = 0; + /* + * rate/resolution/scale changes are not supported yet, and + * the generic implementations of these functions seem to + * confuse some touchpads + */ + psmouse->set_resolution = focaltech_set_resolution; + psmouse->set_rate = focaltech_set_rate; + psmouse->set_scale = focaltech_set_scale; return 0; diff --git a/drivers/input/mouse/psmouse-base.c b/drivers/input/mouse/psmouse-base.c index 4ccd01d7a48d..8bc61237bc1b 100644 --- a/drivers/input/mouse/psmouse-base.c +++ b/drivers/input/mouse/psmouse-base.c @@ -454,6 +454,17 @@ static void psmouse_set_rate(struct psmouse *psmouse, unsigned int rate) } /* + * Here we set the mouse scaling. + */ + +static void psmouse_set_scale(struct psmouse *psmouse, enum psmouse_scale scale) +{ + ps2_command(&psmouse->ps2dev, NULL, + scale == PSMOUSE_SCALE21 ? PSMOUSE_CMD_SETSCALE21 : + PSMOUSE_CMD_SETSCALE11); +} + +/* * psmouse_poll() - default poll handler. Everyone except for ALPS uses it.
*/ @@ -689,6 +700,7 @@ static void psmouse_apply_defaults(struct psmouse *psmouse) psmouse->set_rate = psmouse_set_rate; psmouse->set_resolution = psmouse_set_resolution; + psmouse->set_scale = psmouse_set_scale; psmouse->poll = psmouse_poll; psmouse->protocol_handler = psmouse_process_byte; psmouse->pktsize = 3; @@ -1160,7 +1172,7 @@ static void psmouse_initialize(struct psmouse *psmouse) if (psmouse_max_proto != PSMOUSE_PS2) { psmouse->set_rate(psmouse, psmouse->rate); psmouse->set_resolution(psmouse, psmouse->resolution); - ps2_command(&psmouse->ps2dev, NULL, PSMOUSE_CMD_SETSCALE11); + psmouse->set_scale(psmouse, PSMOUSE_SCALE11); } } diff --git a/drivers/input/mouse/psmouse.h b/drivers/input/mouse/psmouse.h index c2ff137ecbdb..d02e1bdc9ae4 100644 --- a/drivers/input/mouse/psmouse.h +++ b/drivers/input/mouse/psmouse.h @@ -36,6 +36,11 @@ typedef enum { PSMOUSE_FULL_PACKET } psmouse_ret_t; +enum psmouse_scale { + PSMOUSE_SCALE11, + PSMOUSE_SCALE21 +}; + struct psmouse { void *private; struct input_dev *dev; @@ -67,6 +72,7 @@ struct psmouse { psmouse_ret_t (*protocol_handler)(struct psmouse *psmouse); void (*set_rate)(struct psmouse *psmouse, unsigned int rate); void (*set_resolution)(struct psmouse *psmouse, unsigned int resolution); + void (*set_scale)(struct psmouse *psmouse, enum psmouse_scale scale); int (*reconnect)(struct psmouse *psmouse); void (*disconnect)(struct psmouse *psmouse); diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig index 58917525126e..6261fd6d7c3c 100644 --- a/drivers/input/touchscreen/Kconfig +++ b/drivers/input/touchscreen/Kconfig @@ -943,6 +943,7 @@ config TOUCHSCREEN_SUN4I tristate "Allwinner sun4i resistive touchscreen controller support" depends on ARCH_SUNXI || COMPILE_TEST depends on HWMON + depends on THERMAL || !THERMAL_OF help This selects support for the resistive touchscreen controller found on Allwinner sunxi SoCs. diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig index baa0d9786f50..1ae4e547b419 100644 --- a/drivers/iommu/Kconfig +++ b/drivers/iommu/Kconfig @@ -23,6 +23,7 @@ config IOMMU_IO_PGTABLE config IOMMU_IO_PGTABLE_LPAE bool "ARMv7/v8 Long Descriptor Format" select IOMMU_IO_PGTABLE + depends on ARM || ARM64 || COMPILE_TEST help Enable support for the ARM long descriptor pagetable format. This allocator supports 4K/2M/1G, 16K/32M and 64K/512M page @@ -63,6 +64,7 @@ config MSM_IOMMU bool "MSM IOMMU Support" depends on ARM depends on ARCH_MSM8X60 || ARCH_MSM8960 || COMPILE_TEST + depends on BROKEN select IOMMU_API help Support for the IOMMUs found on certain Qualcomm SOCs. 
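The psmouse hunks above turn the hard-coded SETSCALE11 command into a per-protocol set_scale hook alongside the existing set_rate and set_resolution hooks, which is what allows the focaltech driver to install no-op handlers. A minimal sketch of the pattern for a hypothetical protocol driver follows; the example_* names are illustrative only and not part of this patch:

#include "psmouse.h"

/*
 * Sketch: a protocol whose hardware is confused by the generic PS/2
 * commands overrides the hooks wired up by psmouse_apply_defaults()
 * with no-ops, exactly as focaltech does above.
 */
static void example_set_scale(struct psmouse *psmouse,
			      enum psmouse_scale scale)
{
	/* intentionally empty: scaling not supported by this hardware */
}

static int example_init(struct psmouse *psmouse)
{
	/* keep the generic set_rate/set_resolution, stub out scaling */
	psmouse->set_scale = example_set_scale;
	return 0;
}

Because psmouse_initialize() now calls psmouse->set_scale() instead of issuing PSMOUSE_CMD_SETSCALE11 directly, the stub is enough to keep the core from ever touching the device with a command it cannot handle.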
diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c index 7ce52737c7a1..dc14fec4ede1 100644 --- a/drivers/iommu/exynos-iommu.c +++ b/drivers/iommu/exynos-iommu.c @@ -1186,8 +1186,15 @@ static const struct iommu_ops exynos_iommu_ops = { static int __init exynos_iommu_init(void) { + struct device_node *np; int ret; + np = of_find_matching_node(NULL, sysmmu_of_match); + if (!np) + return 0; + + of_node_put(np); + lv2table_kmem_cache = kmem_cache_create("exynos-iommu-lv2table", LV2TABLE_SIZE, LV2TABLE_SIZE, 0, NULL); if (!lv2table_kmem_cache) { diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c index 5a500edf00cc..b610a8dee238 100644 --- a/drivers/iommu/io-pgtable-arm.c +++ b/drivers/iommu/io-pgtable-arm.c @@ -56,7 +56,8 @@ ((((d)->levels - ((l) - ARM_LPAE_START_LVL(d) + 1)) \ * (d)->bits_per_level) + (d)->pg_shift) -#define ARM_LPAE_PAGES_PER_PGD(d) ((d)->pgd_size >> (d)->pg_shift) +#define ARM_LPAE_PAGES_PER_PGD(d) \ + DIV_ROUND_UP((d)->pgd_size, 1UL << (d)->pg_shift) /* * Calculate the index at level l used to map virtual address a using the @@ -66,7 +67,7 @@ ((l) == ARM_LPAE_START_LVL(d) ? ilog2(ARM_LPAE_PAGES_PER_PGD(d)) : 0) #define ARM_LPAE_LVL_IDX(a,l,d) \ - (((a) >> ARM_LPAE_LVL_SHIFT(l,d)) & \ + (((u64)(a) >> ARM_LPAE_LVL_SHIFT(l,d)) & \ ((1 << ((d)->bits_per_level + ARM_LPAE_PGD_IDX(l,d))) - 1)) /* Calculate the block/page mapping size at level l for pagetable in d. */ diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c index f59f857b702e..a4ba851825c2 100644 --- a/drivers/iommu/omap-iommu.c +++ b/drivers/iommu/omap-iommu.c @@ -1376,6 +1376,13 @@ static int __init omap_iommu_init(void) struct kmem_cache *p; const unsigned long flags = SLAB_HWCACHE_ALIGN; size_t align = 1 << 10; /* L2 pagetable alignement */ + struct device_node *np; + + np = of_find_matching_node(NULL, omap_iommu_of_match); + if (!np) + return 0; + + of_node_put(np); p = kmem_cache_create("iopte_cache", IOPTE_TABLE_SIZE, align, flags, iopte_cachep_ctor); diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c index 6a8b1ec4a48a..9f74fddcd304 100644 --- a/drivers/iommu/rockchip-iommu.c +++ b/drivers/iommu/rockchip-iommu.c @@ -1015,8 +1015,15 @@ static struct platform_driver rk_iommu_driver = { static int __init rk_iommu_init(void) { + struct device_node *np; int ret; + np = of_find_matching_node(NULL, rk_iommu_dt_ids); + if (!np) + return 0; + + of_node_put(np); + ret = bus_set_iommu(&platform_bus_type, &rk_iommu_ops); if (ret) return ret; diff --git a/drivers/irqchip/irq-armada-370-xp.c b/drivers/irqchip/irq-armada-370-xp.c index 463c235acbdc..4387dae14e45 100644 --- a/drivers/irqchip/irq-armada-370-xp.c +++ b/drivers/irqchip/irq-armada-370-xp.c @@ -69,6 +69,7 @@ static void __iomem *per_cpu_int_base; static void __iomem *main_int_base; static struct irq_domain *armada_370_xp_mpic_domain; static u32 doorbell_mask_reg; +static int parent_irq; #ifdef CONFIG_PCI_MSI static struct irq_domain *armada_370_xp_msi_domain; static DECLARE_BITMAP(msi_used, PCI_MSI_DOORBELL_NR); @@ -356,6 +357,7 @@ static int armada_xp_mpic_secondary_init(struct notifier_block *nfb, { if (action == CPU_STARTING || action == CPU_STARTING_FROZEN) armada_xp_mpic_smp_cpu_init(); + return NOTIFY_OK; } @@ -364,6 +366,20 @@ static struct notifier_block armada_370_xp_mpic_cpu_notifier = { .priority = 100, }; +static int mpic_cascaded_secondary_init(struct notifier_block *nfb, + unsigned long action, void *hcpu) +{ + if (action == CPU_STARTING || action == 
CPU_STARTING_FROZEN) + enable_percpu_irq(parent_irq, IRQ_TYPE_NONE); + + return NOTIFY_OK; +} + +static struct notifier_block mpic_cascaded_cpu_notifier = { + .notifier_call = mpic_cascaded_secondary_init, + .priority = 100, +}; + #endif /* CONFIG_SMP */ static struct irq_domain_ops armada_370_xp_mpic_irq_ops = { @@ -539,7 +555,7 @@ static int __init armada_370_xp_mpic_of_init(struct device_node *node, struct device_node *parent) { struct resource main_int_res, per_cpu_int_res; - int parent_irq, nr_irqs, i; + int nr_irqs, i; u32 control; BUG_ON(of_address_to_resource(node, 0, &main_int_res)); @@ -587,6 +603,9 @@ static int __init armada_370_xp_mpic_of_init(struct device_node *node, register_cpu_notifier(&armada_370_xp_mpic_cpu_notifier); #endif } else { +#ifdef CONFIG_SMP + register_cpu_notifier(&mpic_cascaded_cpu_notifier); +#endif irq_set_chained_handler(parent_irq, armada_370_xp_mpic_handle_cascade_irq); } diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c index d8996bdf0f61..596b0a9eee99 100644 --- a/drivers/irqchip/irq-gic-v3-its.c +++ b/drivers/irqchip/irq-gic-v3-its.c @@ -416,13 +416,14 @@ static void its_send_single_command(struct its_node *its, { struct its_cmd_block *cmd, *sync_cmd, *next_cmd; struct its_collection *sync_col; + unsigned long flags; - raw_spin_lock(&its->lock); + raw_spin_lock_irqsave(&its->lock, flags); cmd = its_allocate_entry(its); if (!cmd) { /* We're soooooo screewed... */ pr_err_ratelimited("ITS can't allocate, dropping command\n"); - raw_spin_unlock(&its->lock); + raw_spin_unlock_irqrestore(&its->lock, flags); return; } sync_col = builder(cmd, desc); @@ -442,7 +443,7 @@ static void its_send_single_command(struct its_node *its, post: next_cmd = its_post_commands(its); - raw_spin_unlock(&its->lock); + raw_spin_unlock_irqrestore(&its->lock, flags); its_wait_for_range_completion(its, cmd, next_cmd); } @@ -799,21 +800,43 @@ static int its_alloc_tables(struct its_node *its) { int err; int i; - int psz = PAGE_SIZE; + int psz = SZ_64K; u64 shr = GITS_BASER_InnerShareable; for (i = 0; i < GITS_BASER_NR_REGS; i++) { u64 val = readq_relaxed(its->base + GITS_BASER + i * 8); u64 type = GITS_BASER_TYPE(val); u64 entry_size = GITS_BASER_ENTRY_SIZE(val); + int order = get_order(psz); + int alloc_size; u64 tmp; void *base; if (type == GITS_BASER_TYPE_NONE) continue; - /* We're lazy and only allocate a single page for now */ - base = (void *)get_zeroed_page(GFP_KERNEL); + /* + * Allocate as many entries as required to fit the + * range of device IDs that the ITS can grok... The ID + * space being incredibly sparse, this results in a + * massive waste of memory. + * + * For other tables, only allocate a single page. 
+ */ + if (type == GITS_BASER_TYPE_DEVICE) { + u64 typer = readq_relaxed(its->base + GITS_TYPER); + u32 ids = GITS_TYPER_DEVBITS(typer); + + order = get_order((1UL << ids) * entry_size); + if (order >= MAX_ORDER) { + order = MAX_ORDER - 1; + pr_warn("%s: Device Table too large, reduce its page order to %u\n", + its->msi_chip.of_node->full_name, order); + } + } + + alloc_size = (1 << order) * PAGE_SIZE; + base = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order); if (!base) { err = -ENOMEM; goto out_free; @@ -841,7 +864,7 @@ retry_baser: break; } - val |= (PAGE_SIZE / psz) - 1; + val |= (alloc_size / psz) - 1; writeq_relaxed(val, its->base + GITS_BASER + i * 8); tmp = readq_relaxed(its->base + GITS_BASER + i * 8); @@ -882,7 +905,7 @@ retry_baser: } pr_info("ITS: allocated %d %s @%lx (psz %dK, shr %d)\n", - (int)(PAGE_SIZE / entry_size), + (int)(alloc_size / entry_size), its_base_type_string[type], (unsigned long)virt_to_phys(base), psz / SZ_1K, (int)shr >> GITS_BASER_SHAREABILITY_SHIFT); @@ -1020,8 +1043,9 @@ static void its_cpu_init_collection(void) static struct its_device *its_find_device(struct its_node *its, u32 dev_id) { struct its_device *its_dev = NULL, *tmp; + unsigned long flags; - raw_spin_lock(&its->lock); + raw_spin_lock_irqsave(&its->lock, flags); list_for_each_entry(tmp, &its->its_device_list, entry) { if (tmp->device_id == dev_id) { @@ -1030,7 +1054,7 @@ static struct its_device *its_find_device(struct its_node *its, u32 dev_id) } } - raw_spin_unlock(&its->lock); + raw_spin_unlock_irqrestore(&its->lock, flags); return its_dev; } @@ -1040,6 +1064,7 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id, { struct its_device *dev; unsigned long *lpi_map; + unsigned long flags; void *itt; int lpi_base; int nr_lpis; @@ -1056,7 +1081,7 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id, nr_ites = max(2UL, roundup_pow_of_two(nvecs)); sz = nr_ites * its->ite_size; sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1; - itt = kmalloc(sz, GFP_KERNEL); + itt = kzalloc(sz, GFP_KERNEL); lpi_map = its_lpi_alloc_chunks(nvecs, &lpi_base, &nr_lpis); if (!dev || !itt || !lpi_map) { @@ -1075,9 +1100,9 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id, dev->device_id = dev_id; INIT_LIST_HEAD(&dev->entry); - raw_spin_lock(&its->lock); + raw_spin_lock_irqsave(&its->lock, flags); list_add(&dev->entry, &its->its_device_list); - raw_spin_unlock(&its->lock); + raw_spin_unlock_irqrestore(&its->lock, flags); /* Bind the device to the first possible CPU */ cpu = cpumask_first(cpu_online_mask); @@ -1091,9 +1116,11 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id, static void its_free_device(struct its_device *its_dev) { - raw_spin_lock(&its_dev->its->lock); + unsigned long flags; + + raw_spin_lock_irqsave(&its_dev->its->lock, flags); list_del(&its_dev->entry); - raw_spin_unlock(&its_dev->its->lock); + raw_spin_unlock_irqrestore(&its_dev->its->lock, flags); kfree(its_dev->itt); kfree(its_dev); } @@ -1112,31 +1139,69 @@ static int its_alloc_device_irq(struct its_device *dev, irq_hw_number_t *hwirq) return 0; } +struct its_pci_alias { + struct pci_dev *pdev; + u32 dev_id; + u32 count; +}; + +static int its_pci_msi_vec_count(struct pci_dev *pdev) +{ + int msi, msix; + + msi = max(pci_msi_vec_count(pdev), 0); + msix = max(pci_msix_vec_count(pdev), 0); + + return max(msi, msix); +} + +static int its_get_pci_alias(struct pci_dev *pdev, u16 alias, void *data) +{ + struct its_pci_alias *dev_alias = 
data; + + dev_alias->dev_id = alias; + if (pdev != dev_alias->pdev) + dev_alias->count += its_pci_msi_vec_count(dev_alias->pdev); + + return 0; +} + static int its_msi_prepare(struct irq_domain *domain, struct device *dev, int nvec, msi_alloc_info_t *info) { struct pci_dev *pdev; struct its_node *its; - u32 dev_id; struct its_device *its_dev; + struct its_pci_alias dev_alias; if (!dev_is_pci(dev)) return -EINVAL; pdev = to_pci_dev(dev); - dev_id = PCI_DEVID(pdev->bus->number, pdev->devfn); + dev_alias.pdev = pdev; + dev_alias.count = nvec; + + pci_for_each_dma_alias(pdev, its_get_pci_alias, &dev_alias); its = domain->parent->host_data; - its_dev = its_find_device(its, dev_id); - if (WARN_ON(its_dev)) - return -EINVAL; + its_dev = its_find_device(its, dev_alias.dev_id); + if (its_dev) { + /* + * We already have seen this ID, probably through + * another alias (PCI bridge of some sort). No need to + * create the device. + */ + dev_dbg(dev, "Reusing ITT for devID %x\n", dev_alias.dev_id); + goto out; + } - its_dev = its_create_device(its, dev_id, nvec); + its_dev = its_create_device(its, dev_alias.dev_id, dev_alias.count); if (!its_dev) return -ENOMEM; - dev_dbg(&pdev->dev, "ITT %d entries, %d bits\n", nvec, ilog2(nvec)); - + dev_dbg(&pdev->dev, "ITT %d entries, %d bits\n", + dev_alias.count, ilog2(dev_alias.count)); +out: info->scratchpad[0].ptr = its_dev; info->scratchpad[1].ptr = dev; return 0; @@ -1255,6 +1320,34 @@ static const struct irq_domain_ops its_domain_ops = { .deactivate = its_irq_domain_deactivate, }; +static int its_force_quiescent(void __iomem *base) +{ + u32 count = 1000000; /* 1s */ + u32 val; + + val = readl_relaxed(base + GITS_CTLR); + if (val & GITS_CTLR_QUIESCENT) + return 0; + + /* Disable the generation of all interrupts to this ITS */ + val &= ~GITS_CTLR_ENABLE; + writel_relaxed(val, base + GITS_CTLR); + + /* Poll GITS_CTLR and wait until ITS becomes quiescent */ + while (1) { + val = readl_relaxed(base + GITS_CTLR); + if (val & GITS_CTLR_QUIESCENT) + return 0; + + count--; + if (!count) + return -EBUSY; + + cpu_relax(); + udelay(1); + } +} + static int its_probe(struct device_node *node, struct irq_domain *parent) { struct resource res; @@ -1283,6 +1376,13 @@ static int its_probe(struct device_node *node, struct irq_domain *parent) goto out_unmap; } + err = its_force_quiescent(its_base); + if (err) { + pr_warn("%s: failed to quiesce, giving up\n", + node->full_name); + goto out_unmap; + } + pr_info("ITS: %s\n", node->full_name); its = kzalloc(sizeof(*its), GFP_KERNEL); @@ -1323,7 +1423,7 @@ static int its_probe(struct device_node *node, struct irq_domain *parent) writeq_relaxed(baser, its->base + GITS_CBASER); tmp = readq_relaxed(its->base + GITS_CBASER); writeq_relaxed(0, its->base + GITS_CWRITER); - writel_relaxed(1, its->base + GITS_CTLR); + writel_relaxed(GITS_CTLR_ENABLE, its->base + GITS_CTLR); if ((tmp ^ baser) & GITS_BASER_SHAREABILITY_MASK) { pr_info("ITS: using cache flushing for cmd queue\n"); @@ -1382,12 +1482,11 @@ static bool gic_rdists_supports_plpis(void) int its_cpu_init(void) { - if (!gic_rdists_supports_plpis()) { - pr_info("CPU%d: LPIs not supported\n", smp_processor_id()); - return -ENXIO; - } - if (!list_empty(&its_nodes)) { + if (!gic_rdists_supports_plpis()) { + pr_info("CPU%d: LPIs not supported\n", smp_processor_id()); + return -ENXIO; + } its_cpu_init_lpis(); its_cpu_init_collection(); } diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c index 1c6dea2fbc34..fd8850def1b8 100644 --- a/drivers/irqchip/irq-gic-v3.c +++ 
b/drivers/irqchip/irq-gic-v3.c @@ -466,7 +466,7 @@ static u16 gic_compute_target_list(int *base_cpu, const struct cpumask *mask, tlist |= 1 << (mpidr & 0xf); cpu = cpumask_next(cpu, mask); - if (cpu == nr_cpu_ids) + if (cpu >= nr_cpu_ids) goto out; mpidr = cpu_logical_map(cpu); diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c index 4634cf7d0ec3..471e1cdc1933 100644 --- a/drivers/irqchip/irq-gic.c +++ b/drivers/irqchip/irq-gic.c @@ -154,23 +154,25 @@ static inline unsigned int gic_irq(struct irq_data *d) static void gic_mask_irq(struct irq_data *d) { u32 mask = 1 << (gic_irq(d) % 32); + unsigned long flags; - raw_spin_lock(&irq_controller_lock); + raw_spin_lock_irqsave(&irq_controller_lock, flags); writel_relaxed(mask, gic_dist_base(d) + GIC_DIST_ENABLE_CLEAR + (gic_irq(d) / 32) * 4); if (gic_arch_extn.irq_mask) gic_arch_extn.irq_mask(d); - raw_spin_unlock(&irq_controller_lock); + raw_spin_unlock_irqrestore(&irq_controller_lock, flags); } static void gic_unmask_irq(struct irq_data *d) { u32 mask = 1 << (gic_irq(d) % 32); + unsigned long flags; - raw_spin_lock(&irq_controller_lock); + raw_spin_lock_irqsave(&irq_controller_lock, flags); if (gic_arch_extn.irq_unmask) gic_arch_extn.irq_unmask(d); writel_relaxed(mask, gic_dist_base(d) + GIC_DIST_ENABLE_SET + (gic_irq(d) / 32) * 4); - raw_spin_unlock(&irq_controller_lock); + raw_spin_unlock_irqrestore(&irq_controller_lock, flags); } static void gic_eoi_irq(struct irq_data *d) @@ -188,6 +190,7 @@ static int gic_set_type(struct irq_data *d, unsigned int type) { void __iomem *base = gic_dist_base(d); unsigned int gicirq = gic_irq(d); + unsigned long flags; int ret; /* Interrupt configuration for SGIs can't be changed */ @@ -199,14 +202,14 @@ static int gic_set_type(struct irq_data *d, unsigned int type) type != IRQ_TYPE_EDGE_RISING) return -EINVAL; - raw_spin_lock(&irq_controller_lock); + raw_spin_lock_irqsave(&irq_controller_lock, flags); if (gic_arch_extn.irq_set_type) gic_arch_extn.irq_set_type(d, type); ret = gic_configure_irq(gicirq, type, base, NULL); - raw_spin_unlock(&irq_controller_lock); + raw_spin_unlock_irqrestore(&irq_controller_lock, flags); return ret; } @@ -227,6 +230,7 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val, void __iomem *reg = gic_dist_base(d) + GIC_DIST_TARGET + (gic_irq(d) & ~3); unsigned int cpu, shift = (gic_irq(d) % 4) * 8; u32 val, mask, bit; + unsigned long flags; if (!force) cpu = cpumask_any_and(mask_val, cpu_online_mask); @@ -236,12 +240,12 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val, if (cpu >= NR_GIC_CPU_IF || cpu >= nr_cpu_ids) return -EINVAL; - raw_spin_lock(&irq_controller_lock); + raw_spin_lock_irqsave(&irq_controller_lock, flags); mask = 0xff << shift; bit = gic_cpu_map[cpu] << shift; val = readl_relaxed(reg) & ~mask; writel_relaxed(val | bit, reg); - raw_spin_unlock(&irq_controller_lock); + raw_spin_unlock_irqrestore(&irq_controller_lock, flags); return IRQ_SET_MASK_OK; } diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig index 5b76a173cd95..5897d8d8fa5a 100644 --- a/drivers/mtd/nand/Kconfig +++ b/drivers/mtd/nand/Kconfig @@ -526,6 +526,7 @@ config MTD_NAND_SUNXI config MTD_NAND_HISI504 tristate "Support for NAND controller on Hisilicon SoC Hip04" + depends on HAS_DMA help Enables support for NAND controller on Hisilicon SoC Hip04. 
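The its_alloc_tables() change above replaces the single-page device table with one sized from the DEVBITS field of GITS_TYPER, clamped to the largest order the page allocator can provide. A minimal sketch of that sizing calculation, under the same assumptions as the patch (its_device_table_order is an illustrative helper name, not part of the patch):

#include <linux/mm.h>
#include <linux/mmzone.h>

/*
 * Sketch of the sizing logic: the device table needs 2^devbits entries
 * of entry_size bytes each; get_order() rounds that up to whole pages,
 * and the order is capped so __get_free_pages() is never asked for more
 * than MAX_ORDER - 1 (the table may then not cover every device ID).
 */
static int its_device_table_order(u32 devbits, u64 entry_size)
{
	int order = get_order((1UL << devbits) * entry_size);

	if (order >= MAX_ORDER)
		order = MAX_ORDER - 1;

	return order;
}

The resulting alloc_size is (1 << order) * PAGE_SIZE, and the GITS_BASER Size field is then programmed as (alloc_size / psz) - 1, matching the hunk above; with a sparse ID space this can waste a lot of memory, as the patch's own comment concedes.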
diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c index 96b0b1d27df1..10b1f7a4fe50 100644 --- a/drivers/mtd/nand/pxa3xx_nand.c +++ b/drivers/mtd/nand/pxa3xx_nand.c @@ -480,6 +480,42 @@ static void disable_int(struct pxa3xx_nand_info *info, uint32_t int_mask) nand_writel(info, NDCR, ndcr | int_mask); } +static void drain_fifo(struct pxa3xx_nand_info *info, void *data, int len) +{ + if (info->ecc_bch) { + int timeout; + + /* + * According to the datasheet, when reading from NDDB + * with BCH enabled, after each 32-byte read, we + * have to make sure that the NDSR.RDDREQ bit is set. + * + * Drain the FIFO eight 32-bit reads at a time, and skip + * the polling on the last read. + */ + while (len > 8) { + __raw_readsl(info->mmio_base + NDDB, data, 8); + + for (timeout = 0; + !(nand_readl(info, NDSR) & NDSR_RDDREQ); + timeout++) { + if (timeout >= 5) { + dev_err(&info->pdev->dev, + "Timeout on RDDREQ while draining the FIFO\n"); + return; + } + + mdelay(1); + } + + data += 32; + len -= 8; + } + } + + __raw_readsl(info->mmio_base + NDDB, data, len); +} + static void handle_data_pio(struct pxa3xx_nand_info *info) { unsigned int do_bytes = min(info->data_size, info->chunk_size); @@ -496,14 +532,14 @@ static void handle_data_pio(struct pxa3xx_nand_info *info) DIV_ROUND_UP(info->oob_size, 4)); break; case STATE_PIO_READING: - __raw_readsl(info->mmio_base + NDDB, - info->data_buff + info->data_buff_pos, - DIV_ROUND_UP(do_bytes, 4)); + drain_fifo(info, + info->data_buff + info->data_buff_pos, + DIV_ROUND_UP(do_bytes, 4)); if (info->oob_size > 0) - __raw_readsl(info->mmio_base + NDDB, - info->oob_buff + info->oob_buff_pos, - DIV_ROUND_UP(info->oob_size, 4)); + drain_fifo(info, + info->oob_buff + info->oob_buff_pos, + DIV_ROUND_UP(info->oob_size, 4)); break; default: dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__, @@ -1572,6 +1608,8 @@ static int alloc_nand_resource(struct platform_device *pdev) int ret, irq, cs; pdata = dev_get_platdata(&pdev->dev); + if (pdata->num_cs <= 0) + return -ENODEV; info = devm_kzalloc(&pdev->dev, sizeof(*info) + (sizeof(*mtd) + sizeof(*host)) * pdata->num_cs, GFP_KERNEL); if (!info) diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c index 3c82e02e3dae..b0f69248cb71 100644 --- a/drivers/net/can/dev.c +++ b/drivers/net/can/dev.c @@ -579,6 +579,10 @@ struct sk_buff *alloc_can_skb(struct net_device *dev, struct can_frame **cf) skb->pkt_type = PACKET_BROADCAST; skb->ip_summed = CHECKSUM_UNNECESSARY; + skb_reset_mac_header(skb); + skb_reset_network_header(skb); + skb_reset_transport_header(skb); + can_skb_reserve(skb); can_skb_prv(skb)->ifindex = dev->ifindex; @@ -603,6 +607,10 @@ struct sk_buff *alloc_canfd_skb(struct net_device *dev, skb->pkt_type = PACKET_BROADCAST; skb->ip_summed = CHECKSUM_UNNECESSARY; + skb_reset_mac_header(skb); + skb_reset_network_header(skb); + skb_reset_transport_header(skb); + can_skb_reserve(skb); can_skb_prv(skb)->ifindex = dev->ifindex; diff --git a/drivers/net/can/usb/kvaser_usb.c b/drivers/net/can/usb/kvaser_usb.c index 2928f7003041..a316fa4b91ab 100644 --- a/drivers/net/can/usb/kvaser_usb.c +++ b/drivers/net/can/usb/kvaser_usb.c @@ -14,6 +14,7 @@ * Copyright (C) 2015 Valeo S.A.
*/ +#include <linux/kernel.h> #include <linux/completion.h> #include <linux/module.h> #include <linux/netdevice.h> @@ -584,8 +585,15 @@ static int kvaser_usb_wait_msg(const struct kvaser_usb *dev, u8 id, while (pos <= actual_len - MSG_HEADER_LEN) { tmp = buf + pos; - if (!tmp->len) - break; + /* Handle messages crossing the USB endpoint max packet + * size boundary. Check kvaser_usb_read_bulk_callback() + * for further details. + */ + if (tmp->len == 0) { + pos = round_up(pos, + dev->bulk_in->wMaxPacketSize); + continue; + } if (pos + tmp->len > actual_len) { dev_err(dev->udev->dev.parent, @@ -787,7 +795,6 @@ static int kvaser_usb_simple_msg_async(struct kvaser_usb_net_priv *priv, netdev_err(netdev, "Error transmitting URB\n"); usb_unanchor_urb(urb); usb_free_urb(urb); - kfree(buf); return err; } @@ -1317,8 +1324,19 @@ static void kvaser_usb_read_bulk_callback(struct urb *urb) while (pos <= urb->actual_length - MSG_HEADER_LEN) { msg = urb->transfer_buffer + pos; - if (!msg->len) - break; + /* The Kvaser firmware can only read and write messages that + * do not cross the USB's endpoint wMaxPacketSize boundary. + * If a follow-up command crosses such a boundary, firmware puts + * a placeholder zero-length command in its place, then aligns + * the real command to the next max packet size. + * + * Handle such cases, or we're going to miss a significant + * number of events in case of a heavy rx load on the bus. + */ + if (msg->len == 0) { + pos = round_up(pos, dev->bulk_in->wMaxPacketSize); + continue; + } if (pos + msg->len > urb->actual_length) { dev_err(dev->udev->dev.parent, "Format error\n"); @@ -1326,7 +1344,6 @@ static void kvaser_usb_read_bulk_callback(struct urb *urb) } kvaser_usb_handle_message(dev, msg); - pos += msg->len; } @@ -1615,8 +1632,7 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb, struct urb *urb; void *buf; struct kvaser_msg *msg; - int i, err; - int ret = NETDEV_TX_OK; + int i, err, ret = NETDEV_TX_OK; u8 *msg_tx_can_flags = NULL; /* GCC */ if (can_dropped_invalid_skb(netdev, skb)) @@ -1634,7 +1650,7 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb, if (!buf) { stats->tx_dropped++; dev_kfree_skb(skb); - goto nobufmem; + goto freeurb; } msg = buf; @@ -1681,8 +1697,10 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb, /* This should never happen; it implies a flow control bug */ if (!context) { netdev_warn(netdev, "cannot find free context\n"); + + kfree(buf); ret = NETDEV_TX_BUSY; - goto releasebuf; + goto freeurb; } context->priv = priv; @@ -1719,16 +1737,12 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb, else netdev_warn(netdev, "Failed tx_urb %d\n", err); - goto releasebuf; + goto freeurb; } - usb_free_urb(urb); - - return NETDEV_TX_OK; + ret = NETDEV_TX_OK; -releasebuf: - kfree(buf); -nobufmem: +freeurb: usb_free_urb(urb); return ret; } diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c index 962c3f027383..0bac0f14edc3 100644 --- a/drivers/net/can/usb/peak_usb/pcan_usb_fd.c +++ b/drivers/net/can/usb/peak_usb/pcan_usb_fd.c @@ -879,6 +879,10 @@ static int pcan_usb_fd_init(struct peak_usb_device *dev) pdev->usb_if = ppdev->usb_if; pdev->cmd_buffer_addr = ppdev->cmd_buffer_addr; + + /* do a copy of the ctrlmode[_supported] too */ + dev->can.ctrlmode = ppdev->dev.can.ctrlmode; + dev->can.ctrlmode_supported = ppdev->dev.can.ctrlmode_supported; } pdev->usb_if->dev[dev->ctrl_idx] = dev; diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c index 869d97fcf781..b927021c6c40 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c @@ -593,7 +593,7 @@ static int xgene_enet_reset(struct xgene_enet_pdata *pdata) if (!xgene_ring_mgr_init(pdata)) return -ENODEV; - if (!efi_enabled(EFI_BOOT)) { + if (pdata->clk) { clk_prepare_enable(pdata->clk); clk_disable_unprepare(pdata->clk); clk_prepare_enable(pdata->clk); diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c index 4de62b210c85..635a83be7e5e 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c @@ -1025,6 +1025,8 @@ static int xgene_enet_remove(struct platform_device *pdev) #ifdef CONFIG_ACPI static const struct acpi_device_id xgene_enet_acpi_match[] = { { "APMC0D05", }, + { "APMC0D30", }, + { "APMC0D31", }, { } }; MODULE_DEVICE_TABLE(acpi, xgene_enet_acpi_match); @@ -1033,6 +1035,8 @@ MODULE_DEVICE_TABLE(acpi, xgene_enet_acpi_match); #ifdef CONFIG_OF static struct of_device_id xgene_enet_of_match[] = { {.compatible = "apm,xgene-enet",}, + {.compatible = "apm,xgene1-sgenet",}, + {.compatible = "apm,xgene1-xgenet",}, {}, }; diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c index 21206d33b638..a7f2cc3e485e 100644 --- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c +++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c @@ -486,7 +486,7 @@ static int bcm_enet_poll(struct napi_struct *napi, int budget) { struct bcm_enet_priv *priv; struct net_device *dev; - int tx_work_done, rx_work_done; + int rx_work_done; priv = container_of(napi, struct bcm_enet_priv, napi); dev = priv->net_dev; @@ -498,14 +498,14 @@ static int bcm_enet_poll(struct napi_struct *napi, int budget) ENETDMAC_IR, priv->tx_chan); /* reclaim sent skb */ - tx_work_done = bcm_enet_tx_reclaim(dev, 0); + bcm_enet_tx_reclaim(dev, 0); spin_lock(&priv->rx_lock); rx_work_done = bcm_enet_receive_queue(dev, budget); spin_unlock(&priv->rx_lock); - if (rx_work_done >= budget || tx_work_done > 0) { - /* rx/tx queue is not yet empty/clean */ + if (rx_work_done >= budget) { + /* rx queue is not yet empty/clean */ return rx_work_done; } diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c index 676ffe093180..0469f72c6e7e 100644 --- a/drivers/net/ethernet/broadcom/bgmac.c +++ b/drivers/net/ethernet/broadcom/bgmac.c @@ -302,9 +302,6 @@ static int bgmac_dma_rx_skb_for_slot(struct bgmac *bgmac, slot->skb = skb; slot->dma_addr = dma_addr; - if (slot->dma_addr & 0xC0000000) - bgmac_warn(bgmac, "DMA address using 0xC0000000 bit(s), it may need translation trick\n"); - return 0; } @@ -505,8 +502,6 @@ static int bgmac_dma_alloc(struct bgmac *bgmac) ring->mmio_base); goto err_dma_free; } - if (ring->dma_base & 0xC0000000) - bgmac_warn(bgmac, "DMA address using 0xC0000000 bit(s), it may need translation trick\n"); ring->unaligned = bgmac_dma_unaligned(bgmac, ring, BGMAC_DMA_RING_TX); @@ -536,8 +531,6 @@ static int bgmac_dma_alloc(struct bgmac *bgmac) err = -ENOMEM; goto err_dma_free; } - if (ring->dma_base & 0xC0000000) - bgmac_warn(bgmac, "DMA address using 0xC0000000 bit(s), it may need translation trick\n"); ring->unaligned = bgmac_dma_unaligned(bgmac, ring, BGMAC_DMA_RING_RX); diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c index 7155e1d2c208..bef750a09027 100644 --- 
a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c @@ -12722,6 +12722,9 @@ static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev, pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, PCICFG_VENDOR_ID_OFFSET); + /* Set PCIe reset type to fundamental for EEH recovery */ + pdev->needs_freset = 1; + /* AER (Advanced Error reporting) configuration */ rc = pci_enable_pcie_error_reporting(pdev); if (!rc) diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c index 149a0d70c108..b97122926d3a 100644 --- a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c +++ b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c @@ -73,15 +73,17 @@ int bcmgenet_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) if (wol->wolopts & ~(WAKE_MAGIC | WAKE_MAGICSECURE)) return -EINVAL; + reg = bcmgenet_umac_readl(priv, UMAC_MPD_CTRL); if (wol->wolopts & WAKE_MAGICSECURE) { bcmgenet_umac_writel(priv, get_unaligned_be16(&wol->sopass[0]), UMAC_MPD_PW_MS); bcmgenet_umac_writel(priv, get_unaligned_be32(&wol->sopass[2]), UMAC_MPD_PW_LS); - reg = bcmgenet_umac_readl(priv, UMAC_MPD_CTRL); reg |= MPD_PW_EN; - bcmgenet_umac_writel(priv, reg, UMAC_MPD_CTRL); + } else { + reg &= ~MPD_PW_EN; } + bcmgenet_umac_writel(priv, reg, UMAC_MPD_CTRL); /* Flag the device and relevant IRQ as wakeup capable */ if (wol->wolopts) { diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c index ad76b8e35a00..81d41539fcba 100644 --- a/drivers/net/ethernet/cadence/macb.c +++ b/drivers/net/ethernet/cadence/macb.c @@ -2113,17 +2113,17 @@ static const struct net_device_ops macb_netdev_ops = { }; #if defined(CONFIG_OF) -static struct macb_config pc302gem_config = { +static const struct macb_config pc302gem_config = { .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE, .dma_burst_length = 16, }; -static struct macb_config sama5d3_config = { +static const struct macb_config sama5d3_config = { .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE, .dma_burst_length = 16, }; -static struct macb_config sama5d4_config = { +static const struct macb_config sama5d4_config = { .caps = 0, .dma_burst_length = 4, }; @@ -2154,7 +2154,7 @@ static void macb_configure_caps(struct macb *bp) if (bp->pdev->dev.of_node) { match = of_match_node(macb_dt_ids, bp->pdev->dev.of_node); if (match && match->data) { - config = (const struct macb_config *)match->data; + config = match->data; bp->caps = config->caps; /* diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h index 31dc080f2437..ff85619a9732 100644 --- a/drivers/net/ethernet/cadence/macb.h +++ b/drivers/net/ethernet/cadence/macb.h @@ -351,7 +351,7 @@ /* Bitfields in MID */ #define MACB_IDNUM_OFFSET 16 -#define MACB_IDNUM_SIZE 16 +#define MACB_IDNUM_SIZE 12 #define MACB_REV_OFFSET 0 #define MACB_REV_SIZE 16 diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 9bb6220663b2..99492b7e3713 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -1597,7 +1597,7 @@ fec_enet_interrupt(int irq, void *dev_id) writel(int_events, fep->hwp + FEC_IEVENT); fec_enet_collect_events(fep, int_events); - if (fep->work_tx || fep->work_rx) { + if ((fep->work_tx || fep->work_rx) && fep->link) { ret = IRQ_HANDLED; if (napi_schedule_prep(&fep->napi)) { @@ -3383,7 +3383,6 @@ fec_drv_remove(struct platform_device *pdev) 
regulator_disable(fep->reg_phy); if (fep->ptp_clock) ptp_clock_unregister(fep->ptp_clock); - fec_enet_clk_enable(ndev, false); of_node_put(fep->phy_node); free_netdev(ndev); diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c index 178e54028d10..7bf3682cdf47 100644 --- a/drivers/net/ethernet/freescale/gianfar.c +++ b/drivers/net/ethernet/freescale/gianfar.c @@ -747,6 +747,18 @@ static int gfar_parse_group(struct device_node *np, return 0; } +static int gfar_of_group_count(struct device_node *np) +{ + struct device_node *child; + int num = 0; + + for_each_available_child_of_node(np, child) + if (!of_node_cmp(child->name, "queue-group")) + num++; + + return num; +} + static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev) { const char *model; @@ -784,7 +796,7 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev) num_rx_qs = 1; } else { /* MQ_MG_MODE */ /* get the actual number of supported groups */ - unsigned int num_grps = of_get_available_child_count(np); + unsigned int num_grps = gfar_of_group_count(np); if (num_grps == 0 || num_grps > MAXGROUPS) { dev_err(&ofdev->dev, "Invalid # of int groups(%d)\n", @@ -851,7 +863,10 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev) /* Parse and initialize group specific information */ if (priv->mode == MQ_MG_MODE) { - for_each_child_of_node(np, child) { + for_each_available_child_of_node(np, child) { + if (of_node_cmp(child->name, "queue-group")) + continue; + err = gfar_parse_group(child, priv, model); if (err) goto err_grp_init; diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c index 209ee1b27f8d..5d093dc0f5f5 100644 --- a/drivers/net/ethernet/smsc/smc91x.c +++ b/drivers/net/ethernet/smsc/smc91x.c @@ -92,6 +92,7 @@ static const char version[] = #include "smc91x.h" #if defined(CONFIG_ASSABET_NEPONSET) +#include <mach/assabet.h> #include <mach/neponset.h> #endif diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c index fb846ebba1d9..f9b42f11950f 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c @@ -272,6 +272,37 @@ static int stmmac_pltfr_probe(struct platform_device *pdev) struct stmmac_priv *priv = NULL; struct plat_stmmacenet_data *plat_dat = NULL; const char *mac = NULL; + int irq, wol_irq, lpi_irq; + + /* Get IRQ information early so that we can ask for a deferred + * probe if needed before we go too far with resource allocation. + */ + irq = platform_get_irq_byname(pdev, "macirq"); + if (irq < 0) { + if (irq != -EPROBE_DEFER) { + dev_err(dev, + "MAC IRQ configuration information not found\n"); + } + return irq; + } + + /* On some platforms e.g.
SPEAr the wake up irq differs from the mac irq + * The external wake up irq can be passed through the platform code + * named "eth_wake_irq" + * + * If the wake up interrupt is not passed from the platform, + * the driver will continue to use the mac irq (ndev->irq) + */ + wol_irq = platform_get_irq_byname(pdev, "eth_wake_irq"); + if (wol_irq < 0) { + if (wol_irq == -EPROBE_DEFER) + return -EPROBE_DEFER; + wol_irq = irq; + } + + lpi_irq = platform_get_irq_byname(pdev, "eth_lpi"); + if (lpi_irq == -EPROBE_DEFER) + return -EPROBE_DEFER; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); addr = devm_ioremap_resource(dev, res); @@ -323,39 +354,15 @@ static int stmmac_pltfr_probe(struct platform_device *pdev) return PTR_ERR(priv); } + /* Copy IRQ values to priv structure which is now available */ + priv->dev->irq = irq; + priv->wol_irq = wol_irq; + priv->lpi_irq = lpi_irq; + /* Get MAC address if available (DT) */ if (mac) memcpy(priv->dev->dev_addr, mac, ETH_ALEN); - /* Get the MAC information */ - priv->dev->irq = platform_get_irq_byname(pdev, "macirq"); - if (priv->dev->irq < 0) { - if (priv->dev->irq != -EPROBE_DEFER) { - netdev_err(priv->dev, - "MAC IRQ configuration information not found\n"); - } - return priv->dev->irq; - } - - /* - * On some platforms e.g. SPEAr the wake up irq differs from the mac irq - * The external wake up irq can be passed through the platform code - * named as "eth_wake_irq" - * - * In case the wake up interrupt is not passed from the platform - * so the driver will continue to use the mac irq (ndev->irq) - */ - priv->wol_irq = platform_get_irq_byname(pdev, "eth_wake_irq"); - if (priv->wol_irq < 0) { - if (priv->wol_irq == -EPROBE_DEFER) - return -EPROBE_DEFER; - priv->wol_irq = priv->dev->irq; - } - - priv->lpi_irq = platform_get_irq_byname(pdev, "eth_lpi"); - if (priv->lpi_irq == -EPROBE_DEFER) - return -EPROBE_DEFER; - platform_set_drvdata(pdev, priv->dev); pr_debug("STMMAC platform driver registration completed"); diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c index f1ee71e22241..7d394846afc2 100644 --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c @@ -1730,11 +1730,11 @@ static int team_set_mac_address(struct net_device *dev, void *p) if (dev->type == ARPHRD_ETHER && !is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); - rcu_read_lock(); - list_for_each_entry_rcu(port, &team->port_list, list) + mutex_lock(&team->lock); + list_for_each_entry(port, &team->port_list, list) if (team->ops.port_change_dev_addr) team->ops.port_change_dev_addr(team, port); - rcu_read_unlock(); + mutex_unlock(&team->lock); return 0; } diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c index f38227afe099..3aa8648080c8 100644 --- a/drivers/net/xen-netback/interface.c +++ b/drivers/net/xen-netback/interface.c @@ -340,12 +340,11 @@ static void xenvif_get_ethtool_stats(struct net_device *dev, unsigned int num_queues = vif->num_queues; int i; unsigned int queue_index; - struct xenvif_stats *vif_stats; for (i = 0; i < ARRAY_SIZE(xenvif_stats); i++) { unsigned long accum = 0; for (queue_index = 0; queue_index < num_queues; ++queue_index) { - vif_stats = &vif->queues[queue_index].stats; + void *vif_stats = &vif->queues[queue_index].stats; accum += *(unsigned long *)(vif_stats + xenvif_stats[i].offset); } data[i] = accum; diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c index c4d68d768408..cab9f5257f57 100644 ---
a/drivers/net/xen-netback/netback.c +++ b/drivers/net/xen-netback/netback.c @@ -1349,7 +1349,7 @@ static int xenvif_handle_frag_list(struct xenvif_queue *queue, struct sk_buff *s { unsigned int offset = skb_headlen(skb); skb_frag_t frags[MAX_SKB_FRAGS]; - int i; + int i, f; struct ubuf_info *uarg; struct sk_buff *nskb = skb_shinfo(skb)->frag_list; @@ -1389,23 +1389,25 @@ static int xenvif_handle_frag_list(struct xenvif_queue *queue, struct sk_buff *s frags[i].page_offset = 0; skb_frag_size_set(&frags[i], len); } - /* swap out with old one */ - memcpy(skb_shinfo(skb)->frags, - frags, - i * sizeof(skb_frag_t)); - skb_shinfo(skb)->nr_frags = i; - skb->truesize += i * PAGE_SIZE; - /* remove traces of mapped pages and frag_list */ + /* Copied all the bits from the frag list -- free it. */ skb_frag_list_init(skb); + xenvif_skb_zerocopy_prepare(queue, nskb); + kfree_skb(nskb); + + /* Release all the original (foreign) frags. */ + for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) + skb_frag_unref(skb, f); uarg = skb_shinfo(skb)->destructor_arg; /* increase inflight counter to offset decrement in callback */ atomic_inc(&queue->inflight_packets); uarg->callback(uarg, true); skb_shinfo(skb)->destructor_arg = NULL; - xenvif_skb_zerocopy_prepare(queue, nskb); - kfree_skb(nskb); + /* Fill the skb with the new (local) frags. */ + memcpy(skb_shinfo(skb)->frags, frags, i * sizeof(skb_frag_t)); + skb_shinfo(skb)->nr_frags = i; + skb->truesize += i * PAGE_SIZE; return 0; } diff --git a/drivers/of/Kconfig b/drivers/of/Kconfig index 38d1c51f58b1..7bcaeec876c0 100644 --- a/drivers/of/Kconfig +++ b/drivers/of/Kconfig @@ -84,8 +84,7 @@ config OF_RESOLVE bool config OF_OVERLAY - bool - depends on OF + bool "Device Tree overlays" select OF_DYNAMIC select OF_RESOLVE diff --git a/drivers/of/base.c b/drivers/of/base.c index 0a8aeb8523fe..adb8764861c0 100644 --- a/drivers/of/base.c +++ b/drivers/of/base.c @@ -714,16 +714,17 @@ static struct device_node *__of_find_node_by_path(struct device_node *parent, const char *path) { struct device_node *child; - int len = strchrnul(path, '/') - path; - int term; + int len; + const char *end; + end = strchr(path, ':'); + if (!end) + end = strchrnul(path, '/'); + + len = end - path; if (!len) return NULL; - term = strchrnul(path, ':') - path; - if (term < len) - len = term; - __for_each_child_of_node(parent, child) { const char *name = strrchr(child->full_name, '/'); if (WARN(!name, "malformed device_node %s\n", child->full_name)) @@ -768,8 +769,12 @@ struct device_node *of_find_node_opts_by_path(const char *path, const char **opt /* The path could begin with an alias */ if (*path != '/') { - char *p = strchrnul(path, '/'); - int len = separator ? 
separator - path : p - path; + int len; + const char *p = separator; + + if (!p) + p = strchrnul(path, '/'); + len = p - path; /* of_aliases must not be NULL */ if (!of_aliases) @@ -794,6 +799,8 @@ struct device_node *of_find_node_opts_by_path(const char *path, const char **opt path++; /* Increment past '/' delimiter */ np = __of_find_node_by_path(np, path); path = strchrnul(path, '/'); + if (separator && separator < path) + break; } raw_spin_unlock_irqrestore(&devtree_lock, flags); return np; @@ -1886,8 +1893,10 @@ void of_alias_scan(void * (*dt_alloc)(u64 size, u64 align)) name = of_get_property(of_chosen, "linux,stdout-path", NULL); if (IS_ENABLED(CONFIG_PPC) && !name) name = of_get_property(of_aliases, "stdout", NULL); - if (name) + if (name) { of_stdout = of_find_node_opts_by_path(name, &of_stdout_options); + add_preferred_console("stdout-path", 0, NULL); + } } if (!of_aliases) diff --git a/drivers/of/overlay.c b/drivers/of/overlay.c index 352b4f28f82c..dee9270ba547 100644 --- a/drivers/of/overlay.c +++ b/drivers/of/overlay.c @@ -19,6 +19,7 @@ #include <linux/string.h> #include <linux/slab.h> #include <linux/err.h> +#include <linux/idr.h> #include "of_private.h" @@ -85,7 +86,7 @@ static int of_overlay_apply_single_device_node(struct of_overlay *ov, struct device_node *target, struct device_node *child) { const char *cname; - struct device_node *tchild, *grandchild; + struct device_node *tchild; int ret = 0; cname = kbasename(child->full_name); diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c index 0cf9a236d438..aba8946cac46 100644 --- a/drivers/of/unittest.c +++ b/drivers/of/unittest.c @@ -92,6 +92,11 @@ static void __init of_selftest_find_node_by_name(void) "option path test failed\n"); of_node_put(np); + np = of_find_node_opts_by_path("/testcase-data:test/option", &options); + selftest(np && !strcmp("test/option", options), + "option path test, subcase #1 failed\n"); + of_node_put(np); + np = of_find_node_opts_by_path("/testcase-data:testoption", NULL); selftest(np, "NULL option path test failed\n"); of_node_put(np); @@ -102,6 +107,12 @@ static void __init of_selftest_find_node_by_name(void) "option alias path test failed\n"); of_node_put(np); + np = of_find_node_opts_by_path("testcase-alias:test/alias/option", + &options); + selftest(np && !strcmp("test/alias/option", options), + "option alias path test, subcase #1 failed\n"); + of_node_put(np); + np = of_find_node_opts_by_path("testcase-alias:testaliasoption", NULL); selftest(np, "NULL option alias path test failed\n"); of_node_put(np); @@ -378,9 +389,9 @@ static void __init of_selftest_property_string(void) rc = of_property_match_string(np, "phandle-list-names", "first"); selftest(rc == 0, "first expected:0 got:%i\n", rc); rc = of_property_match_string(np, "phandle-list-names", "second"); - selftest(rc == 1, "second expected:0 got:%i\n", rc); + selftest(rc == 1, "second expected:1 got:%i\n", rc); rc = of_property_match_string(np, "phandle-list-names", "third"); - selftest(rc == 2, "third expected:0 got:%i\n", rc); + selftest(rc == 2, "third expected:2 got:%i\n", rc); rc = of_property_match_string(np, "phandle-list-names", "fourth"); selftest(rc == -ENODATA, "unmatched string; rc=%i\n", rc); rc = of_property_match_string(np, "missing-property", "blah"); @@ -478,7 +489,6 @@ static void __init of_selftest_changeset(void) struct device_node *n1, *n2, *n21, *nremove, *parent, *np; struct of_changeset chgset; - of_changeset_init(&chgset); n1 = __of_node_dup(NULL, "/testcase-data/changeset/n1"); selftest(n1, "testcase setup 
failure\n"); n2 = __of_node_dup(NULL, "/testcase-data/changeset/n2"); @@ -979,7 +989,7 @@ static int of_path_platform_device_exists(const char *path) return pdev != NULL; } -#if IS_ENABLED(CONFIG_I2C) +#if IS_BUILTIN(CONFIG_I2C) /* get the i2c client device instantiated at the path */ static struct i2c_client *of_path_to_i2c_client(const char *path) @@ -1445,7 +1455,7 @@ static void of_selftest_overlay_11(void) return; } -#if IS_ENABLED(CONFIG_I2C) && IS_ENABLED(CONFIG_OF_OVERLAY) +#if IS_BUILTIN(CONFIG_I2C) && IS_ENABLED(CONFIG_OF_OVERLAY) struct selftest_i2c_bus_data { struct platform_device *pdev; @@ -1584,7 +1594,7 @@ static struct i2c_driver selftest_i2c_dev_driver = { .id_table = selftest_i2c_dev_id, }; -#if IS_ENABLED(CONFIG_I2C_MUX) +#if IS_BUILTIN(CONFIG_I2C_MUX) struct selftest_i2c_mux_data { int nchans; @@ -1695,7 +1705,7 @@ static int of_selftest_overlay_i2c_init(void) "could not register selftest i2c bus driver\n")) return ret; -#if IS_ENABLED(CONFIG_I2C_MUX) +#if IS_BUILTIN(CONFIG_I2C_MUX) ret = i2c_add_driver(&selftest_i2c_mux_driver); if (selftest(ret == 0, "could not register selftest i2c mux driver\n")) @@ -1707,7 +1717,7 @@ static int of_selftest_overlay_i2c_init(void) static void of_selftest_overlay_i2c_cleanup(void) { -#if IS_ENABLED(CONFIG_I2C_MUX) +#if IS_BUILTIN(CONFIG_I2C_MUX) i2c_del_driver(&selftest_i2c_mux_driver); #endif platform_driver_unregister(&selftest_i2c_bus_driver); @@ -1814,7 +1824,7 @@ static void __init of_selftest_overlay(void) of_selftest_overlay_10(); of_selftest_overlay_11(); -#if IS_ENABLED(CONFIG_I2C) +#if IS_BUILTIN(CONFIG_I2C) if (selftest(of_selftest_overlay_i2c_init() == 0, "i2c init failed\n")) goto out; diff --git a/drivers/pci/host/pci-xgene.c b/drivers/pci/host/pci-xgene.c index aab55474dd0d..ee082c0366ec 100644 --- a/drivers/pci/host/pci-xgene.c +++ b/drivers/pci/host/pci-xgene.c @@ -127,7 +127,7 @@ static bool xgene_pcie_hide_rc_bars(struct pci_bus *bus, int offset) return false; } -static int xgene_pcie_map_bus(struct pci_bus *bus, unsigned int devfn, +static void __iomem *xgene_pcie_map_bus(struct pci_bus *bus, unsigned int devfn, int offset) { struct xgene_pcie_port *port = bus->sysdata; @@ -137,7 +137,7 @@ static int xgene_pcie_map_bus(struct pci_bus *bus, unsigned int devfn, return NULL; xgene_pcie_set_rtdid_reg(bus, devfn); - return xgene_pcie_get_cfg_base(bus); + return xgene_pcie_get_cfg_base(bus) + offset; } static struct pci_ops xgene_pcie_ops = { diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c index aa012fb3834b..312f23a8429c 100644 --- a/drivers/pci/pci-sysfs.c +++ b/drivers/pci/pci-sysfs.c @@ -521,7 +521,8 @@ static ssize_t driver_override_store(struct device *dev, struct pci_dev *pdev = to_pci_dev(dev); char *driver_override, *old = pdev->driver_override, *cp; - if (count > PATH_MAX) + /* We need to keep extra room for a newline */ + if (count >= (PAGE_SIZE - 1)) return -EINVAL; driver_override = kstrndup(buf, count, GFP_KERNEL); @@ -549,7 +550,7 @@ static ssize_t driver_override_show(struct device *dev, { struct pci_dev *pdev = to_pci_dev(dev); - return sprintf(buf, "%s\n", pdev->driver_override); + return snprintf(buf, PAGE_SIZE, "%s\n", pdev->driver_override); } static DEVICE_ATTR_RW(driver_override); diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c index b899947d839d..1245dca79009 100644 --- a/drivers/regulator/core.c +++ b/drivers/regulator/core.c @@ -3444,13 +3444,6 @@ static umode_t regulator_attr_is_visible(struct kobject *kobj, if (attr == &dev_attr_requested_microamps.attr) return 
rdev->desc->type == REGULATOR_CURRENT ? mode : 0; - /* all the other attributes exist to support constraints; - * don't show them if there are no constraints, or if the - * relevant supporting methods are missing. - */ - if (!rdev->constraints) - return 0; - /* constraints need specific supporting methods */ if (attr == &dev_attr_min_microvolts.attr || attr == &dev_attr_max_microvolts.attr) diff --git a/drivers/regulator/da9210-regulator.c b/drivers/regulator/da9210-regulator.c index bc6100103f7f..f0489cb9018b 100644 --- a/drivers/regulator/da9210-regulator.c +++ b/drivers/regulator/da9210-regulator.c @@ -152,6 +152,15 @@ static int da9210_i2c_probe(struct i2c_client *i2c, config.regmap = chip->regmap; config.of_node = dev->of_node; + /* Mask all interrupt sources to deassert interrupt line */ + error = regmap_write(chip->regmap, DA9210_REG_MASK_A, ~0); + if (!error) + error = regmap_write(chip->regmap, DA9210_REG_MASK_B, ~0); + if (error) { + dev_err(&i2c->dev, "Failed to write to mask reg: %d\n", error); + return error; + } + rdev = devm_regulator_register(&i2c->dev, &da9210_reg, &config); if (IS_ERR(rdev)) { dev_err(&i2c->dev, "Failed to register DA9210 regulator\n"); diff --git a/drivers/regulator/rk808-regulator.c b/drivers/regulator/rk808-regulator.c index 1f93b752a81c..3fd44353cc80 100644 --- a/drivers/regulator/rk808-regulator.c +++ b/drivers/regulator/rk808-regulator.c @@ -235,6 +235,7 @@ static const struct regulator_desc rk808_reg[] = { .vsel_mask = RK808_LDO_VSEL_MASK, .enable_reg = RK808_LDO_EN_REG, .enable_mask = BIT(0), + .enable_time = 400, .owner = THIS_MODULE, }, { .name = "LDO_REG2", @@ -249,6 +250,7 @@ static const struct regulator_desc rk808_reg[] = { .vsel_mask = RK808_LDO_VSEL_MASK, .enable_reg = RK808_LDO_EN_REG, .enable_mask = BIT(1), + .enable_time = 400, .owner = THIS_MODULE, }, { .name = "LDO_REG3", @@ -263,6 +265,7 @@ static const struct regulator_desc rk808_reg[] = { .vsel_mask = RK808_BUCK4_VSEL_MASK, .enable_reg = RK808_LDO_EN_REG, .enable_mask = BIT(2), + .enable_time = 400, .owner = THIS_MODULE, }, { .name = "LDO_REG4", @@ -277,6 +280,7 @@ static const struct regulator_desc rk808_reg[] = { .vsel_mask = RK808_LDO_VSEL_MASK, .enable_reg = RK808_LDO_EN_REG, .enable_mask = BIT(3), + .enable_time = 400, .owner = THIS_MODULE, }, { .name = "LDO_REG5", @@ -291,6 +295,7 @@ static const struct regulator_desc rk808_reg[] = { .vsel_mask = RK808_LDO_VSEL_MASK, .enable_reg = RK808_LDO_EN_REG, .enable_mask = BIT(4), + .enable_time = 400, .owner = THIS_MODULE, }, { .name = "LDO_REG6", @@ -305,6 +310,7 @@ static const struct regulator_desc rk808_reg[] = { .vsel_mask = RK808_LDO_VSEL_MASK, .enable_reg = RK808_LDO_EN_REG, .enable_mask = BIT(5), + .enable_time = 400, .owner = THIS_MODULE, }, { .name = "LDO_REG7", @@ -319,6 +325,7 @@ static const struct regulator_desc rk808_reg[] = { .vsel_mask = RK808_LDO_VSEL_MASK, .enable_reg = RK808_LDO_EN_REG, .enable_mask = BIT(6), + .enable_time = 400, .owner = THIS_MODULE, }, { .name = "LDO_REG8", @@ -333,6 +340,7 @@ static const struct regulator_desc rk808_reg[] = { .vsel_mask = RK808_LDO_VSEL_MASK, .enable_reg = RK808_LDO_EN_REG, .enable_mask = BIT(7), + .enable_time = 400, .owner = THIS_MODULE, }, { .name = "SWITCH_REG1", diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c index 4241eeab3386..f4cf6851fae9 100644 --- a/drivers/rtc/rtc-s3c.c +++ b/drivers/rtc/rtc-s3c.c @@ -849,6 +849,7 @@ static struct s3c_rtc_data const s3c2443_rtc_data = { static struct s3c_rtc_data const s3c6410_rtc_data = { .max_user_freq = 32768, + 
.needs_src_clk = true, .irq_handler = s3c6410_rtc_irq, .set_freq = s3c6410_rtc_setfreq, .enable_tick = s3c6410_rtc_enable_tick, diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c index 96128cb009f3..da212813f2d5 100644 --- a/drivers/s390/block/dcssblk.c +++ b/drivers/s390/block/dcssblk.c @@ -547,7 +547,7 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char * parse input */ num_of_segments = 0; - for (i = 0; ((buf[i] != '\0') && (buf[i] != '\n') && i < count); i++) { + for (i = 0; (i < count && (buf[i] != '\0') && (buf[i] != '\n')); i++) { for (j = i; (buf[j] != ':') && (buf[j] != '\0') && (buf[j] != '\n') && diff --git a/drivers/s390/block/scm_blk_cluster.c b/drivers/s390/block/scm_blk_cluster.c index 09db45296eed..7497ddde2dd6 100644 --- a/drivers/s390/block/scm_blk_cluster.c +++ b/drivers/s390/block/scm_blk_cluster.c @@ -92,7 +92,7 @@ bool scm_reserve_cluster(struct scm_request *scmrq) add = 0; continue; } - for (pos = 0; pos <= iter->aob->request.msb_count; pos++) { + for (pos = 0; pos < iter->aob->request.msb_count; pos++) { if (clusters_intersect(req, iter->request[pos]) && (rq_data_dir(req) == WRITE || rq_data_dir(iter->request[pos]) == WRITE)) { diff --git a/drivers/scsi/libsas/sas_discover.c b/drivers/scsi/libsas/sas_discover.c index 62b58d38ce2e..60de66252fa2 100644 --- a/drivers/scsi/libsas/sas_discover.c +++ b/drivers/scsi/libsas/sas_discover.c @@ -500,6 +500,7 @@ static void sas_revalidate_domain(struct work_struct *work) struct sas_discovery_event *ev = to_sas_discovery_event(work); struct asd_sas_port *port = ev->port; struct sas_ha_struct *ha = port->ha; + struct domain_device *ddev = port->port_dev; /* prevent revalidation from finding sata links in recovery */ mutex_lock(&ha->disco_mutex); @@ -514,8 +515,9 @@ static void sas_revalidate_domain(struct work_struct *work) SAS_DPRINTK("REVALIDATING DOMAIN on port %d, pid:%d\n", port->id, task_pid_nr(current)); - if (port->port_dev) - res = sas_ex_revalidate_domain(port->port_dev); + if (ddev && (ddev->dev_type == SAS_FANOUT_EXPANDER_DEVICE || + ddev->dev_type == SAS_EDGE_EXPANDER_DEVICE)) + res = sas_ex_revalidate_domain(ddev); SAS_DPRINTK("done REVALIDATING DOMAIN on port %d, pid:%d, res 0x%x\n", port->id, task_pid_nr(current), res); diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c index 9af7841f2e8c..06de34001c66 100644 --- a/drivers/spi/spi-atmel.c +++ b/drivers/spi/spi-atmel.c @@ -764,17 +764,17 @@ static void atmel_spi_pdc_next_xfer(struct spi_master *master, (unsigned long long)xfer->rx_dma); } - /* REVISIT: We're waiting for ENDRX before we start the next + /* REVISIT: We're waiting for RXBUFF before we start the next * transfer because we need to handle some difficult timing - * issues otherwise. If we wait for ENDTX in one transfer and - * then starts waiting for ENDRX in the next, it's difficult - * to tell the difference between the ENDRX interrupt we're - * actually waiting for and the ENDRX interrupt of the + * issues otherwise. If we wait for TXBUFE in one transfer and + * then starts waiting for RXBUFF in the next, it's difficult + * to tell the difference between the RXBUFF interrupt we're + * actually waiting for and the RXBUFF interrupt of the * previous transfer. * * It should be doable, though. Just not now... 
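The dcssblk and scm_blk_cluster hunks above are both loop-bound fixes: && evaluates left to right, so the range check "i < count" must come before buf[i] is read, and an inclusive "pos <= ...msb_count" walks one slot past the end of the request array. A minimal, self-contained sketch of the corrected pattern (names are illustrative, not the driver code):

    #include <stdio.h>
    #include <stddef.h>

    /*
     * Count ':'-separated segments in a buffer that is not guaranteed to be
     * NUL-terminated. The bound check must come first: with
     * "buf[i] != '\0' && i < count" the read of buf[i] happens before the
     * range test and can touch memory past the end of the buffer.
     */
    static size_t count_segments(const char *buf, size_t count)
    {
            size_t i, n = 0;

            for (i = 0; i < count && buf[i] != '\0' && buf[i] != '\n'; i++)
                    if (buf[i] == ':')
                            n++;
            return n + 1;
    }

    int main(void)
    {
            char input[] = "seg1:seg2:seg3\n";

            /*
             * Exclusive bound: valid indexes into an N-entry array are
             * 0..N-1, so iterate with "pos < nr", never "pos <= nr".
             */
            printf("%zu segments\n", count_segments(input, sizeof(input) - 1));
            return 0;
    }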
*/ - spi_writel(as, IER, SPI_BIT(ENDRX) | SPI_BIT(OVRES)); + spi_writel(as, IER, SPI_BIT(RXBUFF) | SPI_BIT(OVRES)); spi_writel(as, PTCR, SPI_BIT(TXTEN) | SPI_BIT(RXTEN)); } diff --git a/drivers/spi/spi-dw-mid.c b/drivers/spi/spi-dw-mid.c index a0197fd4e95c..3ce39d10fafb 100644 --- a/drivers/spi/spi-dw-mid.c +++ b/drivers/spi/spi-dw-mid.c @@ -139,6 +139,9 @@ static struct dma_async_tx_descriptor *dw_spi_dma_prepare_tx(struct dw_spi *dws) 1, DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + if (!txdesc) + return NULL; + txdesc->callback = dw_spi_dma_tx_done; txdesc->callback_param = dws; @@ -184,6 +187,9 @@ static struct dma_async_tx_descriptor *dw_spi_dma_prepare_rx(struct dw_spi *dws) 1, DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + if (!rxdesc) + return NULL; + rxdesc->callback = dw_spi_dma_rx_done; rxdesc->callback_param = dws; diff --git a/drivers/spi/spi-dw-pci.c b/drivers/spi/spi-dw-pci.c index 5ba331047cbe..6d331e0db331 100644 --- a/drivers/spi/spi-dw-pci.c +++ b/drivers/spi/spi-dw-pci.c @@ -36,13 +36,13 @@ struct spi_pci_desc { static struct spi_pci_desc spi_pci_mid_desc_1 = { .setup = dw_spi_mid_init, - .num_cs = 32, + .num_cs = 5, .bus_num = 0, }; static struct spi_pci_desc spi_pci_mid_desc_2 = { .setup = dw_spi_mid_init, - .num_cs = 4, + .num_cs = 2, .bus_num = 1, }; diff --git a/drivers/spi/spi-dw.c b/drivers/spi/spi-dw.c index 5a97a62b298a..4847afba89f4 100644 --- a/drivers/spi/spi-dw.c +++ b/drivers/spi/spi-dw.c @@ -621,14 +621,14 @@ static void spi_hw_init(struct device *dev, struct dw_spi *dws) if (!dws->fifo_len) { u32 fifo; - for (fifo = 2; fifo <= 256; fifo++) { + for (fifo = 1; fifo < 256; fifo++) { dw_writew(dws, DW_SPI_TXFLTR, fifo); if (fifo != dw_readw(dws, DW_SPI_TXFLTR)) break; } dw_writew(dws, DW_SPI_TXFLTR, 0); - dws->fifo_len = (fifo == 2) ? 0 : fifo - 1; + dws->fifo_len = (fifo == 1) ? 0 : fifo; dev_dbg(dev, "Detected FIFO size: %u bytes\n", dws->fifo_len); } } diff --git a/drivers/spi/spi-img-spfi.c b/drivers/spi/spi-img-spfi.c index c01567d53581..e649bc7d4c08 100644 --- a/drivers/spi/spi-img-spfi.c +++ b/drivers/spi/spi-img-spfi.c @@ -459,6 +459,13 @@ static int img_spfi_transfer_one(struct spi_master *master, unsigned long flags; int ret; + if (xfer->len > SPFI_TRANSACTION_TSIZE_MASK) { + dev_err(spfi->dev, + "Transfer length (%d) is greater than the max supported (%d)", + xfer->len, SPFI_TRANSACTION_TSIZE_MASK); + return -EINVAL; + } + /* * Stop all DMA and reset the controller if the previous transaction * timed-out and never completed its DMA.
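The spi-dw hunk above reworks the FIFO auto-detection: the probe writes increasing thresholds to DW_SPI_TXFLTR until one fails to read back, and with the new bounds the first rejected value is itself the FIFO depth, removing the old off-by-one. A toy model of the probe, assuming the threshold field only latches values smaller than the depth (the mock register is hypothetical, not the hardware):

    #include <stdio.h>
    #include <stdint.h>

    #define MOCK_FIFO_DEPTH 16              /* what the probe should report */

    static uint16_t txfltr;                 /* stands in for DW_SPI_TXFLTR */

    /*
     * Assumed register model: the TX threshold field only latches values
     * smaller than the FIFO depth; larger writes are ignored.
     */
    static void reg_write(uint16_t v)
    {
            if (v < MOCK_FIFO_DEPTH)
                    txfltr = v;
    }

    static uint16_t reg_read(void)
    {
            return txfltr;
    }

    int main(void)
    {
            uint32_t fifo;

            /*
             * Probe upward from 1: the first value that fails to read back
             * equals the FIFO depth, so no "- 1" correction is needed.
             */
            for (fifo = 1; fifo < 256; fifo++) {
                    reg_write(fifo);
                    if (fifo != reg_read())
                            break;
            }
            reg_write(0);

            printf("detected FIFO size: %u bytes\n", fifo == 1 ? 0 : fifo);
            return 0;
    }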
diff --git a/drivers/spi/spi-pl022.c b/drivers/spi/spi-pl022.c index 89ca162801da..ee513a85296b 100644 --- a/drivers/spi/spi-pl022.c +++ b/drivers/spi/spi-pl022.c @@ -534,12 +534,12 @@ static void giveback(struct pl022 *pl022) pl022->cur_msg = NULL; pl022->cur_transfer = NULL; pl022->cur_chip = NULL; - spi_finalize_current_message(pl022->master); /* disable the SPI/SSP operation */ writew((readw(SSP_CR1(pl022->virtbase)) & (~SSP_CR1_MASK_SSE)), SSP_CR1(pl022->virtbase)); + spi_finalize_current_message(pl022->master); } /** diff --git a/drivers/spi/spi-ti-qspi.c b/drivers/spi/spi-ti-qspi.c index 884a716e50cb..5c0616870358 100644 --- a/drivers/spi/spi-ti-qspi.c +++ b/drivers/spi/spi-ti-qspi.c @@ -101,6 +101,7 @@ struct ti_qspi { #define QSPI_FLEN(n) ((n - 1) << 0) /* STATUS REGISTER */ +#define BUSY 0x01 #define WC 0x02 /* INTERRUPT REGISTER */ @@ -199,6 +200,21 @@ static void ti_qspi_restore_ctx(struct ti_qspi *qspi) ti_qspi_write(qspi, ctx_reg->clkctrl, QSPI_SPI_CLOCK_CNTRL_REG); } +static inline u32 qspi_is_busy(struct ti_qspi *qspi) +{ + u32 stat; + unsigned long timeout = jiffies + QSPI_COMPLETION_TIMEOUT; + + stat = ti_qspi_read(qspi, QSPI_SPI_STATUS_REG); + while ((stat & BUSY) && time_after(timeout, jiffies)) { + cpu_relax(); + stat = ti_qspi_read(qspi, QSPI_SPI_STATUS_REG); + } + + WARN(stat & BUSY, "qspi busy\n"); + return stat & BUSY; +} + static int qspi_write_msg(struct ti_qspi *qspi, struct spi_transfer *t) { int wlen, count; @@ -211,6 +227,9 @@ static int qspi_write_msg(struct ti_qspi *qspi, struct spi_transfer *t) wlen = t->bits_per_word >> 3; /* in bytes */ while (count) { + if (qspi_is_busy(qspi)) + return -EBUSY; + switch (wlen) { case 1: dev_dbg(qspi->dev, "tx cmd %08x dc %08x data %02x\n", @@ -266,6 +285,9 @@ static int qspi_read_msg(struct ti_qspi *qspi, struct spi_transfer *t) while (count) { dev_dbg(qspi->dev, "rx cmd %08x dc %08x\n", cmd, qspi->dc); + if (qspi_is_busy(qspi)) + return -EBUSY; + ti_qspi_write(qspi, cmd, QSPI_SPI_CMD_REG); if (!wait_for_completion_timeout(&qspi->transfer_complete, QSPI_COMPLETION_TIMEOUT)) { diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c index af98b096af2f..175c9956cbe3 100644 --- a/drivers/usb/gadget/function/f_fs.c +++ b/drivers/usb/gadget/function/f_fs.c @@ -144,10 +144,9 @@ struct ffs_io_data { bool read; struct kiocb *kiocb; - const struct iovec *iovec; - unsigned long nr_segs; - char __user *buf; - size_t len; + struct iov_iter data; + const void *to_free; + char *buf; struct mm_struct *mm; struct work_struct work; @@ -649,29 +648,10 @@ static void ffs_user_copy_worker(struct work_struct *work) io_data->req->actual; if (io_data->read && ret > 0) { - int i; - size_t pos = 0; - - /* - * Since req->length may be bigger than io_data->len (after - * being rounded up to maxpacketsize), we may end up with more - * data then user space has space for. 
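The removed f_fs comment above describes the real constraint: a UDC may require OUT buffers rounded up to wMaxPacketSize, so a request can complete with more bytes than userspace asked for, and the copy-out must clamp (copy_to_iter() now caps at the iterator's size automatically). A small sketch of the rounding and clamping arithmetic (usb_ep_align_maybe() is modeled here, not called):

    #include <stdio.h>

    /*
     * Round a requested OUT-transfer length up to the endpoint's
     * wMaxPacketSize, the way usb_ep_align_maybe() does when the
     * controller requires aligned buffers.
     */
    static unsigned int align_maybe(unsigned int len, unsigned int maxpacket)
    {
            return ((len + maxpacket - 1) / maxpacket) * maxpacket;
    }

    int main(void)
    {
            unsigned int asked = 100, maxpacket = 64;
            unsigned int buflen = align_maybe(asked, maxpacket);    /* 128 */
            unsigned int actual = 120;      /* what the host really sent */
            unsigned int copy = actual < asked ? actual : asked;

            /*
             * The hardware may legally fill the rounded-up buffer past what
             * userspace asked for, so the copy-out clamps to the smaller of
             * the two; copy_to_iter() caps at the iterator size for free.
             */
            printf("allocated %u, copying %u of %u received\n",
                   buflen, copy, actual);
            return 0;
    }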
- */ - ret = min_t(int, ret, io_data->len); - use_mm(io_data->mm); - for (i = 0; i < io_data->nr_segs; i++) { - size_t len = min_t(size_t, ret - pos, - io_data->iovec[i].iov_len); - if (!len) - break; - if (unlikely(copy_to_user(io_data->iovec[i].iov_base, - &io_data->buf[pos], len))) { - ret = -EFAULT; - break; - } - pos += len; - } + ret = copy_to_iter(io_data->buf, ret, &io_data->data); + if (iov_iter_count(&io_data->data)) + ret = -EFAULT; unuse_mm(io_data->mm); } @@ -684,7 +664,7 @@ static void ffs_user_copy_worker(struct work_struct *work) io_data->kiocb->private = NULL; if (io_data->read) - kfree(io_data->iovec); + kfree(io_data->to_free); kfree(io_data->buf); kfree(io_data); } @@ -743,6 +723,7 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data) * before the waiting completes, so do not assign to 'gadget' earlier */ struct usb_gadget *gadget = epfile->ffs->gadget; + size_t copied; spin_lock_irq(&epfile->ffs->eps_lock); /* In the meantime, endpoint got disabled or changed. */ @@ -750,34 +731,21 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data) spin_unlock_irq(&epfile->ffs->eps_lock); return -ESHUTDOWN; } + data_len = iov_iter_count(&io_data->data); /* * Controller may require buffer size to be aligned to * maxpacketsize of an out endpoint. */ - data_len = io_data->read ? - usb_ep_align_maybe(gadget, ep->ep, io_data->len) : - io_data->len; + if (io_data->read) + data_len = usb_ep_align_maybe(gadget, ep->ep, data_len); spin_unlock_irq(&epfile->ffs->eps_lock); data = kmalloc(data_len, GFP_KERNEL); if (unlikely(!data)) return -ENOMEM; - if (io_data->aio && !io_data->read) { - int i; - size_t pos = 0; - for (i = 0; i < io_data->nr_segs; i++) { - if (unlikely(copy_from_user(&data[pos], - io_data->iovec[i].iov_base, - io_data->iovec[i].iov_len))) { - ret = -EFAULT; - goto error; - } - pos += io_data->iovec[i].iov_len; - } - } else { - if (!io_data->read && - unlikely(__copy_from_user(data, io_data->buf, - io_data->len))) { + if (!io_data->read) { + copied = copy_from_iter(data, data_len, &io_data->data); + if (copied != data_len) { ret = -EFAULT; goto error; } @@ -876,10 +844,8 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data) */ ret = ep->status; if (io_data->read && ret > 0) { - ret = min_t(size_t, ret, io_data->len); - - if (unlikely(copy_to_user(io_data->buf, - data, ret))) + ret = copy_to_iter(data, ret, &io_data->data); + if (unlikely(iov_iter_count(&io_data->data))) ret = -EFAULT; } } @@ -898,37 +864,6 @@ error: return ret; } -static ssize_t -ffs_epfile_write(struct file *file, const char __user *buf, size_t len, - loff_t *ptr) -{ - struct ffs_io_data io_data; - - ENTER(); - - io_data.aio = false; - io_data.read = false; - io_data.buf = (char * __user)buf; - io_data.len = len; - - return ffs_epfile_io(file, &io_data); -} - -static ssize_t -ffs_epfile_read(struct file *file, char __user *buf, size_t len, loff_t *ptr) -{ - struct ffs_io_data io_data; - - ENTER(); - - io_data.aio = false; - io_data.read = true; - io_data.buf = buf; - io_data.len = len; - - return ffs_epfile_io(file, &io_data); -} - static int ffs_epfile_open(struct inode *inode, struct file *file) { @@ -965,67 +900,86 @@ static int ffs_aio_cancel(struct kiocb *kiocb) return value; } -static ssize_t ffs_epfile_aio_write(struct kiocb *kiocb, - const struct iovec *iovec, - unsigned long nr_segs, loff_t loff) +static ssize_t ffs_epfile_write_iter(struct kiocb *kiocb, struct iov_iter *from) { - struct ffs_io_data *io_data; + struct 
ffs_io_data io_data, *p = &io_data; + ssize_t res; ENTER(); - io_data = kmalloc(sizeof(*io_data), GFP_KERNEL); - if (unlikely(!io_data)) - return -ENOMEM; + if (!is_sync_kiocb(kiocb)) { + p = kmalloc(sizeof(io_data), GFP_KERNEL); + if (unlikely(!p)) + return -ENOMEM; + p->aio = true; + } else { + p->aio = false; + } - io_data->aio = true; - io_data->read = false; - io_data->kiocb = kiocb; - io_data->iovec = iovec; - io_data->nr_segs = nr_segs; - io_data->len = kiocb->ki_nbytes; - io_data->mm = current->mm; + p->read = false; + p->kiocb = kiocb; + p->data = *from; + p->mm = current->mm; - kiocb->private = io_data; + kiocb->private = p; kiocb_set_cancel_fn(kiocb, ffs_aio_cancel); - return ffs_epfile_io(kiocb->ki_filp, io_data); + res = ffs_epfile_io(kiocb->ki_filp, p); + if (res == -EIOCBQUEUED) + return res; + if (p->aio) + kfree(p); + else + *from = p->data; + return res; } -static ssize_t ffs_epfile_aio_read(struct kiocb *kiocb, - const struct iovec *iovec, - unsigned long nr_segs, loff_t loff) +static ssize_t ffs_epfile_read_iter(struct kiocb *kiocb, struct iov_iter *to) { - struct ffs_io_data *io_data; - struct iovec *iovec_copy; + struct ffs_io_data io_data, *p = &io_data; + ssize_t res; ENTER(); - iovec_copy = kmalloc_array(nr_segs, sizeof(*iovec_copy), GFP_KERNEL); - if (unlikely(!iovec_copy)) - return -ENOMEM; - - memcpy(iovec_copy, iovec, sizeof(struct iovec)*nr_segs); - - io_data = kmalloc(sizeof(*io_data), GFP_KERNEL); - if (unlikely(!io_data)) { - kfree(iovec_copy); - return -ENOMEM; + if (!is_sync_kiocb(kiocb)) { + p = kmalloc(sizeof(io_data), GFP_KERNEL); + if (unlikely(!p)) + return -ENOMEM; + p->aio = true; + } else { + p->aio = false; } - io_data->aio = true; - io_data->read = true; - io_data->kiocb = kiocb; - io_data->iovec = iovec_copy; - io_data->nr_segs = nr_segs; - io_data->len = kiocb->ki_nbytes; - io_data->mm = current->mm; + p->read = true; + p->kiocb = kiocb; + if (p->aio) { + p->to_free = dup_iter(&p->data, to, GFP_KERNEL); + if (!p->to_free) { + kfree(p); + return -ENOMEM; + } + } else { + p->data = *to; + p->to_free = NULL; + } + p->mm = current->mm; - kiocb->private = io_data; + kiocb->private = p; kiocb_set_cancel_fn(kiocb, ffs_aio_cancel); - return ffs_epfile_io(kiocb->ki_filp, io_data); + res = ffs_epfile_io(kiocb->ki_filp, p); + if (res == -EIOCBQUEUED) + return res; + + if (p->aio) { + kfree(p->to_free); + kfree(p); + } else { + *to = p->data; + } + return res; } static int @@ -1105,10 +1059,10 @@ static const struct file_operations ffs_epfile_operations = { .llseek = no_llseek, .open = ffs_epfile_open, - .write = ffs_epfile_write, - .read = ffs_epfile_read, - .aio_write = ffs_epfile_aio_write, - .aio_read = ffs_epfile_aio_read, + .write = new_sync_write, + .read = new_sync_read, + .write_iter = ffs_epfile_write_iter, + .read_iter = ffs_epfile_read_iter, .release = ffs_epfile_release, .unlocked_ioctl = ffs_epfile_ioctl, }; diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c index db49ec4c748e..200f9a584064 100644 --- a/drivers/usb/gadget/legacy/inode.c +++ b/drivers/usb/gadget/legacy/inode.c @@ -74,6 +74,8 @@ MODULE_DESCRIPTION (DRIVER_DESC); MODULE_AUTHOR ("David Brownell"); MODULE_LICENSE ("GPL"); +static int ep_open(struct inode *, struct file *); + /*----------------------------------------------------------------------*/ @@ -283,14 +285,15 @@ static void epio_complete (struct usb_ep *ep, struct usb_request *req) * still need dev->lock to use epdata->ep. 
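Both conversions in this series, f_fs above and gadgetfs below, use the same allocation rule for the new read_iter/write_iter entry points: a synchronous kiocb completes before the handler returns, so its bookkeeping can live on the stack, while an asynchronous one (is_sync_kiocb() false) outlives the call and gets a heap copy that the completion path frees. A stripped-down userspace illustration of that control flow (hypothetical names; a flag stands in for is_sync_kiocb()):

    #include <stdio.h>
    #include <stdlib.h>
    #include <stdbool.h>

    struct io_data {
            bool aio;
            char tag[16];
    };

    /* Completion path for queued (asynchronous) I/O: it owns the heap copy. */
    static void complete_async(struct io_data *p)
    {
            printf("completed %s\n", p->tag);
            free(p);
    }

    static long submit(bool is_sync)
    {
            struct io_data io_data, *p = &io_data;

            if (!is_sync) {
                    /* Async state must survive after we return (queued). */
                    p = malloc(sizeof(*p));
                    if (!p)
                            return -1;      /* the driver returns -ENOMEM */
                    p->aio = true;
            } else {
                    p->aio = false;
            }
            snprintf(p->tag, sizeof(p->tag), "%s", is_sync ? "sync" : "async");

            if (p->aio) {
                    complete_async(p);      /* normally deferred to a worker */
                    return 0;               /* driver: -EIOCBQUEUED */
            }
            printf("completed %s in-line\n", p->tag);
            return 0;
    }

    int main(void)
    {
            submit(true);
            submit(false);
            return 0;
    }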
*/ static int -get_ready_ep (unsigned f_flags, struct ep_data *epdata) +get_ready_ep (unsigned f_flags, struct ep_data *epdata, bool is_write) { int val; if (f_flags & O_NONBLOCK) { if (!mutex_trylock(&epdata->lock)) goto nonblock; - if (epdata->state != STATE_EP_ENABLED) { + if (epdata->state != STATE_EP_ENABLED && + (!is_write || epdata->state != STATE_EP_READY)) { mutex_unlock(&epdata->lock); nonblock: val = -EAGAIN; @@ -305,18 +308,20 @@ nonblock: switch (epdata->state) { case STATE_EP_ENABLED: + return 0; + case STATE_EP_READY: /* not configured yet */ + if (is_write) + return 0; + // FALLTHRU + case STATE_EP_UNBOUND: /* clean disconnect */ break; // case STATE_EP_DISABLED: /* "can't happen" */ - // case STATE_EP_READY: /* "can't happen" */ default: /* error! */ pr_debug ("%s: ep %p not available, state %d\n", shortname, epdata, epdata->state); - // FALLTHROUGH - case STATE_EP_UNBOUND: /* clean disconnect */ - val = -ENODEV; - mutex_unlock(&epdata->lock); } - return val; + mutex_unlock(&epdata->lock); + return -ENODEV; } static ssize_t @@ -363,97 +368,6 @@ ep_io (struct ep_data *epdata, void *buf, unsigned len) return value; } - -/* handle a synchronous OUT bulk/intr/iso transfer */ -static ssize_t -ep_read (struct file *fd, char __user *buf, size_t len, loff_t *ptr) -{ - struct ep_data *data = fd->private_data; - void *kbuf; - ssize_t value; - - if ((value = get_ready_ep (fd->f_flags, data)) < 0) - return value; - - /* halt any endpoint by doing a "wrong direction" i/o call */ - if (usb_endpoint_dir_in(&data->desc)) { - if (usb_endpoint_xfer_isoc(&data->desc)) { - mutex_unlock(&data->lock); - return -EINVAL; - } - DBG (data->dev, "%s halt\n", data->name); - spin_lock_irq (&data->dev->lock); - if (likely (data->ep != NULL)) - usb_ep_set_halt (data->ep); - spin_unlock_irq (&data->dev->lock); - mutex_unlock(&data->lock); - return -EBADMSG; - } - - /* FIXME readahead for O_NONBLOCK and poll(); careful with ZLPs */ - - value = -ENOMEM; - kbuf = kmalloc (len, GFP_KERNEL); - if (unlikely (!kbuf)) - goto free1; - - value = ep_io (data, kbuf, len); - VDEBUG (data->dev, "%s read %zu OUT, status %d\n", - data->name, len, (int) value); - if (value >= 0 && copy_to_user (buf, kbuf, value)) - value = -EFAULT; - -free1: - mutex_unlock(&data->lock); - kfree (kbuf); - return value; -} - -/* handle a synchronous IN bulk/intr/iso transfer */ -static ssize_t -ep_write (struct file *fd, const char __user *buf, size_t len, loff_t *ptr) -{ - struct ep_data *data = fd->private_data; - void *kbuf; - ssize_t value; - - if ((value = get_ready_ep (fd->f_flags, data)) < 0) - return value; - - /* halt any endpoint by doing a "wrong direction" i/o call */ - if (!usb_endpoint_dir_in(&data->desc)) { - if (usb_endpoint_xfer_isoc(&data->desc)) { - mutex_unlock(&data->lock); - return -EINVAL; - } - DBG (data->dev, "%s halt\n", data->name); - spin_lock_irq (&data->dev->lock); - if (likely (data->ep != NULL)) - usb_ep_set_halt (data->ep); - spin_unlock_irq (&data->dev->lock); - mutex_unlock(&data->lock); - return -EBADMSG; - } - - /* FIXME writebehind for O_NONBLOCK and poll(), qlen = 1 */ - - value = -ENOMEM; - kbuf = memdup_user(buf, len); - if (IS_ERR(kbuf)) { - value = PTR_ERR(kbuf); - kbuf = NULL; - goto free1; - } - - value = ep_io (data, kbuf, len); - VDEBUG (data->dev, "%s write %zu IN, status %d\n", - data->name, len, (int) value); -free1: - mutex_unlock(&data->lock); - kfree (kbuf); - return value; -} - static int ep_release (struct inode *inode, struct file *fd) { @@ -481,7 +395,7 @@ static long ep_ioctl(struct 
file *fd, unsigned code, unsigned long value) struct ep_data *data = fd->private_data; int status; - if ((status = get_ready_ep (fd->f_flags, data)) < 0) + if ((status = get_ready_ep (fd->f_flags, data, false)) < 0) return status; spin_lock_irq (&data->dev->lock); @@ -517,8 +431,8 @@ struct kiocb_priv { struct mm_struct *mm; struct work_struct work; void *buf; - const struct iovec *iv; - unsigned long nr_segs; + struct iov_iter to; + const void *to_free; unsigned actual; }; @@ -541,35 +455,6 @@ static int ep_aio_cancel(struct kiocb *iocb) return value; } -static ssize_t ep_copy_to_user(struct kiocb_priv *priv) -{ - ssize_t len, total; - void *to_copy; - int i; - - /* copy stuff into user buffers */ - total = priv->actual; - len = 0; - to_copy = priv->buf; - for (i=0; i < priv->nr_segs; i++) { - ssize_t this = min((ssize_t)(priv->iv[i].iov_len), total); - - if (copy_to_user(priv->iv[i].iov_base, to_copy, this)) { - if (len == 0) - len = -EFAULT; - break; - } - - total -= this; - len += this; - to_copy += this; - if (total == 0) - break; - } - - return len; -} - static void ep_user_copy_worker(struct work_struct *work) { struct kiocb_priv *priv = container_of(work, struct kiocb_priv, work); @@ -578,13 +463,16 @@ static void ep_user_copy_worker(struct work_struct *work) size_t ret; use_mm(mm); - ret = ep_copy_to_user(priv); + ret = copy_to_iter(priv->buf, priv->actual, &priv->to); unuse_mm(mm); + if (!ret) + ret = -EFAULT; /* completing the iocb can drop the ctx and mm, don't touch mm after */ aio_complete(iocb, ret, ret); kfree(priv->buf); + kfree(priv->to_free); kfree(priv); } @@ -603,8 +491,9 @@ static void ep_aio_complete(struct usb_ep *ep, struct usb_request *req) * don't need to copy anything to userspace, so we can * complete the aio request immediately. */ - if (priv->iv == NULL || unlikely(req->actual == 0)) { + if (priv->to_free == NULL || unlikely(req->actual == 0)) { kfree(req->buf); + kfree(priv->to_free); kfree(priv); iocb->private = NULL; /* aio_complete() reports bytes-transferred _and_ faults */ @@ -618,6 +507,7 @@ static void ep_aio_complete(struct usb_ep *ep, struct usb_request *req) priv->buf = req->buf; priv->actual = req->actual; + INIT_WORK(&priv->work, ep_user_copy_worker); schedule_work(&priv->work); } spin_unlock(&epdata->dev->lock); @@ -626,38 +516,17 @@ static void ep_aio_complete(struct usb_ep *ep, struct usb_request *req) put_ep(epdata); } -static ssize_t -ep_aio_rwtail( - struct kiocb *iocb, - char *buf, - size_t len, - struct ep_data *epdata, - const struct iovec *iv, - unsigned long nr_segs -) +static ssize_t ep_aio(struct kiocb *iocb, + struct kiocb_priv *priv, + struct ep_data *epdata, + char *buf, + size_t len) { - struct kiocb_priv *priv; - struct usb_request *req; - ssize_t value; + struct usb_request *req; + ssize_t value; - priv = kmalloc(sizeof *priv, GFP_KERNEL); - if (!priv) { - value = -ENOMEM; -fail: - kfree(buf); - return value; - } iocb->private = priv; priv->iocb = iocb; - priv->iv = iv; - priv->nr_segs = nr_segs; - INIT_WORK(&priv->work, ep_user_copy_worker); - - value = get_ready_ep(iocb->ki_filp->f_flags, epdata); - if (unlikely(value < 0)) { - kfree(priv); - goto fail; - } kiocb_set_cancel_fn(iocb, ep_aio_cancel); get_ep(epdata); @@ -669,75 +538,154 @@ fail: * allocate or submit those if the host disconnected. 
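The ep_copy_to_user() loop deleted above is exactly the walk that copy_to_iter() now hides: visit each iovec segment, copy min(remaining, iov_len), and stop when the payload or the segments run out. The same walk in plain userspace C, with memcpy standing in for copy_to_user():

    #include <stdio.h>
    #include <string.h>
    #include <sys/uio.h>

    /*
     * Scatter 'total' bytes from a contiguous buffer into an iovec array and
     * return how many were placed. copy_to_iter() performs this walk (plus
     * fault handling and iterator state) in a single call.
     */
    static size_t scatter(const char *src, size_t total,
                          const struct iovec *iv, int nr_segs)
    {
            size_t done = 0;
            int i;

            for (i = 0; i < nr_segs && done < total; i++) {
                    size_t chunk = total - done;

                    if (chunk > iv[i].iov_len)
                            chunk = iv[i].iov_len;
                    memcpy(iv[i].iov_base, src + done, chunk);
                    done += chunk;
            }
            return done;
    }

    int main(void)
    {
            char a[4], b[8];
            struct iovec iv[] = {
                    { .iov_base = a, .iov_len = sizeof(a) },
                    { .iov_base = b, .iov_len = sizeof(b) },
            };
            size_t n = scatter("hello world", 11, iv, 2);

            printf("scattered %zu bytes: %.4s|%.7s\n", n, a, b);
            return 0;
    }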
*/ spin_lock_irq(&epdata->dev->lock); - if (likely(epdata->ep)) { - req = usb_ep_alloc_request(epdata->ep, GFP_ATOMIC); - if (likely(req)) { - priv->req = req; - req->buf = buf; - req->length = len; - req->complete = ep_aio_complete; - req->context = iocb; - value = usb_ep_queue(epdata->ep, req, GFP_ATOMIC); - if (unlikely(0 != value)) - usb_ep_free_request(epdata->ep, req); - } else - value = -EAGAIN; - } else - value = -ENODEV; - spin_unlock_irq(&epdata->dev->lock); + value = -ENODEV; + if (unlikely(!epdata->ep)) + goto fail; - mutex_unlock(&epdata->lock); + req = usb_ep_alloc_request(epdata->ep, GFP_ATOMIC); + value = -ENOMEM; + if (unlikely(!req)) + goto fail; - if (unlikely(value)) { - kfree(priv); - put_ep(epdata); - } else - value = -EIOCBQUEUED; + priv->req = req; + req->buf = buf; + req->length = len; + req->complete = ep_aio_complete; + req->context = iocb; + value = usb_ep_queue(epdata->ep, req, GFP_ATOMIC); + if (unlikely(0 != value)) { + usb_ep_free_request(epdata->ep, req); + goto fail; + } + spin_unlock_irq(&epdata->dev->lock); + return -EIOCBQUEUED; + +fail: + spin_unlock_irq(&epdata->dev->lock); + kfree(priv->to_free); + kfree(priv); + put_ep(epdata); return value; } static ssize_t -ep_aio_read(struct kiocb *iocb, const struct iovec *iov, - unsigned long nr_segs, loff_t o) +ep_read_iter(struct kiocb *iocb, struct iov_iter *to) { - struct ep_data *epdata = iocb->ki_filp->private_data; - char *buf; + struct file *file = iocb->ki_filp; + struct ep_data *epdata = file->private_data; + size_t len = iov_iter_count(to); + ssize_t value; + char *buf; - if (unlikely(usb_endpoint_dir_in(&epdata->desc))) - return -EINVAL; + if ((value = get_ready_ep(file->f_flags, epdata, false)) < 0) + return value; - buf = kmalloc(iocb->ki_nbytes, GFP_KERNEL); - if (unlikely(!buf)) - return -ENOMEM; + /* halt any endpoint by doing a "wrong direction" i/o call */ + if (usb_endpoint_dir_in(&epdata->desc)) { + if (usb_endpoint_xfer_isoc(&epdata->desc) || + !is_sync_kiocb(iocb)) { + mutex_unlock(&epdata->lock); + return -EINVAL; + } + DBG (epdata->dev, "%s halt\n", epdata->name); + spin_lock_irq(&epdata->dev->lock); + if (likely(epdata->ep != NULL)) + usb_ep_set_halt(epdata->ep); + spin_unlock_irq(&epdata->dev->lock); + mutex_unlock(&epdata->lock); + return -EBADMSG; + } - return ep_aio_rwtail(iocb, buf, iocb->ki_nbytes, epdata, iov, nr_segs); + buf = kmalloc(len, GFP_KERNEL); + if (unlikely(!buf)) { + mutex_unlock(&epdata->lock); + return -ENOMEM; + } + if (is_sync_kiocb(iocb)) { + value = ep_io(epdata, buf, len); + if (value >= 0 && (copy_to_iter(buf, value, to) != value)) + value = -EFAULT; + } else { + struct kiocb_priv *priv = kzalloc(sizeof *priv, GFP_KERNEL); + value = -ENOMEM; + if (!priv) + goto fail; + priv->to_free = dup_iter(&priv->to, to, GFP_KERNEL); + if (!priv->to_free) { + kfree(priv); + goto fail; + } + value = ep_aio(iocb, priv, epdata, buf, len); + if (value == -EIOCBQUEUED) + buf = NULL; + } +fail: + kfree(buf); + mutex_unlock(&epdata->lock); + return value; } +static ssize_t ep_config(struct ep_data *, const char *, size_t); + static ssize_t -ep_aio_write(struct kiocb *iocb, const struct iovec *iov, - unsigned long nr_segs, loff_t o) +ep_write_iter(struct kiocb *iocb, struct iov_iter *from) { - struct ep_data *epdata = iocb->ki_filp->private_data; - char *buf; - size_t len = 0; - int i = 0; + struct file *file = iocb->ki_filp; + struct ep_data *epdata = file->private_data; + size_t len = iov_iter_count(from); + bool configured; + ssize_t value; + char *buf; + + if ((value =
get_ready_ep(file->f_flags, epdata, true)) < 0) + return value; - if (unlikely(!usb_endpoint_dir_in(&epdata->desc))) - return -EINVAL; + configured = epdata->state == STATE_EP_ENABLED; - buf = kmalloc(iocb->ki_nbytes, GFP_KERNEL); - if (unlikely(!buf)) + /* halt any endpoint by doing a "wrong direction" i/o call */ + if (configured && !usb_endpoint_dir_in(&epdata->desc)) { + if (usb_endpoint_xfer_isoc(&epdata->desc) || + !is_sync_kiocb(iocb)) { + mutex_unlock(&epdata->lock); + return -EINVAL; + } + DBG (epdata->dev, "%s halt\n", epdata->name); + spin_lock_irq(&epdata->dev->lock); + if (likely(epdata->ep != NULL)) + usb_ep_set_halt(epdata->ep); + spin_unlock_irq(&epdata->dev->lock); + mutex_unlock(&epdata->lock); + return -EBADMSG; + } + + buf = kmalloc(len, GFP_KERNEL); + if (unlikely(!buf)) { + mutex_unlock(&epdata->lock); return -ENOMEM; + } - for (i=0; i < nr_segs; i++) { - if (unlikely(copy_from_user(&buf[len], iov[i].iov_base, - iov[i].iov_len) != 0)) { - kfree(buf); - return -EFAULT; + if (unlikely(copy_from_iter(buf, len, from) != len)) { + value = -EFAULT; + goto out; + } + + if (unlikely(!configured)) { + value = ep_config(epdata, buf, len); + } else if (is_sync_kiocb(iocb)) { + value = ep_io(epdata, buf, len); + } else { + struct kiocb_priv *priv = kzalloc(sizeof *priv, GFP_KERNEL); + value = -ENOMEM; + if (priv) { + value = ep_aio(iocb, priv, epdata, buf, len); + if (value == -EIOCBQUEUED) + buf = NULL; } - len += iov[i].iov_len; } - return ep_aio_rwtail(iocb, buf, len, epdata, NULL, 0); +out: + kfree(buf); + mutex_unlock(&epdata->lock); + return value; } /*----------------------------------------------------------------------*/ @@ -745,15 +693,15 @@ ep_aio_write(struct kiocb *iocb, const struct iovec *iov, /* used after endpoint configuration */ static const struct file_operations ep_io_operations = { .owner = THIS_MODULE, - .llseek = no_llseek, - .read = ep_read, - .write = ep_write, - .unlocked_ioctl = ep_ioctl, + .open = ep_open, .release = ep_release, - - .aio_read = ep_aio_read, - .aio_write = ep_aio_write, + .llseek = no_llseek, + .read = new_sync_read, + .write = new_sync_write, + .unlocked_ioctl = ep_ioctl, + .read_iter = ep_read_iter, + .write_iter = ep_write_iter, }; /* ENDPOINT INITIALIZATION @@ -770,17 +718,12 @@ static const struct file_operations ep_io_operations = { * speed descriptor, then optional high speed descriptor. 
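One subtlety in the read_iter path above: dup_iter() is called only on the async branch, because the submitter's iov_iter (and the iovec array behind it) lives on a stack frame that is gone by the time a deferred completion runs, so the request keeps its own copy in priv->to_free. A minimal userspace sketch of that duplication (dup_iovec is a hypothetical helper, not a kernel API):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <sys/uio.h>

    /*
     * Deep-copy the segment array so a deferred completion can still find it
     * after the submitter's stack frame is gone; dup_iter() does this for an
     * iov_iter and hands back the allocation for a later kfree().
     */
    static struct iovec *dup_iovec(const struct iovec *iv, int nr_segs)
    {
            struct iovec *copy = malloc(nr_segs * sizeof(*copy));

            if (copy)
                    memcpy(copy, iv, nr_segs * sizeof(*copy));
            return copy;
    }

    int main(void)
    {
            char buf[8];
            struct iovec on_stack = { .iov_base = buf, .iov_len = sizeof(buf) };
            struct iovec *to_free = dup_iovec(&on_stack, 1);

            if (!to_free)
                    return 1;
            /* ... queue the request; completion uses 'to_free', then: */
            printf("kept a %zu-byte segment alive\n", to_free->iov_len);
            free(to_free);
            return 0;
    }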
*/ static ssize_t -ep_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr) +ep_config (struct ep_data *data, const char *buf, size_t len) { - struct ep_data *data = fd->private_data; struct usb_ep *ep; u32 tag; int value, length = len; - value = mutex_lock_interruptible(&data->lock); - if (value < 0) - return value; - if (data->state != STATE_EP_READY) { value = -EL2HLT; goto fail; @@ -791,9 +734,7 @@ ep_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr) goto fail0; /* we might need to change message format someday */ - if (copy_from_user (&tag, buf, 4)) { - goto fail1; - } + memcpy(&tag, buf, 4); if (tag != 1) { DBG(data->dev, "config %s, bad tag %d\n", data->name, tag); goto fail0; @@ -806,19 +747,15 @@ ep_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr) */ /* full/low speed descriptor, then high speed */ - if (copy_from_user (&data->desc, buf, USB_DT_ENDPOINT_SIZE)) { - goto fail1; - } + memcpy(&data->desc, buf, USB_DT_ENDPOINT_SIZE); if (data->desc.bLength != USB_DT_ENDPOINT_SIZE || data->desc.bDescriptorType != USB_DT_ENDPOINT) goto fail0; if (len != USB_DT_ENDPOINT_SIZE) { if (len != 2 * USB_DT_ENDPOINT_SIZE) goto fail0; - if (copy_from_user (&data->hs_desc, buf + USB_DT_ENDPOINT_SIZE, - USB_DT_ENDPOINT_SIZE)) { - goto fail1; - } + memcpy(&data->hs_desc, buf + USB_DT_ENDPOINT_SIZE, + USB_DT_ENDPOINT_SIZE); if (data->hs_desc.bLength != USB_DT_ENDPOINT_SIZE || data->hs_desc.bDescriptorType != USB_DT_ENDPOINT) { @@ -840,24 +777,20 @@ ep_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr) case USB_SPEED_LOW: case USB_SPEED_FULL: ep->desc = &data->desc; - value = usb_ep_enable(ep); - if (value == 0) - data->state = STATE_EP_ENABLED; break; case USB_SPEED_HIGH: /* fails if caller didn't provide that descriptor... */ ep->desc = &data->hs_desc; - value = usb_ep_enable(ep); - if (value == 0) - data->state = STATE_EP_ENABLED; break; default: DBG(data->dev, "unconnected, %s init abandoned\n", data->name); value = -EINVAL; + goto gone; } + value = usb_ep_enable(ep); if (value == 0) { - fd->f_op = &ep_io_operations; + data->state = STATE_EP_ENABLED; value = length; } gone: @@ -867,14 +800,10 @@ fail: data->desc.bDescriptorType = 0; data->hs_desc.bDescriptorType = 0; } - mutex_unlock(&data->lock); return value; fail0: value = -EINVAL; goto fail; -fail1: - value = -EFAULT; - goto fail; } static int @@ -902,15 +831,6 @@ ep_open (struct inode *inode, struct file *fd) return value; } -/* used before endpoint configuration */ -static const struct file_operations ep_config_operations = { - .llseek = no_llseek, - - .open = ep_open, - .write = ep_config, - .release = ep_release, -}; - /*----------------------------------------------------------------------*/ /* EP0 IMPLEMENTATION can be partly in userspace. 
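Since write_iter now performs a single copy_from_iter() up front, the reworked ep_config() above parses a kernel buffer with plain memcpy, checking lengths and descriptor fields before anything is committed, with no mid-parse copy_from_user() and hence no -EFAULT path. A compact model of that parse under simplified assumptions (field set and buffer layout are abbreviated, not the gadgetfs wire format):

    #include <stdio.h>
    #include <string.h>
    #include <stdint.h>

    #define DT_ENDPOINT             5       /* USB_DT_ENDPOINT */
    #define DT_ENDPOINT_SIZE        7       /* USB_DT_ENDPOINT_SIZE */

    struct ep_desc {
            uint8_t bLength;
            uint8_t bDescriptorType;
            uint8_t rest[DT_ENDPOINT_SIZE - 2];
    };

    /*
     * Parse "u32 tag, descriptor[, second descriptor]" from an already
     * copied buffer; every memcpy is bounded by the length check before it.
     */
    static int parse_config(const char *buf, size_t len)
    {
            struct ep_desc desc;
            uint32_t tag;

            if (len != 4 + DT_ENDPOINT_SIZE && len != 4 + 2 * DT_ENDPOINT_SIZE)
                    return -1;
            memcpy(&tag, buf, 4);
            if (tag != 1)
                    return -1;
            memcpy(&desc, buf + 4, sizeof(desc));
            if (desc.bLength != DT_ENDPOINT_SIZE ||
                desc.bDescriptorType != DT_ENDPOINT)
                    return -1;
            return 0;
    }

    int main(void)
    {
            char buf[4 + DT_ENDPOINT_SIZE] = { 0 };
            uint32_t tag = 1;

            memcpy(buf, &tag, 4);           /* host-endian, like the driver */
            buf[4] = DT_ENDPOINT_SIZE;      /* bLength */
            buf[5] = DT_ENDPOINT;           /* bDescriptorType */
            printf("parse: %s\n", parse_config(buf, sizeof(buf)) ? "bad" : "ok");
            return 0;
    }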
@@ -989,6 +909,10 @@ ep0_read (struct file *fd, char __user *buf, size_t len, loff_t *ptr) enum ep0_state state; spin_lock_irq (&dev->lock); + if (dev->state <= STATE_DEV_OPENED) { + retval = -EINVAL; + goto done; + } /* report fd mode change before acting on it */ if (dev->setup_abort) { @@ -1187,8 +1111,6 @@ ep0_write (struct file *fd, const char __user *buf, size_t len, loff_t *ptr) struct dev_data *dev = fd->private_data; ssize_t retval = -ESRCH; - spin_lock_irq (&dev->lock); - /* report fd mode change before acting on it */ if (dev->setup_abort) { dev->setup_abort = 0; @@ -1234,7 +1156,6 @@ ep0_write (struct file *fd, const char __user *buf, size_t len, loff_t *ptr) } else DBG (dev, "fail %s, state %d\n", __func__, dev->state); - spin_unlock_irq (&dev->lock); return retval; } @@ -1281,6 +1202,9 @@ ep0_poll (struct file *fd, poll_table *wait) struct dev_data *dev = fd->private_data; int mask = 0; + if (dev->state <= STATE_DEV_OPENED) + return DEFAULT_POLLMASK; + poll_wait(fd, &dev->wait, wait); spin_lock_irq (&dev->lock); @@ -1316,19 +1240,6 @@ static long dev_ioctl (struct file *fd, unsigned code, unsigned long value) return ret; } -/* used after device configuration */ -static const struct file_operations ep0_io_operations = { - .owner = THIS_MODULE, - .llseek = no_llseek, - - .read = ep0_read, - .write = ep0_write, - .fasync = ep0_fasync, - .poll = ep0_poll, - .unlocked_ioctl = dev_ioctl, - .release = dev_release, -}; - /*----------------------------------------------------------------------*/ /* The in-kernel gadget driver handles most ep0 issues, in particular @@ -1650,7 +1561,7 @@ static int activate_ep_files (struct dev_data *dev) goto enomem1; data->dentry = gadgetfs_create_file (dev->sb, data->name, - data, &ep_config_operations); + data, &ep_io_operations); if (!data->dentry) goto enomem2; list_add_tail (&data->epfiles, &dev->epfiles); @@ -1852,6 +1763,14 @@ dev_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr) u32 tag; char *kbuf; + spin_lock_irq(&dev->lock); + if (dev->state > STATE_DEV_OPENED) { + value = ep0_write(fd, buf, len, ptr); + spin_unlock_irq(&dev->lock); + return value; + } + spin_unlock_irq(&dev->lock); + if (len < (USB_DT_CONFIG_SIZE + USB_DT_DEVICE_SIZE + 4)) return -EINVAL; @@ -1925,7 +1844,6 @@ dev_config (struct file *fd, const char __user *buf, size_t len, loff_t *ptr) * on, they can work ... except in cleanup paths that * kick in after the ep0 descriptor is closed. 
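The hunk that continues below removes the last runtime f_op swap: instead of installing ep0_io_operations once the device is configured, a single ep0_operations table stays in place and dev_config() routes to ep0_write() based on dev->state, which avoids racing callers that still hold the old operations pointer. A toy version of that state-based dispatch (states and handlers are illustrative):

    #include <stdio.h>

    enum dev_state { STATE_DEV_DISABLED, STATE_DEV_OPENED, STATE_DEV_RUNNING };

    struct dev_data {
            enum dev_state state;
    };

    static long ep0_write(struct dev_data *dev, const char *buf, size_t len)
    {
            printf("ep0 I/O: %zu bytes (\"%.*s\")\n", len, (int)len, buf);
            return (long)len;
    }

    /*
     * Single write entry point for the device node: the first write carries
     * the configuration blob, later writes are ep0 traffic. One fops table,
     * with the state checked instead of the f_op pointer being swapped.
     */
    static long dev_config(struct dev_data *dev, const char *buf, size_t len)
    {
            if (dev->state > STATE_DEV_OPENED)      /* already configured */
                    return ep0_write(dev, buf, len);

            printf("parsing %zu-byte config\n", len);
            dev->state = STATE_DEV_RUNNING;
            return (long)len;
    }

    int main(void)
    {
            struct dev_data dev = { .state = STATE_DEV_OPENED };

            dev_config(&dev, "cfg", 3);     /* consumed as configuration */
            dev_config(&dev, "req", 3);     /* routed to ep0_write() */
            return 0;
    }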
*/ - fd->f_op = &ep0_io_operations; value = len; } return value; @@ -1956,12 +1874,14 @@ dev_open (struct inode *inode, struct file *fd) return value; } -static const struct file_operations dev_init_operations = { +static const struct file_operations ep0_operations = { .llseek = no_llseek, .open = dev_open, + .read = ep0_read, .write = dev_config, .fasync = ep0_fasync, + .poll = ep0_poll, .unlocked_ioctl = dev_ioctl, .release = dev_release, }; @@ -2077,7 +1997,7 @@ gadgetfs_fill_super (struct super_block *sb, void *opts, int silent) goto Enomem; dev->sb = sb; - dev->dentry = gadgetfs_create_file(sb, CHIP, dev, &dev_init_operations); + dev->dentry = gadgetfs_create_file(sb, CHIP, dev, &ep0_operations); if (!dev->dentry) { put_dev(dev); goto Enomem; diff --git a/drivers/vfio/pci/vfio_pci_intrs.c b/drivers/vfio/pci/vfio_pci_intrs.c index f88bfdf5b6a0..2027a27546ef 100644 --- a/drivers/vfio/pci/vfio_pci_intrs.c +++ b/drivers/vfio/pci/vfio_pci_intrs.c @@ -868,12 +868,14 @@ int vfio_pci_set_irqs_ioctl(struct vfio_pci_device *vdev, uint32_t flags, func = vfio_pci_set_err_trigger; break; } + break; case VFIO_PCI_REQ_IRQ_INDEX: switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) { case VFIO_IRQ_SET_ACTION_TRIGGER: func = vfio_pci_set_req_trigger; break; } + break; } if (!func) diff --git a/drivers/video/fbdev/amba-clcd.c b/drivers/video/fbdev/amba-clcd.c index 32c0b6b28097..9362424c2340 100644 --- a/drivers/video/fbdev/amba-clcd.c +++ b/drivers/video/fbdev/amba-clcd.c @@ -599,6 +599,9 @@ static int clcdfb_of_get_mode(struct device *dev, struct device_node *endpoint, len = clcdfb_snprintf_mode(NULL, 0, mode); name = devm_kzalloc(dev, len + 1, GFP_KERNEL); + if (!name) + return -ENOMEM; + clcdfb_snprintf_mode(name, len + 1, mode); mode->name = name; diff --git a/drivers/video/fbdev/core/fbmon.c b/drivers/video/fbdev/core/fbmon.c index 95338593ebf4..868facdec638 100644 --- a/drivers/video/fbdev/core/fbmon.c +++ b/drivers/video/fbdev/core/fbmon.c @@ -624,9 +624,6 @@ static struct fb_videomode *fb_create_modedb(unsigned char *edid, int *dbsize, int num = 0, i, first = 1; int ver, rev; - ver = edid[EDID_STRUCT_VERSION]; - rev = edid[EDID_STRUCT_REVISION]; - mode = kzalloc(50 * sizeof(struct fb_videomode), GFP_KERNEL); if (mode == NULL) return NULL; @@ -637,6 +634,9 @@ static struct fb_videomode *fb_create_modedb(unsigned char *edid, int *dbsize, return NULL; } + ver = edid[EDID_STRUCT_VERSION]; + rev = edid[EDID_STRUCT_REVISION]; + *dbsize = 0; DPRINTK(" Detailed Timings\n"); diff --git a/drivers/video/fbdev/omap2/dss/display-sysfs.c b/drivers/video/fbdev/omap2/dss/display-sysfs.c index 5a2095a98ed8..12186557a9d4 100644 --- a/drivers/video/fbdev/omap2/dss/display-sysfs.c +++ b/drivers/video/fbdev/omap2/dss/display-sysfs.c @@ -28,44 +28,22 @@ #include <video/omapdss.h> #include "dss.h" -static struct omap_dss_device *to_dss_device_sysfs(struct device *dev) +static ssize_t display_name_show(struct omap_dss_device *dssdev, char *buf) { - struct omap_dss_device *dssdev = NULL; - - for_each_dss_dev(dssdev) { - if (dssdev->dev == dev) { - omap_dss_put_device(dssdev); - return dssdev; - } - } - - return NULL; -} - -static ssize_t display_name_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct omap_dss_device *dssdev = to_dss_device_sysfs(dev); - return snprintf(buf, PAGE_SIZE, "%s\n", dssdev->name ? 
dssdev->name : ""); } -static ssize_t display_enabled_show(struct device *dev, - struct device_attribute *attr, char *buf) +static ssize_t display_enabled_show(struct omap_dss_device *dssdev, char *buf) { - struct omap_dss_device *dssdev = to_dss_device_sysfs(dev); - return snprintf(buf, PAGE_SIZE, "%d\n", omapdss_device_is_enabled(dssdev)); } -static ssize_t display_enabled_store(struct device *dev, - struct device_attribute *attr, +static ssize_t display_enabled_store(struct omap_dss_device *dssdev, const char *buf, size_t size) { - struct omap_dss_device *dssdev = to_dss_device_sysfs(dev); int r; bool enable; @@ -90,19 +68,16 @@ static ssize_t display_enabled_store(struct device *dev, return size; } -static ssize_t display_tear_show(struct device *dev, - struct device_attribute *attr, char *buf) +static ssize_t display_tear_show(struct omap_dss_device *dssdev, char *buf) { - struct omap_dss_device *dssdev = to_dss_device_sysfs(dev); return snprintf(buf, PAGE_SIZE, "%d\n", dssdev->driver->get_te ? dssdev->driver->get_te(dssdev) : 0); } -static ssize_t display_tear_store(struct device *dev, - struct device_attribute *attr, const char *buf, size_t size) +static ssize_t display_tear_store(struct omap_dss_device *dssdev, + const char *buf, size_t size) { - struct omap_dss_device *dssdev = to_dss_device_sysfs(dev); int r; bool te; @@ -120,10 +95,8 @@ static ssize_t display_tear_store(struct device *dev, return size; } -static ssize_t display_timings_show(struct device *dev, - struct device_attribute *attr, char *buf) +static ssize_t display_timings_show(struct omap_dss_device *dssdev, char *buf) { - struct omap_dss_device *dssdev = to_dss_device_sysfs(dev); struct omap_video_timings t; if (!dssdev->driver->get_timings) @@ -137,10 +110,9 @@ static ssize_t display_timings_show(struct device *dev, t.y_res, t.vfp, t.vbp, t.vsw); } -static ssize_t display_timings_store(struct device *dev, - struct device_attribute *attr, const char *buf, size_t size) +static ssize_t display_timings_store(struct omap_dss_device *dssdev, + const char *buf, size_t size) { - struct omap_dss_device *dssdev = to_dss_device_sysfs(dev); struct omap_video_timings t = dssdev->panel.timings; int r, found; @@ -176,10 +148,8 @@ static ssize_t display_timings_store(struct device *dev, return size; } -static ssize_t display_rotate_show(struct device *dev, - struct device_attribute *attr, char *buf) +static ssize_t display_rotate_show(struct omap_dss_device *dssdev, char *buf) { - struct omap_dss_device *dssdev = to_dss_device_sysfs(dev); int rotate; if (!dssdev->driver->get_rotate) return -ENOENT; @@ -187,10 +157,9 @@ static ssize_t display_rotate_show(struct device *dev, return snprintf(buf, PAGE_SIZE, "%u\n", rotate); } -static ssize_t display_rotate_store(struct device *dev, - struct device_attribute *attr, const char *buf, size_t size) +static ssize_t display_rotate_store(struct omap_dss_device *dssdev, + const char *buf, size_t size) { - struct omap_dss_device *dssdev = to_dss_device_sysfs(dev); int rot, r; if (!dssdev->driver->set_rotate || !dssdev->driver->get_rotate) @@ -207,10 +176,8 @@ static ssize_t display_rotate_store(struct device *dev, return size; } -static ssize_t display_mirror_show(struct device *dev, - struct device_attribute *attr, char *buf) +static ssize_t display_mirror_show(struct omap_dss_device *dssdev, char *buf) { - struct omap_dss_device *dssdev = to_dss_device_sysfs(dev); int mirror; if (!dssdev->driver->get_mirror) return -ENOENT; @@ -218,10 +185,9 @@ static ssize_t display_mirror_show(struct 
device *dev, return snprintf(buf, PAGE_SIZE, "%u\n", mirror); } -static ssize_t display_mirror_store(struct device *dev, - struct device_attribute *attr, const char *buf, size_t size) +static ssize_t display_mirror_store(struct omap_dss_device *dssdev, + const char *buf, size_t size) { - struct omap_dss_device *dssdev = to_dss_device_sysfs(dev); int r; bool mirror; @@ -239,10 +205,8 @@ static ssize_t display_mirror_store(struct device *dev, return size; } -static ssize_t display_wss_show(struct device *dev, - struct device_attribute *attr, char *buf) +static ssize_t display_wss_show(struct omap_dss_device *dssdev, char *buf) { - struct omap_dss_device *dssdev = to_dss_device_sysfs(dev); unsigned int wss; if (!dssdev->driver->get_wss) @@ -253,10 +217,9 @@ static ssize_t display_wss_show(struct device *dev, return snprintf(buf, PAGE_SIZE, "0x%05x\n", wss); } -static ssize_t display_wss_store(struct device *dev, - struct device_attribute *attr, const char *buf, size_t size) +static ssize_t display_wss_store(struct omap_dss_device *dssdev, + const char *buf, size_t size) { - struct omap_dss_device *dssdev = to_dss_device_sysfs(dev); u32 wss; int r; @@ -277,50 +240,94 @@ static ssize_t display_wss_store(struct device *dev, return size; } -static DEVICE_ATTR(display_name, S_IRUGO, display_name_show, NULL); -static DEVICE_ATTR(enabled, S_IRUGO|S_IWUSR, +struct display_attribute { + struct attribute attr; + ssize_t (*show)(struct omap_dss_device *, char *); + ssize_t (*store)(struct omap_dss_device *, const char *, size_t); +}; + +#define DISPLAY_ATTR(_name, _mode, _show, _store) \ + struct display_attribute display_attr_##_name = \ + __ATTR(_name, _mode, _show, _store) + +static DISPLAY_ATTR(name, S_IRUGO, display_name_show, NULL); +static DISPLAY_ATTR(display_name, S_IRUGO, display_name_show, NULL); +static DISPLAY_ATTR(enabled, S_IRUGO|S_IWUSR, display_enabled_show, display_enabled_store); -static DEVICE_ATTR(tear_elim, S_IRUGO|S_IWUSR, +static DISPLAY_ATTR(tear_elim, S_IRUGO|S_IWUSR, display_tear_show, display_tear_store); -static DEVICE_ATTR(timings, S_IRUGO|S_IWUSR, +static DISPLAY_ATTR(timings, S_IRUGO|S_IWUSR, display_timings_show, display_timings_store); -static DEVICE_ATTR(rotate, S_IRUGO|S_IWUSR, +static DISPLAY_ATTR(rotate, S_IRUGO|S_IWUSR, display_rotate_show, display_rotate_store); -static DEVICE_ATTR(mirror, S_IRUGO|S_IWUSR, +static DISPLAY_ATTR(mirror, S_IRUGO|S_IWUSR, display_mirror_show, display_mirror_store); -static DEVICE_ATTR(wss, S_IRUGO|S_IWUSR, +static DISPLAY_ATTR(wss, S_IRUGO|S_IWUSR, display_wss_show, display_wss_store); -static const struct attribute *display_sysfs_attrs[] = { - &dev_attr_display_name.attr, - &dev_attr_enabled.attr, - &dev_attr_tear_elim.attr, - &dev_attr_timings.attr, - &dev_attr_rotate.attr, - &dev_attr_mirror.attr, - &dev_attr_wss.attr, +static struct attribute *display_sysfs_attrs[] = { + &display_attr_name.attr, + &display_attr_display_name.attr, + &display_attr_enabled.attr, + &display_attr_tear_elim.attr, + &display_attr_timings.attr, + &display_attr_rotate.attr, + &display_attr_mirror.attr, + &display_attr_wss.attr, NULL }; +static ssize_t display_attr_show(struct kobject *kobj, struct attribute *attr, + char *buf) +{ + struct omap_dss_device *dssdev; + struct display_attribute *display_attr; + + dssdev = container_of(kobj, struct omap_dss_device, kobj); + display_attr = container_of(attr, struct display_attribute, attr); + + if (!display_attr->show) + return -ENOENT; + + return display_attr->show(dssdev, buf); +} + +static ssize_t 
display_attr_store(struct kobject *kobj, struct attribute *attr, + const char *buf, size_t size) +{ + struct omap_dss_device *dssdev; + struct display_attribute *display_attr; + + dssdev = container_of(kobj, struct omap_dss_device, kobj); + display_attr = container_of(attr, struct display_attribute, attr); + + if (!display_attr->store) + return -ENOENT; + + return display_attr->store(dssdev, buf, size); +} + +static const struct sysfs_ops display_sysfs_ops = { + .show = display_attr_show, + .store = display_attr_store, +}; + +static struct kobj_type display_ktype = { + .sysfs_ops = &display_sysfs_ops, + .default_attrs = display_sysfs_attrs, +}; + int display_init_sysfs(struct platform_device *pdev) { struct omap_dss_device *dssdev = NULL; int r; for_each_dss_dev(dssdev) { - struct kobject *kobj = &dssdev->dev->kobj; - - r = sysfs_create_files(kobj, display_sysfs_attrs); + r = kobject_init_and_add(&dssdev->kobj, &display_ktype, + &pdev->dev.kobj, dssdev->alias); if (r) { DSSERR("failed to create sysfs files\n"); - goto err; - } - - r = sysfs_create_link(&pdev->dev.kobj, kobj, dssdev->alias); - if (r) { - sysfs_remove_files(kobj, display_sysfs_attrs); - - DSSERR("failed to create sysfs display link\n"); + omap_dss_put_device(dssdev); goto err; } } @@ -338,8 +345,12 @@ void display_uninit_sysfs(struct platform_device *pdev) struct omap_dss_device *dssdev = NULL; for_each_dss_dev(dssdev) { - sysfs_remove_link(&pdev->dev.kobj, dssdev->alias); - sysfs_remove_files(&dssdev->dev->kobj, - display_sysfs_attrs); + if (kobject_name(&dssdev->kobj) == NULL) + continue; + + kobject_del(&dssdev->kobj); + kobject_put(&dssdev->kobj); + + memset(&dssdev->kobj, 0, sizeof(dssdev->kobj)); } } diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c index b4bca2d4a7e5..70fba973a107 100644 --- a/drivers/xen/events/events_base.c +++ b/drivers/xen/events/events_base.c @@ -526,20 +526,26 @@ static unsigned int __startup_pirq(unsigned int irq) pirq_query_unmask(irq); rc = set_evtchn_to_irq(evtchn, irq); - if (rc != 0) { - pr_err("irq%d: Failed to set port to irq mapping (%d)\n", - irq, rc); - xen_evtchn_close(evtchn); - return 0; - } + if (rc) + goto err; + bind_evtchn_to_cpu(evtchn, 0); info->evtchn = evtchn; + rc = xen_evtchn_port_setup(info); + if (rc) + goto err; + out: unmask_evtchn(evtchn); eoi_pirq(irq_get_irq_data(irq)); return 0; + +err: + pr_err("irq%d: Failed to set port to irq mapping (%d)\n", irq, rc); + xen_evtchn_close(evtchn); + return 0; } static unsigned int startup_pirq(struct irq_data *data) diff --git a/drivers/xen/xen-pciback/conf_space.c b/drivers/xen/xen-pciback/conf_space.c index 46ae0f9f02ad..75fe3d466515 100644 --- a/drivers/xen/xen-pciback/conf_space.c +++ b/drivers/xen/xen-pciback/conf_space.c @@ -16,7 +16,7 @@ #include "conf_space.h" #include "conf_space_quirks.h" -static bool permissive; +bool permissive; module_param(permissive, bool, 0644); /* This is where xen_pcibk_read_config_byte, xen_pcibk_read_config_word, diff --git a/drivers/xen/xen-pciback/conf_space.h b/drivers/xen/xen-pciback/conf_space.h index e56c934ad137..2e1d73d1d5d0 100644 --- a/drivers/xen/xen-pciback/conf_space.h +++ b/drivers/xen/xen-pciback/conf_space.h @@ -64,6 +64,8 @@ struct config_field_entry { void *data; }; +extern bool permissive; + #define OFFSET(cfg_entry) ((cfg_entry)->base_offset+(cfg_entry)->field->offset) /* Add fields to a device - the add_fields macro expects to get a pointer to diff --git a/drivers/xen/xen-pciback/conf_space_header.c 
b/drivers/xen/xen-pciback/conf_space_header.c index c5ee82587e8c..2d7369391472 100644 --- a/drivers/xen/xen-pciback/conf_space_header.c +++ b/drivers/xen/xen-pciback/conf_space_header.c @@ -11,6 +11,10 @@ #include "pciback.h" #include "conf_space.h" +struct pci_cmd_info { + u16 val; +}; + struct pci_bar_info { u32 val; u32 len_val; @@ -20,22 +24,36 @@ struct pci_bar_info { #define is_enable_cmd(value) ((value)&(PCI_COMMAND_MEMORY|PCI_COMMAND_IO)) #define is_master_cmd(value) ((value)&PCI_COMMAND_MASTER) -static int command_read(struct pci_dev *dev, int offset, u16 *value, void *data) +/* Bits guests are allowed to control in permissive mode. */ +#define PCI_COMMAND_GUEST (PCI_COMMAND_MASTER|PCI_COMMAND_SPECIAL| \ + PCI_COMMAND_INVALIDATE|PCI_COMMAND_VGA_PALETTE| \ + PCI_COMMAND_WAIT|PCI_COMMAND_FAST_BACK) + +static void *command_init(struct pci_dev *dev, int offset) { - int i; - int ret; - - ret = xen_pcibk_read_config_word(dev, offset, value, data); - if (!pci_is_enabled(dev)) - return ret; - - for (i = 0; i < PCI_ROM_RESOURCE; i++) { - if (dev->resource[i].flags & IORESOURCE_IO) - *value |= PCI_COMMAND_IO; - if (dev->resource[i].flags & IORESOURCE_MEM) - *value |= PCI_COMMAND_MEMORY; + struct pci_cmd_info *cmd = kmalloc(sizeof(*cmd), GFP_KERNEL); + int err; + + if (!cmd) + return ERR_PTR(-ENOMEM); + + err = pci_read_config_word(dev, PCI_COMMAND, &cmd->val); + if (err) { + kfree(cmd); + return ERR_PTR(err); } + return cmd; +} + +static int command_read(struct pci_dev *dev, int offset, u16 *value, void *data) +{ + int ret = pci_read_config_word(dev, offset, value); + const struct pci_cmd_info *cmd = data; + + *value &= PCI_COMMAND_GUEST; + *value |= cmd->val & ~PCI_COMMAND_GUEST; + return ret; } @@ -43,6 +61,8 @@ static int command_write(struct pci_dev *dev, int offset, u16 value, void *data) { struct xen_pcibk_dev_data *dev_data; int err; + u16 val; + struct pci_cmd_info *cmd = data; dev_data = pci_get_drvdata(dev); if (!pci_is_enabled(dev) && is_enable_cmd(value)) { @@ -83,6 +103,19 @@ static int command_write(struct pci_dev *dev, int offset, u16 value, void *data) } } + cmd->val = value; + + if (!permissive && (!dev_data || !dev_data->permissive)) + return 0; + + /* Only allow the guest to control certain bits. */ + err = pci_read_config_word(dev, offset, &val); + if (err || val == value) + return err; + + value &= PCI_COMMAND_GUEST; + value |= val & ~PCI_COMMAND_GUEST; + return pci_write_config_word(dev, offset, value); } @@ -282,6 +315,8 @@ static const struct config_field header_common[] = { { .offset = PCI_COMMAND, .size = 2, + .init = command_init, + .release = bar_release, .u.w.read = command_read, .u.w.write = command_write, },
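The command-register virtualization above boils down to a two-mask blend: command_init() snapshots the device's value, reads return guest-owned bits from hardware merged with emulated bits from the snapshot, and writes let through only the PCI_COMMAND_GUEST bits. A self-contained demonstration (real PCI command bit values, but an abbreviated guest mask):

    #include <stdio.h>
    #include <stdint.h>

    #define PCI_COMMAND_IO          0x001
    #define PCI_COMMAND_MEMORY      0x002
    #define PCI_COMMAND_MASTER      0x004
    #define PCI_COMMAND_INVALIDATE  0x010

    /* Bits the guest may really toggle (subset of the driver's full list). */
    #define PCI_COMMAND_GUEST       (PCI_COMMAND_MASTER | PCI_COMMAND_INVALIDATE)

    /* Guest-owned bits come from 'guest', everything else from 'host'. */
    static uint16_t blend(uint16_t guest, uint16_t host)
    {
            return (guest & PCI_COMMAND_GUEST) | (host & ~PCI_COMMAND_GUEST);
    }

    int main(void)
    {
            uint16_t hw = PCI_COMMAND_IO | PCI_COMMAND_MEMORY; /* live device */
            uint16_t snapshot = 0;          /* cmd->val captured at init */

            /* Write path: the guest asks for everything, only its bits land. */
            hw = blend(0xffff, hw);
            printf("hardware now: 0x%04x\n", hw);

            /*
             * Read path: guest-owned bits from hardware, the rest from the
             * emulated snapshot, so host-side decode settings stay hidden.
             */
            printf("guest reads:  0x%04x\n", blend(hw, snapshot));
            return 0;
    }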