| author | Greg Kroah-Hartman <gregkh@linuxfoundation.org> | 2022-11-21 10:21:53 +0100 |
|---|---|---|
| committer | Greg Kroah-Hartman <gregkh@linuxfoundation.org> | 2022-11-21 10:21:53 +0100 |
| commit | 05df6ab8eba625a1d97eb67ee06d786b8e460685 (patch) | |
| tree | 7fed59b7f49fd8d816475ca6b20c95c7f837ca6f /drivers | |
| parent | 1d926e259d8f8195fdfaeea7951149001894b473 (diff) | |
| parent | eb7081409f94a9a8608593d0fb63a1aa3d6f95d8 (diff) | |
Merge 6.1-rc6 into driver-core-next
We need the kernfs changes in here as well.
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Diffstat (limited to 'drivers')
878 files changed, 10215 insertions, 7580 deletions
diff --git a/drivers/accessibility/speakup/main.c b/drivers/accessibility/speakup/main.c
index f52265293482..73db0cb44fc7 100644
--- a/drivers/accessibility/speakup/main.c
+++ b/drivers/accessibility/speakup/main.c
@@ -1778,7 +1778,7 @@ static void speakup_con_update(struct vc_data *vc)
 {
     unsigned long flags;

-    if (!speakup_console[vc->vc_num] || spk_parked)
+    if (!speakup_console[vc->vc_num] || spk_parked || !synth)
         return;
     if (!spin_trylock_irqsave(&speakup_info.spinlock, flags))
         /* Speakup output, discard */
diff --git a/drivers/accessibility/speakup/utils.h b/drivers/accessibility/speakup/utils.h
index 4bf2ee8ac246..4ce9a12f7664 100644
--- a/drivers/accessibility/speakup/utils.h
+++ b/drivers/accessibility/speakup/utils.h
@@ -54,7 +54,7 @@ static inline int oops(const char *msg, const char *info)

 static inline struct st_key *hash_name(char *name)
 {
-    u_char *pn = (u_char *)name;
+    unsigned char *pn = (unsigned char *)name;
     int hash = 0;

     while (*pn) {
diff --git a/drivers/acpi/acpi_extlog.c b/drivers/acpi/acpi_extlog.c
index 72f1fb77abcd..e648158368a7 100644
--- a/drivers/acpi/acpi_extlog.c
+++ b/drivers/acpi/acpi_extlog.c
@@ -12,6 +12,7 @@
 #include <linux/ratelimit.h>
 #include <linux/edac.h>
 #include <linux/ras.h>
+#include <acpi/ghes.h>
 #include <asm/cpu.h>
 #include <asm/mce.h>

@@ -138,8 +139,8 @@ static int extlog_print(struct notifier_block *nb, unsigned long val,
     int cpu = mce->extcpu;
     struct acpi_hest_generic_status *estatus, *tmp;
     struct acpi_hest_generic_data *gdata;
-    const guid_t *fru_id = &guid_null;
-    char *fru_text = "";
+    const guid_t *fru_id;
+    char *fru_text;
     guid_t *sec_type;
     static u32 err_seq;

@@ -160,17 +161,23 @@ static int extlog_print(struct notifier_block *nb, unsigned long val,
     /* log event via trace */
     err_seq++;
-    gdata = (struct acpi_hest_generic_data *)(tmp + 1);
-    if (gdata->validation_bits & CPER_SEC_VALID_FRU_ID)
-        fru_id = (guid_t *)gdata->fru_id;
-    if (gdata->validation_bits & CPER_SEC_VALID_FRU_TEXT)
-        fru_text = gdata->fru_text;
-    sec_type = (guid_t *)gdata->section_type;
-    if (guid_equal(sec_type, &CPER_SEC_PLATFORM_MEM)) {
-        struct cper_sec_mem_err *mem = (void *)(gdata + 1);
-        if (gdata->error_data_length >= sizeof(*mem))
-            trace_extlog_mem_event(mem, err_seq, fru_id, fru_text,
-                                   (u8)gdata->error_severity);
+    apei_estatus_for_each_section(tmp, gdata) {
+        if (gdata->validation_bits & CPER_SEC_VALID_FRU_ID)
+            fru_id = (guid_t *)gdata->fru_id;
+        else
+            fru_id = &guid_null;
+        if (gdata->validation_bits & CPER_SEC_VALID_FRU_TEXT)
+            fru_text = gdata->fru_text;
+        else
+            fru_text = "";
+        sec_type = (guid_t *)gdata->section_type;
+        if (guid_equal(sec_type, &CPER_SEC_PLATFORM_MEM)) {
+            struct cper_sec_mem_err *mem = (void *)(gdata + 1);
+
+            if (gdata->error_data_length >= sizeof(*mem))
+                trace_extlog_mem_event(mem, err_seq, fru_id, fru_text,
+                                       (u8)gdata->error_severity);
+        }
     }
 out:
diff --git a/drivers/acpi/acpi_pcc.c b/drivers/acpi/acpi_pcc.c
index ee4ce5ba1fb2..3e252be047b8 100644
--- a/drivers/acpi/acpi_pcc.c
+++ b/drivers/acpi/acpi_pcc.c
@@ -27,7 +27,7 @@
  * Arbitrary retries in case the remote processor is slow to respond
  * to PCC commands
  */
-#define PCC_CMD_WAIT_RETRIES_NUM 500
+#define PCC_CMD_WAIT_RETRIES_NUM 500ULL

 struct pcc_data {
     struct pcc_mbox_chan *pcc_chan;
diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
index 80ad530583c9..9952f3a792ba 100644
--- a/drivers/acpi/apei/ghes.c
+++ b/drivers/acpi/apei/ghes.c
@@ -163,7 +163,7 @@ static void ghes_unmap(void __iomem *vaddr, enum fixed_addresses fixmap_idx)
     clear_fixmap(fixmap_idx);
 }

-int ghes_estatus_pool_init(int num_ghes)
+int ghes_estatus_pool_init(unsigned int num_ghes)
 {
     unsigned long addr, len;
     int rc;
diff --git a/drivers/acpi/arm64/iort.c b/drivers/acpi/arm64/iort.c
index ca2aed86b540..8059baf4ef27 100644
--- a/drivers/acpi/arm64/iort.c
+++ b/drivers/acpi/arm64/iort.c
@@ -1142,7 +1142,8 @@ static void iort_iommu_msi_get_resv_regions(struct device *dev,
         struct iommu_resv_region *region;

         region = iommu_alloc_resv_region(base + SZ_64K, SZ_64K,
-                                         prot, IOMMU_RESV_MSI);
+                                         prot, IOMMU_RESV_MSI,
+                                         GFP_KERNEL);
         if (region)
             list_add_tail(&region->list, head);
     }
diff --git a/drivers/acpi/numa/srat.c b/drivers/acpi/numa/srat.c
index 3b818ab186be..1f4fc5f8a819 100644
--- a/drivers/acpi/numa/srat.c
+++ b/drivers/acpi/numa/srat.c
@@ -327,6 +327,7 @@ static int __init acpi_parse_cfmws(union acpi_subtable_headers *header,
         pr_warn("ACPI NUMA: Failed to add memblk for CFMWS node %d [mem %#llx-%#llx]\n",
             node, start, end);
     }
+    node_set(node, numa_nodes_parsed);

     /* Set the next available fake_pxm value */
     (*fake_pxm)++;
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index c8385ef54c37..4e3db20e9cbb 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -323,6 +323,7 @@ struct pci_dev *acpi_get_pci_dev(acpi_handle handle)

     list_for_each_entry(pn, &adev->physical_node_list, node) {
         if (dev_is_pci(pn->dev)) {
+            get_device(pn->dev);
             pci_dev = to_pci_dev(pn->dev);
             break;
         }
diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
index 6f9489edfb4e..f27914aedbd5 100644
--- a/drivers/acpi/resource.c
+++ b/drivers/acpi/resource.c
@@ -425,6 +425,24 @@ static const struct dmi_system_id asus_laptop[] = {
             DMI_MATCH(DMI_BOARD_NAME, "S5402ZA"),
         },
     },
+    {
+        .ident = "Asus Vivobook S5602ZA",
+        .matches = {
+            DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+            DMI_MATCH(DMI_BOARD_NAME, "S5602ZA"),
+        },
+    },
+    { }
+};
+
+static const struct dmi_system_id lenovo_82ra[] = {
+    {
+        .ident = "LENOVO IdeaPad Flex 5 16ALC7",
+        .matches = {
+            DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+            DMI_MATCH(DMI_PRODUCT_NAME, "82RA"),
+        },
+    },
     { }
 };

@@ -434,11 +452,14 @@ struct irq_override_cmp {
     unsigned char triggering;
     unsigned char polarity;
     unsigned char shareable;
+    bool override;
 };

-static const struct irq_override_cmp skip_override_table[] = {
-    { medion_laptop, 1, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW, 0 },
-    { asus_laptop, 1, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW, 0 },
+static const struct irq_override_cmp override_table[] = {
+    { medion_laptop, 1, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW, 0, false },
+    { asus_laptop, 1, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW, 0, false },
+    { lenovo_82ra, 6, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW, 0, true },
+    { lenovo_82ra, 10, ACPI_LEVEL_SENSITIVE, ACPI_ACTIVE_LOW, 0, true },
 };

 static bool acpi_dev_irq_override(u32 gsi, u8 triggering, u8 polarity,
@@ -446,6 +467,17 @@ static bool acpi_dev_irq_override(u32 gsi, u8 triggering, u8 polarity,
 {
     int i;

+    for (i = 0; i < ARRAY_SIZE(override_table); i++) {
+        const struct irq_override_cmp *entry = &override_table[i];
+
+        if (dmi_check_system(entry->system) &&
+            entry->irq == gsi &&
+            entry->triggering == triggering &&
+            entry->polarity == polarity &&
+            entry->shareable == shareable)
+            return entry->override;
+    }
+
 #ifdef CONFIG_X86
     /*
      * IRQ override isn't needed on modern AMD Zen systems and
@@ -456,17 +488,6 @@ static bool acpi_dev_irq_override(u32 gsi, u8 triggering, u8 polarity,
         return false;
 #endif

-    for (i = 0; i < ARRAY_SIZE(skip_override_table); i++) {
-        const struct irq_override_cmp *entry = &skip_override_table[i];
-
-        if (dmi_check_system(entry->system) &&
-            entry->irq == gsi &&
-            entry->triggering == triggering &&
-            entry->polarity == polarity &&
-            entry->shareable == shareable)
-            return false;
-    }
-
     return true;
 }

@@ -498,8 +519,11 @@ static void acpi_dev_get_irqresource(struct resource *res, u32 gsi,
         u8 pol = p ? ACPI_ACTIVE_LOW : ACPI_ACTIVE_HIGH;

         if (triggering != trig || polarity != pol) {
-            pr_warn("ACPI: IRQ %d override to %s, %s\n", gsi,
-                t ? "level" : "edge", p ? "low" : "high");
+            pr_warn("ACPI: IRQ %d override to %s%s, %s%s\n", gsi,
+                t ? "level" : "edge",
+                trig == triggering ? "" : "(!)",
+                p ? "low" : "high",
+                pol == polarity ? "" : "(!)");
             triggering = trig;
             polarity = pol;
         }
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 558664d169fc..b47e93a24a9a 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -789,6 +789,7 @@ static bool acpi_info_matches_ids(struct acpi_device_info *info,
 static const char * const acpi_ignore_dep_ids[] = {
     "PNP0D80", /* Windows-compatible System Power Management Controller */
     "INT33BD", /* Intel Baytrail Mailbox Device */
+    "LATT2021", /* Lattice FW Update Client Driver */
     NULL
 };

@@ -1509,9 +1510,12 @@ int acpi_dma_get_range(struct device *dev, const struct bus_dma_region **map)
             goto out;
         }

+        *map = r;
+
         list_for_each_entry(rentry, &list, node) {
             if (rentry->res->start >= rentry->res->end) {
-                kfree(r);
+                kfree(*map);
+                *map = NULL;
                 ret = -EINVAL;
                 dev_dbg(dma_dev, "Invalid DMA regions configuration\n");
                 goto out;
@@ -1523,8 +1527,6 @@ int acpi_dma_get_range(struct device *dev, const struct bus_dma_region **map)
             r->offset = rentry->offset;
             r++;
         }
-
-        *map = r;
     }
 out:
     acpi_dev_free_resource_list(&list);
diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
index 0d9064a9804c..b2a616287638 100644
--- a/drivers/acpi/video_detect.c
+++ b/drivers/acpi/video_detect.c
@@ -646,6 +646,20 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
     },

     /*
+     * Models which have nvidia-ec-wmi support, but should not use it.
+     * Note this indicates a likely firmware bug on these models and should
+     * be revisited if/when Linux gets support for dynamic mux mode.
+     */
+    {
+        .callback = video_detect_force_native,
+        /* Dell G15 5515 */
+        .matches = {
+            DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+            DMI_MATCH(DMI_PRODUCT_NAME, "Dell G15 5515"),
+        },
+    },
+
+    /*
      * Desktops which falsely report a backlight and which our heuristics
      * for this do not catch.
      */
@@ -668,6 +682,11 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
     { },
 };

+static bool google_cros_ec_present(void)
+{
+    return acpi_dev_found("GOOG0004") || acpi_dev_found("GOOG000C");
+}
+
 /*
  * Determine which type of backlight interface to use on this system,
  * First check cmdline, then dmi quirks, then do autodetect.
@@ -713,6 +732,10 @@ static enum acpi_backlight_type __acpi_video_get_backlight_type(bool native)
     if (apple_gmux_present())
         return acpi_backlight_apple_gmux;

+    /* Chromebooks should always prefer native backlight control. */
+    if (google_cros_ec_present() && native_available)
+        return acpi_backlight_native;
+
     /* On systems with ACPI video use either native or ACPI video. */
     if (video_caps & ACPI_VIDEO_BACKLIGHT) {
         /*
@@ -742,6 +765,18 @@ EXPORT_SYMBOL(acpi_video_get_backlight_type);

 bool acpi_video_backlight_use_native(void)
 {
-    return __acpi_video_get_backlight_type(true) == acpi_backlight_native;
+    /*
+     * Call __acpi_video_get_backlight_type() to let it know that
+     * a native backlight is available.
+     */
+    __acpi_video_get_backlight_type(true);
+
+    /*
+     * For now just always return true. There is a whole bunch of laptop
+     * models where (video_caps & ACPI_VIDEO_BACKLIGHT) is false causing
+     * __acpi_video_get_backlight_type() to return vendor, while these
+     * models only have a native backlight control.
+     */
+    return true;
 }
 EXPORT_SYMBOL(acpi_video_backlight_use_native);
diff --git a/drivers/acpi/x86/utils.c b/drivers/acpi/x86/utils.c
index f8a2cbdc0ce2..d7d3f1669d4c 100644
--- a/drivers/acpi/x86/utils.c
+++ b/drivers/acpi/x86/utils.c
@@ -219,6 +219,12 @@ static const struct dmi_system_id force_storage_d3_dmi[] = {
             DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 14 7425 2-in-1"),
         }
     },
+    {
+        .matches = {
+            DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+            DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 16 5625"),
+        }
+    },
     {}
 };

diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index 1c39cfce32fa..4ad42b0f75cd 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -739,6 +739,12 @@ int binder_alloc_mmap_handler(struct binder_alloc *alloc,
     const char *failure_string;
     struct binder_buffer *buffer;

+    if (unlikely(vma->vm_mm != alloc->mm)) {
+        ret = -EINVAL;
+        failure_string = "invalid vma->vm_mm";
+        goto err_invalid_mm;
+    }
+
     mutex_lock(&binder_alloc_mmap_lock);
     if (alloc->buffer_size) {
         ret = -EBUSY;
@@ -785,6 +791,7 @@ err_alloc_pages_failed:
     alloc->buffer_size = 0;
 err_already_mapped:
     mutex_unlock(&binder_alloc_mmap_lock);
+err_invalid_mm:
     binder_alloc_debug(BINDER_DEBUG_USER_ERROR,
                        "%s: %d %lx-%lx %s failed %d\n", __func__,
                        alloc->pid, vma->vm_start, vma->vm_end,
diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h
index da7ee8bec165..7add8e79912b 100644
--- a/drivers/ata/ahci.h
+++ b/drivers/ata/ahci.h
@@ -257,7 +257,7 @@ enum {
     PCS_7 = 0x94, /* 7+ port PCS (Denverton) */

     /* em constants */
-    EM_MAX_SLOTS = 8,
+    EM_MAX_SLOTS = SATA_PMP_MAX_PORTS,
     EM_MAX_RETRY = 5,

     /* em_ctl bits */
diff --git a/drivers/ata/ahci_brcm.c b/drivers/ata/ahci_brcm.c
index f61795c546cf..6f216eb25610 100644
--- a/drivers/ata/ahci_brcm.c
+++ b/drivers/ata/ahci_brcm.c
@@ -448,7 +448,7 @@ static int brcm_ahci_probe(struct platform_device *pdev)
     if (!of_id)
         return -ENODEV;

-    priv->version = (enum brcm_ahci_version)of_id->data;
+    priv->version = (unsigned long)of_id->data;
     priv->dev = dev;

     res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "top-ctrl");
diff --git a/drivers/ata/ahci_imx.c b/drivers/ata/ahci_imx.c
index b734e069034d..a950767f7948 100644
--- a/drivers/ata/ahci_imx.c
+++ b/drivers/ata/ahci_imx.c
@@ -1067,7 +1067,7 @@ static int imx_ahci_probe(struct platform_device *pdev)
     imxpriv->ahci_pdev = pdev;
     imxpriv->no_device = false;
     imxpriv->first_time = true;
-    imxpriv->type = (enum ahci_imx_type)of_id->data;
+    imxpriv->type = (unsigned long)of_id->data;

     imxpriv->sata_clk = devm_clk_get(dev, "sata");
     if (IS_ERR(imxpriv->sata_clk)) {
@@ -1235,4 +1235,4 @@ module_platform_driver(imx_ahci_driver);
 MODULE_DESCRIPTION("Freescale i.MX AHCI SATA platform driver");
 MODULE_AUTHOR("Richard Zhu <Hong-Xing.Zhu@freescale.com>");
 MODULE_LICENSE("GPL");
-MODULE_ALIAS("ahci:imx");
+MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/drivers/ata/ahci_qoriq.c b/drivers/ata/ahci_qoriq.c
index 6cd61842ad48..9cf9bf36a874 100644
--- a/drivers/ata/ahci_qoriq.c
+++ b/drivers/ata/ahci_qoriq.c
@@ -280,7 +280,7 @@ static int ahci_qoriq_probe(struct platform_device *pdev)
         return -ENOMEM;

     if (of_id)
-        qoriq_priv->type = (enum ahci_qoriq_type)of_id->data;
+        qoriq_priv->type = (unsigned long)of_id->data;
     else
         qoriq_priv->type = (enum ahci_qoriq_type)acpi_id->driver_data;

diff --git a/drivers/ata/ahci_st.c b/drivers/ata/ahci_st.c
index 5a2cac60a29a..8607b68eee53 100644
--- a/drivers/ata/ahci_st.c
+++ b/drivers/ata/ahci_st.c
@@ -236,7 +236,7 @@ static struct platform_driver st_ahci_driver = {
     .driver = {
         .name = DRV_NAME,
         .pm = &st_ahci_pm_ops,
-        .of_match_table = of_match_ptr(st_ahci_match),
+        .of_match_table = st_ahci_match,
     },
     .probe = st_ahci_probe,
     .remove = ata_platform_remove_one,
diff --git a/drivers/ata/ahci_xgene.c b/drivers/ata/ahci_xgene.c
index 7bb5db17f864..1e08704d5117 100644
--- a/drivers/ata/ahci_xgene.c
+++ b/drivers/ata/ahci_xgene.c
@@ -785,7 +785,7 @@ static int xgene_ahci_probe(struct platform_device *pdev)
     of_devid = of_match_device(xgene_ahci_of_match, dev);
     if (of_devid) {
         if (of_devid->data)
-            version = (enum xgene_ahci_version) of_devid->data;
+            version = (unsigned long) of_devid->data;
     }
 #ifdef CONFIG_ACPI
     else {
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index e2ebb0b065e2..06a3d95ed8f9 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -3264,6 +3264,7 @@ static unsigned int ata_scsiop_maint_in(struct ata_scsi_args *args, u8 *rbuf)
     case REPORT_LUNS:
     case REQUEST_SENSE:
     case SYNCHRONIZE_CACHE:
+    case SYNCHRONIZE_CACHE_16:
     case REZERO_UNIT:
     case SEEK_6:
     case SEEK_10:
@@ -3922,6 +3923,7 @@ static inline ata_xlat_func_t ata_get_xlat_func(struct ata_device *dev, u8 cmd)
         return ata_scsi_write_same_xlat;

     case SYNCHRONIZE_CACHE:
+    case SYNCHRONIZE_CACHE_16:
         if (ata_try_flush_cache(dev))
             return ata_scsi_flush_xlat;
         break;
@@ -3962,9 +3964,19 @@ static inline ata_xlat_func_t ata_get_xlat_func(struct ata_device *dev, u8 cmd)

 int __ata_scsi_queuecmd(struct scsi_cmnd *scmd, struct ata_device *dev)
 {
+    struct ata_port *ap = dev->link->ap;
     u8 scsi_op = scmd->cmnd[0];
     ata_xlat_func_t xlat_func;

+    /*
+     * scsi_queue_rq() will defer commands if scsi_host_in_recovery().
+     * However, this check is done without holding the ap->lock (a libata
+     * specific lock), so we can have received an error irq since then,
+     * therefore we must check if EH is pending, while holding ap->lock.
+     */
+    if (ap->pflags & (ATA_PFLAG_EH_PENDING | ATA_PFLAG_EH_IN_PROGRESS))
+        return SCSI_MLQUEUE_DEVICE_BUSY;
+
     if (unlikely(!scmd->cmd_len))
         goto bad_cdb_len;

@@ -4145,6 +4157,7 @@ void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd)
          * turning this into a no-op.
          */
     case SYNCHRONIZE_CACHE:
+    case SYNCHRONIZE_CACHE_16:
         fallthrough;

         /* no-op's, complete with success */
diff --git a/drivers/ata/libata-transport.c b/drivers/ata/libata-transport.c
index a7e9a75410a3..e4fb9d1b9b39 100644
--- a/drivers/ata/libata-transport.c
+++ b/drivers/ata/libata-transport.c
@@ -301,7 +301,9 @@ int ata_tport_add(struct device *parent,
     pm_runtime_enable(dev);
     pm_runtime_forbid(dev);

-    transport_add_device(dev);
+    error = transport_add_device(dev);
+    if (error)
+        goto tport_transport_add_err;
     transport_configure_device(dev);

     error = ata_tlink_add(&ap->link);
@@ -312,12 +314,12 @@ int ata_tport_add(struct device *parent,

 tport_link_err:
     transport_remove_device(dev);
+ tport_transport_add_err:
     device_del(dev);

 tport_err:
     transport_destroy_device(dev);
     put_device(dev);
-    ata_host_put(ap->host);
     return error;
 }

@@ -456,7 +458,9 @@ int ata_tlink_add(struct ata_link *link)
         goto tlink_err;
     }

-    transport_add_device(dev);
+    error = transport_add_device(dev);
+    if (error)
+        goto tlink_transport_err;
     transport_configure_device(dev);

     ata_for_each_dev(ata_dev, link, ALL) {
@@ -471,6 +475,7 @@ int ata_tlink_add(struct ata_link *link)
         ata_tdev_delete(ata_dev);
     }
     transport_remove_device(dev);
+ tlink_transport_err:
     device_del(dev);
 tlink_err:
     transport_destroy_device(dev);
@@ -708,7 +713,13 @@ static int ata_tdev_add(struct ata_device *ata_dev)
         return error;
     }

-    transport_add_device(dev);
+    error = transport_add_device(dev);
+    if (error) {
+        device_del(dev);
+        ata_tdev_free(ata_dev);
+        return error;
+    }
+
     transport_configure_device(dev);
     return 0;
 }
diff --git a/drivers/ata/pata_legacy.c b/drivers/ata/pata_legacy.c
index 0a8bf09a5c19..03c580625c2c 100644
--- a/drivers/ata/pata_legacy.c
+++ b/drivers/ata/pata_legacy.c
@@ -315,9 +315,10 @@ static void pdc20230_set_piomode(struct ata_port *ap, struct ata_device *adev)
     outb(inb(0x1F4) & 0x07, 0x1F4);

     rt = inb(0x1F3);
-    rt &= 0x07 << (3 * adev->devno);
+    rt &= ~(0x07 << (3 * !adev->devno));
     if (pio)
-        rt |= (1 + 3 * pio) << (3 * adev->devno);
+        rt |= (1 + 3 * pio) << (3 * !adev->devno);
+    outb(rt, 0x1F3);

     udelay(100);
     outb(inb(0x1F2) | 0x01, 0x1F2);
diff --git a/drivers/ata/pata_palmld.c b/drivers/ata/pata_palmld.c
index 400e65190904..51caa2a427dd 100644
--- a/drivers/ata/pata_palmld.c
+++ b/drivers/ata/pata_palmld.c
@@ -63,8 +63,8 @@ static int palmld_pata_probe(struct platform_device *pdev)

     /* remap drive's physical memory address */
     mem = devm_platform_ioremap_resource(pdev, 0);
-    if (!mem)
-        return -ENOMEM;
+    if (IS_ERR(mem))
+        return PTR_ERR(mem);

     /* request and activate power and reset GPIOs */
     lda->power = devm_gpiod_get(dev, "power", GPIOD_OUT_HIGH);
diff --git a/drivers/ata/sata_rcar.c b/drivers/ata/sata_rcar.c
index 590ebea99601..0195eb29f6c2 100644
--- a/drivers/ata/sata_rcar.c
+++ b/drivers/ata/sata_rcar.c
@@ -875,7 +875,7 @@ static int sata_rcar_probe(struct platform_device *pdev)
     if (!priv)
         return -ENOMEM;

-    priv->type = (enum sata_rcar_type)of_device_get_match_data(dev);
+    priv->type = (unsigned long)of_device_get_match_data(dev);

     pm_runtime_enable(dev);
     ret = pm_runtime_get_sync(dev);
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index ead135c7044c..6471b559230e 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -2952,6 +2952,10 @@ static int genpd_iterate_idle_states(struct device_node *dn,
         np = it.node;
         if (!of_match_node(idle_state_match, np))
             continue;
+
+        if (!of_device_is_available(np))
+            continue;
+
         if (states) {
             ret = genpd_parse_state(&states[i], np);
             if (ret) {
diff --git a/drivers/base/property.c b/drivers/base/property.c index 68f61d3e3857..f7b5aa8fcf28 100644 --- a/drivers/base/property.c +++ b/drivers/base/property.c @@ -236,7 +236,7 @@ EXPORT_SYMBOL_GPL(device_property_read_string); * Find a given string in a string array and if it is found return the * index back. * - * Return: %0 if the property was found (success), + * Return: index, starting from %0, if the property was found (success), * %-EINVAL if given arguments are not valid, * %-ENODATA if the property does not have a value, * %-EPROTO if the property is not an array of strings, @@ -457,7 +457,7 @@ EXPORT_SYMBOL_GPL(fwnode_property_read_string); * Find a given string in a string array and if it is found return the * index back. * - * Return: %0 if the property was found (success), + * Return: index, starting from %0, if the property was found (success), * %-EINVAL if given arguments are not valid, * %-ENODATA if the property does not have a value, * %-EPROTO if the property is not an array of strings, diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig index db1b4b202646..a41145d52de9 100644 --- a/drivers/block/Kconfig +++ b/drivers/block/Kconfig @@ -408,6 +408,12 @@ config BLK_DEV_UBLK definition isn't finalized yet, and might change according to future requirement, so mark is as experimental now. + Say Y if you want to get better performance because task_work_add() + can be used in IO path for replacing io_uring cmd, which will become + shared between IO tasks and ubq daemon, meantime task_work_add() can + can handle batch more effectively, but task_work_add() isn't exported + for module, so ublk has to be built to kernel. + source "drivers/block/rnbd/Kconfig" endif # BLK_DEV diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c index f3e4db16fd07..8532b839a343 100644 --- a/drivers/block/drbd/drbd_main.c +++ b/drivers/block/drbd/drbd_main.c @@ -2672,7 +2672,7 @@ static int init_submitter(struct drbd_device *device) enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsigned int minor) { struct drbd_resource *resource = adm_ctx->resource; - struct drbd_connection *connection; + struct drbd_connection *connection, *n; struct drbd_device *device; struct drbd_peer_device *peer_device, *tmp_peer_device; struct gendisk *disk; @@ -2789,7 +2789,7 @@ enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsig return NO_ERROR; out_idr_remove_from_resource: - for_each_connection(connection, resource) { + for_each_connection_safe(connection, n, resource) { peer_device = idr_remove(&connection->peer_devices, vnr); if (peer_device) kref_put(&connection->kref, drbd_destroy_connection); diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c index 8f7f144e54f3..7f9bcc82fc9c 100644 --- a/drivers/block/drbd/drbd_req.c +++ b/drivers/block/drbd/drbd_req.c @@ -30,11 +30,6 @@ static struct drbd_request *drbd_req_new(struct drbd_device *device, struct bio return NULL; memset(req, 0, sizeof(*req)); - req->private_bio = bio_alloc_clone(device->ldev->backing_bdev, bio_src, - GFP_NOIO, &drbd_io_bio_set); - req->private_bio->bi_private = req; - req->private_bio->bi_end_io = drbd_request_endio; - req->rq_state = (bio_data_dir(bio_src) == WRITE ? RQ_WRITE : 0) | (bio_op(bio_src) == REQ_OP_WRITE_ZEROES ? RQ_ZEROES : 0) | (bio_op(bio_src) == REQ_OP_DISCARD ? 
RQ_UNMAP : 0); @@ -1219,9 +1214,12 @@ drbd_request_prepare(struct drbd_device *device, struct bio *bio) /* Update disk stats */ req->start_jif = bio_start_io_acct(req->master_bio); - if (!get_ldev(device)) { - bio_put(req->private_bio); - req->private_bio = NULL; + if (get_ldev(device)) { + req->private_bio = bio_alloc_clone(device->ldev->backing_bdev, + bio, GFP_NOIO, + &drbd_io_bio_set); + req->private_bio->bi_private = req; + req->private_bio->bi_end_io = drbd_request_endio; } /* process discards always from our submitter thread */ diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index f9e39301c4af..04453f4a319c 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -7222,8 +7222,10 @@ static int __init rbd_sysfs_init(void) int ret; ret = device_register(&rbd_root_dev); - if (ret < 0) + if (ret < 0) { + put_device(&rbd_root_dev); return ret; + } ret = bus_register(&rbd_bus_type); if (ret < 0) diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c index 2651bf41dde3..f96cb01e9604 100644 --- a/drivers/block/ublk_drv.c +++ b/drivers/block/ublk_drv.c @@ -57,11 +57,14 @@ #define UBLK_PARAM_TYPE_ALL (UBLK_PARAM_TYPE_BASIC | UBLK_PARAM_TYPE_DISCARD) struct ublk_rq_data { - struct callback_head work; + union { + struct callback_head work; + struct llist_node node; + }; }; struct ublk_uring_cmd_pdu { - struct request *req; + struct ublk_queue *ubq; }; /* @@ -119,12 +122,14 @@ struct ublk_queue { struct task_struct *ubq_daemon; char *io_cmd_buf; + struct llist_head io_cmds; + unsigned long io_addr; /* mapped vm address */ unsigned int max_io_sz; bool force_abort; unsigned short nr_io_ready; /* how many ios setup */ struct ublk_device *dev; - struct ublk_io ios[0]; + struct ublk_io ios[]; }; #define UBLK_DAEMON_MONITOR_PERIOD (5 * HZ) @@ -764,8 +769,12 @@ static inline void __ublk_rq_task_work(struct request *req) static void ublk_rq_task_work_cb(struct io_uring_cmd *cmd) { struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(cmd); + struct ublk_queue *ubq = pdu->ubq; + struct llist_node *io_cmds = llist_del_all(&ubq->io_cmds); + struct ublk_rq_data *data; - __ublk_rq_task_work(pdu->req); + llist_for_each_entry(data, io_cmds, node) + __ublk_rq_task_work(blk_mq_rq_from_pdu(data)); } static void ublk_rq_task_work_fn(struct callback_head *work) @@ -777,6 +786,54 @@ static void ublk_rq_task_work_fn(struct callback_head *work) __ublk_rq_task_work(req); } +static void ublk_submit_cmd(struct ublk_queue *ubq, const struct request *rq) +{ + struct ublk_io *io = &ubq->ios[rq->tag]; + + /* + * If the check pass, we know that this is a re-issued request aborted + * previously in monitor_work because the ubq_daemon(cmd's task) is + * PF_EXITING. We cannot call io_uring_cmd_complete_in_task() anymore + * because this ioucmd's io_uring context may be freed now if no inflight + * ioucmd exists. Otherwise we may cause null-deref in ctx->fallback_work. + * + * Note: monitor_work sets UBLK_IO_FLAG_ABORTED and ends this request(releasing + * the tag). Then the request is re-started(allocating the tag) and we are here. + * Since releasing/allocating a tag implies smp_mb(), finding UBLK_IO_FLAG_ABORTED + * guarantees that here is a re-issued request aborted previously. 
+ */ + if (unlikely(io->flags & UBLK_IO_FLAG_ABORTED)) { + struct llist_node *io_cmds = llist_del_all(&ubq->io_cmds); + struct ublk_rq_data *data; + + llist_for_each_entry(data, io_cmds, node) + __ublk_abort_rq(ubq, blk_mq_rq_from_pdu(data)); + } else { + struct io_uring_cmd *cmd = io->cmd; + struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(cmd); + + pdu->ubq = ubq; + io_uring_cmd_complete_in_task(cmd, ublk_rq_task_work_cb); + } +} + +static void ublk_queue_cmd(struct ublk_queue *ubq, struct request *rq, + bool last) +{ + struct ublk_rq_data *data = blk_mq_rq_to_pdu(rq); + + if (ublk_can_use_task_work(ubq)) { + enum task_work_notify_mode notify_mode = last ? + TWA_SIGNAL_NO_IPI : TWA_NONE; + + if (task_work_add(ubq->ubq_daemon, &data->work, notify_mode)) + __ublk_abort_rq(ubq, rq); + } else { + if (llist_add(&data->node, &ubq->io_cmds)) + ublk_submit_cmd(ubq, rq); + } +} + static blk_status_t ublk_queue_rq(struct blk_mq_hw_ctx *hctx, const struct blk_mq_queue_data *bd) { @@ -788,6 +845,7 @@ static blk_status_t ublk_queue_rq(struct blk_mq_hw_ctx *hctx, res = ublk_setup_iod(ubq, rq); if (unlikely(res != BLK_STS_OK)) return BLK_STS_IOERR; + /* With recovery feature enabled, force_abort is set in * ublk_stop_dev() before calling del_gendisk(). We have to * abort all requeued and new rqs here to let del_gendisk() @@ -803,41 +861,11 @@ static blk_status_t ublk_queue_rq(struct blk_mq_hw_ctx *hctx, blk_mq_start_request(bd->rq); if (unlikely(ubq_daemon_is_dying(ubq))) { - fail: __ublk_abort_rq(ubq, rq); return BLK_STS_OK; } - if (ublk_can_use_task_work(ubq)) { - struct ublk_rq_data *data = blk_mq_rq_to_pdu(rq); - enum task_work_notify_mode notify_mode = bd->last ? - TWA_SIGNAL_NO_IPI : TWA_NONE; - - if (task_work_add(ubq->ubq_daemon, &data->work, notify_mode)) - goto fail; - } else { - struct ublk_io *io = &ubq->ios[rq->tag]; - struct io_uring_cmd *cmd = io->cmd; - struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(cmd); - - /* - * If the check pass, we know that this is a re-issued request aborted - * previously in monitor_work because the ubq_daemon(cmd's task) is - * PF_EXITING. We cannot call io_uring_cmd_complete_in_task() anymore - * because this ioucmd's io_uring context may be freed now if no inflight - * ioucmd exists. Otherwise we may cause null-deref in ctx->fallback_work. - * - * Note: monitor_work sets UBLK_IO_FLAG_ABORTED and ends this request(releasing - * the tag). Then the request is re-started(allocating the tag) and we are here. - * Since releasing/allocating a tag implies smp_mb(), finding UBLK_IO_FLAG_ABORTED - * guarantees that here is a re-issued request aborted previously. 
- */ - if ((io->flags & UBLK_IO_FLAG_ABORTED)) - goto fail; - - pdu->req = rq; - io_uring_cmd_complete_in_task(cmd, ublk_rq_task_work_cb); - } + ublk_queue_cmd(ubq, rq, bd->last); return BLK_STS_OK; } @@ -1164,22 +1192,12 @@ static void ublk_mark_io_ready(struct ublk_device *ub, struct ublk_queue *ubq) } static void ublk_handle_need_get_data(struct ublk_device *ub, int q_id, - int tag, struct io_uring_cmd *cmd) + int tag) { struct ublk_queue *ubq = ublk_get_queue(ub, q_id); struct request *req = blk_mq_tag_to_rq(ub->tag_set.tags[q_id], tag); - if (ublk_can_use_task_work(ubq)) { - struct ublk_rq_data *data = blk_mq_rq_to_pdu(req); - - /* should not fail since we call it just in ubq->ubq_daemon */ - task_work_add(ubq->ubq_daemon, &data->work, TWA_SIGNAL_NO_IPI); - } else { - struct ublk_uring_cmd_pdu *pdu = ublk_get_uring_cmd_pdu(cmd); - - pdu->req = req; - io_uring_cmd_complete_in_task(cmd, ublk_rq_task_work_cb); - } + ublk_queue_cmd(ubq, req, true); } static int ublk_ch_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags) @@ -1267,7 +1285,7 @@ static int ublk_ch_uring_cmd(struct io_uring_cmd *cmd, unsigned int issue_flags) io->addr = ub_cmd->addr; io->cmd = cmd; io->flags |= UBLK_IO_FLAG_ACTIVE; - ublk_handle_need_get_data(ub, ub_cmd->q_id, ub_cmd->tag, cmd); + ublk_handle_need_get_data(ub, ub_cmd->q_id, ub_cmd->tag); break; default: goto out; @@ -1658,6 +1676,9 @@ static int ublk_ctrl_add_dev(struct io_uring_cmd *cmd) */ ub->dev_info.flags &= UBLK_F_ALL; + if (!IS_BUILTIN(CONFIG_BLK_DEV_UBLK)) + ub->dev_info.flags |= UBLK_F_URING_CMD_COMP_IN_TASK; + /* We are not ready to support zero copy */ ub->dev_info.flags &= ~UBLK_F_SUPPORT_ZERO_COPY; diff --git a/drivers/bluetooth/virtio_bt.c b/drivers/bluetooth/virtio_bt.c index 67c21263f9e0..fd281d439505 100644 --- a/drivers/bluetooth/virtio_bt.c +++ b/drivers/bluetooth/virtio_bt.c @@ -219,7 +219,7 @@ static void virtbt_rx_work(struct work_struct *work) if (!skb) return; - skb->len = len; + skb_put(skb, len); virtbt_rx_handle(vbt, skb); if (virtbt_add_inbuf(vbt) < 0) diff --git a/drivers/char/hw_random/bcm2835-rng.c b/drivers/char/hw_random/bcm2835-rng.c index e7dd457e9b22..e98fcac578d6 100644 --- a/drivers/char/hw_random/bcm2835-rng.c +++ b/drivers/char/hw_random/bcm2835-rng.c @@ -71,7 +71,7 @@ static int bcm2835_rng_read(struct hwrng *rng, void *buf, size_t max, while ((rng_readl(priv, RNG_STATUS) >> 24) == 0) { if (!wait) return 0; - cpu_relax(); + hwrng_msleep(rng, 1000); } num_words = rng_readl(priv, RNG_STATUS) >> 24; diff --git a/drivers/char/random.c b/drivers/char/random.c index 2fe28eeb2f38..69754155300e 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -791,13 +791,13 @@ void __init random_init_early(const char *command_line) #endif for (i = 0, arch_bits = sizeof(entropy) * 8; i < ARRAY_SIZE(entropy);) { - longs = arch_get_random_seed_longs(entropy, ARRAY_SIZE(entropy) - i); + longs = arch_get_random_seed_longs_early(entropy, ARRAY_SIZE(entropy) - i); if (longs) { _mix_pool_bytes(entropy, sizeof(*entropy) * longs); i += longs; continue; } - longs = arch_get_random_longs(entropy, ARRAY_SIZE(entropy) - i); + longs = arch_get_random_longs_early(entropy, ARRAY_SIZE(entropy) - i); if (longs) { _mix_pool_bytes(entropy, sizeof(*entropy) * longs); i += longs; diff --git a/drivers/clk/clk-renesas-pcie.c b/drivers/clk/clk-renesas-pcie.c index 4f5df1fc74b4..e6247141d0c0 100644 --- a/drivers/clk/clk-renesas-pcie.c +++ b/drivers/clk/clk-renesas-pcie.c @@ -90,13 +90,66 @@ static const struct regmap_access_table 
rs9_writeable_table = { .n_yes_ranges = ARRAY_SIZE(rs9_writeable_ranges), }; +static int rs9_regmap_i2c_write(void *context, + unsigned int reg, unsigned int val) +{ + struct i2c_client *i2c = context; + const u8 data[3] = { reg, 1, val }; + const int count = ARRAY_SIZE(data); + int ret; + + ret = i2c_master_send(i2c, data, count); + if (ret == count) + return 0; + else if (ret < 0) + return ret; + else + return -EIO; +} + +static int rs9_regmap_i2c_read(void *context, + unsigned int reg, unsigned int *val) +{ + struct i2c_client *i2c = context; + struct i2c_msg xfer[2]; + u8 txdata = reg; + u8 rxdata[2]; + int ret; + + xfer[0].addr = i2c->addr; + xfer[0].flags = 0; + xfer[0].len = 1; + xfer[0].buf = (void *)&txdata; + + xfer[1].addr = i2c->addr; + xfer[1].flags = I2C_M_RD; + xfer[1].len = 2; + xfer[1].buf = (void *)rxdata; + + ret = i2c_transfer(i2c->adapter, xfer, 2); + if (ret < 0) + return ret; + if (ret != 2) + return -EIO; + + /* + * Byte 0 is transfer length, which is always 1 due + * to BCP register programming to 1 in rs9_probe(), + * ignore it and use data from Byte 1. + */ + *val = rxdata[1]; + return 0; +} + static const struct regmap_config rs9_regmap_config = { .reg_bits = 8, .val_bits = 8, - .cache_type = REGCACHE_FLAT, - .max_register = 0x8, + .cache_type = REGCACHE_NONE, + .max_register = RS9_REG_BCP, .rd_table = &rs9_readable_table, .wr_table = &rs9_writeable_table, + .reg_write = rs9_regmap_i2c_write, + .reg_read = rs9_regmap_i2c_read, }; static int rs9_get_output_config(struct rs9_driver_data *rs9, int idx) @@ -242,11 +295,17 @@ static int rs9_probe(struct i2c_client *client) return ret; } - rs9->regmap = devm_regmap_init_i2c(client, &rs9_regmap_config); + rs9->regmap = devm_regmap_init(&client->dev, NULL, + client, &rs9_regmap_config); if (IS_ERR(rs9->regmap)) return dev_err_probe(&client->dev, PTR_ERR(rs9->regmap), "Failed to allocate register map\n"); + /* Always read back 1 Byte via I2C */ + ret = regmap_write(rs9->regmap, RS9_REG_BCP, 1); + if (ret < 0) + return ret; + /* Register clock */ for (i = 0; i < rs9->chip_info->num_clks; i++) { snprintf(name, 5, "DIF%d", i); diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c index c3c3f8c07258..57b83665e5c3 100644 --- a/drivers/clk/clk.c +++ b/drivers/clk/clk.c @@ -1459,10 +1459,14 @@ static void clk_core_init_rate_req(struct clk_core * const core, { struct clk_core *parent; - if (WARN_ON(!core || !req)) + if (WARN_ON(!req)) return; memset(req, 0, sizeof(*req)); + req->max_rate = ULONG_MAX; + + if (!core) + return; req->rate = rate; clk_core_get_boundaries(core, &req->min_rate, &req->max_rate); diff --git a/drivers/clk/mediatek/clk-mt8195-topckgen.c b/drivers/clk/mediatek/clk-mt8195-topckgen.c index 8cbab5ca2e58..1e016329c1d2 100644 --- a/drivers/clk/mediatek/clk-mt8195-topckgen.c +++ b/drivers/clk/mediatek/clk-mt8195-topckgen.c @@ -1270,8 +1270,10 @@ static int clk_mt8195_topck_probe(struct platform_device *pdev) hw = devm_clk_hw_register_mux(&pdev->dev, "mfg_ck_fast_ref", mfg_fast_parents, ARRAY_SIZE(mfg_fast_parents), CLK_SET_RATE_PARENT, (base + 0x250), 8, 1, 0, &mt8195_clk_lock); - if (IS_ERR(hw)) + if (IS_ERR(hw)) { + r = PTR_ERR(hw); goto unregister_muxes; + } top_clk_data->hws[CLK_TOP_MFG_CK_FAST_REF] = hw; r = clk_mt8195_reg_mfg_mux_notifier(&pdev->dev, diff --git a/drivers/clk/qcom/gcc-sc7280.c b/drivers/clk/qcom/gcc-sc7280.c index 8afb7575e712..46d41ebce2b0 100644 --- a/drivers/clk/qcom/gcc-sc7280.c +++ b/drivers/clk/qcom/gcc-sc7280.c @@ -3467,6 +3467,7 @@ static int gcc_sc7280_probe(struct platform_device 
*pdev) regmap_update_bits(regmap, 0x28004, BIT(0), BIT(0)); regmap_update_bits(regmap, 0x28014, BIT(0), BIT(0)); regmap_update_bits(regmap, 0x71004, BIT(0), BIT(0)); + regmap_update_bits(regmap, 0x7100C, BIT(13), BIT(13)); ret = qcom_cc_register_rcg_dfs(regmap, gcc_dfs_clocks, ARRAY_SIZE(gcc_dfs_clocks)); diff --git a/drivers/clk/qcom/gpucc-sc7280.c b/drivers/clk/qcom/gpucc-sc7280.c index 9a832f2bcf49..1490cd45a654 100644 --- a/drivers/clk/qcom/gpucc-sc7280.c +++ b/drivers/clk/qcom/gpucc-sc7280.c @@ -463,6 +463,7 @@ static int gpu_cc_sc7280_probe(struct platform_device *pdev) */ regmap_update_bits(regmap, 0x1170, BIT(0), BIT(0)); regmap_update_bits(regmap, 0x1098, BIT(0), BIT(0)); + regmap_update_bits(regmap, 0x1098, BIT(13), BIT(13)); return qcom_cc_really_probe(pdev, &gpu_cc_sc7280_desc, regmap); } diff --git a/drivers/clk/renesas/r8a779g0-cpg-mssr.c b/drivers/clk/renesas/r8a779g0-cpg-mssr.c index 9641122133b5..d5b325e3c539 100644 --- a/drivers/clk/renesas/r8a779g0-cpg-mssr.c +++ b/drivers/clk/renesas/r8a779g0-cpg-mssr.c @@ -47,6 +47,7 @@ enum clk_ids { CLK_S0_VIO, CLK_S0_VC, CLK_S0_HSC, + CLK_SASYNCPER, CLK_SV_VIP, CLK_SV_IR, CLK_SDSRC, @@ -84,6 +85,7 @@ static const struct cpg_core_clk r8a779g0_core_clks[] __initconst = { DEF_FIXED(".s0_vio", CLK_S0_VIO, CLK_PLL1_DIV2, 2, 1), DEF_FIXED(".s0_vc", CLK_S0_VC, CLK_PLL1_DIV2, 2, 1), DEF_FIXED(".s0_hsc", CLK_S0_HSC, CLK_PLL1_DIV2, 2, 1), + DEF_FIXED(".sasyncper", CLK_SASYNCPER, CLK_PLL5_DIV4, 3, 1), DEF_FIXED(".sv_vip", CLK_SV_VIP, CLK_PLL1, 5, 1), DEF_FIXED(".sv_ir", CLK_SV_IR, CLK_PLL1, 5, 1), DEF_BASE(".sdsrc", CLK_SDSRC, CLK_TYPE_GEN4_SDSRC, CLK_PLL5), @@ -128,6 +130,9 @@ static const struct cpg_core_clk r8a779g0_core_clks[] __initconst = { DEF_FIXED("s0d4_hsc", R8A779G0_CLK_S0D4_HSC, CLK_S0_HSC, 4, 1), DEF_FIXED("cl16m_hsc", R8A779G0_CLK_CL16M_HSC, CLK_S0_HSC, 48, 1), DEF_FIXED("s0d2_cc", R8A779G0_CLK_S0D2_CC, CLK_S0, 2, 1), + DEF_FIXED("sasyncperd1",R8A779G0_CLK_SASYNCPERD1, CLK_SASYNCPER,1, 1), + DEF_FIXED("sasyncperd2",R8A779G0_CLK_SASYNCPERD2, CLK_SASYNCPER,2, 1), + DEF_FIXED("sasyncperd4",R8A779G0_CLK_SASYNCPERD4, CLK_SASYNCPER,4, 1), DEF_FIXED("svd1_ir", R8A779G0_CLK_SVD1_IR, CLK_SV_IR, 1, 1), DEF_FIXED("svd2_ir", R8A779G0_CLK_SVD2_IR, CLK_SV_IR, 2, 1), DEF_FIXED("svd1_vip", R8A779G0_CLK_SVD1_VIP, CLK_SV_VIP, 1, 1), @@ -153,10 +158,10 @@ static const struct mssr_mod_clk r8a779g0_mod_clks[] __initconst = { DEF_MOD("avb0", 211, R8A779G0_CLK_S0D4_HSC), DEF_MOD("avb1", 212, R8A779G0_CLK_S0D4_HSC), DEF_MOD("avb2", 213, R8A779G0_CLK_S0D4_HSC), - DEF_MOD("hscif0", 514, R8A779G0_CLK_S0D3_PER), - DEF_MOD("hscif1", 515, R8A779G0_CLK_S0D3_PER), - DEF_MOD("hscif2", 516, R8A779G0_CLK_S0D3_PER), - DEF_MOD("hscif3", 517, R8A779G0_CLK_S0D3_PER), + DEF_MOD("hscif0", 514, R8A779G0_CLK_SASYNCPERD1), + DEF_MOD("hscif1", 515, R8A779G0_CLK_SASYNCPERD1), + DEF_MOD("hscif2", 516, R8A779G0_CLK_SASYNCPERD1), + DEF_MOD("hscif3", 517, R8A779G0_CLK_SASYNCPERD1), DEF_MOD("i2c0", 518, R8A779G0_CLK_S0D6_PER), DEF_MOD("i2c1", 519, R8A779G0_CLK_S0D6_PER), DEF_MOD("i2c2", 520, R8A779G0_CLK_S0D6_PER), diff --git a/drivers/clk/sifive/Kconfig b/drivers/clk/sifive/Kconfig index 9132c3c4aa86..b7fde0aadfcb 100644 --- a/drivers/clk/sifive/Kconfig +++ b/drivers/clk/sifive/Kconfig @@ -2,7 +2,8 @@ menuconfig CLK_SIFIVE bool "SiFive SoC driver support" - depends on RISCV || COMPILE_TEST + depends on SOC_SIFIVE || COMPILE_TEST + default SOC_SIFIVE help SoC drivers for SiFive Linux-capable SoCs. 
@@ -10,6 +11,7 @@ if CLK_SIFIVE config CLK_SIFIVE_PRCI bool "PRCI driver for SiFive SoCs" + default SOC_SIFIVE select RESET_CONTROLLER select RESET_SIMPLE select CLK_ANALOGBITS_WRPLL_CLN28HPC diff --git a/drivers/clocksource/hyperv_timer.c b/drivers/clocksource/hyperv_timer.c index bb47610bbd1c..18de1f439ffd 100644 --- a/drivers/clocksource/hyperv_timer.c +++ b/drivers/clocksource/hyperv_timer.c @@ -21,6 +21,7 @@ #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/acpi.h> +#include <linux/hyperv.h> #include <clocksource/hyperv_timer.h> #include <asm/hyperv-tlfs.h> #include <asm/mshyperv.h> @@ -395,25 +396,25 @@ static u64 notrace read_hv_sched_clock_tsc(void) static void suspend_hv_clock_tsc(struct clocksource *arg) { - u64 tsc_msr; + union hv_reference_tsc_msr tsc_msr; /* Disable the TSC page */ - tsc_msr = hv_get_register(HV_REGISTER_REFERENCE_TSC); - tsc_msr &= ~BIT_ULL(0); - hv_set_register(HV_REGISTER_REFERENCE_TSC, tsc_msr); + tsc_msr.as_uint64 = hv_get_register(HV_REGISTER_REFERENCE_TSC); + tsc_msr.enable = 0; + hv_set_register(HV_REGISTER_REFERENCE_TSC, tsc_msr.as_uint64); } static void resume_hv_clock_tsc(struct clocksource *arg) { phys_addr_t phys_addr = virt_to_phys(&tsc_pg); - u64 tsc_msr; + union hv_reference_tsc_msr tsc_msr; /* Re-enable the TSC page */ - tsc_msr = hv_get_register(HV_REGISTER_REFERENCE_TSC); - tsc_msr &= GENMASK_ULL(11, 0); - tsc_msr |= BIT_ULL(0) | (u64)phys_addr; - hv_set_register(HV_REGISTER_REFERENCE_TSC, tsc_msr); + tsc_msr.as_uint64 = hv_get_register(HV_REGISTER_REFERENCE_TSC); + tsc_msr.enable = 1; + tsc_msr.pfn = HVPFN_DOWN(phys_addr); + hv_set_register(HV_REGISTER_REFERENCE_TSC, tsc_msr.as_uint64); } #ifdef HAVE_VDSO_CLOCKMODE_HVCLOCK @@ -495,7 +496,7 @@ static __always_inline void hv_setup_sched_clock(void *sched_clock) {} static bool __init hv_init_tsc_clocksource(void) { - u64 tsc_msr; + union hv_reference_tsc_msr tsc_msr; phys_addr_t phys_addr; if (!(ms_hyperv.features & HV_MSR_REFERENCE_TSC_AVAILABLE)) @@ -530,10 +531,10 @@ static bool __init hv_init_tsc_clocksource(void) * (which already has at least the low 12 bits set to zero since * it is page aligned). Also set the "enable" bit, which is bit 0. 
*/ - tsc_msr = hv_get_register(HV_REGISTER_REFERENCE_TSC); - tsc_msr &= GENMASK_ULL(11, 0); - tsc_msr = tsc_msr | 0x1 | (u64)phys_addr; - hv_set_register(HV_REGISTER_REFERENCE_TSC, tsc_msr); + tsc_msr.as_uint64 = hv_get_register(HV_REGISTER_REFERENCE_TSC); + tsc_msr.enable = 1; + tsc_msr.pfn = HVPFN_DOWN(phys_addr); + hv_set_register(HV_REGISTER_REFERENCE_TSC, tsc_msr.as_uint64); clocksource_register_hz(&hyperv_cs_tsc, NSEC_PER_SEC/100); diff --git a/drivers/counter/104-quad-8.c b/drivers/counter/104-quad-8.c index 77a863b7eefe..deed4afadb29 100644 --- a/drivers/counter/104-quad-8.c +++ b/drivers/counter/104-quad-8.c @@ -232,34 +232,45 @@ static const enum counter_function quad8_count_functions_list[] = { COUNTER_FUNCTION_QUADRATURE_X4, }; +static int quad8_function_get(const struct quad8 *const priv, const size_t id, + enum counter_function *const function) +{ + if (!priv->quadrature_mode[id]) { + *function = COUNTER_FUNCTION_PULSE_DIRECTION; + return 0; + } + + switch (priv->quadrature_scale[id]) { + case 0: + *function = COUNTER_FUNCTION_QUADRATURE_X1_A; + return 0; + case 1: + *function = COUNTER_FUNCTION_QUADRATURE_X2_A; + return 0; + case 2: + *function = COUNTER_FUNCTION_QUADRATURE_X4; + return 0; + default: + /* should never reach this path */ + return -EINVAL; + } +} + static int quad8_function_read(struct counter_device *counter, struct counter_count *count, enum counter_function *function) { struct quad8 *const priv = counter_priv(counter); - const int id = count->id; unsigned long irqflags; + int retval; spin_lock_irqsave(&priv->lock, irqflags); - if (priv->quadrature_mode[id]) - switch (priv->quadrature_scale[id]) { - case 0: - *function = COUNTER_FUNCTION_QUADRATURE_X1_A; - break; - case 1: - *function = COUNTER_FUNCTION_QUADRATURE_X2_A; - break; - case 2: - *function = COUNTER_FUNCTION_QUADRATURE_X4; - break; - } - else - *function = COUNTER_FUNCTION_PULSE_DIRECTION; + retval = quad8_function_get(priv, count->id, function); spin_unlock_irqrestore(&priv->lock, irqflags); - return 0; + return retval; } static int quad8_function_write(struct counter_device *counter, @@ -359,6 +370,7 @@ static int quad8_action_read(struct counter_device *counter, enum counter_synapse_action *action) { struct quad8 *const priv = counter_priv(counter); + unsigned long irqflags; int err; enum counter_function function; const size_t signal_a_id = count->synapses[0].signal->id; @@ -374,9 +386,21 @@ static int quad8_action_read(struct counter_device *counter, return 0; } - err = quad8_function_read(counter, count, &function); - if (err) + spin_lock_irqsave(&priv->lock, irqflags); + + /* Get Count function and direction atomically */ + err = quad8_function_get(priv, count->id, &function); + if (err) { + spin_unlock_irqrestore(&priv->lock, irqflags); + return err; + } + err = quad8_direction_read(counter, count, &direction); + if (err) { + spin_unlock_irqrestore(&priv->lock, irqflags); return err; + } + + spin_unlock_irqrestore(&priv->lock, irqflags); /* Default action mode */ *action = COUNTER_SYNAPSE_ACTION_NONE; @@ -389,10 +413,6 @@ static int quad8_action_read(struct counter_device *counter, return 0; case COUNTER_FUNCTION_QUADRATURE_X1_A: if (synapse->signal->id == signal_a_id) { - err = quad8_direction_read(counter, count, &direction); - if (err) - return err; - if (direction == COUNTER_COUNT_DIRECTION_FORWARD) *action = COUNTER_SYNAPSE_ACTION_RISING_EDGE; else diff --git a/drivers/counter/microchip-tcb-capture.c b/drivers/counter/microchip-tcb-capture.c index f9dee15d9777..e2d1dc6ca668 100644 --- 
a/drivers/counter/microchip-tcb-capture.c +++ b/drivers/counter/microchip-tcb-capture.c @@ -28,7 +28,6 @@ struct mchp_tc_data { int qdec_mode; int num_channels; int channel[2]; - bool trig_inverted; }; static const enum counter_function mchp_tc_count_functions[] = { @@ -153,7 +152,7 @@ static int mchp_tc_count_signal_read(struct counter_device *counter, regmap_read(priv->regmap, ATMEL_TC_REG(priv->channel[0], SR), &sr); - if (priv->trig_inverted) + if (signal->id == 1) sigstatus = (sr & ATMEL_TC_MTIOB); else sigstatus = (sr & ATMEL_TC_MTIOA); @@ -171,6 +170,17 @@ static int mchp_tc_count_action_read(struct counter_device *counter, struct mchp_tc_data *const priv = counter_priv(counter); u32 cmr; + if (priv->qdec_mode) { + *action = COUNTER_SYNAPSE_ACTION_BOTH_EDGES; + return 0; + } + + /* Only TIOA signal is evaluated in non-QDEC mode */ + if (synapse->signal->id != 0) { + *action = COUNTER_SYNAPSE_ACTION_NONE; + return 0; + } + regmap_read(priv->regmap, ATMEL_TC_REG(priv->channel[0], CMR), &cmr); switch (cmr & ATMEL_TC_ETRGEDG) { @@ -199,8 +209,8 @@ static int mchp_tc_count_action_write(struct counter_device *counter, struct mchp_tc_data *const priv = counter_priv(counter); u32 edge = ATMEL_TC_ETRGEDG_NONE; - /* QDEC mode is rising edge only */ - if (priv->qdec_mode) + /* QDEC mode is rising edge only; only TIOA handled in non-QDEC mode */ + if (priv->qdec_mode || synapse->signal->id != 0) return -EINVAL; switch (action) { diff --git a/drivers/counter/ti-ecap-capture.c b/drivers/counter/ti-ecap-capture.c index af10de30aba5..fb1cb1774674 100644 --- a/drivers/counter/ti-ecap-capture.c +++ b/drivers/counter/ti-ecap-capture.c @@ -377,7 +377,8 @@ static const enum counter_signal_polarity ecap_cnt_pol_avail[] = { COUNTER_SIGNAL_POLARITY_NEGATIVE, }; -static DEFINE_COUNTER_ARRAY_POLARITY(ecap_cnt_pol_array, ecap_cnt_pol_avail, ECAP_NB_CEVT); +static DEFINE_COUNTER_AVAILABLE(ecap_cnt_pol_available, ecap_cnt_pol_avail); +static DEFINE_COUNTER_ARRAY_POLARITY(ecap_cnt_pol_array, ecap_cnt_pol_available, ECAP_NB_CEVT); static struct counter_comp ecap_cnt_signal_ext[] = { COUNTER_COMP_ARRAY_POLARITY(ecap_cnt_pol_read, ecap_cnt_pol_write, ecap_cnt_pol_array), @@ -479,8 +480,8 @@ static int ecap_cnt_probe(struct platform_device *pdev) int ret; counter_dev = devm_counter_alloc(dev, sizeof(*ecap_dev)); - if (IS_ERR(counter_dev)) - return PTR_ERR(counter_dev); + if (!counter_dev) + return -ENOMEM; counter_dev->name = ECAP_DRV_NAME; counter_dev->parent = dev; diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c index d69d13a26414..4aec4b2a5225 100644 --- a/drivers/cpufreq/cpufreq-dt.c +++ b/drivers/cpufreq/cpufreq-dt.c @@ -222,10 +222,8 @@ static int dt_cpufreq_early_init(struct device *dev, int cpu) if (reg_name[0]) { priv->opp_token = dev_pm_opp_set_regulators(cpu_dev, reg_name); if (priv->opp_token < 0) { - ret = priv->opp_token; - if (ret != -EPROBE_DEFER) - dev_err(cpu_dev, "failed to set regulators: %d\n", - ret); + ret = dev_err_probe(cpu_dev, priv->opp_token, + "failed to set regulators\n"); goto free_cpumask; } } diff --git a/drivers/cpufreq/imx6q-cpufreq.c b/drivers/cpufreq/imx6q-cpufreq.c index 90beb26ed34e..ad4ce8493144 100644 --- a/drivers/cpufreq/imx6q-cpufreq.c +++ b/drivers/cpufreq/imx6q-cpufreq.c @@ -396,9 +396,7 @@ static int imx6q_cpufreq_probe(struct platform_device *pdev) ret = imx6q_opp_check_speed_grading(cpu_dev); } if (ret) { - if (ret != -EPROBE_DEFER) - dev_err(cpu_dev, "failed to read ocotp: %d\n", - ret); + dev_err_probe(cpu_dev, ret, "failed to read 
ocotp\n"); goto out_free_opp; } diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index fc3ebeb0bbe5..6ff73c30769f 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -27,6 +27,7 @@ #include <linux/pm_qos.h> #include <trace/events/power.h> +#include <asm/cpu.h> #include <asm/div64.h> #include <asm/msr.h> #include <asm/cpu_device_id.h> @@ -280,10 +281,10 @@ static struct cpudata **all_cpu_data; * structure is used to store those callbacks. */ struct pstate_funcs { - int (*get_max)(void); - int (*get_max_physical)(void); - int (*get_min)(void); - int (*get_turbo)(void); + int (*get_max)(int cpu); + int (*get_max_physical)(int cpu); + int (*get_min)(int cpu); + int (*get_turbo)(int cpu); int (*get_scaling)(void); int (*get_cpu_scaling)(int cpu); int (*get_aperf_mperf_shift)(void); @@ -398,16 +399,6 @@ static int intel_pstate_get_cppc_guaranteed(int cpu) return cppc_perf.nominal_perf; } - -static u32 intel_pstate_cppc_nominal(int cpu) -{ - u64 nominal_perf; - - if (cppc_get_nominal_perf(cpu, &nominal_perf)) - return 0; - - return nominal_perf; -} #else /* CONFIG_ACPI_CPPC_LIB */ static inline void intel_pstate_set_itmt_prio(int cpu) { @@ -531,35 +522,18 @@ static void intel_pstate_hybrid_hwp_adjust(struct cpudata *cpu) { int perf_ctl_max_phys = cpu->pstate.max_pstate_physical; int perf_ctl_scaling = cpu->pstate.perf_ctl_scaling; - int perf_ctl_turbo = pstate_funcs.get_turbo(); - int turbo_freq = perf_ctl_turbo * perf_ctl_scaling; + int perf_ctl_turbo = pstate_funcs.get_turbo(cpu->cpu); int scaling = cpu->pstate.scaling; pr_debug("CPU%d: perf_ctl_max_phys = %d\n", cpu->cpu, perf_ctl_max_phys); - pr_debug("CPU%d: perf_ctl_max = %d\n", cpu->cpu, pstate_funcs.get_max()); pr_debug("CPU%d: perf_ctl_turbo = %d\n", cpu->cpu, perf_ctl_turbo); pr_debug("CPU%d: perf_ctl_scaling = %d\n", cpu->cpu, perf_ctl_scaling); pr_debug("CPU%d: HWP_CAP guaranteed = %d\n", cpu->cpu, cpu->pstate.max_pstate); pr_debug("CPU%d: HWP_CAP highest = %d\n", cpu->cpu, cpu->pstate.turbo_pstate); pr_debug("CPU%d: HWP-to-frequency scaling factor: %d\n", cpu->cpu, scaling); - /* - * If the product of the HWP performance scaling factor and the HWP_CAP - * highest performance is greater than the maximum turbo frequency - * corresponding to the pstate_funcs.get_turbo() return value, the - * scaling factor is too high, so recompute it to make the HWP_CAP - * highest performance correspond to the maximum turbo frequency. 
- */ - cpu->pstate.turbo_freq = cpu->pstate.turbo_pstate * scaling; - if (turbo_freq < cpu->pstate.turbo_freq) { - cpu->pstate.turbo_freq = turbo_freq; - scaling = DIV_ROUND_UP(turbo_freq, cpu->pstate.turbo_pstate); - cpu->pstate.scaling = scaling; - - pr_debug("CPU%d: refined HWP-to-frequency scaling factor: %d\n", - cpu->cpu, scaling); - } - + cpu->pstate.turbo_freq = rounddown(cpu->pstate.turbo_pstate * scaling, + perf_ctl_scaling); cpu->pstate.max_freq = rounddown(cpu->pstate.max_pstate * scaling, perf_ctl_scaling); @@ -1740,7 +1714,7 @@ static void intel_pstate_hwp_enable(struct cpudata *cpudata) intel_pstate_update_epp_defaults(cpudata); } -static int atom_get_min_pstate(void) +static int atom_get_min_pstate(int not_used) { u64 value; @@ -1748,7 +1722,7 @@ static int atom_get_min_pstate(void) return (value >> 8) & 0x7F; } -static int atom_get_max_pstate(void) +static int atom_get_max_pstate(int not_used) { u64 value; @@ -1756,7 +1730,7 @@ static int atom_get_max_pstate(void) return (value >> 16) & 0x7F; } -static int atom_get_turbo_pstate(void) +static int atom_get_turbo_pstate(int not_used) { u64 value; @@ -1834,23 +1808,23 @@ static void atom_get_vid(struct cpudata *cpudata) cpudata->vid.turbo = value & 0x7f; } -static int core_get_min_pstate(void) +static int core_get_min_pstate(int cpu) { u64 value; - rdmsrl(MSR_PLATFORM_INFO, value); + rdmsrl_on_cpu(cpu, MSR_PLATFORM_INFO, &value); return (value >> 40) & 0xFF; } -static int core_get_max_pstate_physical(void) +static int core_get_max_pstate_physical(int cpu) { u64 value; - rdmsrl(MSR_PLATFORM_INFO, value); + rdmsrl_on_cpu(cpu, MSR_PLATFORM_INFO, &value); return (value >> 8) & 0xFF; } -static int core_get_tdp_ratio(u64 plat_info) +static int core_get_tdp_ratio(int cpu, u64 plat_info) { /* Check how many TDP levels present */ if (plat_info & 0x600000000) { @@ -1860,13 +1834,13 @@ static int core_get_tdp_ratio(u64 plat_info) int err; /* Get the TDP level (0, 1, 2) to get ratios */ - err = rdmsrl_safe(MSR_CONFIG_TDP_CONTROL, &tdp_ctrl); + err = rdmsrl_safe_on_cpu(cpu, MSR_CONFIG_TDP_CONTROL, &tdp_ctrl); if (err) return err; /* TDP MSR are continuous starting at 0x648 */ tdp_msr = MSR_CONFIG_TDP_NOMINAL + (tdp_ctrl & 0x03); - err = rdmsrl_safe(tdp_msr, &tdp_ratio); + err = rdmsrl_safe_on_cpu(cpu, tdp_msr, &tdp_ratio); if (err) return err; @@ -1883,7 +1857,7 @@ static int core_get_tdp_ratio(u64 plat_info) return -ENXIO; } -static int core_get_max_pstate(void) +static int core_get_max_pstate(int cpu) { u64 tar; u64 plat_info; @@ -1891,10 +1865,10 @@ static int core_get_max_pstate(void) int tdp_ratio; int err; - rdmsrl(MSR_PLATFORM_INFO, plat_info); + rdmsrl_on_cpu(cpu, MSR_PLATFORM_INFO, &plat_info); max_pstate = (plat_info >> 8) & 0xFF; - tdp_ratio = core_get_tdp_ratio(plat_info); + tdp_ratio = core_get_tdp_ratio(cpu, plat_info); if (tdp_ratio <= 0) return max_pstate; @@ -1903,7 +1877,7 @@ static int core_get_max_pstate(void) return tdp_ratio; } - err = rdmsrl_safe(MSR_TURBO_ACTIVATION_RATIO, &tar); + err = rdmsrl_safe_on_cpu(cpu, MSR_TURBO_ACTIVATION_RATIO, &tar); if (!err) { int tar_levels; @@ -1918,13 +1892,13 @@ static int core_get_max_pstate(void) return max_pstate; } -static int core_get_turbo_pstate(void) +static int core_get_turbo_pstate(int cpu) { u64 value; int nont, ret; - rdmsrl(MSR_TURBO_RATIO_LIMIT, value); - nont = core_get_max_pstate(); + rdmsrl_on_cpu(cpu, MSR_TURBO_RATIO_LIMIT, &value); + nont = core_get_max_pstate(cpu); ret = (value) & 255; if (ret <= nont) ret = nont; @@ -1952,50 +1926,37 @@ static int 
knl_get_aperf_mperf_shift(void) return 10; } -static int knl_get_turbo_pstate(void) +static int knl_get_turbo_pstate(int cpu) { u64 value; int nont, ret; - rdmsrl(MSR_TURBO_RATIO_LIMIT, value); - nont = core_get_max_pstate(); + rdmsrl_on_cpu(cpu, MSR_TURBO_RATIO_LIMIT, &value); + nont = core_get_max_pstate(cpu); ret = (((value) >> 8) & 0xFF); if (ret <= nont) ret = nont; return ret; } -#ifdef CONFIG_ACPI_CPPC_LIB -static u32 hybrid_ref_perf; - -static int hybrid_get_cpu_scaling(int cpu) +static void hybrid_get_type(void *data) { - return DIV_ROUND_UP(core_get_scaling() * hybrid_ref_perf, - intel_pstate_cppc_nominal(cpu)); + u8 *cpu_type = data; + + *cpu_type = get_this_hybrid_cpu_type(); } -static void intel_pstate_cppc_set_cpu_scaling(void) +static int hybrid_get_cpu_scaling(int cpu) { - u32 min_nominal_perf = U32_MAX; - int cpu; + u8 cpu_type = 0; - for_each_present_cpu(cpu) { - u32 nominal_perf = intel_pstate_cppc_nominal(cpu); + smp_call_function_single(cpu, hybrid_get_type, &cpu_type, 1); + /* P-cores have a smaller perf level-to-freqency scaling factor. */ + if (cpu_type == 0x40) + return 78741; - if (nominal_perf && nominal_perf < min_nominal_perf) - min_nominal_perf = nominal_perf; - } - - if (min_nominal_perf < U32_MAX) { - hybrid_ref_perf = min_nominal_perf; - pstate_funcs.get_cpu_scaling = hybrid_get_cpu_scaling; - } + return core_get_scaling(); } -#else -static inline void intel_pstate_cppc_set_cpu_scaling(void) -{ -} -#endif /* CONFIG_ACPI_CPPC_LIB */ static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate) { @@ -2025,10 +1986,10 @@ static void intel_pstate_max_within_limits(struct cpudata *cpu) static void intel_pstate_get_cpu_pstates(struct cpudata *cpu) { - int perf_ctl_max_phys = pstate_funcs.get_max_physical(); + int perf_ctl_max_phys = pstate_funcs.get_max_physical(cpu->cpu); int perf_ctl_scaling = pstate_funcs.get_scaling(); - cpu->pstate.min_pstate = pstate_funcs.get_min(); + cpu->pstate.min_pstate = pstate_funcs.get_min(cpu->cpu); cpu->pstate.max_pstate_physical = perf_ctl_max_phys; cpu->pstate.perf_ctl_scaling = perf_ctl_scaling; @@ -2044,8 +2005,8 @@ static void intel_pstate_get_cpu_pstates(struct cpudata *cpu) } } else { cpu->pstate.scaling = perf_ctl_scaling; - cpu->pstate.max_pstate = pstate_funcs.get_max(); - cpu->pstate.turbo_pstate = pstate_funcs.get_turbo(); + cpu->pstate.max_pstate = pstate_funcs.get_max(cpu->cpu); + cpu->pstate.turbo_pstate = pstate_funcs.get_turbo(cpu->cpu); } if (cpu->pstate.scaling == perf_ctl_scaling) { @@ -3221,9 +3182,9 @@ static unsigned int force_load __initdata; static int __init intel_pstate_msrs_not_valid(void) { - if (!pstate_funcs.get_max() || - !pstate_funcs.get_min() || - !pstate_funcs.get_turbo()) + if (!pstate_funcs.get_max(0) || + !pstate_funcs.get_min(0) || + !pstate_funcs.get_turbo(0)) return -ENODEV; return 0; @@ -3450,7 +3411,7 @@ static int __init intel_pstate_init(void) default_driver = &intel_pstate; if (boot_cpu_has(X86_FEATURE_HYBRID_CPU)) - intel_pstate_cppc_set_cpu_scaling(); + pstate_funcs.get_cpu_scaling = hybrid_get_cpu_scaling; goto hwp_cpu_matched; } diff --git a/drivers/cpufreq/qcom-cpufreq-nvmem.c b/drivers/cpufreq/qcom-cpufreq-nvmem.c index 863548f59c3e..a577586b23be 100644 --- a/drivers/cpufreq/qcom-cpufreq-nvmem.c +++ b/drivers/cpufreq/qcom-cpufreq-nvmem.c @@ -64,7 +64,7 @@ static struct platform_device *cpufreq_dt_pdev, *cpufreq_pdev; static void get_krait_bin_format_a(struct device *cpu_dev, int *speed, int *pvs, int *pvs_ver, - struct nvmem_cell *pvs_nvmem, u8 *buf) + u8 *buf) { u32 
pte_efuse; @@ -95,7 +95,7 @@ static void get_krait_bin_format_a(struct device *cpu_dev, static void get_krait_bin_format_b(struct device *cpu_dev, int *speed, int *pvs, int *pvs_ver, - struct nvmem_cell *pvs_nvmem, u8 *buf) + u8 *buf) { u32 pte_efuse, redundant_sel; @@ -213,6 +213,7 @@ static int qcom_cpufreq_krait_name_version(struct device *cpu_dev, int speed = 0, pvs = 0, pvs_ver = 0; u8 *speedbin; size_t len; + int ret = 0; speedbin = nvmem_cell_read(speedbin_nvmem, &len); @@ -222,15 +223,16 @@ static int qcom_cpufreq_krait_name_version(struct device *cpu_dev, switch (len) { case 4: get_krait_bin_format_a(cpu_dev, &speed, &pvs, &pvs_ver, - speedbin_nvmem, speedbin); + speedbin); break; case 8: get_krait_bin_format_b(cpu_dev, &speed, &pvs, &pvs_ver, - speedbin_nvmem, speedbin); + speedbin); break; default: dev_err(cpu_dev, "Unable to read nvmem data. Defaulting to 0!\n"); - return -ENODEV; + ret = -ENODEV; + goto len_error; } snprintf(*pvs_name, sizeof("speedXX-pvsXX-vXX"), "speed%d-pvs%d-v%d", @@ -238,8 +240,9 @@ static int qcom_cpufreq_krait_name_version(struct device *cpu_dev, drv->versions = (1 << speed); +len_error: kfree(speedbin); - return 0; + return ret; } static const struct qcom_cpufreq_match_data match_data_kryo = { @@ -262,7 +265,8 @@ static int qcom_cpufreq_probe(struct platform_device *pdev) struct nvmem_cell *speedbin_nvmem; struct device_node *np; struct device *cpu_dev; - char *pvs_name = "speedXX-pvsXX-vXX"; + char pvs_name_buffer[] = "speedXX-pvsXX-vXX"; + char *pvs_name = pvs_name_buffer; unsigned cpu; const struct of_device_id *match; int ret; @@ -295,11 +299,8 @@ static int qcom_cpufreq_probe(struct platform_device *pdev) if (drv->data->get_version) { speedbin_nvmem = of_nvmem_cell_get(np, NULL); if (IS_ERR(speedbin_nvmem)) { - if (PTR_ERR(speedbin_nvmem) != -EPROBE_DEFER) - dev_err(cpu_dev, - "Could not get nvmem cell: %ld\n", - PTR_ERR(speedbin_nvmem)); - ret = PTR_ERR(speedbin_nvmem); + ret = dev_err_probe(cpu_dev, PTR_ERR(speedbin_nvmem), + "Could not get nvmem cell\n"); goto free_drv; } diff --git a/drivers/cpufreq/sun50i-cpufreq-nvmem.c b/drivers/cpufreq/sun50i-cpufreq-nvmem.c index a4922580ce06..1583a370da39 100644 --- a/drivers/cpufreq/sun50i-cpufreq-nvmem.c +++ b/drivers/cpufreq/sun50i-cpufreq-nvmem.c @@ -56,12 +56,9 @@ static int sun50i_cpufreq_get_efuse(u32 *versions) speedbin_nvmem = of_nvmem_cell_get(np, NULL); of_node_put(np); - if (IS_ERR(speedbin_nvmem)) { - if (PTR_ERR(speedbin_nvmem) != -EPROBE_DEFER) - pr_err("Could not get nvmem cell: %ld\n", - PTR_ERR(speedbin_nvmem)); - return PTR_ERR(speedbin_nvmem); - } + if (IS_ERR(speedbin_nvmem)) + return dev_err_probe(cpu_dev, PTR_ERR(speedbin_nvmem), + "Could not get nvmem cell\n"); speedbin = nvmem_cell_read(speedbin_nvmem, &len); nvmem_cell_put(speedbin_nvmem); diff --git a/drivers/cpufreq/tegra194-cpufreq.c b/drivers/cpufreq/tegra194-cpufreq.c index c2004cae3f02..4596c3e323aa 100644 --- a/drivers/cpufreq/tegra194-cpufreq.c +++ b/drivers/cpufreq/tegra194-cpufreq.c @@ -589,6 +589,7 @@ static const struct of_device_id tegra194_cpufreq_of_match[] = { { .compatible = "nvidia,tegra239-ccplex-cluster", .data = &tegra239_cpufreq_soc }, { /* sentinel */ } }; +MODULE_DEVICE_TABLE(of, tegra194_cpufreq_of_match); static struct platform_driver tegra194_ccplex_driver = { .driver = { diff --git a/drivers/cxl/core/mbox.c b/drivers/cxl/core/mbox.c index 16176b9278b4..0c90f13870a4 100644 --- a/drivers/cxl/core/mbox.c +++ b/drivers/cxl/core/mbox.c @@ -174,7 +174,7 @@ int cxl_mbox_send_cmd(struct cxl_dev_state *cxlds, 
u16 opcode, void *in, }; int rc; - if (out_size > cxlds->payload_size) + if (in_size > cxlds->payload_size || out_size > cxlds->payload_size) return -E2BIG; rc = cxlds->mbox_send(cxlds, &mbox_cmd); diff --git a/drivers/cxl/core/pmem.c b/drivers/cxl/core/pmem.c index 1d12a8206444..36aa5070d902 100644 --- a/drivers/cxl/core/pmem.c +++ b/drivers/cxl/core/pmem.c @@ -188,6 +188,7 @@ static void cxl_nvdimm_release(struct device *dev) { struct cxl_nvdimm *cxl_nvd = to_cxl_nvdimm(dev); + xa_destroy(&cxl_nvd->pmem_regions); kfree(cxl_nvd); } @@ -230,6 +231,7 @@ static struct cxl_nvdimm *cxl_nvdimm_alloc(struct cxl_memdev *cxlmd) dev = &cxl_nvd->dev; cxl_nvd->cxlmd = cxlmd; + xa_init(&cxl_nvd->pmem_regions); device_initialize(dev); lockdep_set_class(&dev->mutex, &cxl_nvdimm_key); device_set_pm_not_required(dev); diff --git a/drivers/cxl/core/port.c b/drivers/cxl/core/port.c index bffde862de0b..e7556864ea80 100644 --- a/drivers/cxl/core/port.c +++ b/drivers/cxl/core/port.c @@ -811,6 +811,7 @@ static struct cxl_dport *find_dport(struct cxl_port *port, int id) static int add_dport(struct cxl_port *port, struct cxl_dport *new) { struct cxl_dport *dup; + int rc; device_lock_assert(&port->dev); dup = find_dport(port, new->port_id); @@ -821,8 +822,14 @@ static int add_dport(struct cxl_port *port, struct cxl_dport *new) dev_name(dup->dport)); return -EBUSY; } - return xa_insert(&port->dports, (unsigned long)new->dport, new, - GFP_KERNEL); + + rc = xa_insert(&port->dports, (unsigned long)new->dport, new, + GFP_KERNEL); + if (rc) + return rc; + + port->nr_dports++; + return 0; } /* diff --git a/drivers/cxl/core/region.c b/drivers/cxl/core/region.c index 401148016978..f9ae5ad284ff 100644 --- a/drivers/cxl/core/region.c +++ b/drivers/cxl/core/region.c @@ -174,7 +174,8 @@ static int cxl_region_decode_commit(struct cxl_region *cxlr) iter = to_cxl_port(iter->dev.parent)) { cxl_rr = cxl_rr_load(iter, cxlr); cxld = cxl_rr->decoder; - rc = cxld->commit(cxld); + if (cxld->commit) + rc = cxld->commit(cxld); if (rc) break; } @@ -657,6 +658,9 @@ static struct cxl_region_ref *alloc_region_ref(struct cxl_port *port, xa_for_each(&port->regions, index, iter) { struct cxl_region_params *ip = &iter->region->params; + if (!ip->res) + continue; + if (ip->res->start > p->res->start) { dev_dbg(&cxlr->dev, "%s: HPA order violation %s:%pr vs %pr\n", @@ -686,18 +690,27 @@ static struct cxl_region_ref *alloc_region_ref(struct cxl_port *port, return cxl_rr; } -static void free_region_ref(struct cxl_region_ref *cxl_rr) +static void cxl_rr_free_decoder(struct cxl_region_ref *cxl_rr) { - struct cxl_port *port = cxl_rr->port; struct cxl_region *cxlr = cxl_rr->region; struct cxl_decoder *cxld = cxl_rr->decoder; + if (!cxld) + return; + dev_WARN_ONCE(&cxlr->dev, cxld->region != cxlr, "region mismatch\n"); if (cxld->region == cxlr) { cxld->region = NULL; put_device(&cxlr->dev); } +} +static void free_region_ref(struct cxl_region_ref *cxl_rr) +{ + struct cxl_port *port = cxl_rr->port; + struct cxl_region *cxlr = cxl_rr->region; + + cxl_rr_free_decoder(cxl_rr); xa_erase(&port->regions, (unsigned long)cxlr); xa_destroy(&cxl_rr->endpoints); kfree(cxl_rr); @@ -728,6 +741,33 @@ static int cxl_rr_ep_add(struct cxl_region_ref *cxl_rr, return 0; } +static int cxl_rr_alloc_decoder(struct cxl_port *port, struct cxl_region *cxlr, + struct cxl_endpoint_decoder *cxled, + struct cxl_region_ref *cxl_rr) +{ + struct cxl_decoder *cxld; + + if (port == cxled_to_port(cxled)) + cxld = &cxled->cxld; + else + cxld = cxl_region_find_decoder(port, cxlr); + if (!cxld) 
{ + dev_dbg(&cxlr->dev, "%s: no decoder available\n", + dev_name(&port->dev)); + return -EBUSY; + } + + if (cxld->region) { + dev_dbg(&cxlr->dev, "%s: %s already attached to %s\n", + dev_name(&port->dev), dev_name(&cxld->dev), + dev_name(&cxld->region->dev)); + return -EBUSY; + } + + cxl_rr->decoder = cxld; + return 0; +} + /** * cxl_port_attach_region() - track a region's interest in a port by endpoint * @port: port to add a new region reference 'struct cxl_region_ref' @@ -794,12 +834,6 @@ static int cxl_port_attach_region(struct cxl_port *port, cxl_rr->nr_targets++; nr_targets_inc = true; } - - /* - * The decoder for @cxlr was allocated when the region was first - * attached to @port. - */ - cxld = cxl_rr->decoder; } else { cxl_rr = alloc_region_ref(port, cxlr); if (IS_ERR(cxl_rr)) { @@ -810,26 +844,11 @@ static int cxl_port_attach_region(struct cxl_port *port, } nr_targets_inc = true; - if (port == cxled_to_port(cxled)) - cxld = &cxled->cxld; - else - cxld = cxl_region_find_decoder(port, cxlr); - if (!cxld) { - dev_dbg(&cxlr->dev, "%s: no decoder available\n", - dev_name(&port->dev)); - goto out_erase; - } - - if (cxld->region) { - dev_dbg(&cxlr->dev, "%s: %s already attached to %s\n", - dev_name(&port->dev), dev_name(&cxld->dev), - dev_name(&cxld->region->dev)); - rc = -EBUSY; + rc = cxl_rr_alloc_decoder(port, cxlr, cxled, cxl_rr); + if (rc) goto out_erase; - } - - cxl_rr->decoder = cxld; } + cxld = cxl_rr->decoder; rc = cxl_rr_ep_add(cxl_rr, cxled); if (rc) { @@ -971,7 +990,14 @@ static int cxl_port_setup_targets(struct cxl_port *port, if (cxl_rr->nr_targets_set) { int i, distance; - distance = p->nr_targets / cxl_rr->nr_targets; + /* + * Passthrough ports impose no distance requirements between + * peers + */ + if (port->nr_dports == 1) + distance = 0; + else + distance = p->nr_targets / cxl_rr->nr_targets; for (i = 0; i < cxl_rr->nr_targets_set; i++) if (ep->dport == cxlsd->target[i]) { rc = check_last_peer(cxled, ep, cxl_rr, @@ -1508,9 +1534,24 @@ static const struct attribute_group *region_groups[] = { static void cxl_region_release(struct device *dev) { + struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(dev->parent); struct cxl_region *cxlr = to_cxl_region(dev); + int id = atomic_read(&cxlrd->region_id); + + /* + * Try to reuse the recently idled id rather than the cached + * next id to prevent the region id space from increasing + * unnecessarily. + */ + if (cxlr->id < id) + if (atomic_try_cmpxchg(&cxlrd->region_id, &id, cxlr->id)) { + memregion_free(id); + goto out; + } memregion_free(cxlr->id); +out: + put_device(dev->parent); kfree(cxlr); } @@ -1538,8 +1579,19 @@ static struct cxl_region *to_cxl_region(struct device *dev) static void unregister_region(void *dev) { struct cxl_region *cxlr = to_cxl_region(dev); + struct cxl_region_params *p = &cxlr->params; + int i; device_del(dev); + + /* + * Now that region sysfs is shutdown, the parameter block is now + * read-only, so no need to hold the region rwsem to access the + * region parameters. 
+ */ + for (i = 0; i < p->interleave_ways; i++) + detach_target(cxlr, i); + cxl_region_iomem_release(cxlr); put_device(dev); } @@ -1561,6 +1613,11 @@ static struct cxl_region *cxl_region_alloc(struct cxl_root_decoder *cxlrd, int i device_initialize(dev); lockdep_set_class(&dev->mutex, &cxl_region_key); dev->parent = &cxlrd->cxlsd.cxld.dev; + /* + * Keep root decoder pinned through cxl_region_release to fixup + * region id allocations + */ + get_device(dev->parent); device_set_pm_not_required(dev); dev->bus = &cxl_bus_type; dev->type = &cxl_region_type; diff --git a/drivers/cxl/cxl.h b/drivers/cxl/cxl.h index f680450f0b16..ac75554b5d76 100644 --- a/drivers/cxl/cxl.h +++ b/drivers/cxl/cxl.h @@ -423,7 +423,7 @@ struct cxl_nvdimm { struct device dev; struct cxl_memdev *cxlmd; struct cxl_nvdimm_bridge *bridge; - struct cxl_pmem_region *region; + struct xarray pmem_regions; }; struct cxl_pmem_region_mapping { @@ -457,6 +457,7 @@ struct cxl_pmem_region { * @regions: cxl_region_ref instances, regions mapped by this port * @parent_dport: dport that points to this port in the parent * @decoder_ida: allocator for decoder ids + * @nr_dports: number of entries in @dports * @hdm_end: track last allocated HDM decoder instance for allocation ordering * @commit_end: cursor to track highest committed decoder for commit ordering * @component_reg_phys: component register capability base address (optional) @@ -475,6 +476,7 @@ struct cxl_port { struct xarray regions; struct cxl_dport *parent_dport; struct ida decoder_ida; + int nr_dports; int hdm_end; int commit_end; resource_size_t component_reg_phys; diff --git a/drivers/cxl/pmem.c b/drivers/cxl/pmem.c index 7dc0a2fa1a6b..4c627d67281a 100644 --- a/drivers/cxl/pmem.c +++ b/drivers/cxl/pmem.c @@ -30,17 +30,20 @@ static void unregister_nvdimm(void *nvdimm) struct cxl_nvdimm *cxl_nvd = nvdimm_provider_data(nvdimm); struct cxl_nvdimm_bridge *cxl_nvb = cxl_nvd->bridge; struct cxl_pmem_region *cxlr_pmem; + unsigned long index; device_lock(&cxl_nvb->dev); - cxlr_pmem = cxl_nvd->region; dev_set_drvdata(&cxl_nvd->dev, NULL); - cxl_nvd->region = NULL; - device_unlock(&cxl_nvb->dev); + xa_for_each(&cxl_nvd->pmem_regions, index, cxlr_pmem) { + get_device(&cxlr_pmem->dev); + device_unlock(&cxl_nvb->dev); - if (cxlr_pmem) { device_release_driver(&cxlr_pmem->dev); put_device(&cxlr_pmem->dev); + + device_lock(&cxl_nvb->dev); } + device_unlock(&cxl_nvb->dev); nvdimm_delete(nvdimm); cxl_nvd->bridge = NULL; @@ -107,7 +110,7 @@ static int cxl_pmem_get_config_size(struct cxl_dev_state *cxlds, *cmd = (struct nd_cmd_get_config_size) { .config_size = cxlds->lsa_size, - .max_xfer = cxlds->payload_size, + .max_xfer = cxlds->payload_size - sizeof(struct cxl_mbox_set_lsa), }; return 0; @@ -148,7 +151,7 @@ static int cxl_pmem_set_config_data(struct cxl_dev_state *cxlds, return -EINVAL; /* 4-byte status follows the input data in the payload */ - if (struct_size(cmd, in_buf, cmd->in_length) + 4 > buf_len) + if (size_add(struct_size(cmd, in_buf, cmd->in_length), 4) > buf_len) return -EINVAL; set_lsa = @@ -366,25 +369,49 @@ static int match_cxl_nvdimm(struct device *dev, void *data) static void unregister_nvdimm_region(void *nd_region) { - struct cxl_nvdimm_bridge *cxl_nvb; - struct cxl_pmem_region *cxlr_pmem; + nvdimm_region_delete(nd_region); +} + +static int cxl_nvdimm_add_region(struct cxl_nvdimm *cxl_nvd, + struct cxl_pmem_region *cxlr_pmem) +{ + int rc; + + rc = xa_insert(&cxl_nvd->pmem_regions, (unsigned long)cxlr_pmem, + cxlr_pmem, GFP_KERNEL); + if (rc) + return rc; + + 
get_device(&cxlr_pmem->dev); + return 0; +} + +static void cxl_nvdimm_del_region(struct cxl_nvdimm *cxl_nvd, + struct cxl_pmem_region *cxlr_pmem) +{ + /* + * It is possible this is called without a corresponding + * cxl_nvdimm_add_region for @cxlr_pmem + */ + cxlr_pmem = xa_erase(&cxl_nvd->pmem_regions, (unsigned long)cxlr_pmem); + if (cxlr_pmem) + put_device(&cxlr_pmem->dev); +} + +static void release_mappings(void *data) +{ int i; + struct cxl_pmem_region *cxlr_pmem = data; + struct cxl_nvdimm_bridge *cxl_nvb = cxlr_pmem->bridge; - cxlr_pmem = nd_region_provider_data(nd_region); - cxl_nvb = cxlr_pmem->bridge; device_lock(&cxl_nvb->dev); for (i = 0; i < cxlr_pmem->nr_mappings; i++) { struct cxl_pmem_region_mapping *m = &cxlr_pmem->mapping[i]; struct cxl_nvdimm *cxl_nvd = m->cxl_nvd; - if (cxl_nvd->region) { - put_device(&cxlr_pmem->dev); - cxl_nvd->region = NULL; - } + cxl_nvdimm_del_region(cxl_nvd, cxlr_pmem); } device_unlock(&cxl_nvb->dev); - - nvdimm_region_delete(nd_region); } static void cxlr_pmem_remove_resource(void *res) @@ -422,7 +449,7 @@ static int cxl_pmem_region_probe(struct device *dev) if (!cxl_nvb->nvdimm_bus) { dev_dbg(dev, "nvdimm bus not found\n"); rc = -ENXIO; - goto err; + goto out_nvb; } memset(&mappings, 0, sizeof(mappings)); @@ -431,7 +458,7 @@ static int cxl_pmem_region_probe(struct device *dev) res = devm_kzalloc(dev, sizeof(*res), GFP_KERNEL); if (!res) { rc = -ENOMEM; - goto err; + goto out_nvb; } res->name = "Persistent Memory"; @@ -442,11 +469,11 @@ static int cxl_pmem_region_probe(struct device *dev) rc = insert_resource(&iomem_resource, res); if (rc) - goto err; + goto out_nvb; rc = devm_add_action_or_reset(dev, cxlr_pmem_remove_resource, res); if (rc) - goto err; + goto out_nvb; ndr_desc.res = res; ndr_desc.provider_data = cxlr_pmem; @@ -462,7 +489,7 @@ static int cxl_pmem_region_probe(struct device *dev) nd_set = devm_kzalloc(dev, sizeof(*nd_set), GFP_KERNEL); if (!nd_set) { rc = -ENOMEM; - goto err; + goto out_nvb; } ndr_desc.memregion = cxlr->id; @@ -472,9 +499,13 @@ static int cxl_pmem_region_probe(struct device *dev) info = kmalloc_array(cxlr_pmem->nr_mappings, sizeof(*info), GFP_KERNEL); if (!info) { rc = -ENOMEM; - goto err; + goto out_nvb; } + rc = devm_add_action_or_reset(dev, release_mappings, cxlr_pmem); + if (rc) + goto out_nvd; + for (i = 0; i < cxlr_pmem->nr_mappings; i++) { struct cxl_pmem_region_mapping *m = &cxlr_pmem->mapping[i]; struct cxl_memdev *cxlmd = m->cxlmd; @@ -486,7 +517,7 @@ static int cxl_pmem_region_probe(struct device *dev) dev_dbg(dev, "[%d]: %s: no cxl_nvdimm found\n", i, dev_name(&cxlmd->dev)); rc = -ENODEV; - goto err; + goto out_nvd; } /* safe to drop ref now with bridge lock held */ @@ -498,10 +529,17 @@ static int cxl_pmem_region_probe(struct device *dev) dev_dbg(dev, "[%d]: %s: no nvdimm found\n", i, dev_name(&cxlmd->dev)); rc = -ENODEV; - goto err; + goto out_nvd; } - cxl_nvd->region = cxlr_pmem; - get_device(&cxlr_pmem->dev); + + /* + * Pin the region per nvdimm device as those may be released + * out-of-order with respect to the region, and a single nvdimm + * maybe associated with multiple regions + */ + rc = cxl_nvdimm_add_region(cxl_nvd, cxlr_pmem); + if (rc) + goto out_nvd; m->cxl_nvd = cxl_nvd; mappings[i] = (struct nd_mapping_desc) { .nvdimm = nvdimm, @@ -527,27 +565,18 @@ static int cxl_pmem_region_probe(struct device *dev) nvdimm_pmem_region_create(cxl_nvb->nvdimm_bus, &ndr_desc); if (!cxlr_pmem->nd_region) { rc = -ENOMEM; - goto err; + goto out_nvd; } rc = devm_add_action_or_reset(dev, 
unregister_nvdimm_region, cxlr_pmem->nd_region); -out: +out_nvd: kfree(info); +out_nvb: device_unlock(&cxl_nvb->dev); put_device(&cxl_nvb->dev); return rc; - -err: - dev_dbg(dev, "failed to create nvdimm region\n"); - for (i--; i >= 0; i--) { - nvdimm = mappings[i].nvdimm; - cxl_nvd = nvdimm_provider_data(nvdimm); - put_device(&cxl_nvd->region->dev); - cxl_nvd->region = NULL; - } - goto out; } static struct cxl_driver cxl_pmem_region_driver = { diff --git a/drivers/dma/apple-admac.c b/drivers/dma/apple-admac.c index 317ca76ccafd..a2cc520225d3 100644 --- a/drivers/dma/apple-admac.c +++ b/drivers/dma/apple-admac.c @@ -493,7 +493,7 @@ static struct dma_chan *admac_dma_of_xlate(struct of_phandle_args *dma_spec, return NULL; } - return &ad->channels[index].chan; + return dma_get_slave_channel(&ad->channels[index].chan); } static int admac_drain_reports(struct admac_data *ad, int channo) diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c index 5a50423b7378..858bd64f1313 100644 --- a/drivers/dma/at_hdmac.c +++ b/drivers/dma/at_hdmac.c @@ -256,6 +256,8 @@ static void atc_dostart(struct at_dma_chan *atchan, struct at_desc *first) ATC_SPIP_BOUNDARY(first->boundary)); channel_writel(atchan, DPIP, ATC_DPIP_HOLE(first->dst_hole) | ATC_DPIP_BOUNDARY(first->boundary)); + /* Don't allow CPU to reorder channel enable. */ + wmb(); dma_writel(atdma, CHER, atchan->mask); vdbg_dump_regs(atchan); @@ -316,7 +318,8 @@ static int atc_get_bytes_left(struct dma_chan *chan, dma_cookie_t cookie) struct at_desc *desc_first = atc_first_active(atchan); struct at_desc *desc; int ret; - u32 ctrla, dscr, trials; + u32 ctrla, dscr; + unsigned int i; /* * If the cookie doesn't match to the currently running transfer then @@ -386,7 +389,7 @@ static int atc_get_bytes_left(struct dma_chan *chan, dma_cookie_t cookie) dscr = channel_readl(atchan, DSCR); rmb(); /* ensure DSCR is read before CTRLA */ ctrla = channel_readl(atchan, CTRLA); - for (trials = 0; trials < ATC_MAX_DSCR_TRIALS; ++trials) { + for (i = 0; i < ATC_MAX_DSCR_TRIALS; ++i) { u32 new_dscr; rmb(); /* ensure DSCR is read after CTRLA */ @@ -412,7 +415,7 @@ static int atc_get_bytes_left(struct dma_chan *chan, dma_cookie_t cookie) rmb(); /* ensure DSCR is read before CTRLA */ ctrla = channel_readl(atchan, CTRLA); } - if (unlikely(trials >= ATC_MAX_DSCR_TRIALS)) + if (unlikely(i == ATC_MAX_DSCR_TRIALS)) return -ETIMEDOUT; /* for the first descriptor we can be more accurate */ @@ -462,18 +465,6 @@ atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc) if (!atc_chan_is_cyclic(atchan)) dma_cookie_complete(txd); - /* If the transfer was a memset, free our temporary buffer */ - if (desc->memset_buffer) { - dma_pool_free(atdma->memset_pool, desc->memset_vaddr, - desc->memset_paddr); - desc->memset_buffer = false; - } - - /* move children to free_list */ - list_splice_init(&desc->tx_list, &atchan->free_list); - /* move myself to free_list */ - list_move(&desc->desc_node, &atchan->free_list); - spin_unlock_irqrestore(&atchan->lock, flags); dma_descriptor_unmap(txd); @@ -483,42 +474,20 @@ atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc) dmaengine_desc_get_callback_invoke(txd, NULL); dma_run_dependencies(txd); -} - -/** - * atc_complete_all - finish work for all transactions - * @atchan: channel to complete transactions for - * - * Eventually submit queued descriptors if any - * - * Assume channel is idle while calling this function - * Called with atchan->lock held and bh disabled - */ -static void atc_complete_all(struct at_dma_chan 
*atchan) -{ - struct at_desc *desc, *_desc; - LIST_HEAD(list); - unsigned long flags; - - dev_vdbg(chan2dev(&atchan->chan_common), "complete all\n"); spin_lock_irqsave(&atchan->lock, flags); - - /* - * Submit queued descriptors ASAP, i.e. before we go through - * the completed ones. - */ - if (!list_empty(&atchan->queue)) - atc_dostart(atchan, atc_first_queued(atchan)); - /* empty active_list now it is completed */ - list_splice_init(&atchan->active_list, &list); - /* empty queue list by moving descriptors (if any) to active_list */ - list_splice_init(&atchan->queue, &atchan->active_list); - + /* move children to free_list */ + list_splice_init(&desc->tx_list, &atchan->free_list); + /* add myself to free_list */ + list_add(&desc->desc_node, &atchan->free_list); spin_unlock_irqrestore(&atchan->lock, flags); - list_for_each_entry_safe(desc, _desc, &list, desc_node) - atc_chain_complete(atchan, desc); + /* If the transfer was a memset, free our temporary buffer */ + if (desc->memset_buffer) { + dma_pool_free(atdma->memset_pool, desc->memset_vaddr, + desc->memset_paddr); + desc->memset_buffer = false; + } } /** @@ -527,26 +496,28 @@ static void atc_complete_all(struct at_dma_chan *atchan) */ static void atc_advance_work(struct at_dma_chan *atchan) { + struct at_desc *desc; unsigned long flags; - int ret; dev_vdbg(chan2dev(&atchan->chan_common), "advance_work\n"); spin_lock_irqsave(&atchan->lock, flags); - ret = atc_chan_is_enabled(atchan); - spin_unlock_irqrestore(&atchan->lock, flags); - if (ret) - return; - - if (list_empty(&atchan->active_list) || - list_is_singular(&atchan->active_list)) - return atc_complete_all(atchan); + if (atc_chan_is_enabled(atchan) || list_empty(&atchan->active_list)) + return spin_unlock_irqrestore(&atchan->lock, flags); - atc_chain_complete(atchan, atc_first_active(atchan)); + desc = atc_first_active(atchan); + /* Remove the transfer node from the active list. 
*/ + list_del_init(&desc->desc_node); + spin_unlock_irqrestore(&atchan->lock, flags); + atc_chain_complete(atchan, desc); /* advance work */ spin_lock_irqsave(&atchan->lock, flags); - atc_dostart(atchan, atc_first_active(atchan)); + if (!list_empty(&atchan->active_list)) { + desc = atc_first_queued(atchan); + list_move_tail(&desc->desc_node, &atchan->active_list); + atc_dostart(atchan, desc); + } spin_unlock_irqrestore(&atchan->lock, flags); } @@ -558,6 +529,7 @@ static void atc_advance_work(struct at_dma_chan *atchan) static void atc_handle_error(struct at_dma_chan *atchan) { struct at_desc *bad_desc; + struct at_desc *desc; struct at_desc *child; unsigned long flags; @@ -570,13 +542,12 @@ static void atc_handle_error(struct at_dma_chan *atchan) bad_desc = atc_first_active(atchan); list_del_init(&bad_desc->desc_node); - /* As we are stopped, take advantage to push queued descriptors - * in active_list */ - list_splice_init(&atchan->queue, atchan->active_list.prev); - /* Try to restart the controller */ - if (!list_empty(&atchan->active_list)) - atc_dostart(atchan, atc_first_active(atchan)); + if (!list_empty(&atchan->active_list)) { + desc = atc_first_queued(atchan); + list_move_tail(&desc->desc_node, &atchan->active_list); + atc_dostart(atchan, desc); + } /* * KERN_CRITICAL may seem harsh, but since this only happens @@ -691,19 +662,11 @@ static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx) spin_lock_irqsave(&atchan->lock, flags); cookie = dma_cookie_assign(tx); - if (list_empty(&atchan->active_list)) { - dev_vdbg(chan2dev(tx->chan), "tx_submit: started %u\n", - desc->txd.cookie); - atc_dostart(atchan, desc); - list_add_tail(&desc->desc_node, &atchan->active_list); - } else { - dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u\n", - desc->txd.cookie); - list_add_tail(&desc->desc_node, &atchan->queue); - } - + list_add_tail(&desc->desc_node, &atchan->queue); spin_unlock_irqrestore(&atchan->lock, flags); + dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u\n", + desc->txd.cookie); return cookie; } @@ -1445,11 +1408,8 @@ static int atc_terminate_all(struct dma_chan *chan) struct at_dma_chan *atchan = to_at_dma_chan(chan); struct at_dma *atdma = to_at_dma(chan->device); int chan_id = atchan->chan_common.chan_id; - struct at_desc *desc, *_desc; unsigned long flags; - LIST_HEAD(list); - dev_vdbg(chan2dev(chan), "%s\n", __func__); /* @@ -1468,19 +1428,15 @@ static int atc_terminate_all(struct dma_chan *chan) cpu_relax(); /* active_list entries will end up before queued entries */ - list_splice_init(&atchan->queue, &list); - list_splice_init(&atchan->active_list, &list); - - spin_unlock_irqrestore(&atchan->lock, flags); - - /* Flush all pending and queued descriptors */ - list_for_each_entry_safe(desc, _desc, &list, desc_node) - atc_chain_complete(atchan, desc); + list_splice_tail_init(&atchan->queue, &atchan->free_list); + list_splice_tail_init(&atchan->active_list, &atchan->free_list); clear_bit(ATC_IS_PAUSED, &atchan->status); /* if channel dedicated to cyclic operations, free it */ clear_bit(ATC_IS_CYCLIC, &atchan->status); + spin_unlock_irqrestore(&atchan->lock, flags); + return 0; } @@ -1535,20 +1491,26 @@ atc_tx_status(struct dma_chan *chan, } /** - * atc_issue_pending - try to finish work + * atc_issue_pending - takes the first transaction descriptor in the pending + * queue and starts the transfer. 
* @chan: target DMA channel */ static void atc_issue_pending(struct dma_chan *chan) { - struct at_dma_chan *atchan = to_at_dma_chan(chan); + struct at_dma_chan *atchan = to_at_dma_chan(chan); + struct at_desc *desc; + unsigned long flags; dev_vdbg(chan2dev(chan), "issue_pending\n"); - /* Not needed for cyclic transfers */ - if (atc_chan_is_cyclic(atchan)) - return; + spin_lock_irqsave(&atchan->lock, flags); + if (atc_chan_is_enabled(atchan) || list_empty(&atchan->queue)) + return spin_unlock_irqrestore(&atchan->lock, flags); - atc_advance_work(atchan); + desc = atc_first_queued(atchan); + list_move_tail(&desc->desc_node, &atchan->active_list); + atc_dostart(atchan, desc); + spin_unlock_irqrestore(&atchan->lock, flags); } /** @@ -1966,7 +1928,11 @@ static int __init at_dma_probe(struct platform_device *pdev) dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask) ? "slave " : "", plat_dat->nr_channels); - dma_async_device_register(&atdma->dma_common); + err = dma_async_device_register(&atdma->dma_common); + if (err) { + dev_err(&pdev->dev, "Unable to register: %d.\n", err); + goto err_dma_async_device_register; + } /* * Do not return an error if the dmac node is not present in order to @@ -1986,6 +1952,7 @@ static int __init at_dma_probe(struct platform_device *pdev) err_of_dma_controller_register: dma_async_device_unregister(&atdma->dma_common); +err_dma_async_device_register: dma_pool_destroy(atdma->memset_pool); err_memset_pool_create: dma_pool_destroy(atdma->dma_desc_pool); diff --git a/drivers/dma/at_hdmac_regs.h b/drivers/dma/at_hdmac_regs.h index 4d1ebc040031..d4d382d74607 100644 --- a/drivers/dma/at_hdmac_regs.h +++ b/drivers/dma/at_hdmac_regs.h @@ -186,13 +186,13 @@ /* LLI == Linked List Item; aka DMA buffer descriptor */ struct at_lli { /* values that are not changed by hardware */ - dma_addr_t saddr; - dma_addr_t daddr; + u32 saddr; + u32 daddr; /* value that may get written back: */ - u32 ctrla; + u32 ctrla; /* more values that are not changed by hardware */ - u32 ctrlb; - dma_addr_t dscr; /* chain to next lli */ + u32 ctrlb; + u32 dscr; /* chain to next lli */ }; /** diff --git a/drivers/dma/idxd/cdev.c b/drivers/dma/idxd/cdev.c index c2808fd081d6..a9b96b18772f 100644 --- a/drivers/dma/idxd/cdev.c +++ b/drivers/dma/idxd/cdev.c @@ -312,6 +312,24 @@ static int idxd_user_drv_probe(struct idxd_dev *idxd_dev) if (idxd->state != IDXD_DEV_ENABLED) return -ENXIO; + /* + * User type WQ is enabled only when SVA is enabled for two reasons: + * - If no IOMMU or IOMMU Passthrough without SVA, userspace + * can directly access physical address through the WQ. + * - The IDXD cdev driver does not provide any ways to pin + * user pages and translate the address from user VA to IOVA or + * PA without IOMMU SVA. Therefore the application has no way + * to instruct the device to perform DMA function. This makes + * the cdev not usable for normal application usage. 
+ */ + if (!device_user_pasid_enabled(idxd)) { + idxd->cmd_status = IDXD_SCMD_WQ_USER_NO_IOMMU; + dev_dbg(&idxd->pdev->dev, + "User type WQ cannot be enabled without SVA.\n"); + + return -EOPNOTSUPP; + } + mutex_lock(&wq->wq_lock); wq->type = IDXD_WQT_USER; rc = drv_enable_wq(wq); diff --git a/drivers/dma/idxd/device.c b/drivers/dma/idxd/device.c index 2c1e6f6daa62..6f44fa8f78a5 100644 --- a/drivers/dma/idxd/device.c +++ b/drivers/dma/idxd/device.c @@ -390,7 +390,7 @@ static void idxd_wq_disable_cleanup(struct idxd_wq *wq) clear_bit(WQ_FLAG_ATS_DISABLE, &wq->flags); memset(wq->name, 0, WQ_NAME_SIZE); wq->max_xfer_bytes = WQ_DEFAULT_MAX_XFER; - wq->max_batch_size = WQ_DEFAULT_MAX_BATCH; + idxd_wq_set_max_batch_size(idxd->data->type, wq, WQ_DEFAULT_MAX_BATCH); if (wq->opcap_bmap) bitmap_copy(wq->opcap_bmap, idxd->opcap_bmap, IDXD_MAX_OPCAP_BITS); } @@ -730,13 +730,21 @@ static void idxd_device_wqs_clear_state(struct idxd_device *idxd) void idxd_device_clear_state(struct idxd_device *idxd) { - if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) - return; + /* IDXD is always disabled. Other states are cleared only when IDXD is configurable. */ + if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags)) { + /* + * Clearing wq state is protected by wq lock. + * So no need to be protected by device lock. + */ + idxd_device_wqs_clear_state(idxd); + + spin_lock(&idxd->dev_lock); + idxd_groups_clear_state(idxd); + idxd_engines_clear_state(idxd); + } else { + spin_lock(&idxd->dev_lock); + } - idxd_device_wqs_clear_state(idxd); - spin_lock(&idxd->dev_lock); - idxd_groups_clear_state(idxd); - idxd_engines_clear_state(idxd); idxd->state = IDXD_DEV_DISABLED; spin_unlock(&idxd->dev_lock); } @@ -869,7 +877,7 @@ static int idxd_wq_config_write(struct idxd_wq *wq) /* bytes 12-15 */ wq->wqcfg->max_xfer_shift = ilog2(wq->max_xfer_bytes); - wq->wqcfg->max_batch_shift = ilog2(wq->max_batch_size); + idxd_wqcfg_set_max_batch_shift(idxd->data->type, wq->wqcfg, ilog2(wq->max_batch_size)); /* bytes 32-63 */ if (idxd->hw.wq_cap.op_config && wq->opcap_bmap) { @@ -1051,7 +1059,7 @@ static int idxd_wq_load_config(struct idxd_wq *wq) wq->priority = wq->wqcfg->priority; wq->max_xfer_bytes = 1ULL << wq->wqcfg->max_xfer_shift; - wq->max_batch_size = 1ULL << wq->wqcfg->max_batch_shift; + idxd_wq_set_max_batch_size(idxd->data->type, wq, 1U << wq->wqcfg->max_batch_shift); for (i = 0; i < WQCFG_STRIDES(idxd); i++) { wqcfg_offset = WQCFG_OFFSET(idxd, wq->id, i); diff --git a/drivers/dma/idxd/idxd.h b/drivers/dma/idxd/idxd.h index 1196ab342f01..7ced8d283d98 100644 --- a/drivers/dma/idxd/idxd.h +++ b/drivers/dma/idxd/idxd.h @@ -548,6 +548,38 @@ static inline int idxd_wq_refcount(struct idxd_wq *wq) return wq->client_count; }; +/* + * Intel IAA does not support batch processing. + * The max batch size of device, max batch size of wq and + * max batch shift of wqcfg should be always 0 on IAA. 
+ */ +static inline void idxd_set_max_batch_size(int idxd_type, struct idxd_device *idxd, + u32 max_batch_size) +{ + if (idxd_type == IDXD_TYPE_IAX) + idxd->max_batch_size = 0; + else + idxd->max_batch_size = max_batch_size; +} + +static inline void idxd_wq_set_max_batch_size(int idxd_type, struct idxd_wq *wq, + u32 max_batch_size) +{ + if (idxd_type == IDXD_TYPE_IAX) + wq->max_batch_size = 0; + else + wq->max_batch_size = max_batch_size; +} + +static inline void idxd_wqcfg_set_max_batch_shift(int idxd_type, union wqcfg *wqcfg, + u32 max_batch_shift) +{ + if (idxd_type == IDXD_TYPE_IAX) + wqcfg->max_batch_shift = 0; + else + wqcfg->max_batch_shift = max_batch_shift; +} + int __must_check __idxd_driver_register(struct idxd_device_driver *idxd_drv, struct module *module, const char *mod_name); #define idxd_driver_register(driver) \ diff --git a/drivers/dma/idxd/init.c b/drivers/dma/idxd/init.c index 2b18d512cbfc..09cbf0c179ba 100644 --- a/drivers/dma/idxd/init.c +++ b/drivers/dma/idxd/init.c @@ -183,7 +183,7 @@ static int idxd_setup_wqs(struct idxd_device *idxd) init_completion(&wq->wq_dead); init_completion(&wq->wq_resurrect); wq->max_xfer_bytes = WQ_DEFAULT_MAX_XFER; - wq->max_batch_size = WQ_DEFAULT_MAX_BATCH; + idxd_wq_set_max_batch_size(idxd->data->type, wq, WQ_DEFAULT_MAX_BATCH); wq->enqcmds_retries = IDXD_ENQCMDS_RETRIES; wq->wqcfg = kzalloc_node(idxd->wqcfg_size, GFP_KERNEL, dev_to_node(dev)); if (!wq->wqcfg) { @@ -418,7 +418,7 @@ static void idxd_read_caps(struct idxd_device *idxd) idxd->max_xfer_bytes = 1ULL << idxd->hw.gen_cap.max_xfer_shift; dev_dbg(dev, "max xfer size: %llu bytes\n", idxd->max_xfer_bytes); - idxd->max_batch_size = 1U << idxd->hw.gen_cap.max_batch_shift; + idxd_set_max_batch_size(idxd->data->type, idxd, 1U << idxd->hw.gen_cap.max_batch_shift); dev_dbg(dev, "max batch size: %u\n", idxd->max_batch_size); if (idxd->hw.gen_cap.config_en) set_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags); diff --git a/drivers/dma/idxd/sysfs.c b/drivers/dma/idxd/sysfs.c index bdaccf9e0436..7269bd54554f 100644 --- a/drivers/dma/idxd/sysfs.c +++ b/drivers/dma/idxd/sysfs.c @@ -1046,7 +1046,7 @@ static ssize_t wq_max_batch_size_store(struct device *dev, struct device_attribu if (batch_size > idxd->max_batch_size) return -EINVAL; - wq->max_batch_size = (u32)batch_size; + idxd_wq_set_max_batch_size(idxd->data->type, wq, (u32)batch_size); return count; } diff --git a/drivers/dma/mv_xor_v2.c b/drivers/dma/mv_xor_v2.c index f629ef6fd3c2..113834e1167b 100644 --- a/drivers/dma/mv_xor_v2.c +++ b/drivers/dma/mv_xor_v2.c @@ -893,6 +893,7 @@ static int mv_xor_v2_remove(struct platform_device *pdev) tasklet_kill(&xor_dev->irq_tasklet); clk_disable_unprepare(xor_dev->clk); + clk_disable_unprepare(xor_dev->reg_clk); return 0; } diff --git a/drivers/dma/pxa_dma.c b/drivers/dma/pxa_dma.c index e7034f6f3994..22a392fe6d32 100644 --- a/drivers/dma/pxa_dma.c +++ b/drivers/dma/pxa_dma.c @@ -1247,14 +1247,14 @@ static int pxad_init_phys(struct platform_device *op, return -ENOMEM; for (i = 0; i < nb_phy_chans; i++) - if (platform_get_irq(op, i) > 0) + if (platform_get_irq_optional(op, i) > 0) nr_irq++; for (i = 0; i < nb_phy_chans; i++) { phy = &pdev->phys[i]; phy->base = pdev->base; phy->idx = i; - irq = platform_get_irq(op, i); + irq = platform_get_irq_optional(op, i); if ((nr_irq > 1) && (irq > 0)) ret = devm_request_irq(&op->dev, irq, pxad_chan_handler, diff --git a/drivers/dma/stm32-dma.c b/drivers/dma/stm32-dma.c index 4891a1767e5a..37674029cb42 100644 --- a/drivers/dma/stm32-dma.c +++ 
b/drivers/dma/stm32-dma.c @@ -675,6 +675,8 @@ static void stm32_dma_handle_chan_paused(struct stm32_dma_chan *chan) chan->chan_reg.dma_sndtr = stm32_dma_read(dmadev, STM32_DMA_SNDTR(chan->id)); + chan->status = DMA_PAUSED; + dev_dbg(chan2dev(chan), "vchan %pK: paused\n", &chan->vchan); } @@ -789,9 +791,7 @@ static irqreturn_t stm32_dma_chan_irq(int irq, void *devid) if (status & STM32_DMA_TCI) { stm32_dma_irq_clear(chan, STM32_DMA_TCI); if (scr & STM32_DMA_SCR_TCIE) { - if (chan->status == DMA_PAUSED && !(scr & STM32_DMA_SCR_EN)) - stm32_dma_handle_chan_paused(chan); - else + if (chan->status != DMA_PAUSED) stm32_dma_handle_chan_done(chan, scr); } status &= ~STM32_DMA_TCI; @@ -838,13 +838,11 @@ static int stm32_dma_pause(struct dma_chan *c) return -EPERM; spin_lock_irqsave(&chan->vchan.lock, flags); + ret = stm32_dma_disable_chan(chan); - /* - * A transfer complete flag is set to indicate the end of transfer due to the stream - * interruption, so wait for interrupt - */ if (!ret) - chan->status = DMA_PAUSED; + stm32_dma_handle_chan_paused(chan); + spin_unlock_irqrestore(&chan->vchan.lock, flags); return ret; diff --git a/drivers/dma/stm32-mdma.c b/drivers/dma/stm32-mdma.c index e28acbcb53f4..b9d4c843635f 100644 --- a/drivers/dma/stm32-mdma.c +++ b/drivers/dma/stm32-mdma.c @@ -1539,6 +1539,7 @@ static struct dma_chan *stm32_mdma_of_xlate(struct of_phandle_args *dma_spec, return NULL; } + memset(&config, 0, sizeof(config)); config.request = dma_spec->args[0]; config.priority_level = dma_spec->args[1]; config.transfer_config = dma_spec->args[2]; diff --git a/drivers/dma/ti/k3-udma-glue.c b/drivers/dma/ti/k3-udma-glue.c index 4fdd9f06b723..4f1aeb81e9c7 100644 --- a/drivers/dma/ti/k3-udma-glue.c +++ b/drivers/dma/ti/k3-udma-glue.c @@ -299,6 +299,7 @@ struct k3_udma_glue_tx_channel *k3_udma_glue_request_tx_chn(struct device *dev, ret = device_register(&tx_chn->common.chan_dev); if (ret) { dev_err(dev, "Channel Device registration failed %d\n", ret); + put_device(&tx_chn->common.chan_dev); tx_chn->common.chan_dev.parent = NULL; goto err; } @@ -917,6 +918,7 @@ k3_udma_glue_request_rx_chn_priv(struct device *dev, const char *name, ret = device_register(&rx_chn->common.chan_dev); if (ret) { dev_err(dev, "Channel Device registration failed %d\n", ret); + put_device(&rx_chn->common.chan_dev); rx_chn->common.chan_dev.parent = NULL; goto err; } @@ -1048,6 +1050,7 @@ k3_udma_glue_request_remote_rx_chn(struct device *dev, const char *name, ret = device_register(&rx_chn->common.chan_dev); if (ret) { dev_err(dev, "Channel Device registration failed %d\n", ret); + put_device(&rx_chn->common.chan_dev); rx_chn->common.chan_dev.parent = NULL; goto err; } diff --git a/drivers/extcon/extcon-usbc-tusb320.c b/drivers/extcon/extcon-usbc-tusb320.c index 41041ff0fadb..2a120d8d3c27 100644 --- a/drivers/extcon/extcon-usbc-tusb320.c +++ b/drivers/extcon/extcon-usbc-tusb320.c @@ -327,7 +327,13 @@ static irqreturn_t tusb320_irq_handler(int irq, void *dev_id) return IRQ_NONE; tusb320_extcon_irq_handler(priv, reg); - tusb320_typec_irq_handler(priv, reg); + + /* + * Type-C support is optional. Only call the Type-C handler if a + * port had been registered previously. 
+ */ + if (priv->port) + tusb320_typec_irq_handler(priv, reg); regmap_write(priv->regmap, TUSB320_REG9, reg); diff --git a/drivers/firmware/arm_scmi/bus.c b/drivers/firmware/arm_scmi/bus.c index d4e23101448a..35bb70724d44 100644 --- a/drivers/firmware/arm_scmi/bus.c +++ b/drivers/firmware/arm_scmi/bus.c @@ -216,9 +216,20 @@ void scmi_device_destroy(struct scmi_device *scmi_dev) device_unregister(&scmi_dev->dev); } +void scmi_device_link_add(struct device *consumer, struct device *supplier) +{ + struct device_link *link; + + link = device_link_add(consumer, supplier, DL_FLAG_AUTOREMOVE_CONSUMER); + + WARN_ON(!link); +} + void scmi_set_handle(struct scmi_device *scmi_dev) { scmi_dev->handle = scmi_handle_get(&scmi_dev->dev); + if (scmi_dev->handle) + scmi_device_link_add(&scmi_dev->dev, scmi_dev->handle->dev); } int scmi_protocol_register(const struct scmi_protocol *proto) diff --git a/drivers/firmware/arm_scmi/common.h b/drivers/firmware/arm_scmi/common.h index 61aba7447c32..a1c0154c31c6 100644 --- a/drivers/firmware/arm_scmi/common.h +++ b/drivers/firmware/arm_scmi/common.h @@ -97,6 +97,7 @@ static inline void unpack_scmi_header(u32 msg_hdr, struct scmi_msg_hdr *hdr) struct scmi_revision_info * scmi_revision_area_get(const struct scmi_protocol_handle *ph); int scmi_handle_put(const struct scmi_handle *handle); +void scmi_device_link_add(struct device *consumer, struct device *supplier); struct scmi_handle *scmi_handle_get(struct device *dev); void scmi_set_handle(struct scmi_device *scmi_dev); void scmi_setup_protocol_implemented(const struct scmi_protocol_handle *ph, @@ -117,6 +118,7 @@ void scmi_protocol_release(const struct scmi_handle *handle, u8 protocol_id); * * @dev: Reference to device in the SCMI hierarchy corresponding to this * channel + * @rx_timeout_ms: The configured RX timeout in milliseconds. * @handle: Pointer to SCMI entity handle * @no_completion_irq: Flag to indicate that this channel has no completion * interrupt mechanism for synchronous commands. 
@@ -126,6 +128,7 @@ void scmi_protocol_release(const struct scmi_handle *handle, u8 protocol_id); */ struct scmi_chan_info { struct device *dev; + unsigned int rx_timeout_ms; struct scmi_handle *handle; bool no_completion_irq; void *transport_info; @@ -232,7 +235,7 @@ void scmi_free_channel(struct scmi_chan_info *cinfo, struct idr *idr, int id); struct scmi_shared_mem; void shmem_tx_prepare(struct scmi_shared_mem __iomem *shmem, - struct scmi_xfer *xfer); + struct scmi_xfer *xfer, struct scmi_chan_info *cinfo); u32 shmem_read_header(struct scmi_shared_mem __iomem *shmem); void shmem_fetch_response(struct scmi_shared_mem __iomem *shmem, struct scmi_xfer *xfer); diff --git a/drivers/firmware/arm_scmi/driver.c b/drivers/firmware/arm_scmi/driver.c index 609ebedee9cb..f818d00bb2c6 100644 --- a/drivers/firmware/arm_scmi/driver.c +++ b/drivers/firmware/arm_scmi/driver.c @@ -2013,6 +2013,7 @@ static int scmi_chan_setup(struct scmi_info *info, struct device *dev, return -ENOMEM; cinfo->dev = dev; + cinfo->rx_timeout_ms = info->desc->max_rx_timeout_ms; ret = info->desc->ops->chan_setup(cinfo, info->dev, tx); if (ret) @@ -2044,8 +2045,12 @@ scmi_txrx_setup(struct scmi_info *info, struct device *dev, int prot_id) { int ret = scmi_chan_setup(info, dev, prot_id, true); - if (!ret) /* Rx is optional, hence no error check */ - scmi_chan_setup(info, dev, prot_id, false); + if (!ret) { + /* Rx is optional, report only memory errors */ + ret = scmi_chan_setup(info, dev, prot_id, false); + if (ret && ret != -ENOMEM) + ret = 0; + } return ret; } @@ -2273,10 +2278,16 @@ int scmi_protocol_device_request(const struct scmi_device_id *id_table) sdev = scmi_get_protocol_device(child, info, id_table->protocol_id, id_table->name); - /* Set handle if not already set: device existed */ - if (sdev && !sdev->handle) - sdev->handle = - scmi_handle_get_from_info_unlocked(info); + if (sdev) { + /* Set handle if not already set: device existed */ + if (!sdev->handle) + sdev->handle = + scmi_handle_get_from_info_unlocked(info); + /* Relink consumer and suppliers */ + if (sdev->handle) + scmi_device_link_add(&sdev->dev, + sdev->handle->dev); + } } else { dev_err(info->dev, "Failed. 
SCMI protocol %d not active.\n", @@ -2475,20 +2486,17 @@ void scmi_free_channel(struct scmi_chan_info *cinfo, struct idr *idr, int id) static int scmi_remove(struct platform_device *pdev) { - int ret = 0, id; + int ret, id; struct scmi_info *info = platform_get_drvdata(pdev); struct device_node *child; mutex_lock(&scmi_list_mutex); if (info->users) - ret = -EBUSY; - else - list_del(&info->node); + dev_warn(&pdev->dev, + "Still active SCMI users will be forcibly unbound.\n"); + list_del(&info->node); mutex_unlock(&scmi_list_mutex); - if (ret) - return ret; - scmi_notification_exit(&info->handle); mutex_lock(&info->protocols_mtx); @@ -2500,7 +2508,11 @@ static int scmi_remove(struct platform_device *pdev) idr_destroy(&info->active_protocols); /* Safe to free channels since no more users */ - return scmi_cleanup_txrx_channels(info); + ret = scmi_cleanup_txrx_channels(info); + if (ret) + dev_warn(&pdev->dev, "Failed to cleanup SCMI channels.\n"); + + return 0; } static ssize_t protocol_version_show(struct device *dev, @@ -2571,6 +2583,7 @@ MODULE_DEVICE_TABLE(of, scmi_of_match); static struct platform_driver scmi_driver = { .driver = { .name = "arm-scmi", + .suppress_bind_attrs = true, .of_match_table = scmi_of_match, .dev_groups = versions_groups, }, diff --git a/drivers/firmware/arm_scmi/mailbox.c b/drivers/firmware/arm_scmi/mailbox.c index 08ff4d110beb..1e40cb035044 100644 --- a/drivers/firmware/arm_scmi/mailbox.c +++ b/drivers/firmware/arm_scmi/mailbox.c @@ -36,7 +36,7 @@ static void tx_prepare(struct mbox_client *cl, void *m) { struct scmi_mailbox *smbox = client_to_scmi_mailbox(cl); - shmem_tx_prepare(smbox->shmem, m); + shmem_tx_prepare(smbox->shmem, m, smbox->cinfo); } static void rx_callback(struct mbox_client *cl, void *m) diff --git a/drivers/firmware/arm_scmi/optee.c b/drivers/firmware/arm_scmi/optee.c index f42dad997ac9..2a7aeab40e54 100644 --- a/drivers/firmware/arm_scmi/optee.c +++ b/drivers/firmware/arm_scmi/optee.c @@ -498,7 +498,7 @@ static int scmi_optee_send_message(struct scmi_chan_info *cinfo, msg_tx_prepare(channel->req.msg, xfer); ret = invoke_process_msg_channel(channel, msg_command_size(xfer)); } else { - shmem_tx_prepare(channel->req.shmem, xfer); + shmem_tx_prepare(channel->req.shmem, xfer, cinfo); ret = invoke_process_smt_channel(channel); } diff --git a/drivers/firmware/arm_scmi/shmem.c b/drivers/firmware/arm_scmi/shmem.c index 0e3eaea5d852..1dfe534b8518 100644 --- a/drivers/firmware/arm_scmi/shmem.c +++ b/drivers/firmware/arm_scmi/shmem.c @@ -5,10 +5,13 @@ * Copyright (C) 2019 ARM Ltd. */ +#include <linux/ktime.h> #include <linux/io.h> #include <linux/processor.h> #include <linux/types.h> +#include <asm-generic/bug.h> + #include "common.h" /* @@ -30,16 +33,36 @@ struct scmi_shared_mem { }; void shmem_tx_prepare(struct scmi_shared_mem __iomem *shmem, - struct scmi_xfer *xfer) + struct scmi_xfer *xfer, struct scmi_chan_info *cinfo) { + ktime_t stop; + /* * Ideally channel must be free by now unless OS timeout last * request and platform continued to process the same, wait * until it releases the shared memory, otherwise we may endup - * overwriting its response with new message payload or vice-versa + * overwriting its response with new message payload or vice-versa. + * Giving up anyway after twice the expected channel timeout so as + * not to bail-out on intermittent issues where the platform is + * occasionally a bit slower to answer. 
+ * + * Note that after a timeout is detected we bail-out and carry on but + * the transport functionality is probably permanently compromised: + * this is just to ease debugging and avoid complete hangs on boot + * due to a misbehaving SCMI firmware. */ - spin_until_cond(ioread32(&shmem->channel_status) & - SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE); + stop = ktime_add_ms(ktime_get(), 2 * cinfo->rx_timeout_ms); + spin_until_cond((ioread32(&shmem->channel_status) & + SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE) || + ktime_after(ktime_get(), stop)); + if (!(ioread32(&shmem->channel_status) & + SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE)) { + WARN_ON_ONCE(1); + dev_err(cinfo->dev, + "Timeout waiting for a free TX channel !\n"); + return; + } + /* Mark channel busy + clear error */ iowrite32(0x0, &shmem->channel_status); iowrite32(xfer->hdr.poll_completion ? 0 : SCMI_SHMEM_FLAG_INTR_ENABLED, diff --git a/drivers/firmware/arm_scmi/smc.c b/drivers/firmware/arm_scmi/smc.c index 745acfdd0b3d..87a7b13cf868 100644 --- a/drivers/firmware/arm_scmi/smc.c +++ b/drivers/firmware/arm_scmi/smc.c @@ -188,7 +188,7 @@ static int smc_send_message(struct scmi_chan_info *cinfo, */ smc_channel_lock_acquire(scmi_info, xfer); - shmem_tx_prepare(scmi_info->shmem, xfer); + shmem_tx_prepare(scmi_info->shmem, xfer, cinfo); arm_smccc_1_1_invoke(scmi_info->func_id, 0, 0, 0, 0, 0, 0, 0, &res); diff --git a/drivers/firmware/arm_scmi/virtio.c b/drivers/firmware/arm_scmi/virtio.c index 14709dbc96a1..33c9b81a55cd 100644 --- a/drivers/firmware/arm_scmi/virtio.c +++ b/drivers/firmware/arm_scmi/virtio.c @@ -148,7 +148,6 @@ static void scmi_vio_channel_cleanup_sync(struct scmi_vio_channel *vioch) { unsigned long flags; DECLARE_COMPLETION_ONSTACK(vioch_shutdown_done); - void *deferred_wq = NULL; /* * Prepare to wait for the last release if not already released @@ -162,16 +161,11 @@ static void scmi_vio_channel_cleanup_sync(struct scmi_vio_channel *vioch) vioch->shutdown_done = &vioch_shutdown_done; virtio_break_device(vioch->vqueue->vdev); - if (!vioch->is_rx && vioch->deferred_tx_wq) { - deferred_wq = vioch->deferred_tx_wq; + if (!vioch->is_rx && vioch->deferred_tx_wq) /* Cannot be kicked anymore after this...*/ vioch->deferred_tx_wq = NULL; - } spin_unlock_irqrestore(&vioch->lock, flags); - if (deferred_wq) - destroy_workqueue(deferred_wq); - scmi_vio_channel_release(vioch); /* Let any possibly concurrent RX path release the channel */ @@ -416,6 +410,11 @@ static bool virtio_chan_available(struct device *dev, int idx) return vioch && !vioch->cinfo; } +static void scmi_destroy_tx_workqueue(void *deferred_tx_wq) +{ + destroy_workqueue(deferred_tx_wq); +} + static int virtio_chan_setup(struct scmi_chan_info *cinfo, struct device *dev, bool tx) { @@ -430,6 +429,8 @@ static int virtio_chan_setup(struct scmi_chan_info *cinfo, struct device *dev, /* Setup a deferred worker for polling. 
*/ if (tx && !vioch->deferred_tx_wq) { + int ret; + vioch->deferred_tx_wq = alloc_workqueue(dev_name(&scmi_vdev->dev), WQ_UNBOUND | WQ_FREEZABLE | WQ_SYSFS, @@ -437,6 +438,11 @@ static int virtio_chan_setup(struct scmi_chan_info *cinfo, struct device *dev, if (!vioch->deferred_tx_wq) return -ENOMEM; + ret = devm_add_action_or_reset(dev, scmi_destroy_tx_workqueue, + vioch->deferred_tx_wq); + if (ret) + return ret; + INIT_WORK(&vioch->deferred_tx_work, scmi_vio_deferred_tx_worker); } @@ -444,12 +450,12 @@ static int virtio_chan_setup(struct scmi_chan_info *cinfo, struct device *dev, for (i = 0; i < vioch->max_msg; i++) { struct scmi_vio_msg *msg; - msg = devm_kzalloc(cinfo->dev, sizeof(*msg), GFP_KERNEL); + msg = devm_kzalloc(dev, sizeof(*msg), GFP_KERNEL); if (!msg) return -ENOMEM; if (tx) { - msg->request = devm_kzalloc(cinfo->dev, + msg->request = devm_kzalloc(dev, VIRTIO_SCMI_MAX_PDU_SIZE, GFP_KERNEL); if (!msg->request) @@ -458,7 +464,7 @@ static int virtio_chan_setup(struct scmi_chan_info *cinfo, struct device *dev, refcount_set(&msg->users, 1); } - msg->input = devm_kzalloc(cinfo->dev, VIRTIO_SCMI_MAX_PDU_SIZE, + msg->input = devm_kzalloc(dev, VIRTIO_SCMI_MAX_PDU_SIZE, GFP_KERNEL); if (!msg->input) return -ENOMEM; diff --git a/drivers/firmware/efi/Kconfig b/drivers/firmware/efi/Kconfig index 5b79a4a4a88d..6787ed8dfacf 100644 --- a/drivers/firmware/efi/Kconfig +++ b/drivers/firmware/efi/Kconfig @@ -124,28 +124,6 @@ config EFI_ZBOOT is supported by the encapsulated image. (The compression algorithm used is described in the zboot image header) -config EFI_ZBOOT_SIGNED - def_bool y - depends on EFI_ZBOOT_SIGNING_CERT != "" - depends on EFI_ZBOOT_SIGNING_KEY != "" - -config EFI_ZBOOT_SIGNING - bool "Sign the EFI decompressor for UEFI secure boot" - depends on EFI_ZBOOT - help - Use the 'sbsign' command line tool (which must exist on the host - path) to sign both the EFI decompressor PE/COFF image, as well as the - encapsulated PE/COFF image, which is subsequently compressed and - wrapped by the former image. 
- -config EFI_ZBOOT_SIGNING_CERT - string "Certificate to use for signing the compressed EFI boot image" - depends on EFI_ZBOOT_SIGNING - -config EFI_ZBOOT_SIGNING_KEY - string "Private key to use for signing the compressed EFI boot image" - depends on EFI_ZBOOT_SIGNING - config EFI_ARMSTUB_DTB_LOADER bool "Enable the DTB loader" depends on EFI_GENERIC_STUB && !RISCV && !LOONGARCH diff --git a/drivers/firmware/efi/arm-runtime.c b/drivers/firmware/efi/arm-runtime.c index 3359ae2adf24..7c48c380d722 100644 --- a/drivers/firmware/efi/arm-runtime.c +++ b/drivers/firmware/efi/arm-runtime.c @@ -63,7 +63,7 @@ static bool __init efi_virtmap_init(void) if (!(md->attribute & EFI_MEMORY_RUNTIME)) continue; - if (md->virt_addr == 0) + if (md->virt_addr == U64_MAX) return false; ret = efi_create_mapping(&efi_mm, md); diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c index 9624735f1575..a46df5d1d094 100644 --- a/drivers/firmware/efi/efi.c +++ b/drivers/firmware/efi/efi.c @@ -271,6 +271,8 @@ static __init int efivar_ssdt_load(void) acpi_status ret = acpi_load_table(data, NULL); if (ret) pr_err("failed to load table: %u\n", ret); + else + continue; } else { pr_err("failed to get var data: 0x%lx\n", status); } @@ -609,7 +611,7 @@ int __init efi_config_parse_tables(const efi_config_table_t *config_tables, seed = early_memremap(efi_rng_seed, sizeof(*seed)); if (seed != NULL) { - size = READ_ONCE(seed->size); + size = min(seed->size, EFI_RANDOM_SEED_SIZE); early_memunmap(seed, sizeof(*seed)); } else { pr_err("Could not map UEFI random seed!\n"); diff --git a/drivers/firmware/efi/libstub/Makefile b/drivers/firmware/efi/libstub/Makefile index b1601aad7e1a..ef5045a53ce0 100644 --- a/drivers/firmware/efi/libstub/Makefile +++ b/drivers/firmware/efi/libstub/Makefile @@ -82,7 +82,7 @@ $(obj)/lib-%.o: $(srctree)/lib/%.c FORCE lib-$(CONFIG_EFI_GENERIC_STUB) += efi-stub.o string.o intrinsics.o systable.o lib-$(CONFIG_ARM) += arm32-stub.o -lib-$(CONFIG_ARM64) += arm64-stub.o +lib-$(CONFIG_ARM64) += arm64-stub.o smbios.o lib-$(CONFIG_X86) += x86-stub.o lib-$(CONFIG_RISCV) += riscv-stub.o lib-$(CONFIG_LOONGARCH) += loongarch-stub.o diff --git a/drivers/firmware/efi/libstub/Makefile.zboot b/drivers/firmware/efi/libstub/Makefile.zboot index 35f234ad8738..3340b385a05b 100644 --- a/drivers/firmware/efi/libstub/Makefile.zboot +++ b/drivers/firmware/efi/libstub/Makefile.zboot @@ -20,22 +20,11 @@ zboot-size-len-y := 4 zboot-method-$(CONFIG_KERNEL_GZIP) := gzip zboot-size-len-$(CONFIG_KERNEL_GZIP) := 0 -quiet_cmd_sbsign = SBSIGN $@ - cmd_sbsign = sbsign --out $@ $< \ - --key $(CONFIG_EFI_ZBOOT_SIGNING_KEY) \ - --cert $(CONFIG_EFI_ZBOOT_SIGNING_CERT) - -$(obj)/$(EFI_ZBOOT_PAYLOAD).signed: $(obj)/$(EFI_ZBOOT_PAYLOAD) FORCE - $(call if_changed,sbsign) - -ZBOOT_PAYLOAD-y := $(EFI_ZBOOT_PAYLOAD) -ZBOOT_PAYLOAD-$(CONFIG_EFI_ZBOOT_SIGNED) := $(EFI_ZBOOT_PAYLOAD).signed - -$(obj)/vmlinuz: $(obj)/$(ZBOOT_PAYLOAD-y) FORCE +$(obj)/vmlinuz: $(obj)/$(EFI_ZBOOT_PAYLOAD) FORCE $(call if_changed,$(zboot-method-y)) OBJCOPYFLAGS_vmlinuz.o := -I binary -O $(EFI_ZBOOT_BFD_TARGET) \ - --rename-section .data=.gzdata,load,alloc,readonly,contents + --rename-section .data=.gzdata,load,alloc,readonly,contents $(obj)/vmlinuz.o: $(obj)/vmlinuz FORCE $(call if_changed,objcopy) @@ -53,18 +42,8 @@ LDFLAGS_vmlinuz.efi.elf := -T $(srctree)/drivers/firmware/efi/libstub/zboot.lds $(obj)/vmlinuz.efi.elf: $(obj)/vmlinuz.o $(ZBOOT_DEPS) FORCE $(call if_changed,ld) -ZBOOT_EFI-y := vmlinuz.efi -ZBOOT_EFI-$(CONFIG_EFI_ZBOOT_SIGNED) := 
vmlinuz.efi.unsigned - -OBJCOPYFLAGS_$(ZBOOT_EFI-y) := -O binary -$(obj)/$(ZBOOT_EFI-y): $(obj)/vmlinuz.efi.elf FORCE +OBJCOPYFLAGS_vmlinuz.efi := -O binary +$(obj)/vmlinuz.efi: $(obj)/vmlinuz.efi.elf FORCE $(call if_changed,objcopy) targets += zboot-header.o vmlinuz vmlinuz.o vmlinuz.efi.elf vmlinuz.efi - -ifneq ($(CONFIG_EFI_ZBOOT_SIGNED),) -$(obj)/vmlinuz.efi: $(obj)/vmlinuz.efi.unsigned FORCE - $(call if_changed,sbsign) -endif - -targets += $(EFI_ZBOOT_PAYLOAD).signed vmlinuz.efi.unsigned diff --git a/drivers/firmware/efi/libstub/arm64-stub.c b/drivers/firmware/efi/libstub/arm64-stub.c index 259e4b852d63..f9de5217ea65 100644 --- a/drivers/firmware/efi/libstub/arm64-stub.c +++ b/drivers/firmware/efi/libstub/arm64-stub.c @@ -15,6 +15,21 @@ #include "efistub.h" +static bool system_needs_vamap(void) +{ + const u8 *type1_family = efi_get_smbios_string(1, family); + + /* + * Ampere Altra machines crash in SetTime() if SetVirtualAddressMap() + * has not been called prior. + */ + if (!type1_family || strcmp(type1_family, "Altra")) + return false; + + efi_warn("Working around broken SetVirtualAddressMap()\n"); + return true; +} + efi_status_t check_platform_features(void) { u64 tg; @@ -24,7 +39,7 @@ efi_status_t check_platform_features(void) * UEFI runtime regions 1:1 and so calling SetVirtualAddressMap() is * unnecessary. */ - if (VA_BITS_MIN >= 48) + if (VA_BITS_MIN >= 48 && !system_needs_vamap()) efi_novamap = true; /* UEFI mandates support for 4 KB granularity, no need to check */ diff --git a/drivers/firmware/efi/libstub/efistub.h b/drivers/firmware/efi/libstub/efistub.h index a30fb5d8ef05..eb03d5a9aac8 100644 --- a/drivers/firmware/efi/libstub/efistub.h +++ b/drivers/firmware/efi/libstub/efistub.h @@ -975,4 +975,32 @@ efi_enable_reset_attack_mitigation(void) { } void efi_retrieve_tpm2_eventlog(void); +struct efi_smbios_record { + u8 type; + u8 length; + u16 handle; +}; + +struct efi_smbios_type1_record { + struct efi_smbios_record header; + + u8 manufacturer; + u8 product_name; + u8 version; + u8 serial_number; + efi_guid_t uuid; + u8 wakeup_type; + u8 sku_number; + u8 family; +}; + +#define efi_get_smbios_string(__type, __name) ({ \ + int size = sizeof(struct efi_smbios_type ## __type ## _record); \ + int off = offsetof(struct efi_smbios_type ## __type ## _record, \ + __name); \ + __efi_get_smbios_string(__type, off, size); \ +}) + +const u8 *__efi_get_smbios_string(u8 type, int offset, int recsize); + #endif diff --git a/drivers/firmware/efi/libstub/fdt.c b/drivers/firmware/efi/libstub/fdt.c index 4f4d98e51fbf..70e9789ff9de 100644 --- a/drivers/firmware/efi/libstub/fdt.c +++ b/drivers/firmware/efi/libstub/fdt.c @@ -313,16 +313,16 @@ efi_status_t allocate_new_fdt_and_exit_boot(void *handle, /* * Set the virtual address field of all - * EFI_MEMORY_RUNTIME entries to 0. This will signal - * the incoming kernel that no virtual translation has - * been installed. + * EFI_MEMORY_RUNTIME entries to U64_MAX. This will + * signal the incoming kernel that no virtual + * translation has been installed. 
*/ for (l = 0; l < priv.boot_memmap->map_size; l += priv.boot_memmap->desc_size) { p = (void *)priv.boot_memmap->map + l; if (p->attribute & EFI_MEMORY_RUNTIME) - p->virt_addr = 0; + p->virt_addr = U64_MAX; } } return EFI_SUCCESS; diff --git a/drivers/firmware/efi/libstub/random.c b/drivers/firmware/efi/libstub/random.c index 24aa37535372..33ab56769595 100644 --- a/drivers/firmware/efi/libstub/random.c +++ b/drivers/firmware/efi/libstub/random.c @@ -75,7 +75,12 @@ efi_status_t efi_random_get_seed(void) if (status != EFI_SUCCESS) return status; - status = efi_bs_call(allocate_pool, EFI_RUNTIME_SERVICES_DATA, + /* + * Use EFI_ACPI_RECLAIM_MEMORY here so that it is guaranteed that the + * allocation will survive a kexec reboot (although we refresh the seed + * beforehand) + */ + status = efi_bs_call(allocate_pool, EFI_ACPI_RECLAIM_MEMORY, sizeof(*seed) + EFI_RANDOM_SEED_SIZE, (void **)&seed); if (status != EFI_SUCCESS) diff --git a/drivers/firmware/efi/libstub/smbios.c b/drivers/firmware/efi/libstub/smbios.c new file mode 100644 index 000000000000..460418b7f5f5 --- /dev/null +++ b/drivers/firmware/efi/libstub/smbios.c @@ -0,0 +1,48 @@ +// SPDX-License-Identifier: GPL-2.0-only +// Copyright 2022 Google LLC +// Author: Ard Biesheuvel <ardb@google.com> + +#include <linux/efi.h> + +#include "efistub.h" + +typedef struct efi_smbios_protocol efi_smbios_protocol_t; + +struct efi_smbios_protocol { + efi_status_t (__efiapi *add)(efi_smbios_protocol_t *, efi_handle_t, + u16 *, struct efi_smbios_record *); + efi_status_t (__efiapi *update_string)(efi_smbios_protocol_t *, u16 *, + unsigned long *, u8 *); + efi_status_t (__efiapi *remove)(efi_smbios_protocol_t *, u16); + efi_status_t (__efiapi *get_next)(efi_smbios_protocol_t *, u16 *, u8 *, + struct efi_smbios_record **, + efi_handle_t *); + + u8 major_version; + u8 minor_version; +}; + +const u8 *__efi_get_smbios_string(u8 type, int offset, int recsize) +{ + struct efi_smbios_record *record; + efi_smbios_protocol_t *smbios; + efi_status_t status; + u16 handle = 0xfffe; + const u8 *strtable; + + status = efi_bs_call(locate_protocol, &EFI_SMBIOS_PROTOCOL_GUID, NULL, + (void **)&smbios) ?: + efi_call_proto(smbios, get_next, &handle, &type, &record, NULL); + if (status != EFI_SUCCESS) + return NULL; + + strtable = (u8 *)record + recsize; + for (int i = 1; i < ((u8 *)record)[offset]; i++) { + int len = strlen(strtable); + + if (!len) + return NULL; + strtable += len + 1; + } + return strtable; +} diff --git a/drivers/firmware/efi/libstub/x86-stub.c b/drivers/firmware/efi/libstub/x86-stub.c index b9ce6393e353..33a7811e12c6 100644 --- a/drivers/firmware/efi/libstub/x86-stub.c +++ b/drivers/firmware/efi/libstub/x86-stub.c @@ -765,9 +765,9 @@ static efi_status_t exit_boot(struct boot_params *boot_params, void *handle) * relocated by efi_relocate_kernel. * On failure, we exit to the firmware via efi_exit instead of returning. */ -unsigned long efi_main(efi_handle_t handle, - efi_system_table_t *sys_table_arg, - struct boot_params *boot_params) +asmlinkage unsigned long efi_main(efi_handle_t handle, + efi_system_table_t *sys_table_arg, + struct boot_params *boot_params) { unsigned long bzimage_addr = (unsigned long)startup_32; unsigned long buffer_start, buffer_end; diff --git a/drivers/firmware/efi/libstub/zboot.lds b/drivers/firmware/efi/libstub/zboot.lds index 87a62765bafd..93d33f68333b 100644 --- a/drivers/firmware/efi/libstub/zboot.lds +++ b/drivers/firmware/efi/libstub/zboot.lds @@ -38,7 +38,8 @@ SECTIONS } } -PROVIDE(__efistub__gzdata_size = ABSOLUTE(. 
- __efistub__gzdata_start)); +PROVIDE(__efistub__gzdata_size = + ABSOLUTE(__efistub__gzdata_end - __efistub__gzdata_start)); PROVIDE(__data_rawsize = ABSOLUTE(_edata - _etext)); PROVIDE(__data_size = ABSOLUTE(_end - _etext)); diff --git a/drivers/firmware/efi/riscv-runtime.c b/drivers/firmware/efi/riscv-runtime.c index d28e715d2bcc..d0daacd2c903 100644 --- a/drivers/firmware/efi/riscv-runtime.c +++ b/drivers/firmware/efi/riscv-runtime.c @@ -41,7 +41,7 @@ static bool __init efi_virtmap_init(void) if (!(md->attribute & EFI_MEMORY_RUNTIME)) continue; - if (md->virt_addr == 0) + if (md->virt_addr == U64_MAX) return false; ret = efi_create_mapping(&efi_mm, md); diff --git a/drivers/firmware/efi/tpm.c b/drivers/firmware/efi/tpm.c index 8f665678e9e3..e8d69bd548f3 100644 --- a/drivers/firmware/efi/tpm.c +++ b/drivers/firmware/efi/tpm.c @@ -97,7 +97,7 @@ int __init efi_tpm_eventlog_init(void) goto out_calc; } - memblock_reserve((unsigned long)final_tbl, + memblock_reserve(efi.tpm_final_log, tbl_size + sizeof(*final_tbl)); efi_tpm_final_log_size = tbl_size; diff --git a/drivers/firmware/efi/vars.c b/drivers/firmware/efi/vars.c index dd74d2ad3184..0ba9f18312f5 100644 --- a/drivers/firmware/efi/vars.c +++ b/drivers/firmware/efi/vars.c @@ -7,6 +7,7 @@ */ #include <linux/types.h> +#include <linux/sizes.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/module.h> @@ -20,31 +21,23 @@ static struct efivars *__efivars; static DEFINE_SEMAPHORE(efivars_lock); -efi_status_t check_var_size(u32 attributes, unsigned long size) -{ - const struct efivar_operations *fops; - - fops = __efivars->ops; - - if (!fops->query_variable_store) - return EFI_UNSUPPORTED; - - return fops->query_variable_store(attributes, size, false); -} -EXPORT_SYMBOL_NS_GPL(check_var_size, EFIVAR); - -efi_status_t check_var_size_nonblocking(u32 attributes, unsigned long size) +static efi_status_t check_var_size(bool nonblocking, u32 attributes, + unsigned long size) { const struct efivar_operations *fops; + efi_status_t status; fops = __efivars->ops; if (!fops->query_variable_store) - return EFI_UNSUPPORTED; - - return fops->query_variable_store(attributes, size, true); + status = EFI_UNSUPPORTED; + else + status = fops->query_variable_store(attributes, size, + nonblocking); + if (status == EFI_UNSUPPORTED) + return (size <= SZ_64K) ? EFI_SUCCESS : EFI_OUT_OF_RESOURCES; + return status; } -EXPORT_SYMBOL_NS_GPL(check_var_size_nonblocking, EFIVAR); /** * efivars_kobject - get the kobject for the registered efivars @@ -196,26 +189,6 @@ efi_status_t efivar_get_next_variable(unsigned long *name_size, EXPORT_SYMBOL_NS_GPL(efivar_get_next_variable, EFIVAR); /* - * efivar_set_variable_blocking() - local helper function for set_variable - * - * Must be called with efivars_lock held. - */ -static efi_status_t -efivar_set_variable_blocking(efi_char16_t *name, efi_guid_t *vendor, - u32 attr, unsigned long data_size, void *data) -{ - efi_status_t status; - - if (data_size > 0) { - status = check_var_size(attr, data_size + - ucs2_strsize(name, 1024)); - if (status != EFI_SUCCESS) - return status; - } - return __efivars->ops->set_variable(name, vendor, attr, data_size, data); -} - -/* * efivar_set_variable_locked() - set a variable identified by name/vendor * * Must be called with efivars_lock held. 
If @nonblocking is set, it will use @@ -228,23 +201,21 @@ efi_status_t efivar_set_variable_locked(efi_char16_t *name, efi_guid_t *vendor, efi_set_variable_t *setvar; efi_status_t status; - if (!nonblocking) - return efivar_set_variable_blocking(name, vendor, attr, - data_size, data); + if (data_size > 0) { + status = check_var_size(nonblocking, attr, + data_size + ucs2_strsize(name, 1024)); + if (status != EFI_SUCCESS) + return status; + } /* * If no _nonblocking variant exists, the ordinary one * is assumed to be non-blocking. */ - setvar = __efivars->ops->set_variable_nonblocking ?: - __efivars->ops->set_variable; + setvar = __efivars->ops->set_variable_nonblocking; + if (!setvar || !nonblocking) + setvar = __efivars->ops->set_variable; - if (data_size > 0) { - status = check_var_size_nonblocking(attr, data_size + - ucs2_strsize(name, 1024)); - if (status != EFI_SUCCESS) - return status; - } return setvar(name, vendor, attr, data_size, data); } EXPORT_SYMBOL_NS_GPL(efivar_set_variable_locked, EFIVAR); @@ -264,7 +235,8 @@ efi_status_t efivar_set_variable(efi_char16_t *name, efi_guid_t *vendor, if (efivar_lock()) return EFI_ABORTED; - status = efivar_set_variable_blocking(name, vendor, attr, data_size, data); + status = efivar_set_variable_locked(name, vendor, attr, data_size, + data, false); efivar_unlock(); return status; } diff --git a/drivers/firmware/google/coreboot_table.c b/drivers/firmware/google/coreboot_table.c index c52bcaa9def6..9ca21feb9d45 100644 --- a/drivers/firmware/google/coreboot_table.c +++ b/drivers/firmware/google/coreboot_table.c @@ -149,12 +149,8 @@ static int coreboot_table_probe(struct platform_device *pdev) if (!ptr) return -ENOMEM; - ret = bus_register(&coreboot_bus_type); - if (!ret) { - ret = coreboot_table_populate(dev, ptr); - if (ret) - bus_unregister(&coreboot_bus_type); - } + ret = coreboot_table_populate(dev, ptr); + memunmap(ptr); return ret; @@ -169,7 +165,6 @@ static int __cb_dev_unregister(struct device *dev, void *dummy) static int coreboot_table_remove(struct platform_device *pdev) { bus_for_each_dev(&coreboot_bus_type, NULL, NULL, __cb_dev_unregister); - bus_unregister(&coreboot_bus_type); return 0; } @@ -199,6 +194,32 @@ static struct platform_driver coreboot_table_driver = { .of_match_table = of_match_ptr(coreboot_of_match), }, }; -module_platform_driver(coreboot_table_driver); + +static int __init coreboot_table_driver_init(void) +{ + int ret; + + ret = bus_register(&coreboot_bus_type); + if (ret) + return ret; + + ret = platform_driver_register(&coreboot_table_driver); + if (ret) { + bus_unregister(&coreboot_bus_type); + return ret; + } + + return 0; +} + +static void __exit coreboot_table_driver_exit(void) +{ + platform_driver_unregister(&coreboot_table_driver); + bus_unregister(&coreboot_bus_type); +} + +module_init(coreboot_table_driver_init); +module_exit(coreboot_table_driver_exit); + MODULE_AUTHOR("Google, Inc."); MODULE_LICENSE("GPL"); diff --git a/drivers/gpio/gpio-tegra.c b/drivers/gpio/gpio-tegra.c index e4fb4cb38a0f..5b265a6fd3c1 100644 --- a/drivers/gpio/gpio-tegra.c +++ b/drivers/gpio/gpio-tegra.c @@ -18,6 +18,7 @@ #include <linux/of_device.h> #include <linux/platform_device.h> #include <linux/module.h> +#include <linux/seq_file.h> #include <linux/irqdomain.h> #include <linux/irqchip/chained_irq.h> #include <linux/pinctrl/consumer.h> @@ -94,7 +95,6 @@ struct tegra_gpio_info { struct tegra_gpio_bank *bank_info; const struct tegra_gpio_soc_config *soc; struct gpio_chip gc; - struct irq_chip ic; u32 bank_count; unsigned int *irqs; }; 
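The gpio-tegra hunks around this point convert the driver from a writable, per-device struct irq_chip to const, immutable irq_chips attached with gpio_irq_chip_set_chip(): masking drops the GPIO reference via gpiochip_disable_irq() and unmasking takes it back via gpiochip_enable_irq(). A minimal sketch of that general pattern follows; the foo_* names are hypothetical placeholders and the hardware-specific register accesses are elided, so this is not the Tegra code itself.

/* Immutable irq_chip sketch (hypothetical foo_* names, hardware bits elided). */
#include <linux/gpio/driver.h>
#include <linux/irq.h>

static void foo_gpio_irq_mask(struct irq_data *d)
{
	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);

	/* mask the interrupt in hardware here, then drop the GPIO reference */
	gpiochip_disable_irq(gc, irqd_to_hwirq(d));
}

static void foo_gpio_irq_unmask(struct irq_data *d)
{
	struct gpio_chip *gc = irq_data_get_irq_chip_data(d);

	/* take the GPIO reference back before unmasking the interrupt in hardware */
	gpiochip_enable_irq(gc, irqd_to_hwirq(d));
}

static const struct irq_chip foo_gpio_irq_chip = {
	.name		= "foo-gpio",
	.irq_mask	= foo_gpio_irq_mask,
	.irq_unmask	= foo_gpio_irq_unmask,
	.flags		= IRQCHIP_IMMUTABLE,
	GPIOCHIP_IRQ_RESOURCE_HELPERS,
};

/* In probe(), instead of pointing gc->irq.chip at a writable structure: */
/*	gpio_irq_chip_set_chip(&gc->irq, &foo_gpio_irq_chip);              */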
@@ -288,6 +288,7 @@ static void tegra_gpio_irq_mask(struct irq_data *d) unsigned int gpio = d->hwirq; tegra_gpio_mask_write(tgi, GPIO_MSK_INT_ENB(tgi, gpio), gpio, 0); + gpiochip_disable_irq(chip, gpio); } static void tegra_gpio_irq_unmask(struct irq_data *d) @@ -296,6 +297,7 @@ static void tegra_gpio_irq_unmask(struct irq_data *d) struct tegra_gpio_info *tgi = gpiochip_get_data(chip); unsigned int gpio = d->hwirq; + gpiochip_enable_irq(chip, gpio); tegra_gpio_mask_write(tgi, GPIO_MSK_INT_ENB(tgi, gpio), gpio, 1); } @@ -598,10 +600,47 @@ static void tegra_gpio_irq_release_resources(struct irq_data *d) tegra_gpio_enable(tgi, d->hwirq); } +static void tegra_gpio_irq_print_chip(struct irq_data *d, struct seq_file *s) +{ + struct gpio_chip *chip = irq_data_get_irq_chip_data(d); + + seq_printf(s, dev_name(chip->parent)); +} + +static const struct irq_chip tegra_gpio_irq_chip = { + .irq_shutdown = tegra_gpio_irq_shutdown, + .irq_ack = tegra_gpio_irq_ack, + .irq_mask = tegra_gpio_irq_mask, + .irq_unmask = tegra_gpio_irq_unmask, + .irq_set_type = tegra_gpio_irq_set_type, +#ifdef CONFIG_PM_SLEEP + .irq_set_wake = tegra_gpio_irq_set_wake, +#endif + .irq_print_chip = tegra_gpio_irq_print_chip, + .irq_request_resources = tegra_gpio_irq_request_resources, + .irq_release_resources = tegra_gpio_irq_release_resources, + .flags = IRQCHIP_IMMUTABLE, +}; + +static const struct irq_chip tegra210_gpio_irq_chip = { + .irq_shutdown = tegra_gpio_irq_shutdown, + .irq_ack = tegra_gpio_irq_ack, + .irq_mask = tegra_gpio_irq_mask, + .irq_unmask = tegra_gpio_irq_unmask, + .irq_set_affinity = tegra_gpio_irq_set_affinity, + .irq_set_type = tegra_gpio_irq_set_type, +#ifdef CONFIG_PM_SLEEP + .irq_set_wake = tegra_gpio_irq_set_wake, +#endif + .irq_print_chip = tegra_gpio_irq_print_chip, + .irq_request_resources = tegra_gpio_irq_request_resources, + .irq_release_resources = tegra_gpio_irq_release_resources, + .flags = IRQCHIP_IMMUTABLE, +}; + #ifdef CONFIG_DEBUG_FS #include <linux/debugfs.h> -#include <linux/seq_file.h> static int tegra_dbg_gpio_show(struct seq_file *s, void *unused) { @@ -689,18 +728,6 @@ static int tegra_gpio_probe(struct platform_device *pdev) tgi->gc.ngpio = tgi->bank_count * 32; tgi->gc.parent = &pdev->dev; - tgi->ic.name = "GPIO"; - tgi->ic.irq_ack = tegra_gpio_irq_ack; - tgi->ic.irq_mask = tegra_gpio_irq_mask; - tgi->ic.irq_unmask = tegra_gpio_irq_unmask; - tgi->ic.irq_set_type = tegra_gpio_irq_set_type; - tgi->ic.irq_shutdown = tegra_gpio_irq_shutdown; -#ifdef CONFIG_PM_SLEEP - tgi->ic.irq_set_wake = tegra_gpio_irq_set_wake; -#endif - tgi->ic.irq_request_resources = tegra_gpio_irq_request_resources; - tgi->ic.irq_release_resources = tegra_gpio_irq_release_resources; - platform_set_drvdata(pdev, tgi); if (tgi->soc->debounce_supported) @@ -733,7 +760,6 @@ static int tegra_gpio_probe(struct platform_device *pdev) } irq = &tgi->gc.irq; - irq->chip = &tgi->ic; irq->fwnode = of_node_to_fwnode(pdev->dev.of_node); irq->child_to_parent_hwirq = tegra_gpio_child_to_parent_hwirq; irq->populate_parent_alloc_arg = tegra_gpio_populate_parent_fwspec; @@ -752,7 +778,9 @@ static int tegra_gpio_probe(struct platform_device *pdev) if (!irq->parent_domain) return -EPROBE_DEFER; - tgi->ic.irq_set_affinity = tegra_gpio_irq_set_affinity; + gpio_irq_chip_set_chip(irq, &tegra210_gpio_irq_chip); + } else { + gpio_irq_chip_set_chip(irq, &tegra_gpio_irq_chip); } tgi->regs = devm_platform_ioremap_resource(pdev, 0); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 
ae9371b172e3..2eca58220550 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -274,9 +274,6 @@ extern int amdgpu_vcnfw_log; #define AMDGPU_RESET_VCE (1 << 13) #define AMDGPU_RESET_VCE1 (1 << 14) -#define AMDGPU_RESET_LEVEL_SOFT_RECOVERY (1 << 0) -#define AMDGPU_RESET_LEVEL_MODE2 (1 << 1) - /* max cursor sizes (in pixels) */ #define CIK_CURSOR_WIDTH 128 #define CIK_CURSOR_HEIGHT 128 @@ -1065,7 +1062,6 @@ struct amdgpu_device { struct work_struct reset_work; - uint32_t amdgpu_reset_level_mask; bool job_hang; }; @@ -1297,6 +1293,7 @@ void amdgpu_device_pcie_port_wreg(struct amdgpu_device *adev, u32 reg, u32 v); struct dma_fence *amdgpu_device_switch_gang(struct amdgpu_device *adev, struct dma_fence *gang); +bool amdgpu_device_has_display_hardware(struct amdgpu_device *adev); /* atpx handler */ #if defined(CONFIG_VGA_SWITCHEROO) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c index 03bbfaa51cbc..5d9a34601a1a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c @@ -134,7 +134,6 @@ static void amdgpu_amdkfd_reset_work(struct work_struct *work) reset_context.method = AMD_RESET_METHOD_NONE; reset_context.reset_req_dev = adev; clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags); - clear_bit(AMDGPU_SKIP_MODE2_RESET, &reset_context.flags); amdgpu_device_gpu_recover(adev, NULL, &reset_context); } @@ -707,6 +706,13 @@ err: void amdgpu_amdkfd_set_compute_idle(struct amdgpu_device *adev, bool idle) { + /* Temporary workaround to fix issues observed in some + * compute applications when GFXOFF is enabled on GFX11. + */ + if (IP_VERSION_MAJ(adev->ip_versions[GC_HWIP][0]) == 11) { + pr_debug("GFXOFF is %s\n", idle ? "enabled" : "disabled"); + amdgpu_gfx_off_ctrl(adev, idle); + } amdgpu_dpm_switch_power_profile(adev, PP_SMC_POWER_PROFILE_COMPUTE, !idle); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c index 0b0a72ca5695..7e80caa05060 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c @@ -111,7 +111,7 @@ static int init_interrupts_v11(struct amdgpu_device *adev, uint32_t pipe_id) lock_srbm(adev, mec, pipe, 0, 0); - WREG32(SOC15_REG_OFFSET(GC, 0, regCPC_INT_CNTL), + WREG32_SOC15(GC, 0, regCPC_INT_CNTL, CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK | CP_INT_CNTL_RING0__OPCODE_ERROR_INT_ENABLE_MASK); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c index 978d3970b5cc..6d1ff7b9258d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c @@ -171,9 +171,7 @@ int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev, (kfd_mem_limit.ttm_mem_used + ttm_mem_needed > kfd_mem_limit.max_ttm_mem_limit) || (adev && adev->kfd.vram_used + vram_needed > - adev->gmc.real_vram_size - - atomic64_read(&adev->vram_pin_size) - - reserved_for_pt)) { + adev->gmc.real_vram_size - reserved_for_pt)) { ret = -ENOMEM; goto release; } @@ -510,13 +508,13 @@ kfd_mem_dmamap_userptr(struct kgd_mem *mem, struct ttm_tt *ttm = bo->tbo.ttm; int ret; + if (WARN_ON(ttm->num_pages != src_ttm->num_pages)) + return -EINVAL; + ttm->sg = kmalloc(sizeof(*ttm->sg), GFP_KERNEL); if (unlikely(!ttm->sg)) return -ENOMEM; - if (WARN_ON(ttm->num_pages != src_ttm->num_pages)) - return -EINVAL; - /* Same sequence as in amdgpu_ttm_tt_pin_userptr */ ret 
= sg_alloc_table_from_pages(ttm->sg, src_ttm->pages, ttm->num_pages, 0, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 1bbd39b3b0fc..d038b258cc92 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -109,6 +109,7 @@ static int amdgpu_cs_p1_ib(struct amdgpu_cs_parser *p, return r; ++(num_ibs[r]); + p->gang_leader_idx = r; return 0; } @@ -287,8 +288,10 @@ static int amdgpu_cs_pass1(struct amdgpu_cs_parser *p, } } - if (!p->gang_size) - return -EINVAL; + if (!p->gang_size) { + ret = -EINVAL; + goto free_partial_kdata; + } for (i = 0; i < p->gang_size; ++i) { ret = amdgpu_job_alloc(p->adev, num_ibs[i], &p->jobs[i], vm); @@ -300,7 +303,7 @@ static int amdgpu_cs_pass1(struct amdgpu_cs_parser *p, if (ret) goto free_all_kdata; } - p->gang_leader = p->jobs[p->gang_size - 1]; + p->gang_leader = p->jobs[p->gang_leader_idx]; if (p->ctx->vram_lost_counter != p->gang_leader->vram_lost_counter) { ret = -ECANCELED; @@ -992,6 +995,7 @@ out_free_user_pages: kvfree(e->user_pages); e->user_pages = NULL; } + mutex_unlock(&p->bo_list->bo_list_mutex); return r; } @@ -1194,16 +1198,18 @@ static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p) return r; } - for (i = 0; i < p->gang_size - 1; ++i) { + for (i = 0; i < p->gang_size; ++i) { + if (p->jobs[i] == leader) + continue; + r = amdgpu_sync_clone(&leader->sync, &p->jobs[i]->sync); if (r) return r; } - r = amdgpu_ctx_wait_prev_fence(p->ctx, p->entities[p->gang_size - 1]); + r = amdgpu_ctx_wait_prev_fence(p->ctx, p->entities[p->gang_leader_idx]); if (r && r != -ERESTARTSYS) DRM_ERROR("amdgpu_ctx_wait_prev_fence failed.\n"); - return r; } @@ -1237,9 +1243,12 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p, for (i = 0; i < p->gang_size; ++i) drm_sched_job_arm(&p->jobs[i]->base); - for (i = 0; i < (p->gang_size - 1); ++i) { + for (i = 0; i < p->gang_size; ++i) { struct dma_fence *fence; + if (p->jobs[i] == leader) + continue; + fence = &p->jobs[i]->base.s_fence->scheduled; r = amdgpu_sync_fence(&leader->sync, fence); if (r) @@ -1275,7 +1284,10 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p, list_for_each_entry(e, &p->validated, tv.head) { /* Everybody except for the gang leader uses READ */ - for (i = 0; i < (p->gang_size - 1); ++i) { + for (i = 0; i < p->gang_size; ++i) { + if (p->jobs[i] == leader) + continue; + dma_resv_add_fence(e->tv.bo->base.resv, &p->jobs[i]->base.s_fence->finished, DMA_RESV_USAGE_READ); @@ -1285,7 +1297,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p, e->tv.num_shared = 0; } - seq = amdgpu_ctx_add_fence(p->ctx, p->entities[p->gang_size - 1], + seq = amdgpu_ctx_add_fence(p->ctx, p->entities[p->gang_leader_idx], p->fence); amdgpu_cs_post_dependencies(p); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.h index cbaa19b2b8a3..f80adf9069ec 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.h @@ -54,6 +54,7 @@ struct amdgpu_cs_parser { /* scheduler job objects */ unsigned int gang_size; + unsigned int gang_leader_idx; struct drm_sched_entity *entities[AMDGPU_CS_GANG_SIZE]; struct amdgpu_job *jobs[AMDGPU_CS_GANG_SIZE]; struct amdgpu_job *gang_leader; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c index f6d9d5da53cd..d2139ac12159 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c @@ -326,7 +326,10 @@ static int amdgpu_ctx_init(struct 
amdgpu_ctx_mgr *mgr, int32_t priority, if (r) return r; - ctx->stable_pstate = current_stable_pstate; + if (mgr->adev->pm.stable_pstate_ctx) + ctx->stable_pstate = mgr->adev->pm.stable_pstate_ctx->stable_pstate; + else + ctx->stable_pstate = current_stable_pstate; return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c index 6066aebf491c..de61a85c4b02 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c @@ -1954,8 +1954,6 @@ int amdgpu_debugfs_init(struct amdgpu_device *adev) return PTR_ERR(ent); } - debugfs_create_u32("amdgpu_reset_level", 0600, root, &adev->amdgpu_reset_level_mask); - /* Register debugfs entries for amdgpu_ttm */ amdgpu_ttm_debugfs_init(adev); amdgpu_debugfs_pm_init(adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index ab8f970b2849..f1e9663b4051 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -2928,6 +2928,14 @@ static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev) amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE); amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE); + /* + * Per PMFW team's suggestion, driver needs to handle gfxoff + * and df cstate features disablement for gpu reset(e.g. Mode1Reset) + * scenario. Add the missing df cstate disablement here. + */ + if (amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_DISALLOW)) + dev_warn(adev->dev, "Failed to disallow df cstate"); + for (i = adev->num_ip_blocks - 1; i >= 0; i--) { if (!adev->ip_blocks[i].status.valid) continue; @@ -3202,6 +3210,15 @@ static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev) return r; } adev->ip_blocks[i].status.hw = true; + + if (adev->in_s0ix && adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) { + /* disable gfxoff for IP resume. The gfxoff will be re-enabled in + * amdgpu_device_resume() after IP resume. + */ + amdgpu_gfx_off_ctrl(adev, false); + DRM_DEBUG("will disable gfxoff for re-initializing other blocks\n"); + } + } return 0; @@ -4043,15 +4060,18 @@ void amdgpu_device_fini_sw(struct amdgpu_device *adev) * at suspend time. * */ -static void amdgpu_device_evict_resources(struct amdgpu_device *adev) +static int amdgpu_device_evict_resources(struct amdgpu_device *adev) { + int ret; + /* No need to evict vram on APUs for suspend to ram or s2idle */ if ((adev->in_s3 || adev->in_s0ix) && (adev->flags & AMD_IS_APU)) - return; + return 0; - if (amdgpu_ttm_evict_resources(adev, TTM_PL_VRAM)) + ret = amdgpu_ttm_evict_resources(adev, TTM_PL_VRAM); + if (ret) DRM_WARN("evicting device resources failed\n"); - + return ret; } /* @@ -4101,7 +4121,9 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon) if (!adev->in_s0ix) amdgpu_amdkfd_suspend(adev, adev->in_runpm); - amdgpu_device_evict_resources(adev); + r = amdgpu_device_evict_resources(adev); + if (r) + return r; amdgpu_fence_driver_hw_fini(adev); @@ -4177,6 +4199,13 @@ int amdgpu_device_resume(struct drm_device *dev, bool fbcon) /* Make sure IB tests flushed */ flush_delayed_work(&adev->delayed_init_work); + if (adev->in_s0ix) { + /* re-enable gfxoff after IP resume. This re-enables gfxoff after + * it was disabled for IP resume in amdgpu_device_ip_resume_phase2(). 
+ */ + amdgpu_gfx_off_ctrl(adev, true); + DRM_DEBUG("will enable gfxoff for the mission mode\n"); + } if (fbcon) drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, false); @@ -5210,7 +5239,6 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev, reset_context->job = job; reset_context->hive = hive; - /* * Build list of devices to reset. * In case we are in XGMI hive mode, resort the device list @@ -5337,11 +5365,8 @@ retry: /* Rest of adevs pre asic reset from XGMI hive. */ amdgpu_ras_resume(adev); } else { r = amdgpu_do_asic_reset(device_list_handle, reset_context); - if (r && r == -EAGAIN) { - set_bit(AMDGPU_SKIP_MODE2_RESET, &reset_context->flags); - adev->asic_reset_res = 0; + if (r && r == -EAGAIN) goto retry; - } if (!r && gpu_reset_for_dev_remove) goto recover_end; @@ -5377,7 +5402,7 @@ skip_hw_reset: drm_sched_start(&ring->sched, !tmp_adev->asic_reset_res); } - if (adev->enable_mes) + if (adev->enable_mes && adev->ip_versions[GC_HWIP][0] != IP_VERSION(11, 0, 3)) amdgpu_mes_self_test(tmp_adev); if (!drm_drv_uses_atomic_modeset(adev_to_drm(tmp_adev)) && !job_signaled) { @@ -5777,7 +5802,6 @@ pci_ers_result_t amdgpu_pci_slot_reset(struct pci_dev *pdev) reset_context.reset_req_dev = adev; set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags); set_bit(AMDGPU_SKIP_HW_RESET, &reset_context.flags); - set_bit(AMDGPU_SKIP_MODE2_RESET, &reset_context.flags); adev->no_hw_access = true; r = amdgpu_device_pre_asic_reset(adev, &reset_context); @@ -6020,3 +6044,44 @@ struct dma_fence *amdgpu_device_switch_gang(struct amdgpu_device *adev, dma_fence_put(old); return NULL; } + +bool amdgpu_device_has_display_hardware(struct amdgpu_device *adev) +{ + switch (adev->asic_type) { +#ifdef CONFIG_DRM_AMDGPU_SI + case CHIP_HAINAN: +#endif + case CHIP_TOPAZ: + /* chips with no display hardware */ + return false; +#ifdef CONFIG_DRM_AMDGPU_SI + case CHIP_TAHITI: + case CHIP_PITCAIRN: + case CHIP_VERDE: + case CHIP_OLAND: +#endif +#ifdef CONFIG_DRM_AMDGPU_CIK + case CHIP_BONAIRE: + case CHIP_HAWAII: + case CHIP_KAVERI: + case CHIP_KABINI: + case CHIP_MULLINS: +#endif + case CHIP_TONGA: + case CHIP_FIJI: + case CHIP_POLARIS10: + case CHIP_POLARIS11: + case CHIP_POLARIS12: + case CHIP_VEGAM: + case CHIP_CARRIZO: + case CHIP_STONEY: + /* chips with display hardware */ + return true; + default: + /* IP discovery */ + if (!adev->ip_versions[DCE_HWIP][0] || + (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK)) + return false; + return true; + } +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 3c9fecdd6b2f..bf2d50c8c92a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -2201,7 +2201,8 @@ amdgpu_pci_remove(struct pci_dev *pdev) pm_runtime_forbid(dev->dev); } - if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 2)) { + if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 2) && + !amdgpu_sriov_vf(adev)) { bool need_to_reset_gpu = false; if (adev->gmc.xgmi.num_physical_nodes > 1) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c index 34233a74248c..b06cb0bb166c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c @@ -656,7 +656,7 @@ void amdgpu_gmc_get_vbios_allocations(struct amdgpu_device *adev) } if (amdgpu_sriov_vf(adev) || - !amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_DCE)) { + !amdgpu_device_has_display_hardware(adev)) { size = 0; } else { size = amdgpu_gmc_get_vbios_fb_size(adev); 
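amdgpu_device_has_display_hardware(), added above, centralizes the display-block check that the amdgpu_gmc.c hunk just before this point now uses in place of looking up a DCE IP block. A hypothetical standalone caller equivalent to that hunk might look like the sketch below; the example_* name is illustrative only.

/* Hypothetical caller of the new display-hardware helper (illustrative only). */
static unsigned long example_vbios_fb_size(struct amdgpu_device *adev)
{
	/* No scanout buffer to preserve for VFs or for ASICs without display hardware. */
	if (amdgpu_sriov_vf(adev) || !amdgpu_device_has_display_hardware(adev))
		return 0;

	return amdgpu_gmc_get_vbios_fb_size(adev);
}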
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c index 46c99331d7f1..cd968e781077 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c @@ -72,7 +72,6 @@ static enum drm_gpu_sched_stat amdgpu_job_timedout(struct drm_sched_job *s_job) reset_context.method = AMD_RESET_METHOD_NONE; reset_context.reset_req_dev = adev; clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags); - clear_bit(AMDGPU_SKIP_MODE2_RESET, &reset_context.flags); r = amdgpu_device_gpu_recover(ring->adev, job, &reset_context); if (r) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index fe23e09eec98..4e42dcb1950f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -337,11 +337,17 @@ static int amdgpu_firmware_info(struct drm_amdgpu_info_firmware *fw_info, fw_info->feature = adev->psp.cap_feature_version; break; case AMDGPU_INFO_FW_MES_KIQ: - fw_info->ver = adev->mes.ucode_fw_version[0]; - fw_info->feature = 0; + fw_info->ver = adev->mes.kiq_version & AMDGPU_MES_VERSION_MASK; + fw_info->feature = (adev->mes.kiq_version & AMDGPU_MES_FEAT_VERSION_MASK) + >> AMDGPU_MES_FEAT_VERSION_SHIFT; break; case AMDGPU_INFO_FW_MES: - fw_info->ver = adev->mes.ucode_fw_version[1]; + fw_info->ver = adev->mes.sched_version & AMDGPU_MES_VERSION_MASK; + fw_info->feature = (adev->mes.sched_version & AMDGPU_MES_FEAT_VERSION_MASK) + >> AMDGPU_MES_FEAT_VERSION_SHIFT; + break; + case AMDGPU_INFO_FW_IMU: + fw_info->ver = adev->gfx.imu_fw_version; fw_info->feature = 0; break; default: @@ -1520,6 +1526,15 @@ static int amdgpu_debugfs_firmware_info_show(struct seq_file *m, void *unused) fw_info.feature, fw_info.ver); } + /* IMU */ + query_fw.fw_type = AMDGPU_INFO_FW_IMU; + query_fw.index = 0; + ret = amdgpu_firmware_info(&fw_info, &query_fw, adev); + if (ret) + return ret; + seq_printf(m, "IMU feature version: %u, firmware version: 0x%08x\n", + fw_info.feature, fw_info.ver); + /* PSP SOS */ query_fw.fw_type = AMDGPU_INFO_FW_SOS; ret = amdgpu_firmware_info(&fw_info, &query_fw, adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c index 2dad7aa9a03b..a4b47e1bd111 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c @@ -1950,7 +1950,6 @@ static void amdgpu_ras_do_recovery(struct work_struct *work) reset_context.method = AMD_RESET_METHOD_NONE; reset_context.reset_req_dev = adev; clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags); - clear_bit(AMDGPU_SKIP_MODE2_RESET, &reset_context.flags); amdgpu_device_gpu_recover(ras->adev, NULL, &reset_context); } @@ -2268,6 +2267,25 @@ static int amdgpu_ras_recovery_fini(struct amdgpu_device *adev) static bool amdgpu_ras_asic_supported(struct amdgpu_device *adev) { + if (amdgpu_sriov_vf(adev)) { + switch (adev->ip_versions[MP0_HWIP][0]) { + case IP_VERSION(13, 0, 2): + return true; + default: + return false; + } + } + + if (adev->asic_type == CHIP_IP_DISCOVERY) { + switch (adev->ip_versions[MP0_HWIP][0]) { + case IP_VERSION(13, 0, 0): + case IP_VERSION(13, 0, 10): + return true; + default: + return false; + } + } + return adev->asic_type == CHIP_VEGA10 || adev->asic_type == CHIP_VEGA20 || adev->asic_type == CHIP_ARCTURUS || @@ -2311,11 +2329,6 @@ static void amdgpu_ras_check_supported(struct amdgpu_device *adev) !amdgpu_ras_asic_supported(adev)) return; - /* If driver run on sriov guest side, only enable ras for aldebaran */ - if 
(amdgpu_sriov_vf(adev) && - adev->ip_versions[MP1_HWIP][0] != IP_VERSION(13, 0, 2)) - return; - if (!adev->gmc.xgmi.connected_to_cpu) { if (amdgpu_atomfirmware_mem_ecc_supported(adev)) { dev_info(adev->dev, "MEM ECC is active.\n"); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c index 9da5ead50c90..f778466bb9db 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.c @@ -37,8 +37,6 @@ int amdgpu_reset_init(struct amdgpu_device *adev) { int ret = 0; - adev->amdgpu_reset_level_mask = 0x1; - switch (adev->ip_versions[MP1_HWIP][0]) { case IP_VERSION(13, 0, 2): ret = aldebaran_reset_init(adev); @@ -76,12 +74,6 @@ int amdgpu_reset_prepare_hwcontext(struct amdgpu_device *adev, { struct amdgpu_reset_handler *reset_handler = NULL; - if (!(adev->amdgpu_reset_level_mask & AMDGPU_RESET_LEVEL_MODE2)) - return -ENOSYS; - - if (test_bit(AMDGPU_SKIP_MODE2_RESET, &reset_context->flags)) - return -ENOSYS; - if (adev->reset_cntl && adev->reset_cntl->get_reset_handler) reset_handler = adev->reset_cntl->get_reset_handler( adev->reset_cntl, reset_context); @@ -98,12 +90,6 @@ int amdgpu_reset_perform_reset(struct amdgpu_device *adev, int ret; struct amdgpu_reset_handler *reset_handler = NULL; - if (!(adev->amdgpu_reset_level_mask & AMDGPU_RESET_LEVEL_MODE2)) - return -ENOSYS; - - if (test_bit(AMDGPU_SKIP_MODE2_RESET, &reset_context->flags)) - return -ENOSYS; - if (adev->reset_cntl) reset_handler = adev->reset_cntl->get_reset_handler( adev->reset_cntl, reset_context); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h index f5318fedf2f0..f4a501ff87d9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_reset.h @@ -30,8 +30,7 @@ enum AMDGPU_RESET_FLAGS { AMDGPU_NEED_FULL_RESET = 0, AMDGPU_SKIP_HW_RESET = 1, - AMDGPU_SKIP_MODE2_RESET = 2, - AMDGPU_RESET_FOR_DEVICE_REMOVE = 3, + AMDGPU_RESET_FOR_DEVICE_REMOVE = 2, }; struct amdgpu_reset_context { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c index 3e316b013fd9..d3558c34d406 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c @@ -405,9 +405,6 @@ bool amdgpu_ring_soft_recovery(struct amdgpu_ring *ring, unsigned int vmid, { ktime_t deadline = ktime_add_us(ktime_get(), 10000); - if (!(ring->adev->amdgpu_reset_level_mask & AMDGPU_RESET_LEVEL_SOFT_RECOVERY)) - return false; - if (amdgpu_sriov_vf(ring->adev) || !ring->funcs->soft_recovery || !fence) return false; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index dc262d2c2925..57277b1cf183 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -439,6 +439,9 @@ static bool amdgpu_mem_visible(struct amdgpu_device *adev, while (cursor.remaining) { amdgpu_res_next(&cursor, cursor.size); + if (!cursor.remaining) + break; + /* ttm_resource_ioremap only supports contiguous memory */ if (end != cursor.start) return false; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c index dd0bc649a57d..5cb62e6249c2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c @@ -698,6 +698,7 @@ FW_VERSION_ATTR(rlc_srlg_fw_version, 0444, gfx.rlc_srlg_fw_version); FW_VERSION_ATTR(rlc_srls_fw_version, 0444, gfx.rlc_srls_fw_version); FW_VERSION_ATTR(mec_fw_version, 0444, 
gfx.mec_fw_version); FW_VERSION_ATTR(mec2_fw_version, 0444, gfx.mec2_fw_version); +FW_VERSION_ATTR(imu_fw_version, 0444, gfx.imu_fw_version); FW_VERSION_ATTR(sos_fw_version, 0444, psp.sos.fw_version); FW_VERSION_ATTR(asd_fw_version, 0444, psp.asd_context.bin_desc.fw_version); FW_VERSION_ATTR(ta_ras_fw_version, 0444, psp.ras_context.context.bin_desc.fw_version); @@ -719,7 +720,8 @@ static struct attribute *fw_attrs[] = { &dev_attr_ta_ras_fw_version.attr, &dev_attr_ta_xgmi_fw_version.attr, &dev_attr_smc_fw_version.attr, &dev_attr_sdma_fw_version.attr, &dev_attr_sdma2_fw_version.attr, &dev_attr_vcn_fw_version.attr, - &dev_attr_dmcu_fw_version.attr, NULL + &dev_attr_dmcu_fw_version.attr, &dev_attr_imu_fw_version.attr, + NULL }; static const struct attribute_group fw_attr_group = { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c index e4af40b9a8aa..c73abe54d974 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c @@ -547,6 +547,7 @@ static void amdgpu_virt_populate_vf2pf_ucode_info(struct amdgpu_device *adev) POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_RLC_SRLS, adev->gfx.rlc_srls_fw_version); POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_MEC, adev->gfx.mec_fw_version); POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_MEC2, adev->gfx.mec2_fw_version); + POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_IMU, adev->gfx.imu_fw_version); POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_SOS, adev->psp.sos.fw_version); POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_ASD, adev->psp.asd_context.bin_desc.fw_version); @@ -726,6 +727,12 @@ void amdgpu_detect_virtualization(struct amdgpu_device *adev) adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE; } + if (amdgpu_sriov_vf(adev) && adev->asic_type == CHIP_SIENNA_CICHLID) + /* VF MMIO access (except mailbox range) from CPU + * will be blocked during sriov runtime + */ + adev->virt.caps |= AMDGPU_VF_MMIO_ACCESS_PROTECT; + /* we have the ability to check now */ if (amdgpu_sriov_vf(adev)) { switch (adev->asic_type) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h index d94c31e68a14..49c4347d154c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h @@ -31,6 +31,7 @@ #define AMDGPU_SRIOV_CAPS_IS_VF (1 << 2) /* this GPU is a virtual function */ #define AMDGPU_PASSTHROUGH_MODE (1 << 3) /* thw whole GPU is pass through for VM */ #define AMDGPU_SRIOV_CAPS_RUNTIME (1 << 4) /* is out of full access mode */ +#define AMDGPU_VF_MMIO_ACCESS_PROTECT (1 << 5) /* MMIO write access is not allowed in sriov runtime */ /* flags for indirect register access path supported by rlcg for sriov */ #define AMDGPU_RLCG_GC_WRITE_LEGACY (0x8 << 28) @@ -297,6 +298,9 @@ struct amdgpu_video_codec_info; #define amdgpu_passthrough(adev) \ ((adev)->virt.caps & AMDGPU_PASSTHROUGH_MODE) +#define amdgpu_sriov_vf_mmio_access_protection(adev) \ +((adev)->virt.caps & AMDGPU_VF_MMIO_ACCESS_PROTECT) + static inline bool is_virtual_machine(void) { #if defined(CONFIG_X86) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c index f4b5301ea2a0..500a1dc4fe02 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c @@ -500,6 +500,8 @@ static int amdgpu_vkms_sw_init(void *handle) adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base; + adev_to_drm(adev)->mode_config.fb_modifiers_not_supported = true; + r = 
amdgpu_display_modeset_create_props(adev); if (r) return r; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 83b0c5d86e48..003aa9e47085 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -143,32 +143,6 @@ int amdgpu_vm_set_pasid(struct amdgpu_device *adev, struct amdgpu_vm *vm, return 0; } -/* - * vm eviction_lock can be taken in MMU notifiers. Make sure no reclaim-FS - * happens while holding this lock anywhere to prevent deadlocks when - * an MMU notifier runs in reclaim-FS context. - */ -static inline void amdgpu_vm_eviction_lock(struct amdgpu_vm *vm) -{ - mutex_lock(&vm->eviction_lock); - vm->saved_flags = memalloc_noreclaim_save(); -} - -static inline int amdgpu_vm_eviction_trylock(struct amdgpu_vm *vm) -{ - if (mutex_trylock(&vm->eviction_lock)) { - vm->saved_flags = memalloc_noreclaim_save(); - return 1; - } - return 0; -} - -static inline void amdgpu_vm_eviction_unlock(struct amdgpu_vm *vm) -{ - memalloc_noreclaim_restore(vm->saved_flags); - mutex_unlock(&vm->eviction_lock); -} - /** * amdgpu_vm_bo_evicted - vm_bo is evicted * @@ -2338,7 +2312,11 @@ void amdgpu_vm_manager_init(struct amdgpu_device *adev) */ #ifdef CONFIG_X86_64 if (amdgpu_vm_update_mode == -1) { - if (amdgpu_gmc_vram_full_visible(&adev->gmc)) + /* For asic with VF MMIO access protection + * avoid using CPU for VM table updates + */ + if (amdgpu_gmc_vram_full_visible(&adev->gmc) && + !amdgpu_sriov_vf_mmio_access_protection(adev)) adev->vm_manager.vm_update_mode = AMDGPU_VM_USE_CPU_FOR_COMPUTE; else diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h index 83acb7bd80fe..6546e786bf00 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h @@ -492,7 +492,48 @@ void amdgpu_debugfs_vm_bo_info(struct amdgpu_vm *vm, struct seq_file *m); */ static inline uint64_t amdgpu_vm_tlb_seq(struct amdgpu_vm *vm) { + unsigned long flags; + spinlock_t *lock; + + /* + * Workaround to stop racing between the fence signaling and handling + * the cb. The lock is static after initially setting it up, just make + * sure that the dma_fence structure isn't freed up. + */ + rcu_read_lock(); + lock = vm->last_tlb_flush->lock; + rcu_read_unlock(); + + spin_lock_irqsave(lock, flags); + spin_unlock_irqrestore(lock, flags); + return atomic64_read(&vm->tlb_seq); } +/* + * vm eviction_lock can be taken in MMU notifiers. Make sure no reclaim-FS + * happens while holding this lock anywhere to prevent deadlocks when + * an MMU notifier runs in reclaim-FS context. 
+ */ +static inline void amdgpu_vm_eviction_lock(struct amdgpu_vm *vm) +{ + mutex_lock(&vm->eviction_lock); + vm->saved_flags = memalloc_noreclaim_save(); +} + +static inline bool amdgpu_vm_eviction_trylock(struct amdgpu_vm *vm) +{ + if (mutex_trylock(&vm->eviction_lock)) { + vm->saved_flags = memalloc_noreclaim_save(); + return true; + } + return false; +} + +static inline void amdgpu_vm_eviction_unlock(struct amdgpu_vm *vm) +{ + memalloc_noreclaim_restore(vm->saved_flags); + mutex_unlock(&vm->eviction_lock); +} + #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c index 358b91243e37..b5f3bba851db 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c @@ -597,7 +597,9 @@ static int amdgpu_vm_pt_alloc(struct amdgpu_device *adev, if (entry->bo) return 0; + amdgpu_vm_eviction_unlock(vm); r = amdgpu_vm_pt_create(adev, vm, cursor->level, immediate, &pt); + amdgpu_vm_eviction_lock(vm); if (r) return r; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c index 2b0669c464f6..69e105fa41f6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c @@ -116,8 +116,15 @@ static int amdgpu_vm_sdma_commit(struct amdgpu_vm_update_params *p, DMA_RESV_USAGE_BOOKKEEP); } - if (fence && !p->immediate) + if (fence && !p->immediate) { + /* + * Most hw generations now have a separate queue for page table + * updates, but when the queue is shared with userspace we need + * the extra CPU round trip to correctly flush the TLB. + */ + set_bit(DRM_SCHED_FENCE_DONT_PIPELINE, &f->flags); swap(*fence, f); + } dma_fence_put(f); return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c index 73a517bcf5c1..80dd1343594c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c @@ -435,7 +435,7 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man, if (place->flags & TTM_PL_FLAG_TOPDOWN) vres->flags |= DRM_BUDDY_TOPDOWN_ALLOCATION; - if (fpfn || lpfn != man->size) + if (fpfn || lpfn != mgr->mm.size) /* Allocate blocks in desired range */ vres->flags |= DRM_BUDDY_RANGE_ALLOCATION; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h b/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h index e78e4c27b62a..6c97148ca0ed 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h @@ -70,6 +70,7 @@ enum amd_sriov_ucode_engine_id { AMD_SRIOV_UCODE_ID_RLC_SRLS, AMD_SRIOV_UCODE_ID_MEC, AMD_SRIOV_UCODE_ID_MEC2, + AMD_SRIOV_UCODE_ID_IMU, AMD_SRIOV_UCODE_ID_SOS, AMD_SRIOV_UCODE_ID_ASD, AMD_SRIOV_UCODE_ID_TA_RAS, diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c index 251109723ab6..0fecc5bf45bc 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c @@ -1571,7 +1571,7 @@ static void gfx_v11_0_init_compute_vmid(struct amdgpu_device *adev) WREG32_SOC15(GC, 0, regSH_MEM_BASES, sh_mem_bases); /* Enable trap for each kfd vmid. 
*/ - data = RREG32(SOC15_REG_OFFSET(GC, 0, regSPI_GDBG_PER_VMID_CNTL)); + data = RREG32_SOC15(GC, 0, regSPI_GDBG_PER_VMID_CNTL); data = REG_SET_FIELD(data, SPI_GDBG_PER_VMID_CNTL, TRAP_EN, 1); } soc21_grbm_select(adev, 0, 0, 0, 0); @@ -5051,6 +5051,7 @@ static int gfx_v11_0_set_powergating_state(void *handle, switch (adev->ip_versions[GC_HWIP][0]) { case IP_VERSION(11, 0, 0): case IP_VERSION(11, 0, 2): + case IP_VERSION(11, 0, 3): amdgpu_gfx_off_ctrl(adev, enable); break; case IP_VERSION(11, 0, 1): @@ -5076,6 +5077,7 @@ static int gfx_v11_0_set_clockgating_state(void *handle, case IP_VERSION(11, 0, 0): case IP_VERSION(11, 0, 1): case IP_VERSION(11, 0, 2): + case IP_VERSION(11, 0, 3): gfx_v11_0_update_gfx_clock_gating(adev, state == AMD_CG_STATE_GATE); break; diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c index 846ccb6cf07d..66dfb574cc7d 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c @@ -186,6 +186,10 @@ static void gmc_v11_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid, /* Use register 17 for GART */ const unsigned eng = 17; unsigned int i; + unsigned char hub_ip = 0; + + hub_ip = (vmhub == AMDGPU_GFXHUB_0) ? + GC_HWIP : MMHUB_HWIP; spin_lock(&adev->gmc.invalidate_lock); /* @@ -199,8 +203,8 @@ static void gmc_v11_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid, if (use_semaphore) { for (i = 0; i < adev->usec_timeout; i++) { /* a read return value of 1 means semaphore acuqire */ - tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_sem + - hub->eng_distance * eng); + tmp = RREG32_RLC_NO_KIQ(hub->vm_inv_eng0_sem + + hub->eng_distance * eng, hub_ip); if (tmp & 0x1) break; udelay(1); @@ -210,12 +214,12 @@ static void gmc_v11_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid, DRM_ERROR("Timeout waiting for sem acquire in VM flush!\n"); } - WREG32_NO_KIQ(hub->vm_inv_eng0_req + hub->eng_distance * eng, inv_req); + WREG32_RLC_NO_KIQ(hub->vm_inv_eng0_req + hub->eng_distance * eng, inv_req, hub_ip); /* Wait for ACK with a delay.*/ for (i = 0; i < adev->usec_timeout; i++) { - tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_ack + - hub->eng_distance * eng); + tmp = RREG32_RLC_NO_KIQ(hub->vm_inv_eng0_ack + + hub->eng_distance * eng, hub_ip); tmp &= 1 << vmid; if (tmp) break; @@ -229,8 +233,8 @@ static void gmc_v11_0_flush_vm_hub(struct amdgpu_device *adev, uint32_t vmid, * add semaphore release after invalidation, * write with 0 means semaphore release */ - WREG32_NO_KIQ(hub->vm_inv_eng0_sem + - hub->eng_distance * eng, 0); + WREG32_RLC_NO_KIQ(hub->vm_inv_eng0_sem + + hub->eng_distance * eng, 0, hub_ip); /* Issue additional private vm invalidation to MMHUB */ if ((vmhub != AMDGPU_GFXHUB_0) && diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c index 5cec6b259b7f..f141fadd2d86 100644 --- a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c @@ -98,7 +98,14 @@ static int mes_v11_0_submit_pkt_and_poll_completion(struct amdgpu_mes *mes, struct amdgpu_device *adev = mes->adev; struct amdgpu_ring *ring = &mes->ring; unsigned long flags; + signed long timeout = adev->usec_timeout; + if (amdgpu_emu_mode) { + timeout *= 100; + } else if (amdgpu_sriov_vf(adev)) { + /* Worst case in sriov where all other 15 VF timeout, each VF needs about 600ms */ + timeout = 15 * 600 * 1000; + } BUG_ON(size % 4 != 0); spin_lock_irqsave(&mes->ring_lock, flags); @@ -118,7 +125,7 @@ static int mes_v11_0_submit_pkt_and_poll_completion(struct amdgpu_mes *mes, 
DRM_DEBUG("MES msg=%d was emitted\n", x_pkt->header.opcode); r = amdgpu_fence_wait_polling(ring, ring->fence_drv.sync_seq, - adev->usec_timeout * (amdgpu_emu_mode ? 100 : 1)); + timeout); if (r < 1) { DRM_ERROR("MES failed to response msg=%d\n", x_pkt->header.opcode); @@ -1156,6 +1163,42 @@ static int mes_v11_0_sw_fini(void *handle) return 0; } +static void mes_v11_0_kiq_dequeue_sched(struct amdgpu_device *adev) +{ + uint32_t data; + int i; + + mutex_lock(&adev->srbm_mutex); + soc21_grbm_select(adev, 3, AMDGPU_MES_SCHED_PIPE, 0, 0); + + /* disable the queue if it's active */ + if (RREG32_SOC15(GC, 0, regCP_HQD_ACTIVE) & 1) { + WREG32_SOC15(GC, 0, regCP_HQD_DEQUEUE_REQUEST, 1); + for (i = 0; i < adev->usec_timeout; i++) { + if (!(RREG32_SOC15(GC, 0, regCP_HQD_ACTIVE) & 1)) + break; + udelay(1); + } + } + data = RREG32_SOC15(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL); + data = REG_SET_FIELD(data, CP_HQD_PQ_DOORBELL_CONTROL, + DOORBELL_EN, 0); + data = REG_SET_FIELD(data, CP_HQD_PQ_DOORBELL_CONTROL, + DOORBELL_HIT, 1); + WREG32_SOC15(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL, data); + + WREG32_SOC15(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL, 0); + + WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_LO, 0); + WREG32_SOC15(GC, 0, regCP_HQD_PQ_WPTR_HI, 0); + WREG32_SOC15(GC, 0, regCP_HQD_PQ_RPTR, 0); + + soc21_grbm_select(adev, 0, 0, 0, 0); + mutex_unlock(&adev->srbm_mutex); + + adev->mes.ring.sched.ready = false; +} + static void mes_v11_0_kiq_setting(struct amdgpu_ring *ring) { uint32_t tmp; @@ -1207,6 +1250,9 @@ failure: static int mes_v11_0_kiq_hw_fini(struct amdgpu_device *adev) { + if (adev->mes.ring.sched.ready) + mes_v11_0_kiq_dequeue_sched(adev); + mes_v11_0_enable(adev, false); return 0; } @@ -1262,9 +1308,6 @@ failure: static int mes_v11_0_hw_fini(void *handle) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - adev->mes.ring.sched.ready = false; return 0; } @@ -1296,7 +1339,8 @@ static int mes_v11_0_late_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - if (!amdgpu_in_reset(adev)) + if (!amdgpu_in_reset(adev) && + (adev->ip_versions[GC_HWIP][0] != IP_VERSION(11, 0, 3))) amdgpu_mes_self_test(adev); return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c index 4d304f22889e..998b5d17b271 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c @@ -32,8 +32,6 @@ #include "gc/gc_10_1_0_offset.h" #include "soc15_common.h" -#define mmMM_ATC_L2_MISC_CG_Sienna_Cichlid 0x064d -#define mmMM_ATC_L2_MISC_CG_Sienna_Cichlid_BASE_IDX 0 #define mmDAGB0_CNTL_MISC2_Sienna_Cichlid 0x0070 #define mmDAGB0_CNTL_MISC2_Sienna_Cichlid_BASE_IDX 0 @@ -574,7 +572,6 @@ static void mmhub_v2_0_update_medium_grain_clock_gating(struct amdgpu_device *ad case IP_VERSION(2, 1, 0): case IP_VERSION(2, 1, 1): case IP_VERSION(2, 1, 2): - def = data = RREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_MISC_CG_Sienna_Cichlid); def1 = data1 = RREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2_Sienna_Cichlid); break; default: @@ -608,8 +605,6 @@ static void mmhub_v2_0_update_medium_grain_clock_gating(struct amdgpu_device *ad case IP_VERSION(2, 1, 0): case IP_VERSION(2, 1, 1): case IP_VERSION(2, 1, 2): - if (def != data) - WREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_MISC_CG_Sienna_Cichlid, data); if (def1 != data1) WREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2_Sienna_Cichlid, data1); break; @@ -634,8 +629,8 @@ static void mmhub_v2_0_update_medium_grain_light_sleep(struct amdgpu_device *ade case IP_VERSION(2, 1, 0): case IP_VERSION(2, 1, 1): case 
IP_VERSION(2, 1, 2): - def = data = RREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_MISC_CG_Sienna_Cichlid); - break; + /* There is no ATCL2 in MMHUB for 2.1.x */ + return; default: def = data = RREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_MISC_CG); break; @@ -646,18 +641,8 @@ static void mmhub_v2_0_update_medium_grain_light_sleep(struct amdgpu_device *ade else data &= ~MM_ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK; - if (def != data) { - switch (adev->ip_versions[MMHUB_HWIP][0]) { - case IP_VERSION(2, 1, 0): - case IP_VERSION(2, 1, 1): - case IP_VERSION(2, 1, 2): - WREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_MISC_CG_Sienna_Cichlid, data); - break; - default: - WREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_MISC_CG, data); - break; - } - } + if (def != data) + WREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_MISC_CG, data); } static int mmhub_v2_0_set_clockgating(struct amdgpu_device *adev, @@ -695,7 +680,10 @@ static void mmhub_v2_0_get_clockgating(struct amdgpu_device *adev, u64 *flags) case IP_VERSION(2, 1, 0): case IP_VERSION(2, 1, 1): case IP_VERSION(2, 1, 2): - data = RREG32_SOC15(MMHUB, 0, mmMM_ATC_L2_MISC_CG_Sienna_Cichlid); + /* There is no ATCL2 in MMHUB for 2.1.x. Keep the status + * based on DAGB + */ + data = MM_ATC_L2_MISC_CG__ENABLE_MASK; data1 = RREG32_SOC15(MMHUB, 0, mmDAGB0_CNTL_MISC2_Sienna_Cichlid); break; default: diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c index a2f04b249132..12906ba74462 100644 --- a/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c @@ -290,7 +290,6 @@ flr_done: reset_context.method = AMD_RESET_METHOD_NONE; reset_context.reset_req_dev = adev; clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags); - clear_bit(AMDGPU_SKIP_MODE2_RESET, &reset_context.flags); amdgpu_device_gpu_recover(adev, NULL, &reset_context); } diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c index a977f0027928..e07757eea7ad 100644 --- a/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c @@ -317,7 +317,6 @@ flr_done: reset_context.method = AMD_RESET_METHOD_NONE; reset_context.reset_req_dev = adev; clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags); - clear_bit(AMDGPU_SKIP_MODE2_RESET, &reset_context.flags); amdgpu_device_gpu_recover(adev, NULL, &reset_context); } diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c index fd14fa9b9cd7..288c414babdf 100644 --- a/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c @@ -529,7 +529,6 @@ static void xgpu_vi_mailbox_flr_work(struct work_struct *work) reset_context.method = AMD_RESET_METHOD_NONE; reset_context.reset_req_dev = adev; clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags); - clear_bit(AMDGPU_SKIP_MODE2_RESET, &reset_context.flags); amdgpu_device_gpu_recover(adev, NULL, &reset_context); } diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c index 21d822b1d589..88f9b327183a 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v13_0.c @@ -45,6 +45,7 @@ MODULE_FIRMWARE("amdgpu/psp_13_0_0_ta.bin"); MODULE_FIRMWARE("amdgpu/psp_13_0_7_sos.bin"); MODULE_FIRMWARE("amdgpu/psp_13_0_7_ta.bin"); MODULE_FIRMWARE("amdgpu/psp_13_0_10_sos.bin"); +MODULE_FIRMWARE("amdgpu/psp_13_0_10_ta.bin"); /* For large FW files the time to complete can be very long */ #define USBC_PD_POLLING_LIMIT_S 240 diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c index 298fa11702e7..1122bd4eae98 
100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c @@ -1417,11 +1417,6 @@ static int sdma_v4_0_start(struct amdgpu_device *adev) WREG32_SDMA(i, mmSDMA0_CNTL, temp); if (!amdgpu_sriov_vf(adev)) { - ring = &adev->sdma.instance[i].ring; - adev->nbio.funcs->sdma_doorbell_range(adev, i, - ring->use_doorbell, ring->doorbell_index, - adev->doorbell_index.sdma_doorbell_range); - /* unhalt engine */ temp = RREG32_SDMA(i, mmSDMA0_F32_CNTL); temp = REG_SET_FIELD(temp, SDMA0_F32_CNTL, HALT, 0); diff --git a/drivers/gpu/drm/amd/amdgpu/sienna_cichlid.c b/drivers/gpu/drm/amd/amdgpu/sienna_cichlid.c index 7aa570c1ce4a..81a6d5b94987 100644 --- a/drivers/gpu/drm/amd/amdgpu/sienna_cichlid.c +++ b/drivers/gpu/drm/amd/amdgpu/sienna_cichlid.c @@ -31,12 +31,23 @@ #include "amdgpu_psp.h" #include "amdgpu_xgmi.h" +static bool sienna_cichlid_is_mode2_default(struct amdgpu_reset_control *reset_ctl) +{ +#if 0 + struct amdgpu_device *adev = (struct amdgpu_device *)reset_ctl->handle; + + if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 7) && + adev->pm.fw_version >= 0x3a5500 && !amdgpu_sriov_vf(adev)) + return true; +#endif + return false; +} + static struct amdgpu_reset_handler * sienna_cichlid_get_reset_handler(struct amdgpu_reset_control *reset_ctl, struct amdgpu_reset_context *reset_context) { struct amdgpu_reset_handler *handler; - struct amdgpu_device *adev = (struct amdgpu_device *)reset_ctl->handle; if (reset_context->method != AMD_RESET_METHOD_NONE) { list_for_each_entry(handler, &reset_ctl->reset_handlers, @@ -44,15 +55,13 @@ sienna_cichlid_get_reset_handler(struct amdgpu_reset_control *reset_ctl, if (handler->reset_method == reset_context->method) return handler; } - } else { - list_for_each_entry(handler, &reset_ctl->reset_handlers, + } + + if (sienna_cichlid_is_mode2_default(reset_ctl)) { + list_for_each_entry (handler, &reset_ctl->reset_handlers, handler_list) { - if (handler->reset_method == AMD_RESET_METHOD_MODE2 && - adev->pm.fw_version >= 0x3a5500 && - !amdgpu_sriov_vf(adev)) { - reset_context->method = AMD_RESET_METHOD_MODE2; + if (handler->reset_method == AMD_RESET_METHOD_MODE2) return handler; - } } } diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index 183024d7c184..e3b2b6b4f1a6 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -1211,6 +1211,20 @@ static int soc15_common_sw_fini(void *handle) return 0; } +static void soc15_sdma_doorbell_range_init(struct amdgpu_device *adev) +{ + int i; + + /* sdma doorbell range is programed by hypervisor */ + if (!amdgpu_sriov_vf(adev)) { + for (i = 0; i < adev->sdma.num_instances; i++) { + adev->nbio.funcs->sdma_doorbell_range(adev, i, + true, adev->doorbell_index.sdma_engine[i] << 1, + adev->doorbell_index.sdma_doorbell_range); + } + } +} + static int soc15_common_hw_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; @@ -1230,6 +1244,13 @@ static int soc15_common_hw_init(void *handle) /* enable the doorbell aperture */ soc15_enable_doorbell_aperture(adev, true); + /* HW doorbell routing policy: doorbell writing not + * in SDMA/IH/MM/ACV range will be routed to CP. So + * we need to init SDMA doorbell range prior + * to CP ip block init and ring test. IH already + * happens before CP. 
+ */ + soc15_sdma_doorbell_range_init(adev); return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/soc21.c b/drivers/gpu/drm/amd/amdgpu/soc21.c index 795706b3b092..e08044008186 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc21.c +++ b/drivers/gpu/drm/amd/amdgpu/soc21.c @@ -423,6 +423,7 @@ static bool soc21_need_full_reset(struct amdgpu_device *adev) case IP_VERSION(11, 0, 0): return amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC); case IP_VERSION(11, 0, 2): + case IP_VERSION(11, 0, 3): return false; default: return true; @@ -636,7 +637,11 @@ static int soc21_common_early_init(void *handle) break; case IP_VERSION(11, 0, 3): adev->cg_flags = AMD_CG_SUPPORT_VCN_MGCG | - AMD_CG_SUPPORT_JPEG_MGCG; + AMD_CG_SUPPORT_JPEG_MGCG | + AMD_CG_SUPPORT_GFX_CGCG | + AMD_CG_SUPPORT_GFX_CGLS | + AMD_CG_SUPPORT_REPEATER_FGCG | + AMD_CG_SUPPORT_GFX_MGCG; adev->pg_flags = AMD_PG_SUPPORT_VCN | AMD_PG_SUPPORT_VCN_DPG | AMD_PG_SUPPORT_JPEG; diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h index c7118843db05..0c4c5499bb5c 100644 --- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h +++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h @@ -2495,442 +2495,444 @@ static const uint32_t cwsr_trap_gfx10_hex[] = { 0xbf9f0000, 0x00000000, }; static const uint32_t cwsr_trap_gfx11_hex[] = { - 0xbfa00001, 0xbfa0021e, + 0xbfa00001, 0xbfa00221, 0xb0804006, 0xb8f8f802, 0x9178ff78, 0x00020006, - 0xb8fbf803, 0xbf0d9f6d, - 0xbfa20006, 0x8b6eff78, - 0x00002000, 0xbfa10009, - 0x8b6eff6d, 0x00ff0000, - 0xbfa2001e, 0x8b6eff7b, - 0x00000400, 0xbfa20041, - 0xbf830010, 0xb8fbf803, - 0xbfa0fffa, 0x8b6eff7b, - 0x00000900, 0xbfa20015, - 0x8b6eff7b, 0x000071ff, - 0xbfa10008, 0x8b6fff7b, - 0x00007080, 0xbfa10001, - 0xbeee1287, 0xb8eff801, - 0x846e8c6e, 0x8b6e6f6e, - 0xbfa2000a, 0x8b6eff6d, - 0x00ff0000, 0xbfa20007, - 0xb8eef801, 0x8b6eff6e, - 0x00000800, 0xbfa20003, + 0xb8fbf803, 0xbf0d9e6d, + 0xbfa10001, 0xbfbd0000, + 0xbf0d9f6d, 0xbfa20006, + 0x8b6eff78, 0x00002000, + 0xbfa10009, 0x8b6eff6d, + 0x00ff0000, 0xbfa2001e, 0x8b6eff7b, 0x00000400, - 0xbfa20026, 0xbefa4d82, - 0xbf89fc07, 0x84fa887a, - 0xf4005bbd, 0xf8000010, - 0xbf89fc07, 0x846e976e, - 0x9177ff77, 0x00800000, - 0x8c776e77, 0xf4045bbd, - 0xf8000000, 0xbf89fc07, - 0xf4045ebd, 0xf8000008, - 0xbf89fc07, 0x8bee6e6e, - 0xbfa10001, 0xbe80486e, - 0x8b6eff6d, 0x01ff0000, - 0xbfa20005, 0x8c78ff78, - 0x00002000, 0x80ec886c, - 0x82ed806d, 0xbfa00005, - 0x8b6eff6d, 0x01000000, - 0xbfa20002, 0x806c846c, - 0x826d806d, 0x8b6dff6d, - 0x0000ffff, 0x8bfe7e7e, - 0x8bea6a6a, 0xb978f802, - 0xbe804a6c, 0x8b6dff6d, - 0x0000ffff, 0xbefa0080, - 0xb97a0283, 0xbeee007e, - 0xbeef007f, 0xbefe0180, - 0xbefe4d84, 0xbf89fc07, - 0x8b7aff7f, 0x04000000, - 0x847a857a, 0x8c6d7a6d, - 0xbefa007e, 0x8b7bff7f, - 0x0000ffff, 0xbefe00c1, - 0xbeff00c1, 0xdca6c000, - 0x007a0000, 0x7e000280, - 0xbefe007a, 0xbeff007b, - 0xb8fb02dc, 0x847b997b, - 0xb8fa3b05, 0x807a817a, - 0xbf0d997b, 0xbfa20002, - 0x847a897a, 0xbfa00001, - 0x847a8a7a, 0xb8fb1e06, - 0x847b8a7b, 0x807a7b7a, + 0xbfa20041, 0xbf830010, + 0xb8fbf803, 0xbfa0fffa, + 0x8b6eff7b, 0x00000900, + 0xbfa20015, 0x8b6eff7b, + 0x000071ff, 0xbfa10008, + 0x8b6fff7b, 0x00007080, + 0xbfa10001, 0xbeee1287, + 0xb8eff801, 0x846e8c6e, + 0x8b6e6f6e, 0xbfa2000a, + 0x8b6eff6d, 0x00ff0000, + 0xbfa20007, 0xb8eef801, + 0x8b6eff6e, 0x00000800, + 0xbfa20003, 0x8b6eff7b, + 0x00000400, 0xbfa20026, + 0xbefa4d82, 0xbf89fc07, + 0x84fa887a, 0xf4005bbd, + 0xf8000010, 0xbf89fc07, + 0x846e976e, 0x9177ff77, + 0x00800000, 0x8c776e77, + 0xf4045bbd, 
0xf8000000, + 0xbf89fc07, 0xf4045ebd, + 0xf8000008, 0xbf89fc07, + 0x8bee6e6e, 0xbfa10001, + 0xbe80486e, 0x8b6eff6d, + 0x01ff0000, 0xbfa20005, + 0x8c78ff78, 0x00002000, + 0x80ec886c, 0x82ed806d, + 0xbfa00005, 0x8b6eff6d, + 0x01000000, 0xbfa20002, + 0x806c846c, 0x826d806d, + 0x8b6dff6d, 0x0000ffff, + 0x8bfe7e7e, 0x8bea6a6a, + 0xb978f802, 0xbe804a6c, + 0x8b6dff6d, 0x0000ffff, + 0xbefa0080, 0xb97a0283, + 0xbeee007e, 0xbeef007f, + 0xbefe0180, 0xbefe4d84, + 0xbf89fc07, 0x8b7aff7f, + 0x04000000, 0x847a857a, + 0x8c6d7a6d, 0xbefa007e, 0x8b7bff7f, 0x0000ffff, - 0x807aff7a, 0x00000200, - 0x807a7e7a, 0x827b807b, - 0xd7610000, 0x00010870, - 0xd7610000, 0x00010a71, - 0xd7610000, 0x00010c72, - 0xd7610000, 0x00010e73, - 0xd7610000, 0x00011074, - 0xd7610000, 0x00011275, - 0xd7610000, 0x00011476, - 0xd7610000, 0x00011677, - 0xd7610000, 0x00011a79, - 0xd7610000, 0x00011c7e, - 0xd7610000, 0x00011e7f, - 0xbefe00ff, 0x00003fff, - 0xbeff0080, 0xdca6c040, - 0x007a0000, 0xd760007a, - 0x00011d00, 0xd760007b, - 0x00011f00, 0xbefe007a, - 0xbeff007b, 0xbef4007e, - 0x8b75ff7f, 0x0000ffff, - 0x8c75ff75, 0x00040000, - 0xbef60080, 0xbef700ff, - 0x10807fac, 0xbef1007d, - 0xbef00080, 0xb8f302dc, - 0x84739973, 0xbefe00c1, - 0x857d9973, 0x8b7d817d, - 0xbf06817d, 0xbfa20002, - 0xbeff0080, 0xbfa00002, - 0xbeff00c1, 0xbfa00009, + 0xbefe00c1, 0xbeff00c1, + 0xdca6c000, 0x007a0000, + 0x7e000280, 0xbefe007a, + 0xbeff007b, 0xb8fb02dc, + 0x847b997b, 0xb8fa3b05, + 0x807a817a, 0xbf0d997b, + 0xbfa20002, 0x847a897a, + 0xbfa00001, 0x847a8a7a, + 0xb8fb1e06, 0x847b8a7b, + 0x807a7b7a, 0x8b7bff7f, + 0x0000ffff, 0x807aff7a, + 0x00000200, 0x807a7e7a, + 0x827b807b, 0xd7610000, + 0x00010870, 0xd7610000, + 0x00010a71, 0xd7610000, + 0x00010c72, 0xd7610000, + 0x00010e73, 0xd7610000, + 0x00011074, 0xd7610000, + 0x00011275, 0xd7610000, + 0x00011476, 0xd7610000, + 0x00011677, 0xd7610000, + 0x00011a79, 0xd7610000, + 0x00011c7e, 0xd7610000, + 0x00011e7f, 0xbefe00ff, + 0x00003fff, 0xbeff0080, + 0xdca6c040, 0x007a0000, + 0xd760007a, 0x00011d00, + 0xd760007b, 0x00011f00, + 0xbefe007a, 0xbeff007b, + 0xbef4007e, 0x8b75ff7f, + 0x0000ffff, 0x8c75ff75, + 0x00040000, 0xbef60080, + 0xbef700ff, 0x10807fac, + 0xbef1007d, 0xbef00080, + 0xb8f302dc, 0x84739973, + 0xbefe00c1, 0x857d9973, + 0x8b7d817d, 0xbf06817d, + 0xbfa20002, 0xbeff0080, + 0xbfa00002, 0xbeff00c1, + 0xbfa00009, 0xbef600ff, + 0x01000000, 0xe0685080, + 0x701d0100, 0xe0685100, + 0x701d0200, 0xe0685180, + 0x701d0300, 0xbfa00008, 0xbef600ff, 0x01000000, - 0xe0685080, 0x701d0100, - 0xe0685100, 0x701d0200, - 0xe0685180, 0x701d0300, - 0xbfa00008, 0xbef600ff, - 0x01000000, 0xe0685100, - 0x701d0100, 0xe0685200, - 0x701d0200, 0xe0685300, - 0x701d0300, 0xb8f03b05, - 0x80708170, 0xbf0d9973, - 0xbfa20002, 0x84708970, - 0xbfa00001, 0x84708a70, - 0xb8fa1e06, 0x847a8a7a, - 0x80707a70, 0x8070ff70, - 0x00000200, 0xbef600ff, - 0x01000000, 0x7e000280, - 0x7e020280, 0x7e040280, - 0xbefd0080, 0xd7610002, - 0x0000fa71, 0x807d817d, - 0xd7610002, 0x0000fa6c, - 0x807d817d, 0x917aff6d, - 0x80000000, 0xd7610002, - 0x0000fa7a, 0x807d817d, - 0xd7610002, 0x0000fa6e, - 0x807d817d, 0xd7610002, - 0x0000fa6f, 0x807d817d, - 0xd7610002, 0x0000fa78, - 0x807d817d, 0xb8faf803, - 0xd7610002, 0x0000fa7a, - 0x807d817d, 0xd7610002, - 0x0000fa7b, 0x807d817d, - 0xb8f1f801, 0xd7610002, - 0x0000fa71, 0x807d817d, - 0xb8f1f814, 0xd7610002, - 0x0000fa71, 0x807d817d, - 0xb8f1f815, 0xd7610002, - 0x0000fa71, 0x807d817d, - 0xbefe00ff, 0x0000ffff, - 0xbeff0080, 0xe0685000, - 0x701d0200, 0xbefe00c1, + 0xe0685100, 0x701d0100, + 0xe0685200, 0x701d0200, + 
0xe0685300, 0x701d0300, 0xb8f03b05, 0x80708170, 0xbf0d9973, 0xbfa20002, 0x84708970, 0xbfa00001, 0x84708a70, 0xb8fa1e06, 0x847a8a7a, 0x80707a70, + 0x8070ff70, 0x00000200, 0xbef600ff, 0x01000000, - 0xbef90080, 0xbefd0080, - 0xbf800000, 0xbe804100, - 0xbe824102, 0xbe844104, - 0xbe864106, 0xbe884108, - 0xbe8a410a, 0xbe8c410c, - 0xbe8e410e, 0xd7610002, - 0x0000f200, 0x80798179, - 0xd7610002, 0x0000f201, + 0x7e000280, 0x7e020280, + 0x7e040280, 0xbefd0080, + 0xd7610002, 0x0000fa71, + 0x807d817d, 0xd7610002, + 0x0000fa6c, 0x807d817d, + 0x917aff6d, 0x80000000, + 0xd7610002, 0x0000fa7a, + 0x807d817d, 0xd7610002, + 0x0000fa6e, 0x807d817d, + 0xd7610002, 0x0000fa6f, + 0x807d817d, 0xd7610002, + 0x0000fa78, 0x807d817d, + 0xb8faf803, 0xd7610002, + 0x0000fa7a, 0x807d817d, + 0xd7610002, 0x0000fa7b, + 0x807d817d, 0xb8f1f801, + 0xd7610002, 0x0000fa71, + 0x807d817d, 0xb8f1f814, + 0xd7610002, 0x0000fa71, + 0x807d817d, 0xb8f1f815, + 0xd7610002, 0x0000fa71, + 0x807d817d, 0xbefe00ff, + 0x0000ffff, 0xbeff0080, + 0xe0685000, 0x701d0200, + 0xbefe00c1, 0xb8f03b05, + 0x80708170, 0xbf0d9973, + 0xbfa20002, 0x84708970, + 0xbfa00001, 0x84708a70, + 0xb8fa1e06, 0x847a8a7a, + 0x80707a70, 0xbef600ff, + 0x01000000, 0xbef90080, + 0xbefd0080, 0xbf800000, + 0xbe804100, 0xbe824102, + 0xbe844104, 0xbe864106, + 0xbe884108, 0xbe8a410a, + 0xbe8c410c, 0xbe8e410e, + 0xd7610002, 0x0000f200, 0x80798179, 0xd7610002, - 0x0000f202, 0x80798179, - 0xd7610002, 0x0000f203, + 0x0000f201, 0x80798179, + 0xd7610002, 0x0000f202, 0x80798179, 0xd7610002, - 0x0000f204, 0x80798179, - 0xd7610002, 0x0000f205, + 0x0000f203, 0x80798179, + 0xd7610002, 0x0000f204, 0x80798179, 0xd7610002, - 0x0000f206, 0x80798179, - 0xd7610002, 0x0000f207, + 0x0000f205, 0x80798179, + 0xd7610002, 0x0000f206, 0x80798179, 0xd7610002, - 0x0000f208, 0x80798179, - 0xd7610002, 0x0000f209, + 0x0000f207, 0x80798179, + 0xd7610002, 0x0000f208, 0x80798179, 0xd7610002, - 0x0000f20a, 0x80798179, - 0xd7610002, 0x0000f20b, + 0x0000f209, 0x80798179, + 0xd7610002, 0x0000f20a, 0x80798179, 0xd7610002, - 0x0000f20c, 0x80798179, - 0xd7610002, 0x0000f20d, + 0x0000f20b, 0x80798179, + 0xd7610002, 0x0000f20c, 0x80798179, 0xd7610002, - 0x0000f20e, 0x80798179, - 0xd7610002, 0x0000f20f, - 0x80798179, 0xbf06a079, - 0xbfa10006, 0xe0685000, - 0x701d0200, 0x8070ff70, - 0x00000080, 0xbef90080, - 0x7e040280, 0x807d907d, - 0xbf0aff7d, 0x00000060, - 0xbfa2ffbc, 0xbe804100, - 0xbe824102, 0xbe844104, - 0xbe864106, 0xbe884108, - 0xbe8a410a, 0xd7610002, - 0x0000f200, 0x80798179, - 0xd7610002, 0x0000f201, + 0x0000f20d, 0x80798179, + 0xd7610002, 0x0000f20e, 0x80798179, 0xd7610002, - 0x0000f202, 0x80798179, - 0xd7610002, 0x0000f203, + 0x0000f20f, 0x80798179, + 0xbf06a079, 0xbfa10006, + 0xe0685000, 0x701d0200, + 0x8070ff70, 0x00000080, + 0xbef90080, 0x7e040280, + 0x807d907d, 0xbf0aff7d, + 0x00000060, 0xbfa2ffbc, + 0xbe804100, 0xbe824102, + 0xbe844104, 0xbe864106, + 0xbe884108, 0xbe8a410a, + 0xd7610002, 0x0000f200, 0x80798179, 0xd7610002, - 0x0000f204, 0x80798179, - 0xd7610002, 0x0000f205, + 0x0000f201, 0x80798179, + 0xd7610002, 0x0000f202, 0x80798179, 0xd7610002, - 0x0000f206, 0x80798179, - 0xd7610002, 0x0000f207, + 0x0000f203, 0x80798179, + 0xd7610002, 0x0000f204, 0x80798179, 0xd7610002, - 0x0000f208, 0x80798179, - 0xd7610002, 0x0000f209, + 0x0000f205, 0x80798179, + 0xd7610002, 0x0000f206, 0x80798179, 0xd7610002, - 0x0000f20a, 0x80798179, - 0xd7610002, 0x0000f20b, - 0x80798179, 0xe0685000, - 0x701d0200, 0xbefe00c1, - 0x857d9973, 0x8b7d817d, - 0xbf06817d, 0xbfa20002, - 0xbeff0080, 0xbfa00001, - 0xbeff00c1, 0xb8fb4306, - 
0x8b7bc17b, 0xbfa10044, - 0xbfbd0000, 0x8b7aff6d, - 0x80000000, 0xbfa10040, - 0x847b867b, 0x847b827b, - 0xbef6007b, 0xb8f03b05, - 0x80708170, 0xbf0d9973, - 0xbfa20002, 0x84708970, - 0xbfa00001, 0x84708a70, - 0xb8fa1e06, 0x847a8a7a, - 0x80707a70, 0x8070ff70, - 0x00000200, 0x8070ff70, - 0x00000080, 0xbef600ff, - 0x01000000, 0xd71f0000, - 0x000100c1, 0xd7200000, - 0x000200c1, 0x16000084, - 0x857d9973, 0x8b7d817d, - 0xbf06817d, 0xbefd0080, - 0xbfa20012, 0xbe8300ff, - 0x00000080, 0xbf800000, - 0xbf800000, 0xbf800000, - 0xd8d80000, 0x01000000, - 0xbf890000, 0xe0685000, - 0x701d0100, 0x807d037d, - 0x80700370, 0xd5250000, - 0x0001ff00, 0x00000080, - 0xbf0a7b7d, 0xbfa2fff4, - 0xbfa00011, 0xbe8300ff, - 0x00000100, 0xbf800000, - 0xbf800000, 0xbf800000, - 0xd8d80000, 0x01000000, - 0xbf890000, 0xe0685000, - 0x701d0100, 0x807d037d, - 0x80700370, 0xd5250000, - 0x0001ff00, 0x00000100, - 0xbf0a7b7d, 0xbfa2fff4, + 0x0000f207, 0x80798179, + 0xd7610002, 0x0000f208, + 0x80798179, 0xd7610002, + 0x0000f209, 0x80798179, + 0xd7610002, 0x0000f20a, + 0x80798179, 0xd7610002, + 0x0000f20b, 0x80798179, + 0xe0685000, 0x701d0200, 0xbefe00c1, 0x857d9973, 0x8b7d817d, 0xbf06817d, - 0xbfa20004, 0xbef000ff, - 0x00000200, 0xbeff0080, - 0xbfa00003, 0xbef000ff, - 0x00000400, 0xbeff00c1, - 0xb8fb3b05, 0x807b817b, - 0x847b827b, 0x857d9973, + 0xbfa20002, 0xbeff0080, + 0xbfa00001, 0xbeff00c1, + 0xb8fb4306, 0x8b7bc17b, + 0xbfa10044, 0xbfbd0000, + 0x8b7aff6d, 0x80000000, + 0xbfa10040, 0x847b867b, + 0x847b827b, 0xbef6007b, + 0xb8f03b05, 0x80708170, + 0xbf0d9973, 0xbfa20002, + 0x84708970, 0xbfa00001, + 0x84708a70, 0xb8fa1e06, + 0x847a8a7a, 0x80707a70, + 0x8070ff70, 0x00000200, + 0x8070ff70, 0x00000080, + 0xbef600ff, 0x01000000, + 0xd71f0000, 0x000100c1, + 0xd7200000, 0x000200c1, + 0x16000084, 0x857d9973, 0x8b7d817d, 0xbf06817d, - 0xbfa20017, 0xbef600ff, - 0x01000000, 0xbefd0084, - 0xbf0a7b7d, 0xbfa10037, - 0x7e008700, 0x7e028701, - 0x7e048702, 0x7e068703, - 0xe0685000, 0x701d0000, - 0xe0685080, 0x701d0100, - 0xe0685100, 0x701d0200, - 0xe0685180, 0x701d0300, - 0x807d847d, 0x8070ff70, - 0x00000200, 0xbf0a7b7d, - 0xbfa2ffef, 0xbfa00025, + 0xbefd0080, 0xbfa20012, + 0xbe8300ff, 0x00000080, + 0xbf800000, 0xbf800000, + 0xbf800000, 0xd8d80000, + 0x01000000, 0xbf890000, + 0xe0685000, 0x701d0100, + 0x807d037d, 0x80700370, + 0xd5250000, 0x0001ff00, + 0x00000080, 0xbf0a7b7d, + 0xbfa2fff4, 0xbfa00011, + 0xbe8300ff, 0x00000100, + 0xbf800000, 0xbf800000, + 0xbf800000, 0xd8d80000, + 0x01000000, 0xbf890000, + 0xe0685000, 0x701d0100, + 0x807d037d, 0x80700370, + 0xd5250000, 0x0001ff00, + 0x00000100, 0xbf0a7b7d, + 0xbfa2fff4, 0xbefe00c1, + 0x857d9973, 0x8b7d817d, + 0xbf06817d, 0xbfa20004, + 0xbef000ff, 0x00000200, + 0xbeff0080, 0xbfa00003, + 0xbef000ff, 0x00000400, + 0xbeff00c1, 0xb8fb3b05, + 0x807b817b, 0x847b827b, + 0x857d9973, 0x8b7d817d, + 0xbf06817d, 0xbfa20017, 0xbef600ff, 0x01000000, 0xbefd0084, 0xbf0a7b7d, - 0xbfa10011, 0x7e008700, + 0xbfa10037, 0x7e008700, 0x7e028701, 0x7e048702, 0x7e068703, 0xe0685000, - 0x701d0000, 0xe0685100, - 0x701d0100, 0xe0685200, - 0x701d0200, 0xe0685300, + 0x701d0000, 0xe0685080, + 0x701d0100, 0xe0685100, + 0x701d0200, 0xe0685180, 0x701d0300, 0x807d847d, - 0x8070ff70, 0x00000400, + 0x8070ff70, 0x00000200, 0xbf0a7b7d, 0xbfa2ffef, - 0xb8fb1e06, 0x8b7bc17b, - 0xbfa1000c, 0x847b837b, - 0x807b7d7b, 0xbefe00c1, - 0xbeff0080, 0x7e008700, + 0xbfa00025, 0xbef600ff, + 0x01000000, 0xbefd0084, + 0xbf0a7b7d, 0xbfa10011, + 0x7e008700, 0x7e028701, + 0x7e048702, 0x7e068703, 0xe0685000, 0x701d0000, - 0x807d817d, 0x8070ff70, - 0x00000080, 
0xbf0a7b7d, - 0xbfa2fff8, 0xbfa00146, - 0xbef4007e, 0x8b75ff7f, - 0x0000ffff, 0x8c75ff75, - 0x00040000, 0xbef60080, - 0xbef700ff, 0x10807fac, - 0xb8f202dc, 0x84729972, - 0x8b6eff7f, 0x04000000, - 0xbfa1003a, 0xbefe00c1, - 0x857d9972, 0x8b7d817d, - 0xbf06817d, 0xbfa20002, - 0xbeff0080, 0xbfa00001, - 0xbeff00c1, 0xb8ef4306, - 0x8b6fc16f, 0xbfa1002f, - 0x846f866f, 0x846f826f, - 0xbef6006f, 0xb8f83b05, - 0x80788178, 0xbf0d9972, - 0xbfa20002, 0x84788978, - 0xbfa00001, 0x84788a78, - 0xb8ee1e06, 0x846e8a6e, - 0x80786e78, 0x8078ff78, - 0x00000200, 0x8078ff78, - 0x00000080, 0xbef600ff, - 0x01000000, 0x857d9972, - 0x8b7d817d, 0xbf06817d, - 0xbefd0080, 0xbfa2000c, - 0xe0500000, 0x781d0000, - 0xbf8903f7, 0xdac00000, - 0x00000000, 0x807dff7d, - 0x00000080, 0x8078ff78, - 0x00000080, 0xbf0a6f7d, - 0xbfa2fff5, 0xbfa0000b, - 0xe0500000, 0x781d0000, - 0xbf8903f7, 0xdac00000, - 0x00000000, 0x807dff7d, - 0x00000100, 0x8078ff78, - 0x00000100, 0xbf0a6f7d, - 0xbfa2fff5, 0xbef80080, + 0xe0685100, 0x701d0100, + 0xe0685200, 0x701d0200, + 0xe0685300, 0x701d0300, + 0x807d847d, 0x8070ff70, + 0x00000400, 0xbf0a7b7d, + 0xbfa2ffef, 0xb8fb1e06, + 0x8b7bc17b, 0xbfa1000c, + 0x847b837b, 0x807b7d7b, + 0xbefe00c1, 0xbeff0080, + 0x7e008700, 0xe0685000, + 0x701d0000, 0x807d817d, + 0x8070ff70, 0x00000080, + 0xbf0a7b7d, 0xbfa2fff8, + 0xbfa00146, 0xbef4007e, + 0x8b75ff7f, 0x0000ffff, + 0x8c75ff75, 0x00040000, + 0xbef60080, 0xbef700ff, + 0x10807fac, 0xb8f202dc, + 0x84729972, 0x8b6eff7f, + 0x04000000, 0xbfa1003a, 0xbefe00c1, 0x857d9972, 0x8b7d817d, 0xbf06817d, 0xbfa20002, 0xbeff0080, 0xbfa00001, 0xbeff00c1, - 0xb8ef3b05, 0x806f816f, - 0x846f826f, 0x857d9972, - 0x8b7d817d, 0xbf06817d, - 0xbfa20024, 0xbef600ff, - 0x01000000, 0xbeee0078, + 0xb8ef4306, 0x8b6fc16f, + 0xbfa1002f, 0x846f866f, + 0x846f826f, 0xbef6006f, + 0xb8f83b05, 0x80788178, + 0xbf0d9972, 0xbfa20002, + 0x84788978, 0xbfa00001, + 0x84788a78, 0xb8ee1e06, + 0x846e8a6e, 0x80786e78, 0x8078ff78, 0x00000200, - 0xbefd0084, 0xbf0a6f7d, - 0xbfa10050, 0xe0505000, - 0x781d0000, 0xe0505080, - 0x781d0100, 0xe0505100, - 0x781d0200, 0xe0505180, - 0x781d0300, 0xbf8903f7, - 0x7e008500, 0x7e028501, - 0x7e048502, 0x7e068503, - 0x807d847d, 0x8078ff78, - 0x00000200, 0xbf0a6f7d, - 0xbfa2ffee, 0xe0505000, - 0x6e1d0000, 0xe0505080, - 0x6e1d0100, 0xe0505100, - 0x6e1d0200, 0xe0505180, - 0x6e1d0300, 0xbf8903f7, - 0xbfa00034, 0xbef600ff, - 0x01000000, 0xbeee0078, - 0x8078ff78, 0x00000400, - 0xbefd0084, 0xbf0a6f7d, - 0xbfa10012, 0xe0505000, - 0x781d0000, 0xe0505100, - 0x781d0100, 0xe0505200, - 0x781d0200, 0xe0505300, - 0x781d0300, 0xbf8903f7, - 0x7e008500, 0x7e028501, - 0x7e048502, 0x7e068503, - 0x807d847d, 0x8078ff78, - 0x00000400, 0xbf0a6f7d, - 0xbfa2ffee, 0xb8ef1e06, - 0x8b6fc16f, 0xbfa1000e, - 0x846f836f, 0x806f7d6f, - 0xbefe00c1, 0xbeff0080, + 0x8078ff78, 0x00000080, + 0xbef600ff, 0x01000000, + 0x857d9972, 0x8b7d817d, + 0xbf06817d, 0xbefd0080, + 0xbfa2000c, 0xe0500000, + 0x781d0000, 0xbf8903f7, + 0xdac00000, 0x00000000, + 0x807dff7d, 0x00000080, + 0x8078ff78, 0x00000080, + 0xbf0a6f7d, 0xbfa2fff5, + 0xbfa0000b, 0xe0500000, + 0x781d0000, 0xbf8903f7, + 0xdac00000, 0x00000000, + 0x807dff7d, 0x00000100, + 0x8078ff78, 0x00000100, + 0xbf0a6f7d, 0xbfa2fff5, + 0xbef80080, 0xbefe00c1, + 0x857d9972, 0x8b7d817d, + 0xbf06817d, 0xbfa20002, + 0xbeff0080, 0xbfa00001, + 0xbeff00c1, 0xb8ef3b05, + 0x806f816f, 0x846f826f, + 0x857d9972, 0x8b7d817d, + 0xbf06817d, 0xbfa20024, + 0xbef600ff, 0x01000000, + 0xbeee0078, 0x8078ff78, + 0x00000200, 0xbefd0084, + 0xbf0a6f7d, 0xbfa10050, 0xe0505000, 0x781d0000, + 0xe0505080, 
0x781d0100, + 0xe0505100, 0x781d0200, + 0xe0505180, 0x781d0300, 0xbf8903f7, 0x7e008500, - 0x807d817d, 0x8078ff78, - 0x00000080, 0xbf0a6f7d, - 0xbfa2fff7, 0xbeff00c1, + 0x7e028501, 0x7e048502, + 0x7e068503, 0x807d847d, + 0x8078ff78, 0x00000200, + 0xbf0a6f7d, 0xbfa2ffee, 0xe0505000, 0x6e1d0000, - 0xe0505100, 0x6e1d0100, - 0xe0505200, 0x6e1d0200, - 0xe0505300, 0x6e1d0300, - 0xbf8903f7, 0xb8f83b05, - 0x80788178, 0xbf0d9972, - 0xbfa20002, 0x84788978, - 0xbfa00001, 0x84788a78, - 0xb8ee1e06, 0x846e8a6e, - 0x80786e78, 0x8078ff78, - 0x00000200, 0x80f8ff78, - 0x00000050, 0xbef600ff, - 0x01000000, 0xbefd00ff, - 0x0000006c, 0x80f89078, - 0xf428403a, 0xf0000000, - 0xbf89fc07, 0x80fd847d, - 0xbf800000, 0xbe804300, - 0xbe824302, 0x80f8a078, - 0xf42c403a, 0xf0000000, - 0xbf89fc07, 0x80fd887d, - 0xbf800000, 0xbe804300, - 0xbe824302, 0xbe844304, - 0xbe864306, 0x80f8c078, - 0xf430403a, 0xf0000000, - 0xbf89fc07, 0x80fd907d, - 0xbf800000, 0xbe804300, - 0xbe824302, 0xbe844304, - 0xbe864306, 0xbe884308, - 0xbe8a430a, 0xbe8c430c, - 0xbe8e430e, 0xbf06807d, - 0xbfa1fff0, 0xb980f801, - 0x00000000, 0xbfbd0000, + 0xe0505080, 0x6e1d0100, + 0xe0505100, 0x6e1d0200, + 0xe0505180, 0x6e1d0300, + 0xbf8903f7, 0xbfa00034, + 0xbef600ff, 0x01000000, + 0xbeee0078, 0x8078ff78, + 0x00000400, 0xbefd0084, + 0xbf0a6f7d, 0xbfa10012, + 0xe0505000, 0x781d0000, + 0xe0505100, 0x781d0100, + 0xe0505200, 0x781d0200, + 0xe0505300, 0x781d0300, + 0xbf8903f7, 0x7e008500, + 0x7e028501, 0x7e048502, + 0x7e068503, 0x807d847d, + 0x8078ff78, 0x00000400, + 0xbf0a6f7d, 0xbfa2ffee, + 0xb8ef1e06, 0x8b6fc16f, + 0xbfa1000e, 0x846f836f, + 0x806f7d6f, 0xbefe00c1, + 0xbeff0080, 0xe0505000, + 0x781d0000, 0xbf8903f7, + 0x7e008500, 0x807d817d, + 0x8078ff78, 0x00000080, + 0xbf0a6f7d, 0xbfa2fff7, + 0xbeff00c1, 0xe0505000, + 0x6e1d0000, 0xe0505100, + 0x6e1d0100, 0xe0505200, + 0x6e1d0200, 0xe0505300, + 0x6e1d0300, 0xbf8903f7, 0xb8f83b05, 0x80788178, 0xbf0d9972, 0xbfa20002, 0x84788978, 0xbfa00001, 0x84788a78, 0xb8ee1e06, 0x846e8a6e, 0x80786e78, 0x8078ff78, 0x00000200, + 0x80f8ff78, 0x00000050, 0xbef600ff, 0x01000000, - 0xf4205bfa, 0xf0000000, - 0x80788478, 0xf4205b3a, + 0xbefd00ff, 0x0000006c, + 0x80f89078, 0xf428403a, + 0xf0000000, 0xbf89fc07, + 0x80fd847d, 0xbf800000, + 0xbe804300, 0xbe824302, + 0x80f8a078, 0xf42c403a, + 0xf0000000, 0xbf89fc07, + 0x80fd887d, 0xbf800000, + 0xbe804300, 0xbe824302, + 0xbe844304, 0xbe864306, + 0x80f8c078, 0xf430403a, + 0xf0000000, 0xbf89fc07, + 0x80fd907d, 0xbf800000, + 0xbe804300, 0xbe824302, + 0xbe844304, 0xbe864306, + 0xbe884308, 0xbe8a430a, + 0xbe8c430c, 0xbe8e430e, + 0xbf06807d, 0xbfa1fff0, + 0xb980f801, 0x00000000, + 0xbfbd0000, 0xb8f83b05, + 0x80788178, 0xbf0d9972, + 0xbfa20002, 0x84788978, + 0xbfa00001, 0x84788a78, + 0xb8ee1e06, 0x846e8a6e, + 0x80786e78, 0x8078ff78, + 0x00000200, 0xbef600ff, + 0x01000000, 0xf4205bfa, 0xf0000000, 0x80788478, - 0xf4205b7a, 0xf0000000, - 0x80788478, 0xf4205c3a, + 0xf4205b3a, 0xf0000000, + 0x80788478, 0xf4205b7a, 0xf0000000, 0x80788478, - 0xf4205c7a, 0xf0000000, - 0x80788478, 0xf4205eba, + 0xf4205c3a, 0xf0000000, + 0x80788478, 0xf4205c7a, 0xf0000000, 0x80788478, - 0xf4205efa, 0xf0000000, - 0x80788478, 0xf4205e7a, + 0xf4205eba, 0xf0000000, + 0x80788478, 0xf4205efa, 0xf0000000, 0x80788478, - 0xf4205cfa, 0xf0000000, - 0x80788478, 0xf4205bba, + 0xf4205e7a, 0xf0000000, + 0x80788478, 0xf4205cfa, 0xf0000000, 0x80788478, - 0xbf89fc07, 0xb96ef814, 0xf4205bba, 0xf0000000, 0x80788478, 0xbf89fc07, - 0xb96ef815, 0xbefd006f, - 0xbefe0070, 0xbeff0071, - 0x8b6f7bff, 0x000003ff, - 0xb96f4803, 0x8b6f7bff, - 
0xfffff800, 0x856f8b6f, - 0xb96fa2c3, 0xb973f801, - 0xb8ee3b05, 0x806e816e, - 0xbf0d9972, 0xbfa20002, - 0x846e896e, 0xbfa00001, - 0x846e8a6e, 0xb8ef1e06, - 0x846f8a6f, 0x806e6f6e, - 0x806eff6e, 0x00000200, - 0x806e746e, 0x826f8075, - 0x8b6fff6f, 0x0000ffff, - 0xf4085c37, 0xf8000050, - 0xf4085d37, 0xf8000060, - 0xf4005e77, 0xf8000074, - 0xbf89fc07, 0x8b6dff6d, - 0x0000ffff, 0x8bfe7e7e, - 0x8bea6a6a, 0xb8eef802, - 0xbf0d866e, 0xbfa20002, - 0xb97af802, 0xbe80486c, - 0xb97af802, 0xbe804a6c, - 0xbfb00000, 0xbf9f0000, + 0xb96ef814, 0xf4205bba, + 0xf0000000, 0x80788478, + 0xbf89fc07, 0xb96ef815, + 0xbefd006f, 0xbefe0070, + 0xbeff0071, 0x8b6f7bff, + 0x000003ff, 0xb96f4803, + 0x8b6f7bff, 0xfffff800, + 0x856f8b6f, 0xb96fa2c3, + 0xb973f801, 0xb8ee3b05, + 0x806e816e, 0xbf0d9972, + 0xbfa20002, 0x846e896e, + 0xbfa00001, 0x846e8a6e, + 0xb8ef1e06, 0x846f8a6f, + 0x806e6f6e, 0x806eff6e, + 0x00000200, 0x806e746e, + 0x826f8075, 0x8b6fff6f, + 0x0000ffff, 0xf4085c37, + 0xf8000050, 0xf4085d37, + 0xf8000060, 0xf4005e77, + 0xf8000074, 0xbf89fc07, + 0x8b6dff6d, 0x0000ffff, + 0x8bfe7e7e, 0x8bea6a6a, + 0xb8eef802, 0xbf0d866e, + 0xbfa20002, 0xb97af802, + 0xbe80486c, 0xb97af802, + 0xbe804a6c, 0xbfb00000, 0xbf9f0000, 0xbf9f0000, 0xbf9f0000, 0xbf9f0000, + 0xbf9f0000, 0x00000000, }; diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm index 0f81670f6f9c..8b92c33c2a7c 100644 --- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm +++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx10.asm @@ -186,6 +186,12 @@ L_SKIP_RESTORE: s_getreg_b32 s_save_trapsts, hwreg(HW_REG_TRAPSTS) #if SW_SA_TRAP + // If ttmp1[30] is set then issue s_barrier to unblock dependent waves. + s_bitcmp1_b32 s_save_pc_hi, 30 + s_cbranch_scc0 L_TRAP_NO_BARRIER + s_barrier + +L_TRAP_NO_BARRIER: // If ttmp1[31] is set then trap may occur early. // Spin wait until SAVECTX exception is raised. s_bitcmp1_b32 s_save_pc_hi, 31 diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c index 5feaba6a77de..6d291aa6386b 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c @@ -1950,7 +1950,7 @@ static int criu_checkpoint(struct file *filep, { int ret; uint32_t num_devices, num_bos, num_objects; - uint64_t priv_size, priv_offset = 0; + uint64_t priv_size, priv_offset = 0, bo_priv_offset; if (!args->devices || !args->bos || !args->priv_data) return -EINVAL; @@ -1994,38 +1994,34 @@ static int criu_checkpoint(struct file *filep, if (ret) goto exit_unlock; - ret = criu_checkpoint_bos(p, num_bos, (uint8_t __user *)args->bos, - (uint8_t __user *)args->priv_data, &priv_offset); - if (ret) - goto exit_unlock; + /* Leave room for BOs in the private data. They need to be restored + * before events, but we checkpoint them last to simplify the error + * handling. 
+ */ + bo_priv_offset = priv_offset; + priv_offset += num_bos * sizeof(struct kfd_criu_bo_priv_data); if (num_objects) { ret = kfd_criu_checkpoint_queues(p, (uint8_t __user *)args->priv_data, &priv_offset); if (ret) - goto close_bo_fds; + goto exit_unlock; ret = kfd_criu_checkpoint_events(p, (uint8_t __user *)args->priv_data, &priv_offset); if (ret) - goto close_bo_fds; + goto exit_unlock; ret = kfd_criu_checkpoint_svm(p, (uint8_t __user *)args->priv_data, &priv_offset); if (ret) - goto close_bo_fds; + goto exit_unlock; } -close_bo_fds: - if (ret) { - /* If IOCTL returns err, user assumes all FDs opened in criu_dump_bos are closed */ - uint32_t i; - struct kfd_criu_bo_bucket *bo_buckets = (struct kfd_criu_bo_bucket *) args->bos; - - for (i = 0; i < num_bos; i++) { - if (bo_buckets[i].alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) - close_fd(bo_buckets[i].dmabuf_fd); - } - } + /* This must be the last thing in this function that can fail. + * Otherwise we leak dmabuf file descriptors. + */ + ret = criu_checkpoint_bos(p, num_bos, (uint8_t __user *)args->bos, + (uint8_t __user *)args->priv_data, &bo_priv_offset); exit_unlock: mutex_unlock(&p->mutex); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c index cd5f8b219bf9..8bfdfd062ff6 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c @@ -795,6 +795,102 @@ static struct kfd_gpu_cache_info yellow_carp_cache_info[] = { }, }; +static struct kfd_gpu_cache_info gfx1037_cache_info[] = { + { + /* TCP L1 Cache per CU */ + .cache_size = 16, + .cache_level = 1, + .flags = (CRAT_CACHE_FLAGS_ENABLED | + CRAT_CACHE_FLAGS_DATA_CACHE | + CRAT_CACHE_FLAGS_SIMD_CACHE), + .num_cu_shared = 1, + }, + { + /* Scalar L1 Instruction Cache per SQC */ + .cache_size = 32, + .cache_level = 1, + .flags = (CRAT_CACHE_FLAGS_ENABLED | + CRAT_CACHE_FLAGS_INST_CACHE | + CRAT_CACHE_FLAGS_SIMD_CACHE), + .num_cu_shared = 2, + }, + { + /* Scalar L1 Data Cache per SQC */ + .cache_size = 16, + .cache_level = 1, + .flags = (CRAT_CACHE_FLAGS_ENABLED | + CRAT_CACHE_FLAGS_DATA_CACHE | + CRAT_CACHE_FLAGS_SIMD_CACHE), + .num_cu_shared = 2, + }, + { + /* GL1 Data Cache per SA */ + .cache_size = 128, + .cache_level = 1, + .flags = (CRAT_CACHE_FLAGS_ENABLED | + CRAT_CACHE_FLAGS_DATA_CACHE | + CRAT_CACHE_FLAGS_SIMD_CACHE), + .num_cu_shared = 2, + }, + { + /* L2 Data Cache per GPU (Total Tex Cache) */ + .cache_size = 256, + .cache_level = 2, + .flags = (CRAT_CACHE_FLAGS_ENABLED | + CRAT_CACHE_FLAGS_DATA_CACHE | + CRAT_CACHE_FLAGS_SIMD_CACHE), + .num_cu_shared = 2, + }, +}; + +static struct kfd_gpu_cache_info gc_10_3_6_cache_info[] = { + { + /* TCP L1 Cache per CU */ + .cache_size = 16, + .cache_level = 1, + .flags = (CRAT_CACHE_FLAGS_ENABLED | + CRAT_CACHE_FLAGS_DATA_CACHE | + CRAT_CACHE_FLAGS_SIMD_CACHE), + .num_cu_shared = 1, + }, + { + /* Scalar L1 Instruction Cache per SQC */ + .cache_size = 32, + .cache_level = 1, + .flags = (CRAT_CACHE_FLAGS_ENABLED | + CRAT_CACHE_FLAGS_INST_CACHE | + CRAT_CACHE_FLAGS_SIMD_CACHE), + .num_cu_shared = 2, + }, + { + /* Scalar L1 Data Cache per SQC */ + .cache_size = 16, + .cache_level = 1, + .flags = (CRAT_CACHE_FLAGS_ENABLED | + CRAT_CACHE_FLAGS_DATA_CACHE | + CRAT_CACHE_FLAGS_SIMD_CACHE), + .num_cu_shared = 2, + }, + { + /* GL1 Data Cache per SA */ + .cache_size = 128, + .cache_level = 1, + .flags = (CRAT_CACHE_FLAGS_ENABLED | + CRAT_CACHE_FLAGS_DATA_CACHE | + CRAT_CACHE_FLAGS_SIMD_CACHE), + .num_cu_shared = 2, + }, + { + /* L2 Data Cache per GPU (Total Tex Cache) */ + 
.cache_size = 256, + .cache_level = 2, + .flags = (CRAT_CACHE_FLAGS_ENABLED | + CRAT_CACHE_FLAGS_DATA_CACHE | + CRAT_CACHE_FLAGS_SIMD_CACHE), + .num_cu_shared = 2, + }, +}; + static void kfd_populated_cu_info_cpu(struct kfd_topology_device *dev, struct crat_subtype_computeunit *cu) { @@ -1514,11 +1610,17 @@ static int kfd_fill_gpu_cache_info(struct kfd_dev *kdev, num_of_cache_types = ARRAY_SIZE(beige_goby_cache_info); break; case IP_VERSION(10, 3, 3): - case IP_VERSION(10, 3, 6): /* TODO: Double check these on production silicon */ - case IP_VERSION(10, 3, 7): /* TODO: Double check these on production silicon */ pcache_info = yellow_carp_cache_info; num_of_cache_types = ARRAY_SIZE(yellow_carp_cache_info); break; + case IP_VERSION(10, 3, 6): + pcache_info = gc_10_3_6_cache_info; + num_of_cache_types = ARRAY_SIZE(gc_10_3_6_cache_info); + break; + case IP_VERSION(10, 3, 7): + pcache_info = gfx1037_cache_info; + num_of_cache_types = ARRAY_SIZE(gfx1037_cache_info); + break; case IP_VERSION(11, 0, 0): case IP_VERSION(11, 0, 1): case IP_VERSION(11, 0, 2): diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_events.c index 83e3ce9f6049..729d26d648af 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_events.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.c @@ -506,6 +506,7 @@ int kfd_criu_restore_event(struct file *devkfd, ret = create_other_event(p, ev, &ev_priv->event_id); break; } + mutex_unlock(&p->event_mutex); exit: if (ret) @@ -513,8 +514,6 @@ exit: kfree(ev_priv); - mutex_unlock(&p->event_mutex); - return ret; } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c index 2797029bd500..22b077ac9a19 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c @@ -973,12 +973,10 @@ out_unlock_prange: out_unlock_svms: mutex_unlock(&p->svms.lock); out_unref_process: + pr_debug("CPU fault svms 0x%p address 0x%lx done\n", &p->svms, addr); kfd_unref_process(p); out_mmput: mmput(mm); - - pr_debug("CPU fault svms 0x%p address 0x%lx done\n", &p->svms, addr); - return r ? VM_FAULT_SIGBUS : 0; } diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index c053cb79cd06..3e1ecca72430 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -147,6 +147,14 @@ MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU); /* Number of bytes in PSP footer for firmware. */ #define PSP_FOOTER_BYTES 0x100 +/* + * DMUB Async to Sync Mechanism Status + */ +#define DMUB_ASYNC_TO_SYNC_ACCESS_FAIL 1 +#define DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT 2 +#define DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS 3 +#define DMUB_ASYNC_TO_SYNC_ACCESS_INVALID 4 + /** * DOC: overview * @@ -1549,6 +1557,9 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) adev->dm.dc->debug.visual_confirm = amdgpu_dc_visual_confirm; + /* TODO: Remove after DP2 receiver gets proper support of Cable ID feature */ + adev->dm.dc->debug.ignore_cable_id = true; + r = dm_dmub_hw_init(adev); if (r) { DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r); @@ -1634,12 +1645,6 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) } } - if (amdgpu_dm_initialize_drm_device(adev)) { - DRM_ERROR( - "amdgpu: failed to initialize sw for display support.\n"); - goto error; - } - /* Enable outbox notification only after IRQ handlers are registered and DMUB is alive. 
* It is expected that DMUB will resend any pending notifications at this point, for * example HPD from DPIA. @@ -1647,6 +1652,12 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) if (dc_is_dmub_outbox_supported(adev->dm.dc)) dc_enable_dmub_outbox(adev->dm.dc); + if (amdgpu_dm_initialize_drm_device(adev)) { + DRM_ERROR( + "amdgpu: failed to initialize sw for display support.\n"); + goto error; + } + /* create fake encoders for MST */ dm_dp_create_fake_mst_encoders(adev); @@ -7619,9 +7630,10 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, bundle->surface_updates[planes_count].plane_info = &bundle->plane_infos[planes_count]; - fill_dc_dirty_rects(plane, old_plane_state, new_plane_state, - new_crtc_state, - &bundle->flip_addrs[planes_count]); + if (acrtc_state->stream->link->psr_settings.psr_feature_enabled) + fill_dc_dirty_rects(plane, old_plane_state, + new_plane_state, new_crtc_state, + &bundle->flip_addrs[planes_count]); /* * Only allow immediate flips for fast updates that don't @@ -10105,6 +10117,8 @@ static int amdgpu_dm_set_dmub_async_sync_status(bool is_cmd_aux, *operation_result = AUX_RET_ERROR_TIMEOUT; } else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_FAIL) { *operation_result = AUX_RET_ERROR_ENGINE_ACQUIRE; + } else if (status_type == DMUB_ASYNC_TO_SYNC_ACCESS_INVALID) { + *operation_result = AUX_RET_ERROR_INVALID_REPLY; } else { *operation_result = AUX_RET_ERROR_UNKNOWN; } @@ -10152,6 +10166,16 @@ int amdgpu_dm_process_dmub_aux_transfer_sync(bool is_cmd_aux, struct dc_context payload->reply[0] = adev->dm.dmub_notify->aux_reply.command; if (!payload->write && adev->dm.dmub_notify->aux_reply.length && payload->reply[0] == AUX_TRANSACTION_REPLY_AUX_ACK) { + + if (payload->length != adev->dm.dmub_notify->aux_reply.length) { + DRM_WARN("invalid read from DPIA AUX %x(%d) got length %d!\n", + payload->address, payload->length, + adev->dm.dmub_notify->aux_reply.length); + return amdgpu_dm_set_dmub_async_sync_status(is_cmd_aux, ctx, + DMUB_ASYNC_TO_SYNC_ACCESS_INVALID, + (uint32_t *)operation_result); + } + memcpy(payload->data, adev->dm.dmub_notify->aux_reply.data, adev->dm.dmub_notify->aux_reply.length); } diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h index b5ce15c43bcc..635c398fcefe 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h @@ -51,12 +51,6 @@ #define AMDGPU_DMUB_NOTIFICATION_MAX 5 /* - * DMUB Async to Sync Mechanism Status - */ -#define DMUB_ASYNC_TO_SYNC_ACCESS_FAIL 1 -#define DMUB_ASYNC_TO_SYNC_ACCESS_TIMEOUT 2 -#define DMUB_ASYNC_TO_SYNC_ACCESS_SUCCESS 3 -/* #include "include/amdgpu_dal_power_if.h" #include "amdgpu_dm_irq.h" */ diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c index 594fe8a4d02b..64dd02970292 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c @@ -412,7 +412,7 @@ int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm, { struct amdgpu_crtc *acrtc = NULL; struct drm_plane *cursor_plane; - + bool is_dcn; int res = -ENOMEM; cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL); @@ -450,8 +450,14 @@ int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm, acrtc->otg_inst = -1; dm->adev->mode_info.crtcs[crtc_index] = acrtc; - drm_crtc_enable_color_mgmt(&acrtc->base, MAX_COLOR_LUT_ENTRIES, + + /* Don't enable DRM CRTC degamma property for 
DCE since it doesn't + * support programmable degamma anywhere. + */ + is_dcn = dm->adev->dm.dc->caps.color.dpp.dcn_arch; + drm_crtc_enable_color_mgmt(&acrtc->base, is_dcn ? MAX_COLOR_LUT_ENTRIES : 0, true, MAX_COLOR_LUT_ENTRIES); + drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES); return 0; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c index dfd3be49eac8..e6854f7270a6 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c @@ -1369,7 +1369,7 @@ static bool dm_plane_format_mod_supported(struct drm_plane *plane, { struct amdgpu_device *adev = drm_to_adev(plane->dev); const struct drm_format_info *info = drm_format_info(format); - struct hw_asic_id asic_id = adev->dm.dc->ctx->asic_id; + int i; enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3; @@ -1386,49 +1386,13 @@ static bool dm_plane_format_mod_supported(struct drm_plane *plane, return true; } - /* check if swizzle mode is supported by this version of DCN */ - switch (asic_id.chip_family) { - case FAMILY_SI: - case FAMILY_CI: - case FAMILY_KV: - case FAMILY_CZ: - case FAMILY_VI: - /* asics before AI does not have modifier support */ - return false; - case FAMILY_AI: - case FAMILY_RV: - case FAMILY_NV: - case FAMILY_VGH: - case FAMILY_YELLOW_CARP: - case AMDGPU_FAMILY_GC_10_3_6: - case AMDGPU_FAMILY_GC_10_3_7: - switch (AMD_FMT_MOD_GET(TILE, modifier)) { - case AMD_FMT_MOD_TILE_GFX9_64K_R_X: - case AMD_FMT_MOD_TILE_GFX9_64K_D_X: - case AMD_FMT_MOD_TILE_GFX9_64K_S_X: - case AMD_FMT_MOD_TILE_GFX9_64K_D: - return true; - default: - return false; - } - break; - case AMDGPU_FAMILY_GC_11_0_0: - case AMDGPU_FAMILY_GC_11_0_1: - switch (AMD_FMT_MOD_GET(TILE, modifier)) { - case AMD_FMT_MOD_TILE_GFX11_256K_R_X: - case AMD_FMT_MOD_TILE_GFX9_64K_R_X: - case AMD_FMT_MOD_TILE_GFX9_64K_D_X: - case AMD_FMT_MOD_TILE_GFX9_64K_S_X: - case AMD_FMT_MOD_TILE_GFX9_64K_D: - return true; - default: - return false; - } - break; - default: - ASSERT(0); /* Unknown asic */ - break; + /* Check that the modifier is on the list of the plane's supported modifiers. 
*/ + for (i = 0; i < plane->modifier_count; i++) { + if (modifier == plane->modifiers[i]) + break; } + if (i == plane->modifier_count) + return false; /* * For D swizzle the canonical modifier depends on the bpp, so check diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c index ee0456b5e14e..e0c8d6f09bb4 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c +++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c @@ -2393,6 +2393,26 @@ static enum bp_result get_vram_info_v25( return result; } +static enum bp_result get_vram_info_v30( + struct bios_parser *bp, + struct dc_vram_info *info) +{ + struct atom_vram_info_header_v3_0 *info_v30; + enum bp_result result = BP_RESULT_OK; + + info_v30 = GET_IMAGE(struct atom_vram_info_header_v3_0, + DATA_TABLES(vram_info)); + + if (info_v30 == NULL) + return BP_RESULT_BADBIOSTABLE; + + info->num_chans = info_v30->channel_num; + info->dram_channel_width_bytes = (1 << info_v30->channel_width) / 8; + + return result; +} + + /* * get_integrated_info_v11 * @@ -3060,6 +3080,16 @@ static enum bp_result bios_parser_get_vram_info( } break; + case 3: + switch (revision.minor) { + case 0: + result = get_vram_info_v30(bp, info); + break; + default: + break; + } + break; + default: return result; } diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c index 1131c6d73f6c..20a06c04e4a1 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn314/dcn314_clk_mgr.c @@ -363,32 +363,32 @@ static struct wm_table ddr5_wm_table = { .wm_inst = WM_A, .wm_type = WM_TYPE_PSTATE_CHG, .pstate_latency_us = 11.72, - .sr_exit_time_us = 9, - .sr_enter_plus_exit_time_us = 11, + .sr_exit_time_us = 12.5, + .sr_enter_plus_exit_time_us = 14.5, .valid = true, }, { .wm_inst = WM_B, .wm_type = WM_TYPE_PSTATE_CHG, .pstate_latency_us = 11.72, - .sr_exit_time_us = 9, - .sr_enter_plus_exit_time_us = 11, + .sr_exit_time_us = 12.5, + .sr_enter_plus_exit_time_us = 14.5, .valid = true, }, { .wm_inst = WM_C, .wm_type = WM_TYPE_PSTATE_CHG, .pstate_latency_us = 11.72, - .sr_exit_time_us = 9, - .sr_enter_plus_exit_time_us = 11, + .sr_exit_time_us = 12.5, + .sr_enter_plus_exit_time_us = 14.5, .valid = true, }, { .wm_inst = WM_D, .wm_type = WM_TYPE_PSTATE_CHG, .pstate_latency_us = 11.72, - .sr_exit_time_us = 9, - .sr_enter_plus_exit_time_us = 11, + .sr_exit_time_us = 12.5, + .sr_enter_plus_exit_time_us = 14.5, .valid = true, }, } @@ -400,32 +400,32 @@ static struct wm_table lpddr5_wm_table = { .wm_inst = WM_A, .wm_type = WM_TYPE_PSTATE_CHG, .pstate_latency_us = 11.65333, - .sr_exit_time_us = 11.5, - .sr_enter_plus_exit_time_us = 14.5, + .sr_exit_time_us = 16.5, + .sr_enter_plus_exit_time_us = 18.5, .valid = true, }, { .wm_inst = WM_B, .wm_type = WM_TYPE_PSTATE_CHG, .pstate_latency_us = 11.65333, - .sr_exit_time_us = 11.5, - .sr_enter_plus_exit_time_us = 14.5, + .sr_exit_time_us = 16.5, + .sr_enter_plus_exit_time_us = 18.5, .valid = true, }, { .wm_inst = WM_C, .wm_type = WM_TYPE_PSTATE_CHG, .pstate_latency_us = 11.65333, - .sr_exit_time_us = 11.5, - .sr_enter_plus_exit_time_us = 14.5, + .sr_exit_time_us = 16.5, + .sr_enter_plus_exit_time_us = 18.5, .valid = true, }, { .wm_inst = WM_D, .wm_type = WM_TYPE_PSTATE_CHG, .pstate_latency_us = 11.65333, - .sr_exit_time_us = 11.5, - .sr_enter_plus_exit_time_us = 14.5, + .sr_exit_time_us = 16.5, + .sr_enter_plus_exit_time_us = 18.5, .valid = 
true, }, } diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c index 1c612ccf1944..6f77d8e538ab 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c @@ -157,6 +157,7 @@ void dcn32_init_clocks(struct clk_mgr *clk_mgr_base) struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base); unsigned int num_levels; struct clk_limit_num_entries *num_entries_per_clk = &clk_mgr_base->bw_params->clk_table.num_entries_per_clk; + unsigned int i; memset(&(clk_mgr_base->clks), 0, sizeof(struct dc_clocks)); clk_mgr_base->clks.p_state_change_support = true; @@ -205,18 +206,17 @@ void dcn32_init_clocks(struct clk_mgr *clk_mgr_base) clk_mgr->dpm_present = true; if (clk_mgr_base->ctx->dc->debug.min_disp_clk_khz) { - unsigned int i; - for (i = 0; i < num_levels; i++) if (clk_mgr_base->bw_params->clk_table.entries[i].dispclk_mhz < khz_to_mhz_ceil(clk_mgr_base->ctx->dc->debug.min_disp_clk_khz)) clk_mgr_base->bw_params->clk_table.entries[i].dispclk_mhz = khz_to_mhz_ceil(clk_mgr_base->ctx->dc->debug.min_disp_clk_khz); } + for (i = 0; i < num_levels; i++) + if (clk_mgr_base->bw_params->clk_table.entries[i].dispclk_mhz > 1950) + clk_mgr_base->bw_params->clk_table.entries[i].dispclk_mhz = 1950; if (clk_mgr_base->ctx->dc->debug.min_dpp_clk_khz) { - unsigned int i; - for (i = 0; i < num_levels; i++) if (clk_mgr_base->bw_params->clk_table.entries[i].dppclk_mhz < khz_to_mhz_ceil(clk_mgr_base->ctx->dc->debug.min_dpp_clk_khz)) @@ -669,6 +669,9 @@ static void dcn32_get_memclk_states_from_smu(struct clk_mgr *clk_mgr_base) &clk_mgr_base->bw_params->clk_table.entries[0].memclk_mhz, &num_entries_per_clk->num_memclk_levels); + /* memclk must have at least one level */ + num_entries_per_clk->num_memclk_levels = num_entries_per_clk->num_memclk_levels ? 
num_entries_per_clk->num_memclk_levels : 1; + dcn32_init_single_clock(clk_mgr, PPCLK_FCLK, &clk_mgr_base->bw_params->clk_table.entries[0].fclk_mhz, &num_entries_per_clk->num_fclk_levels); diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index bfc5474c0f4c..0598465fd1a1 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -852,6 +852,8 @@ struct dc_debug_options { bool enable_double_buffered_dsc_pg_support; bool enable_dp_dig_pixel_rate_div_policy; enum lttpr_mode lttpr_mode_override; + unsigned int dsc_delay_factor_wa_x1000; + unsigned int min_prefetch_in_strobe_ns; }; struct gpu_info_soc_bounding_box_v1_0; diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c index 4996d2810edb..938dba5249d4 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubp.c @@ -623,6 +623,10 @@ void hubp2_cursor_set_attributes( hubp->att.size.bits.width = attr->width; hubp->att.size.bits.height = attr->height; hubp->att.cur_ctl.bits.mode = attr->color_format; + + hubp->cur_rect.w = attr->width; + hubp->cur_rect.h = attr->height; + hubp->att.cur_ctl.bits.pitch = hw_pitch; hubp->att.cur_ctl.bits.line_per_chunk = lpc; hubp->att.cur_ctl.bits.cur_2x_magnify = attr->attribute_flags.bits.ENABLE_MAGNIFICATION; diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c index d732b6f031a1..a7e0001a8f46 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c @@ -1270,16 +1270,6 @@ void dcn20_pipe_control_lock( lock, &hw_locks, &inst_flags); - } else if (pipe->stream && pipe->stream->mall_stream_config.type == SUBVP_MAIN) { - union dmub_inbox0_cmd_lock_hw hw_lock_cmd = { 0 }; - hw_lock_cmd.bits.command_code = DMUB_INBOX0_CMD__HW_LOCK; - hw_lock_cmd.bits.hw_lock_client = HW_LOCK_CLIENT_DRIVER; - hw_lock_cmd.bits.lock_pipe = 1; - hw_lock_cmd.bits.otg_inst = pipe->stream_res.tg->inst; - hw_lock_cmd.bits.lock = lock; - if (!lock) - hw_lock_cmd.bits.should_release = 1; - dmub_hw_lock_mgr_inbox0_cmd(dc->ctx->dmub_srv, hw_lock_cmd); } else if (pipe->plane_state != NULL && pipe->plane_state->triplebuffer_flips) { if (lock) pipe->stream_res.tg->funcs->triplebuffer_lock(pipe->stream_res.tg); @@ -1856,7 +1846,7 @@ void dcn20_post_unlock_program_front_end( for (j = 0; j < TIMEOUT_FOR_PIPE_ENABLE_MS*1000 && hubp->funcs->hubp_is_flip_pending(hubp); j++) - mdelay(1); + udelay(1); } } diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubp.c index 84e1486f3d51..39a57bcd7866 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hubp.c @@ -87,6 +87,7 @@ static struct hubp_funcs dcn31_hubp_funcs = { .hubp_init = hubp3_init, .set_unbounded_requesting = hubp31_set_unbounded_requesting, .hubp_soft_reset = hubp31_soft_reset, + .hubp_set_flip_int = hubp1_set_flip_int, .hubp_in_blank = hubp1_in_blank, .program_extended_blank = hubp31_program_extended_blank, }; diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dio_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dio_stream_encoder.c index 7e773bf7b895..38842f938bed 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dio_stream_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_dio_stream_encoder.c @@ -49,18 +49,30 @@ #define CTX \ 
enc1->base.ctx +static void enc314_reset_fifo(struct stream_encoder *enc, bool reset) +{ + struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc); + uint32_t reset_val = reset ? 1 : 0; + uint32_t is_symclk_on; + + REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_RESET, reset_val); + REG_GET(DIG_FE_CNTL, DIG_SYMCLK_FE_ON, &is_symclk_on); + + if (is_symclk_on) + REG_WAIT(DIG_FIFO_CTRL0, DIG_FIFO_RESET_DONE, reset_val, 10, 5000); + else + udelay(10); +} static void enc314_enable_fifo(struct stream_encoder *enc) { struct dcn10_stream_encoder *enc1 = DCN10STRENC_FROM_STRENC(enc); - /* TODO: Confirm if we need to wait for DIG_SYMCLK_FE_ON */ - REG_WAIT(DIG_FE_CNTL, DIG_SYMCLK_FE_ON, 1, 10, 5000); REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_READ_START_LEVEL, 0x7); - REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_RESET, 1); - REG_WAIT(DIG_FIFO_CTRL0, DIG_FIFO_RESET_DONE, 1, 10, 5000); - REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_RESET, 0); - REG_WAIT(DIG_FIFO_CTRL0, DIG_FIFO_RESET_DONE, 0, 10, 5000); + + enc314_reset_fifo(enc, true); + enc314_reset_fifo(enc, false); + REG_UPDATE(DIG_FIFO_CTRL0, DIG_FIFO_ENABLE, 1); } diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_optc.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_optc.c index 47eb162f1a75..7dd36e402bac 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_optc.c +++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_optc.c @@ -237,7 +237,7 @@ static struct timing_generator_funcs dcn314_tg_funcs = { .clear_optc_underflow = optc1_clear_optc_underflow, .setup_global_swap_lock = NULL, .get_crc = optc1_get_crc, - .configure_crc = optc2_configure_crc, + .configure_crc = optc1_configure_crc, .set_dsc_config = optc3_set_dsc_config, .get_dsc_status = optc2_get_dsc_status, .set_dwb_source = NULL, diff --git a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c index d0ad72caead2..9066c511a052 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn314/dcn314_resource.c @@ -847,7 +847,7 @@ static const struct resource_caps res_cap_dcn314 = { .num_ddc = 5, .num_vmid = 16, .num_mpc_3dlut = 2, - .num_dsc = 3, + .num_dsc = 4, }; static const struct dc_plane_cap plane_cap = { diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c index cf5bd9713f54..ac41a763bb1d 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c @@ -283,8 +283,7 @@ static uint32_t dcn32_calculate_cab_allocation(struct dc *dc, struct dc_state *c using the max for calculation */ if (hubp->curs_attr.width > 0) { - // Round cursor width to next multiple of 64 - cursor_size = (((hubp->curs_attr.width + 63) / 64) * 64) * hubp->curs_attr.height; + cursor_size = hubp->curs_attr.pitch * hubp->curs_attr.height; switch (pipe->stream->cursor_attributes.color_format) { case CURSOR_MODE_MONO: @@ -309,9 +308,9 @@ static uint32_t dcn32_calculate_cab_allocation(struct dc *dc, struct dc_state *c cursor_size > 16384) { /* cursor_num_mblk = CEILING(num_cursors*cursor_width*cursor_width*cursor_Bpe/mblk_bytes, 1) */ - cache_lines_used += (((hubp->curs_attr.width * hubp->curs_attr.height * cursor_bpp + - DCN3_2_MALL_MBLK_SIZE_BYTES - 1) / DCN3_2_MALL_MBLK_SIZE_BYTES) * - DCN3_2_MALL_MBLK_SIZE_BYTES) / dc->caps.cache_line_size + 2; + cache_lines_used += (((cursor_size + DCN3_2_MALL_MBLK_SIZE_BYTES - 1) / + DCN3_2_MALL_MBLK_SIZE_BYTES) * DCN3_2_MALL_MBLK_SIZE_BYTES) / + 
dc->caps.cache_line_size + 2; } break; } @@ -727,10 +726,7 @@ void dcn32_update_mall_sel(struct dc *dc, struct dc_state *context) struct hubp *hubp = pipe->plane_res.hubp; if (pipe->stream && pipe->plane_state && hubp && hubp->funcs->hubp_update_mall_sel) { - //Round cursor width up to next multiple of 64 - int cursor_width = ((hubp->curs_attr.width + 63) / 64) * 64; - int cursor_height = hubp->curs_attr.height; - int cursor_size = cursor_width * cursor_height; + int cursor_size = hubp->curs_attr.pitch * hubp->curs_attr.height; switch (hubp->curs_attr.color_format) { case CURSOR_MODE_MONO: diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c index a88dd7b3d1c1..d1598e3131f6 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c @@ -724,6 +724,7 @@ static const struct dc_debug_options debug_defaults_drv = { .enable_dp_dig_pixel_rate_div_policy = 1, .allow_sw_cursor_fallback = false, .alloc_extra_way_for_cursor = true, + .min_prefetch_in_strobe_ns = 60000, // 60us }; static const struct dc_debug_options debug_defaults_diags = { diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c index d51d0c40ae5b..b03a7814e96d 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c @@ -200,7 +200,7 @@ bool dcn32_all_pipes_have_stream_and_plane(struct dc *dc, struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; if (!pipe->stream) - return false; + continue; if (!pipe->plane_state) return false; diff --git a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c b/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c index 61087f2385a9..6292ac515d1a 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn321/dcn321_resource.c @@ -722,6 +722,7 @@ static const struct dc_debug_options debug_defaults_drv = { .enable_dp_dig_pixel_rate_div_policy = 1, .allow_sw_cursor_fallback = false, .alloc_extra_way_for_cursor = true, + .min_prefetch_in_strobe_ns = 60000, // 60us }; static const struct dc_debug_options debug_defaults_diags = { diff --git a/drivers/gpu/drm/amd/display/dc/dml/Makefile b/drivers/gpu/drm/amd/display/dc/dml/Makefile index d70838edba80..ca7d24000621 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dml/Makefile @@ -77,7 +77,7 @@ CFLAGS_$(AMDDALPATH)/dc/dml/dcn30/dcn30_fpu.o := $(dml_ccflags) CFLAGS_$(AMDDALPATH)/dc/dml/dcn32/dcn32_fpu.o := $(dml_ccflags) CFLAGS_$(AMDDALPATH)/dc/dml/dcn32/display_mode_vba_32.o := $(dml_ccflags) $(frame_warn_flag) CFLAGS_$(AMDDALPATH)/dc/dml/dcn32/display_rq_dlg_calc_32.o := $(dml_ccflags) -CFLAGS_$(AMDDALPATH)/dc/dml/dcn32/display_mode_vba_util_32.o := $(dml_ccflags) +CFLAGS_$(AMDDALPATH)/dc/dml/dcn32/display_mode_vba_util_32.o := $(dml_ccflags) $(frame_warn_flag) CFLAGS_$(AMDDALPATH)/dc/dml/dcn321/dcn321_fpu.o := $(dml_ccflags) CFLAGS_$(AMDDALPATH)/dc/dml/dcn31/dcn31_fpu.o := $(dml_ccflags) CFLAGS_$(AMDDALPATH)/dc/dml/dcn301/dcn301_fpu.o := $(dml_ccflags) diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c index d680f1c5b69f..45db40c41882 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/dcn20_fpu.c @@ -1228,6 +1228,7 @@ int 
dcn20_populate_dml_pipes_from_context( pipes[pipe_cnt].pipe.src.dcc = false; pipes[pipe_cnt].pipe.src.dcc_rate = 1; pipes[pipe_cnt].pipe.dest.synchronized_vblank_all_planes = synchronized_vblank; + pipes[pipe_cnt].pipe.dest.synchronize_timings = synchronized_vblank; pipes[pipe_cnt].pipe.dest.hblank_start = timing->h_total - timing->h_front_porch; pipes[pipe_cnt].pipe.dest.hblank_end = pipes[pipe_cnt].pipe.dest.hblank_start - timing->h_addressable diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c index cf420ad2b8dc..34b6c763a455 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn314/dcn314_fpu.c @@ -146,8 +146,8 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_14_soc = { }, }, .num_states = 5, - .sr_exit_time_us = 9.0, - .sr_enter_plus_exit_time_us = 11.0, + .sr_exit_time_us = 16.5, + .sr_enter_plus_exit_time_us = 18.5, .sr_exit_z8_time_us = 442.0, .sr_enter_plus_exit_z8_time_us = 560.0, .writeback_latency_us = 12.0, diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c index 819de0f11012..12e17bcfca3f 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c @@ -1803,6 +1803,12 @@ void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context, */ context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.pstate_latency_us; + /* For DCN32/321 need to validate with fclk pstate change latency equal to dummy so + * prefetch is scheduled correctly to account for dummy pstate. + */ + if (dummy_latency_index == 0) + context->bw_ctx.dml.soc.fclk_change_latency_us = + dc->clk_mgr->bw_params->dummy_pstate_table[dummy_latency_index].dummy_pstate_latency_us; dcn32_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, false); maxMpcComb = context->bw_ctx.dml.vba.maxMpcComb; dcfclk_from_fw_based_mclk_switching = context->bw_ctx.dml.vba.DCFCLKState[vlevel][context->bw_ctx.dml.vba.maxMpcComb]; @@ -1990,6 +1996,10 @@ void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context, context->perf_params.stutter_period_us = context->bw_ctx.dml.vba.StutterPeriod; + if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching && dummy_latency_index == 0) + context->bw_ctx.dml.soc.fclk_change_latency_us = + dc->clk_mgr->bw_params->dummy_pstate_table[dummy_latency_index].dummy_pstate_latency_us; + dcn32_calculate_dlg_params(dc, context, pipes, pipe_cnt, vlevel); if (!pstate_en) @@ -1997,8 +2007,12 @@ void dcn32_calculate_wm_and_dlg_fpu(struct dc *dc, struct dc_state *context, context->bw_ctx.dml.soc.dram_clock_change_latency_us = dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.pstate_latency_us; - if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching) + if (context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching) { dcn30_setup_mclk_switch_using_fw_based_vblank_stretch(dc, context); + if (dummy_latency_index == 0) + context->bw_ctx.dml.soc.fclk_change_latency_us = + dc->clk_mgr->bw_params->wm_table.nv_entries[WM_A].dml_input.fclk_change_latency_us; + } } static void dcn32_get_optimal_dcfclk_fclk_for_uclk(unsigned int uclk_mts, @@ -2359,9 +2373,13 @@ void dcn32_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_pa if (dc->ctx->dc_bios->vram_info.dram_channel_width_bytes) dcn3_2_soc.dram_channel_width_bytes = 
dc->ctx->dc_bios->vram_info.dram_channel_width_bytes; - } + /* DML DSC delay factor workaround */ + dcn3_2_ip.dsc_delay_factor_wa = dc->debug.dsc_delay_factor_wa_x1000 / 1000.0; + + dcn3_2_ip.min_prefetch_in_strobe_us = dc->debug.min_prefetch_in_strobe_ns / 1000.0; + /* Override dispclk_dppclk_vco_speed_mhz from Clk Mgr */ dcn3_2_soc.dispclk_dppclk_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0; dc->dml.soc.dispclk_dppclk_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0; diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c index 5b91660a6496..9afd9ba23fb2 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c @@ -364,10 +364,11 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman for (k = 0; k < mode_lib->vba.NumberOfActiveSurfaces; ++k) { v->DSCDelay[k] = dml32_DSCDelayRequirement(mode_lib->vba.DSCEnabled[k], mode_lib->vba.ODMCombineEnabled[k], mode_lib->vba.DSCInputBitPerComponent[k], - mode_lib->vba.OutputBpp[k], mode_lib->vba.HActive[k], mode_lib->vba.HTotal[k], + mode_lib->vba.OutputBppPerState[mode_lib->vba.VoltageLevel][k], + mode_lib->vba.HActive[k], mode_lib->vba.HTotal[k], mode_lib->vba.NumberOfDSCSlices[k], mode_lib->vba.OutputFormat[k], mode_lib->vba.Output[k], mode_lib->vba.PixelClock[k], - mode_lib->vba.PixelClockBackEnd[k]); + mode_lib->vba.PixelClockBackEnd[k], mode_lib->vba.ip.dsc_delay_factor_wa); } for (k = 0; k < mode_lib->vba.NumberOfActiveSurfaces; ++k) @@ -717,6 +718,8 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman do { MaxTotalRDBandwidth = 0; + DestinationLineTimesForPrefetchLessThan2 = false; + VRatioPrefetchMoreThanMax = false; #ifdef __DML_VBA_DEBUG__ dml_print("DML::%s: Start loop: VStartup = %d\n", __func__, mode_lib->vba.VStartupLines); #endif @@ -785,6 +788,8 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman v->SwathHeightY[k], v->SwathHeightC[k], TWait, + v->DRAMSpeedPerState[mode_lib->vba.VoltageLevel] <= MEM_STROBE_FREQ_MHZ ? 
+ mode_lib->vba.ip.min_prefetch_in_strobe_us : 0, /* Output */ &v->DSTXAfterScaler[k], &v->DSTYAfterScaler[k], @@ -1627,7 +1632,7 @@ static void mode_support_configuration(struct vba_vars_st *v, && !mode_lib->vba.MSOOrODMSplitWithNonDPLink && !mode_lib->vba.NotEnoughLanesForMSO && mode_lib->vba.LinkCapacitySupport[i] == true && !mode_lib->vba.P2IWith420 - && !mode_lib->vba.DSCOnlyIfNecessaryWithBPP + //&& !mode_lib->vba.DSCOnlyIfNecessaryWithBPP && !mode_lib->vba.DSC422NativeNotSupported && !mode_lib->vba.MPCCombineMethodIncompatible && mode_lib->vba.ODMCombine2To1SupportCheckOK[i] == true @@ -2475,7 +2480,8 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l mode_lib->vba.OutputBppPerState[i][k], mode_lib->vba.HActive[k], mode_lib->vba.HTotal[k], mode_lib->vba.NumberOfDSCSlices[k], mode_lib->vba.OutputFormat[k], mode_lib->vba.Output[k], - mode_lib->vba.PixelClock[k], mode_lib->vba.PixelClockBackEnd[k]); + mode_lib->vba.PixelClock[k], mode_lib->vba.PixelClockBackEnd[k], + mode_lib->vba.ip.dsc_delay_factor_wa); } for (k = 0; k <= mode_lib->vba.NumberOfActiveSurfaces - 1; k++) { @@ -3190,6 +3196,7 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l mode_lib->vba.FCLKChangeLatency, mode_lib->vba.UrgLatency[i], mode_lib->vba.SREnterPlusExitTime); + memset(&v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull, 0, sizeof(DmlPipe)); v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.myPipe.Dppclk = mode_lib->vba.RequiredDPPCLK[i][j][k]; v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.myPipe.Dispclk = mode_lib->vba.RequiredDISPCLK[i][j]; v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.myPipe.PixelClock = mode_lib->vba.PixelClock[k]; @@ -3242,6 +3249,8 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l v->swath_width_chroma_ub_this_state[k], v->SwathHeightYThisState[k], v->SwathHeightCThisState[k], v->TWait, + v->DRAMSpeedPerState[i] <= MEM_STROBE_FREQ_MHZ ? 
+ mode_lib->vba.ip.min_prefetch_in_strobe_us : 0, /* Output */ &v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.DSTXAfterScaler[k], diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.h b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.h index c62e0991358b..c8b28c83ddf4 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.h +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.h @@ -46,9 +46,14 @@ // Prefetch schedule max vratio #define __DML_MAX_VRATIO_PRE__ 4.0 +#define __DML_VBA_MAX_DST_Y_PRE__ 63.75 + #define BPP_INVALID 0 #define BPP_BLENDED_PIPE 0xffffffff +#define MEM_STROBE_FREQ_MHZ 1600 +#define MEM_STROBE_MAX_DELIVERY_TIME_US 60.0 + struct display_mode_lib; void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_lib); diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c index ad66e241f9ae..debe46b24a3e 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c @@ -1726,7 +1726,8 @@ unsigned int dml32_DSCDelayRequirement(bool DSCEnabled, enum output_format_class OutputFormat, enum output_encoder_class Output, double PixelClock, - double PixelClockBackEnd) + double PixelClockBackEnd, + double dsc_delay_factor_wa) { unsigned int DSCDelayRequirement_val; @@ -1746,7 +1747,7 @@ unsigned int dml32_DSCDelayRequirement(bool DSCEnabled, } DSCDelayRequirement_val = DSCDelayRequirement_val + (HTotal - HActive) * - dml_ceil(DSCDelayRequirement_val / HActive, 1); + dml_ceil((double)DSCDelayRequirement_val / HActive, 1); DSCDelayRequirement_val = DSCDelayRequirement_val * PixelClock / PixelClockBackEnd; @@ -1764,7 +1765,7 @@ unsigned int dml32_DSCDelayRequirement(bool DSCEnabled, dml_print("DML::%s: DSCDelayRequirement_val = %d\n", __func__, DSCDelayRequirement_val); #endif - return DSCDelayRequirement_val; + return dml_ceil(DSCDelayRequirement_val * dsc_delay_factor_wa, 1); } void dml32_CalculateSurfaceSizeInMall( @@ -3416,6 +3417,7 @@ bool dml32_CalculatePrefetchSchedule( unsigned int SwathHeightY, unsigned int SwathHeightC, double TWait, + double TPreReq, /* Output */ double *DSTXAfterScaler, double *DSTYAfterScaler, @@ -3666,6 +3668,7 @@ bool dml32_CalculatePrefetchSchedule( dst_y_prefetch_equ = VStartup - (*TSetup + dml_max(TWait + TCalc, *Tdmdl)) / LineTime - (*DSTYAfterScaler + (double) *DSTXAfterScaler / (double) myPipe->HTotal); + dst_y_prefetch_equ = dml_min(dst_y_prefetch_equ, __DML_VBA_MAX_DST_Y_PRE__); #ifdef __DML_VBA_DEBUG__ dml_print("DML::%s: HTotal = %d\n", __func__, myPipe->HTotal); dml_print("DML::%s: min_Lsw = %f\n", __func__, min_Lsw); @@ -3725,7 +3728,8 @@ bool dml32_CalculatePrefetchSchedule( *VRatioPrefetchY = 0; *VRatioPrefetchC = 0; *RequiredPrefetchPixDataBWLuma = 0; - if (dst_y_prefetch_equ > 1) { + if (dst_y_prefetch_equ > 1 && + (Tpre_rounded >= TPreReq || dst_y_prefetch_equ == __DML_VBA_MAX_DST_Y_PRE__)) { double PrefetchBandwidth1; double PrefetchBandwidth2; double PrefetchBandwidth3; @@ -3871,7 +3875,11 @@ bool dml32_CalculatePrefetchSchedule( } if (dst_y_prefetch_oto < dst_y_prefetch_equ) { - *DestinationLinesForPrefetch = dst_y_prefetch_oto; + if (dst_y_prefetch_oto * LineTime < TPreReq) { + *DestinationLinesForPrefetch = dst_y_prefetch_equ; + } else { + *DestinationLinesForPrefetch = dst_y_prefetch_oto; + } TimeForFetchingMetaPTE = Tvm_oto; TimeForFetchingRowInVBlank 
= Tr0_oto; *PrefetchBandwidth = prefetch_bw_oto; @@ -4396,7 +4404,7 @@ void dml32_CalculateWatermarksMALLUseAndDRAMSpeedChangeSupport( if (v->NumberOfActiveSurfaces > 1) { ActiveClockChangeLatencyHidingY = ActiveClockChangeLatencyHidingY - - (1 - 1 / v->NumberOfActiveSurfaces) * SwathHeightY[k] * v->HTotal[k] + - (1.0 - 1.0 / v->NumberOfActiveSurfaces) * SwathHeightY[k] * v->HTotal[k] / v->PixelClock[k] / v->VRatio[k]; } diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.h b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.h index 55cead0d4237..3989c2a28fae 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.h +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.h @@ -30,7 +30,7 @@ #include "os_types.h" #include "../dc_features.h" #include "../display_mode_structs.h" -#include "dml/display_mode_vba.h" +#include "../display_mode_vba.h" unsigned int dml32_dscceComputeDelay( unsigned int bpc, @@ -327,7 +327,8 @@ unsigned int dml32_DSCDelayRequirement(bool DSCEnabled, enum output_format_class OutputFormat, enum output_encoder_class Output, double PixelClock, - double PixelClockBackEnd); + double PixelClockBackEnd, + double dsc_delay_factor_wa); void dml32_CalculateSurfaceSizeInMall( unsigned int NumberOfActiveSurfaces, @@ -742,6 +743,7 @@ bool dml32_CalculatePrefetchSchedule( unsigned int SwathHeightY, unsigned int SwathHeightC, double TWait, + double TPreReq, /* Output */ double *DSTXAfterScaler, double *DSTYAfterScaler, diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_rq_dlg_calc_32.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_rq_dlg_calc_32.c index a1276f6b9581..395ae8761980 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_rq_dlg_calc_32.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_rq_dlg_calc_32.c @@ -291,8 +291,8 @@ void dml32_rq_dlg_get_dlg_reg(struct display_mode_lib *mode_lib, dml_print("DML_DLG: %s: vready_after_vcount0 = %d\n", __func__, dlg_regs->vready_after_vcount0); - dst_x_after_scaler = get_dst_x_after_scaler(mode_lib, e2e_pipe_param, num_pipes, pipe_idx); - dst_y_after_scaler = get_dst_y_after_scaler(mode_lib, e2e_pipe_param, num_pipes, pipe_idx); + dst_x_after_scaler = dml_ceil(get_dst_x_after_scaler(mode_lib, e2e_pipe_param, num_pipes, pipe_idx), 1); + dst_y_after_scaler = dml_ceil(get_dst_y_after_scaler(mode_lib, e2e_pipe_param, num_pipes, pipe_idx), 1); // do some adjustment on the dst_after scaler to account for odm combine mode dml_print("DML_DLG: %s: input dst_x_after_scaler = %d\n", __func__, dst_x_after_scaler); diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c index dd90f241e906..432b4ecd01a7 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn321/dcn321_fpu.c @@ -29,6 +29,7 @@ #include "dcn321_fpu.h" #include "dcn32/dcn32_resource.h" #include "dcn321/dcn321_resource.h" +#include "dml/dcn32/display_mode_vba_util_32.h" #define DCN3_2_DEFAULT_DET_SIZE 256 @@ -119,15 +120,15 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_21_soc = { }, }, .num_states = 1, - .sr_exit_time_us = 12.36, - .sr_enter_plus_exit_time_us = 16.72, + .sr_exit_time_us = 19.95, + .sr_enter_plus_exit_time_us = 24.36, .sr_exit_z8_time_us = 285.0, .sr_enter_plus_exit_z8_time_us = 320, .writeback_latency_us = 12.0, .round_trip_ping_latency_dcfclk_cycles = 263, - .urgent_latency_pixel_data_only_us = 4.0, - 
.urgent_latency_pixel_mixed_with_vm_data_us = 4.0, - .urgent_latency_vm_data_only_us = 4.0, + .urgent_latency_pixel_data_only_us = 9.35, + .urgent_latency_pixel_mixed_with_vm_data_us = 9.35, + .urgent_latency_vm_data_only_us = 9.35, .fclk_change_latency_us = 20, .usr_retraining_latency_us = 2, .smn_latency_us = 2, @@ -538,9 +539,13 @@ void dcn321_update_bw_bounding_box_fpu(struct dc *dc, struct clk_bw_params *bw_p if (dc->ctx->dc_bios->vram_info.dram_channel_width_bytes) dcn3_21_soc.dram_channel_width_bytes = dc->ctx->dc_bios->vram_info.dram_channel_width_bytes; - } + /* DML DSC delay factor workaround */ + dcn3_21_ip.dsc_delay_factor_wa = dc->debug.dsc_delay_factor_wa_x1000 / 1000.0; + + dcn3_21_ip.min_prefetch_in_strobe_us = dc->debug.min_prefetch_in_strobe_ns / 1000.0; + /* Override dispclk_dppclk_vco_speed_mhz from Clk Mgr */ dcn3_21_soc.dispclk_dppclk_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0; dc->dml.soc.dispclk_dppclk_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0; diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h index f33a8879b05a..64d602e6412f 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h +++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h @@ -364,6 +364,10 @@ struct _vcs_dpi_ip_params_st { unsigned int max_num_dp2p0_outputs; unsigned int max_num_dp2p0_streams; unsigned int VBlankNomDefaultUS; + + /* DM workarounds */ + double dsc_delay_factor_wa; // TODO: Remove after implementing root cause fix + double min_prefetch_in_strobe_us; }; struct _vcs_dpi_display_xfc_params_st { diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c index 03924aed8d5c..8e6585dab20e 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c +++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c @@ -625,7 +625,7 @@ static void fetch_pipe_params(struct display_mode_lib *mode_lib) mode_lib->vba.skip_dio_check[mode_lib->vba.NumberOfActivePlanes] = dout->is_virtual; - if (!dout->dsc_enable) + if (dout->dsc_enable) mode_lib->vba.ForcedOutputLinkBPP[mode_lib->vba.NumberOfActivePlanes] = dout->output_bpp; else mode_lib->vba.ForcedOutputLinkBPP[mode_lib->vba.NumberOfActivePlanes] = 0.0; diff --git a/drivers/gpu/drm/amd/display/dc/gpio/dcn32/hw_factory_dcn32.c b/drivers/gpu/drm/amd/display/dc/gpio/dcn32/hw_factory_dcn32.c index d635b73af46f..0ea52ba5ac82 100644 --- a/drivers/gpu/drm/amd/display/dc/gpio/dcn32/hw_factory_dcn32.c +++ b/drivers/gpu/drm/amd/display/dc/gpio/dcn32/hw_factory_dcn32.c @@ -108,6 +108,13 @@ static const struct ddc_registers ddc_data_regs_dcn[] = { ddc_data_regs_dcn2(4), ddc_data_regs_dcn2(5), { + // add a dummy entry for cases no such port + {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,}, + .ddc_setup = 0, + .phy_aux_cntl = 0, + .dc_gpio_aux_ctrl_5 = 0 + }, + { DDC_GPIO_VGA_REG_LIST(DATA), .ddc_setup = 0, .phy_aux_cntl = 0, @@ -122,6 +129,13 @@ static const struct ddc_registers ddc_clk_regs_dcn[] = { ddc_clk_regs_dcn2(4), ddc_clk_regs_dcn2(5), { + // add a dummy entry for cases no such port + {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,}, + .ddc_setup = 0, + .phy_aux_cntl = 0, + .dc_gpio_aux_ctrl_5 = 0 + }, + { DDC_GPIO_VGA_REG_LIST(CLK), .ddc_setup = 0, .phy_aux_cntl = 0, diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_ddc.c b/drivers/gpu/drm/amd/display/dc/gpio/hw_ddc.c index 6fd38cdd68c0..525bc8881950 100644 --- a/drivers/gpu/drm/amd/display/dc/gpio/hw_ddc.c +++ 
b/drivers/gpu/drm/amd/display/dc/gpio/hw_ddc.c @@ -94,11 +94,14 @@ static enum gpio_result set_config( * is required for detection of AUX mode */ if (hw_gpio->base.en != GPIO_DDC_LINE_VIP_PAD) { if (!ddc_data_pd_en || !ddc_clk_pd_en) { - - REG_SET_2(gpio.MASK_reg, regval, + if (hw_gpio->base.en == GPIO_DDC_LINE_DDC_VGA) { + // bit 4 of mask has different usage in some cases + REG_SET(gpio.MASK_reg, regval, DC_GPIO_DDC1DATA_PD_EN, 1); + } else { + REG_SET_2(gpio.MASK_reg, regval, DC_GPIO_DDC1DATA_PD_EN, 1, DC_GPIO_DDC1CLK_PD_EN, 1); - + } if (config_data->type == GPIO_CONFIG_TYPE_I2C_AUX_DUAL_MODE) msleep(3); diff --git a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h index e85364dff4e0..5cb3e8634739 100644 --- a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h +++ b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h @@ -262,8 +262,9 @@ struct kfd2kgd_calls { uint32_t queue_id); int (*hqd_destroy)(struct amdgpu_device *adev, void *mqd, - uint32_t reset_type, unsigned int timeout, - uint32_t pipe_id, uint32_t queue_id); + enum kfd_preempt_type reset_type, + unsigned int timeout, uint32_t pipe_id, + uint32_t queue_id); bool (*hqd_sdma_is_occupied)(struct amdgpu_device *adev, void *mqd); diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c index 948cc75376f8..236657eece47 100644 --- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c @@ -3362,11 +3362,11 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev) if (adev->pm.sysfs_initialized) return 0; + INIT_LIST_HEAD(&adev->pm.pm_attr_list); + if (adev->pm.dpm_enabled == 0) return 0; - INIT_LIST_HEAD(&adev->pm.pm_attr_list); - adev->pm.int_hwmon_dev = hwmon_device_register_with_groups(adev->dev, DRIVER_NAME, adev, hwmon_groups); diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c index 13c5c7f1ecb9..b880f4d7d67e 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c +++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c @@ -1156,22 +1156,21 @@ static int smu_smc_hw_setup(struct smu_context *smu) uint64_t features_supported; int ret = 0; - if (adev->in_suspend && smu_is_dpm_running(smu)) { - dev_info(adev->dev, "dpm has been enabled\n"); - /* this is needed specifically */ - switch (adev->ip_versions[MP1_HWIP][0]) { - case IP_VERSION(11, 0, 7): - case IP_VERSION(11, 0, 11): - case IP_VERSION(11, 5, 0): - case IP_VERSION(11, 0, 12): + switch (adev->ip_versions[MP1_HWIP][0]) { + case IP_VERSION(11, 0, 7): + case IP_VERSION(11, 0, 11): + case IP_VERSION(11, 5, 0): + case IP_VERSION(11, 0, 12): + if (adev->in_suspend && smu_is_dpm_running(smu)) { + dev_info(adev->dev, "dpm has been enabled\n"); ret = smu_system_features_control(smu, true); if (ret) dev_err(adev->dev, "Failed system features control!\n"); - break; - default: - break; + return ret; } - return ret; + break; + default: + break; } ret = smu_init_display_count(smu, 0); @@ -1314,8 +1313,8 @@ static int smu_smc_hw_setup(struct smu_context *smu) ret = smu_enable_thermal_alert(smu); if (ret) { - dev_err(adev->dev, "Failed to enable thermal alert!\n"); - return ret; + dev_err(adev->dev, "Failed to enable thermal alert!\n"); + return ret; } ret = smu_notify_display_change(smu); diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h index e2fa3b066b96..f816b1dd110e 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h @@ -1388,6 +1388,14 @@ enum 
smu_cmn2asic_mapping_type { CMN2ASIC_MAPPING_WORKLOAD, }; +enum smu_baco_seq { + BACO_SEQ_BACO = 0, + BACO_SEQ_MSR, + BACO_SEQ_BAMACO, + BACO_SEQ_ULPS, + BACO_SEQ_COUNT, +}; + #define MSG_MAP(msg, index, valid_in_vf) \ [SMU_MSG_##msg] = {1, (index), (valid_in_vf)} diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_0.h index 063f4a737605..b76f0f7e4299 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_0.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_0.h @@ -25,7 +25,7 @@ #define SMU13_DRIVER_IF_V13_0_0_H //Increment this version if SkuTable_t or BoardTable_t change -#define PPTABLE_VERSION 0x24 +#define PPTABLE_VERSION 0x26 #define NUM_GFXCLK_DPM_LEVELS 16 #define NUM_SOCCLK_DPM_LEVELS 8 @@ -109,6 +109,22 @@ #define FEATURE_SPARE_63_BIT 63 #define NUM_FEATURES 64 +#define ALLOWED_FEATURE_CTRL_DEFAULT 0xFFFFFFFFFFFFFFFFULL +#define ALLOWED_FEATURE_CTRL_SCPM ((1 << FEATURE_DPM_GFXCLK_BIT) | \ + (1 << FEATURE_DPM_GFX_POWER_OPTIMIZER_BIT) | \ + (1 << FEATURE_DPM_UCLK_BIT) | \ + (1 << FEATURE_DPM_FCLK_BIT) | \ + (1 << FEATURE_DPM_SOCCLK_BIT) | \ + (1 << FEATURE_DPM_MP0CLK_BIT) | \ + (1 << FEATURE_DPM_LINK_BIT) | \ + (1 << FEATURE_DPM_DCN_BIT) | \ + (1 << FEATURE_DS_GFXCLK_BIT) | \ + (1 << FEATURE_DS_SOCCLK_BIT) | \ + (1 << FEATURE_DS_FCLK_BIT) | \ + (1 << FEATURE_DS_LCLK_BIT) | \ + (1 << FEATURE_DS_DCFCLK_BIT) | \ + (1 << FEATURE_DS_UCLK_BIT)) + //For use with feature control messages typedef enum { FEATURE_PWR_ALL, @@ -133,6 +149,7 @@ typedef enum { #define DEBUG_OVERRIDE_DISABLE_DFLL 0x00000200 #define DEBUG_OVERRIDE_ENABLE_RLC_VF_BRINGUP_MODE 0x00000400 #define DEBUG_OVERRIDE_DFLL_MASTER_MODE 0x00000800 +#define DEBUG_OVERRIDE_ENABLE_PROFILING_MODE 0x00001000 // VR Mapping Bit Defines #define VR_MAPPING_VR_SELECT_MASK 0x01 @@ -262,15 +279,15 @@ typedef enum { } I2cControllerPort_e; typedef enum { - I2C_CONTROLLER_NAME_VR_GFX = 0, - I2C_CONTROLLER_NAME_VR_SOC, - I2C_CONTROLLER_NAME_VR_VMEMP, - I2C_CONTROLLER_NAME_VR_VDDIO, - I2C_CONTROLLER_NAME_LIQUID0, - I2C_CONTROLLER_NAME_LIQUID1, - I2C_CONTROLLER_NAME_PLX, - I2C_CONTROLLER_NAME_OTHER, - I2C_CONTROLLER_NAME_COUNT, + I2C_CONTROLLER_NAME_VR_GFX = 0, + I2C_CONTROLLER_NAME_VR_SOC, + I2C_CONTROLLER_NAME_VR_VMEMP, + I2C_CONTROLLER_NAME_VR_VDDIO, + I2C_CONTROLLER_NAME_LIQUID0, + I2C_CONTROLLER_NAME_LIQUID1, + I2C_CONTROLLER_NAME_PLX, + I2C_CONTROLLER_NAME_FAN_INTAKE, + I2C_CONTROLLER_NAME_COUNT, } I2cControllerName_e; typedef enum { @@ -282,16 +299,17 @@ typedef enum { I2C_CONTROLLER_THROTTLER_LIQUID0, I2C_CONTROLLER_THROTTLER_LIQUID1, I2C_CONTROLLER_THROTTLER_PLX, + I2C_CONTROLLER_THROTTLER_FAN_INTAKE, I2C_CONTROLLER_THROTTLER_INA3221, I2C_CONTROLLER_THROTTLER_COUNT, } I2cControllerThrottler_e; typedef enum { - I2C_CONTROLLER_PROTOCOL_VR_XPDE132G5, - I2C_CONTROLLER_PROTOCOL_VR_IR35217, - I2C_CONTROLLER_PROTOCOL_TMP_TMP102A, - I2C_CONTROLLER_PROTOCOL_INA3221, - I2C_CONTROLLER_PROTOCOL_COUNT, + I2C_CONTROLLER_PROTOCOL_VR_XPDE132G5, + I2C_CONTROLLER_PROTOCOL_VR_IR35217, + I2C_CONTROLLER_PROTOCOL_TMP_MAX31875, + I2C_CONTROLLER_PROTOCOL_INA3221, + I2C_CONTROLLER_PROTOCOL_COUNT, } I2cControllerProtocol_e; typedef struct { @@ -658,13 +676,20 @@ typedef struct { #define PP_NUM_OD_VF_CURVE_POINTS PP_NUM_RTAVFS_PWL_ZONES + 1 +typedef enum { + FAN_MODE_AUTO = 0, + FAN_MODE_MANUAL_LINEAR, +} FanMode_e; typedef struct { uint32_t FeatureCtrlMask; //Voltage control int16_t 
VoltageOffsetPerZoneBoundary[PP_NUM_OD_VF_CURVE_POINTS]; - uint16_t reserved[2]; + uint16_t VddGfxVmax; // in mV + + uint8_t IdlePwrSavingFeaturesCtrl; + uint8_t RuntimePwrSavingFeaturesCtrl; //Frequency changes int16_t GfxclkFmin; // MHz @@ -674,7 +699,7 @@ typedef struct { //PPT int16_t Ppt; // % - int16_t reserved1; + int16_t Tdc; //Fan control uint8_t FanLinearPwmPoints[NUM_OD_FAN_MAX_POINTS]; @@ -701,16 +726,19 @@ typedef struct { uint32_t FeatureCtrlMask; int16_t VoltageOffsetPerZoneBoundary; - uint16_t reserved[2]; + uint16_t VddGfxVmax; // in mV + + uint8_t IdlePwrSavingFeaturesCtrl; + uint8_t RuntimePwrSavingFeaturesCtrl; - uint16_t GfxclkFmin; // MHz - uint16_t GfxclkFmax; // MHz + int16_t GfxclkFmin; // MHz + int16_t GfxclkFmax; // MHz uint16_t UclkFmin; // MHz uint16_t UclkFmax; // MHz //PPT int16_t Ppt; // % - int16_t reserved1; + int16_t Tdc; uint8_t FanLinearPwmPoints; uint8_t FanLinearTempPoints; @@ -857,7 +885,8 @@ typedef struct { uint16_t FanStartTempMin; uint16_t FanStartTempMax; - uint32_t Spare[12]; + uint16_t PowerMinPpt0[POWER_SOURCE_COUNT]; + uint32_t Spare[11]; } MsgLimits_t; @@ -1041,7 +1070,17 @@ typedef struct { uint32_t GfxoffSpare[15]; // GFX GPO - uint32_t GfxGpoSpare[16]; + uint32_t DfllBtcMasterScalerM; + int32_t DfllBtcMasterScalerB; + uint32_t DfllBtcSlaveScalerM; + int32_t DfllBtcSlaveScalerB; + + uint32_t DfllPccAsWaitCtrl; //GDFLL_AS_WAIT_CTRL_PCC register value to be passed to RLC msg + uint32_t DfllPccAsStepCtrl; //GDFLL_AS_STEP_CTRL_PCC register value to be passed to RLC msg + + uint32_t DfllL2FrequencyBoostM; //Unitless (float) + uint32_t DfllL2FrequencyBoostB; //In MHz (integer) + uint32_t GfxGpoSpare[8]; // GFX DCS @@ -1114,12 +1153,14 @@ typedef struct { uint16_t IntakeTempHighIntakeAcousticLimit; uint16_t IntakeTempAcouticLimitReleaseRate; - uint16_t FanStalledTempLimitOffset; + int16_t FanAbnormalTempLimitOffset; uint16_t FanStalledTriggerRpm; - uint16_t FanAbnormalTriggerRpm; - uint16_t FanPadding; + uint16_t FanAbnormalTriggerRpmCoeff; + uint16_t FanAbnormalDetectionEnable; - uint32_t FanSpare[14]; + uint8_t FanIntakeSensorSupport; + uint8_t FanIntakePadding[3]; + uint32_t FanSpare[13]; // SECTION: VDD_GFX AVFS @@ -1198,8 +1239,13 @@ typedef struct { int16_t TotalBoardPowerM; int16_t TotalBoardPowerB; + //PMFW-11158 + QuadraticInt_t qFeffCoeffGameClock[POWER_SOURCE_COUNT]; + QuadraticInt_t qFeffCoeffBaseClock[POWER_SOURCE_COUNT]; + QuadraticInt_t qFeffCoeffBoostClock[POWER_SOURCE_COUNT]; + // SECTION: Sku Reserved - uint32_t Spare[61]; + uint32_t Spare[43]; // Padding for MMHUB - do not modify this uint32_t MmHubPadding[8]; @@ -1288,8 +1334,11 @@ typedef struct { uint32_t PostVoltageSetBacoDelay; // in microseconds. Amount of time FW will wait after power good is established or PSI0 command is issued uint32_t BacoEntryDelay; // in milliseconds. 
Amount of time FW will wait to trigger BACO entry after receiving entry notification from OS + uint8_t FuseWritePowerMuxPresent; + uint8_t FuseWritePadding[3]; + // SECTION: Board Reserved - uint32_t BoardSpare[64]; + uint32_t BoardSpare[63]; // SECTION: Structure Padding @@ -1381,7 +1430,7 @@ typedef struct { uint16_t AverageTotalBoardPower; uint16_t AvgTemperature[TEMP_COUNT]; - uint16_t TempPadding; + uint16_t AvgTemperatureFanIntake; uint8_t PcieRate ; uint8_t PcieWidth ; @@ -1550,5 +1599,7 @@ typedef struct { #define IH_INTERRUPT_CONTEXT_ID_AUDIO_D0 0x5 #define IH_INTERRUPT_CONTEXT_ID_AUDIO_D3 0x6 #define IH_INTERRUPT_CONTEXT_ID_THERMAL_THROTTLING 0x7 +#define IH_INTERRUPT_CONTEXT_ID_FAN_ABNORMAL 0x8 +#define IH_INTERRUPT_CONTEXT_ID_FAN_RECOVERY 0x9 #endif diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_4.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_4.h index ae2d337158f3..f77401709d83 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_4.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_4.h @@ -27,7 +27,7 @@ // *** IMPORTANT *** // SMU TEAM: Always increment the interface version if // any structure is changed in this file -#define PMFW_DRIVER_IF_VERSION 5 +#define PMFW_DRIVER_IF_VERSION 7 typedef struct { int32_t value; @@ -163,8 +163,8 @@ typedef struct { uint16_t DclkFrequency; //[MHz] uint16_t MemclkFrequency; //[MHz] uint16_t spare; //[centi] - uint16_t UvdActivity; //[centi] uint16_t GfxActivity; //[centi] + uint16_t UvdActivity; //[centi] uint16_t Voltage[2]; //[mV] indices: VDDCR_VDD, VDDCR_SOC uint16_t Current[2]; //[mA] indices: VDDCR_VDD, VDDCR_SOC @@ -199,6 +199,19 @@ typedef struct { uint16_t DeviceState; uint16_t CurTemp; //[centi-Celsius] uint16_t spare2; + + uint16_t AverageGfxclkFrequency; + uint16_t AverageFclkFrequency; + uint16_t AverageGfxActivity; + uint16_t AverageSocclkFrequency; + uint16_t AverageVclkFrequency; + uint16_t AverageVcnActivity; + uint16_t AverageDRAMReads; //Filtered DF Bandwidth::DRAM Reads + uint16_t AverageDRAMWrites; //Filtered DF Bandwidth::DRAM Writes + uint16_t AverageSocketPower; //Filtered value of CurrentSocketPower + uint16_t AverageCorePower; //Filtered of [sum of CorePower[8]]) + uint16_t AverageCoreC0Residency[8]; //Filtered of [average C0 residency % per core] + uint32_t MetricsCounter; //Counts the # of metrics table parameter reads per update to the metrics table, i.e. if the metrics table update happens every 1 second, this value could be up to 1000 if the smu collected metrics data every cycle, or as low as 0 if the smu was asleep the whole time. Reset to 0 after writing. } SmuMetrics_t; typedef struct { diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_4_ppsmc.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_4_ppsmc.h index d9b0cd752200..f4d6c07b56ea 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_4_ppsmc.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_4_ppsmc.h @@ -54,14 +54,14 @@ #define PPSMC_MSG_TestMessage 0x01 ///< To check if PMFW is alive and responding. 
Requirement specified by PMFW team #define PPSMC_MSG_GetPmfwVersion 0x02 ///< Get PMFW version #define PPSMC_MSG_GetDriverIfVersion 0x03 ///< Get PMFW_DRIVER_IF version -#define PPSMC_MSG_EnableGfxOff 0x04 ///< Enable GFXOFF -#define PPSMC_MSG_DisableGfxOff 0x05 ///< Disable GFXOFF +#define PPSMC_MSG_SPARE0 0x04 ///< SPARE +#define PPSMC_MSG_SPARE1 0x05 ///< SPARE #define PPSMC_MSG_PowerDownVcn 0x06 ///< Power down VCN #define PPSMC_MSG_PowerUpVcn 0x07 ///< Power up VCN; VCN is power gated by default #define PPSMC_MSG_SetHardMinVcn 0x08 ///< For wireless display #define PPSMC_MSG_SetSoftMinGfxclk 0x09 ///< Set SoftMin for GFXCLK, argument is frequency in MHz -#define PPSMC_MSG_ActiveProcessNotify 0x0A ///< Needs update -#define PPSMC_MSG_ForcePowerDownGfx 0x0B ///< Force power down GFX, i.e. enter GFXOFF +#define PPSMC_MSG_SPARE2 0x0A ///< SPARE +#define PPSMC_MSG_SPARE3 0x0B ///< SPARE #define PPSMC_MSG_PrepareMp1ForUnload 0x0C ///< Prepare PMFW for GFX driver unload #define PPSMC_MSG_SetDriverDramAddrHigh 0x0D ///< Set high 32 bits of DRAM address for Driver table transfer #define PPSMC_MSG_SetDriverDramAddrLow 0x0E ///< Set low 32 bits of DRAM address for Driver table transfer @@ -73,8 +73,7 @@ #define PPSMC_MSG_SetSoftMinFclk 0x14 ///< Set hard min for FCLK #define PPSMC_MSG_SetSoftMinVcn 0x15 ///< Set soft min for VCN clocks (VCLK and DCLK) - -#define PPSMC_MSG_EnableGfxImu 0x16 ///< Needs update +#define PPSMC_MSG_EnableGfxImu 0x16 ///< Enable GFX IMU #define PPSMC_MSG_GetGfxclkFrequency 0x17 ///< Get GFX clock frequency #define PPSMC_MSG_GetFclkFrequency 0x18 ///< Get FCLK frequency @@ -102,8 +101,8 @@ #define PPSMC_MSG_SetHardMinIspxclkByFreq 0x2C ///< Set HardMin by frequency for ISPXCLK #define PPSMC_MSG_PowerDownUmsch 0x2D ///< Power down VCN.UMSCH (aka VSCH) scheduler #define PPSMC_MSG_PowerUpUmsch 0x2E ///< Power up VCN.UMSCH (aka VSCH) scheduler -#define PPSMC_Message_IspStutterOn_MmhubPgDis 0x2F ///< ISP StutterOn mmHub PgDis -#define PPSMC_Message_IspStutterOff_MmhubPgEn 0x30 ///< ISP StufferOff mmHub PgEn +#define PPSMC_MSG_IspStutterOn_MmhubPgDis 0x2F ///< ISP StutterOn mmHub PgDis +#define PPSMC_MSG_IspStutterOff_MmhubPgEn 0x30 ///< ISP StufferOff mmHub PgEn #define PPSMC_Message_Count 0x31 ///< Total number of PPSMC messages /** @}*/ diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0.h index a9215494dcdd..d466db6f0ad4 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0.h @@ -147,14 +147,6 @@ struct smu_11_5_power_context { uint32_t max_fast_ppt_limit; }; -enum smu_v11_0_baco_seq { - BACO_SEQ_BACO = 0, - BACO_SEQ_MSR, - BACO_SEQ_BAMACO, - BACO_SEQ_ULPS, - BACO_SEQ_COUNT, -}; - #if defined(SWSMU_CODE_LAYER_L2) || defined(SWSMU_CODE_LAYER_L3) int smu_v11_0_init_microcode(struct smu_context *smu); @@ -257,7 +249,7 @@ int smu_v11_0_baco_enter(struct smu_context *smu); int smu_v11_0_baco_exit(struct smu_context *smu); int smu_v11_0_baco_set_armd3_sequence(struct smu_context *smu, - enum smu_v11_0_baco_seq baco_seq); + enum smu_baco_seq baco_seq); int smu_v11_0_mode1_reset(struct smu_context *smu); diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h index 9d62ea2af132..b7f4569aff2a 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h @@ -28,9 +28,9 @@ #define SMU13_DRIVER_IF_VERSION_INV 0xFFFFFFFF #define SMU13_DRIVER_IF_VERSION_YELLOW_CARP 0x04 #define 
SMU13_DRIVER_IF_VERSION_ALDE 0x08 -#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_4 0x05 +#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_4 0x07 #define SMU13_DRIVER_IF_VERSION_SMU_V13_0_5 0x04 -#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_0 0x30 +#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_0_10 0x32 #define SMU13_DRIVER_IF_VERSION_SMU_V13_0_7 0x2C #define SMU13_DRIVER_IF_VERSION_SMU_V13_0_10 0x1D @@ -124,14 +124,6 @@ struct smu_13_0_power_context { enum smu_13_0_power_state power_state; }; -enum smu_v13_0_baco_seq { - BACO_SEQ_BACO = 0, - BACO_SEQ_MSR, - BACO_SEQ_BAMACO, - BACO_SEQ_ULPS, - BACO_SEQ_COUNT, -}; - #if defined(SWSMU_CODE_LAYER_L2) || defined(SWSMU_CODE_LAYER_L3) int smu_v13_0_init_microcode(struct smu_context *smu); @@ -218,6 +210,9 @@ int smu_v13_0_set_azalia_d3_pme(struct smu_context *smu); int smu_v13_0_get_max_sustainable_clocks_by_dc(struct smu_context *smu, struct pp_smu_nv_clock_table *max_clocks); +int smu_v13_0_baco_set_armd3_sequence(struct smu_context *smu, + enum smu_baco_seq baco_seq); + bool smu_v13_0_baco_is_support(struct smu_context *smu); enum smu_baco_state smu_v13_0_baco_get_state(struct smu_context *smu); diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c index 445005571f76..9cd005131f56 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c @@ -2242,9 +2242,17 @@ static void arcturus_get_unique_id(struct smu_context *smu) static int arcturus_set_df_cstate(struct smu_context *smu, enum pp_df_cstate state) { + struct amdgpu_device *adev = smu->adev; uint32_t smu_version; int ret; + /* + * Arcturus does not need the cstate disablement + * prerequisite for gpu reset. + */ + if (amdgpu_in_reset(adev) || adev->in_suspend) + return 0; + ret = smu_cmn_get_smc_version(smu, NULL, &smu_version); if (ret) { dev_err(smu->adev->dev, "Failed to get smu version!\n"); diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c index 74996a8fb671..697e98a0a20a 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c @@ -377,7 +377,13 @@ static void sienna_cichlid_check_bxco_support(struct smu_context *smu) if (((adev->pdev->device == 0x73A1) && (adev->pdev->revision == 0x00)) || ((adev->pdev->device == 0x73BF) && - (adev->pdev->revision == 0xCF))) + (adev->pdev->revision == 0xCF)) || + ((adev->pdev->device == 0x7422) && + (adev->pdev->revision == 0x00)) || + ((adev->pdev->device == 0x73A3) && + (adev->pdev->revision == 0x00)) || + ((adev->pdev->device == 0x73E3) && + (adev->pdev->revision == 0x00))) smu_baco->platform_support = false; } diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c index dccbd9f70723..70b560737687 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c @@ -1576,7 +1576,7 @@ int smu_v11_0_set_azalia_d3_pme(struct smu_context *smu) } int smu_v11_0_baco_set_armd3_sequence(struct smu_context *smu, - enum smu_v11_0_baco_seq baco_seq) + enum smu_baco_seq baco_seq) { return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_ArmD3, baco_seq, NULL); } diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c index 619aee51b123..d30ec3005ea1 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c +++ 
b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c @@ -1640,6 +1640,15 @@ static bool aldebaran_is_baco_supported(struct smu_context *smu) static int aldebaran_set_df_cstate(struct smu_context *smu, enum pp_df_cstate state) { + struct amdgpu_device *adev = smu->adev; + + /* + * Aldebaran does not need the cstate disablement + * prerequisite for gpu reset. + */ + if (amdgpu_in_reset(adev) || adev->in_suspend) + return 0; + return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_DFCstateControl, state, NULL); } diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c index 93fffdbab4f0..89f0f6eb19f3 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c @@ -211,7 +211,8 @@ int smu_v13_0_init_pptable_microcode(struct smu_context *smu) return 0; if ((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 7)) || - (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 0))) + (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 0)) || + (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 10))) return 0; /* override pptable_id from driver parameter */ @@ -288,7 +289,8 @@ int smu_v13_0_check_fw_version(struct smu_context *smu) smu->smc_driver_if_version = SMU13_DRIVER_IF_VERSION_ALDE; break; case IP_VERSION(13, 0, 0): - smu->smc_driver_if_version = SMU13_DRIVER_IF_VERSION_SMU_V13_0_0; + case IP_VERSION(13, 0, 10): + smu->smc_driver_if_version = SMU13_DRIVER_IF_VERSION_SMU_V13_0_0_10; break; case IP_VERSION(13, 0, 7): smu->smc_driver_if_version = SMU13_DRIVER_IF_VERSION_SMU_V13_0_7; @@ -304,9 +306,6 @@ int smu_v13_0_check_fw_version(struct smu_context *smu) case IP_VERSION(13, 0, 5): smu->smc_driver_if_version = SMU13_DRIVER_IF_VERSION_SMU_V13_0_5; break; - case IP_VERSION(13, 0, 10): - smu->smc_driver_if_version = SMU13_DRIVER_IF_VERSION_SMU_V13_0_10; - break; default: dev_err(adev->dev, "smu unsupported IP version: 0x%x.\n", adev->ip_versions[MP1_HWIP][0]); @@ -454,9 +453,6 @@ int smu_v13_0_setup_pptable(struct smu_context *smu) dev_info(adev->dev, "override pptable id %d\n", pptable_id); } else { pptable_id = smu->smu_table.boot_values.pp_table_id; - - if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 10)) - pptable_id = 6666; } /* force using vbios pptable in sriov mode */ @@ -844,6 +840,7 @@ int smu_v13_0_gfx_off_control(struct smu_context *smu, bool enable) case IP_VERSION(13, 0, 5): case IP_VERSION(13, 0, 7): case IP_VERSION(13, 0, 8): + case IP_VERSION(13, 0, 10): if (!(adev->pm.pp_feature & PP_GFXOFF_MASK)) return 0; if (enable) @@ -2233,6 +2230,15 @@ int smu_v13_0_gfx_ulv_control(struct smu_context *smu, return ret; } +int smu_v13_0_baco_set_armd3_sequence(struct smu_context *smu, + enum smu_baco_seq baco_seq) +{ + return smu_cmn_send_smc_msg_with_param(smu, + SMU_MSG_ArmD3, + baco_seq, + NULL); +} + bool smu_v13_0_baco_is_support(struct smu_context *smu) { struct smu_baco_context *smu_baco = &smu->smu_baco; diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c index 1d454485e0d9..f0121d171630 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c @@ -119,6 +119,8 @@ static struct cmn2asic_msg_mapping smu_v13_0_0_message_map[SMU_MSG_MAX_COUNT] = MSG_MAP(NotifyPowerSource, PPSMC_MSG_NotifyPowerSource, 0), MSG_MAP(Mode1Reset, PPSMC_MSG_Mode1Reset, 0), MSG_MAP(PrepareMp1ForUnload, PPSMC_MSG_PrepareMp1ForUnload, 0), + MSG_MAP(DFCstateControl, 
PPSMC_MSG_SetExternalClientDfCstateAllow, 0), + MSG_MAP(ArmD3, PPSMC_MSG_ArmD3, 0), }; static struct cmn2asic_mapping smu_v13_0_0_clk_map[SMU_CLK_COUNT] = { @@ -1565,6 +1567,31 @@ static int smu_v13_0_0_set_power_profile_mode(struct smu_context *smu, NULL); } +static int smu_v13_0_0_baco_enter(struct smu_context *smu) +{ + struct smu_baco_context *smu_baco = &smu->smu_baco; + struct amdgpu_device *adev = smu->adev; + + if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev)) + return smu_v13_0_baco_set_armd3_sequence(smu, + smu_baco->maco_support ? BACO_SEQ_BAMACO : BACO_SEQ_BACO); + else + return smu_v13_0_baco_enter(smu); +} + +static int smu_v13_0_0_baco_exit(struct smu_context *smu) +{ + struct amdgpu_device *adev = smu->adev; + + if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev)) { + /* Wait for PMFW handling for the Dstate change */ + usleep_range(10000, 11000); + return smu_v13_0_baco_set_armd3_sequence(smu, BACO_SEQ_ULPS); + } else { + return smu_v13_0_baco_exit(smu); + } +} + static bool smu_v13_0_0_is_mode1_reset_supported(struct smu_context *smu) { struct amdgpu_device *adev = smu->adev; @@ -1753,6 +1780,15 @@ static int smu_v13_0_0_set_mp1_state(struct smu_context *smu, return ret; } +static int smu_v13_0_0_set_df_cstate(struct smu_context *smu, + enum pp_df_cstate state) +{ + return smu_cmn_send_smc_msg_with_param(smu, + SMU_MSG_DFCstateControl, + state, + NULL); +} + static const struct pptable_funcs smu_v13_0_0_ppt_funcs = { .get_allowed_feature_mask = smu_v13_0_0_get_allowed_feature_mask, .set_default_dpm_table = smu_v13_0_0_set_default_dpm_table, @@ -1817,11 +1853,12 @@ static const struct pptable_funcs smu_v13_0_0_ppt_funcs = { .baco_is_support = smu_v13_0_baco_is_support, .baco_get_state = smu_v13_0_baco_get_state, .baco_set_state = smu_v13_0_baco_set_state, - .baco_enter = smu_v13_0_baco_enter, - .baco_exit = smu_v13_0_baco_exit, + .baco_enter = smu_v13_0_0_baco_enter, + .baco_exit = smu_v13_0_0_baco_exit, .mode1_reset_is_support = smu_v13_0_0_is_mode1_reset_supported, .mode1_reset = smu_v13_0_mode1_reset, .set_mp1_state = smu_v13_0_0_set_mp1_state, + .set_df_cstate = smu_v13_0_0_set_df_cstate, }; void smu_v13_0_0_set_ppt_funcs(struct smu_context *smu) diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c index c422bf8a09b1..d74debc584f8 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c @@ -121,6 +121,8 @@ static struct cmn2asic_msg_mapping smu_v13_0_7_message_map[SMU_MSG_MAX_COUNT] = MSG_MAP(Mode1Reset, PPSMC_MSG_Mode1Reset, 0), MSG_MAP(PrepareMp1ForUnload, PPSMC_MSG_PrepareMp1ForUnload, 0), MSG_MAP(SetMGpuFanBoostLimitRpm, PPSMC_MSG_SetMGpuFanBoostLimitRpm, 0), + MSG_MAP(DFCstateControl, PPSMC_MSG_SetExternalClientDfCstateAllow, 0), + MSG_MAP(ArmD3, PPSMC_MSG_ArmD3, 0), }; static struct cmn2asic_mapping smu_v13_0_7_clk_map[SMU_CLK_COUNT] = { @@ -1577,6 +1579,31 @@ static int smu_v13_0_7_set_mp1_state(struct smu_context *smu, return ret; } +static int smu_v13_0_7_baco_enter(struct smu_context *smu) +{ + struct smu_baco_context *smu_baco = &smu->smu_baco; + struct amdgpu_device *adev = smu->adev; + + if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev)) + return smu_v13_0_baco_set_armd3_sequence(smu, + smu_baco->maco_support ? 
BACO_SEQ_BAMACO : BACO_SEQ_BACO); + else + return smu_v13_0_baco_enter(smu); +} + +static int smu_v13_0_7_baco_exit(struct smu_context *smu) +{ + struct amdgpu_device *adev = smu->adev; + + if (adev->in_runpm && smu_cmn_is_audio_func_enabled(adev)) { + /* Wait for PMFW handling for the Dstate change */ + usleep_range(10000, 11000); + return smu_v13_0_baco_set_armd3_sequence(smu, BACO_SEQ_ULPS); + } else { + return smu_v13_0_baco_exit(smu); + } +} + static bool smu_v13_0_7_is_mode1_reset_supported(struct smu_context *smu) { struct amdgpu_device *adev = smu->adev; @@ -1587,6 +1614,16 @@ static bool smu_v13_0_7_is_mode1_reset_supported(struct smu_context *smu) return true; } + +static int smu_v13_0_7_set_df_cstate(struct smu_context *smu, + enum pp_df_cstate state) +{ + return smu_cmn_send_smc_msg_with_param(smu, + SMU_MSG_DFCstateControl, + state, + NULL); +} + static const struct pptable_funcs smu_v13_0_7_ppt_funcs = { .get_allowed_feature_mask = smu_v13_0_7_get_allowed_feature_mask, .set_default_dpm_table = smu_v13_0_7_set_default_dpm_table, @@ -1644,11 +1681,12 @@ static const struct pptable_funcs smu_v13_0_7_ppt_funcs = { .baco_is_support = smu_v13_0_baco_is_support, .baco_get_state = smu_v13_0_baco_get_state, .baco_set_state = smu_v13_0_baco_set_state, - .baco_enter = smu_v13_0_baco_enter, - .baco_exit = smu_v13_0_baco_exit, + .baco_enter = smu_v13_0_7_baco_enter, + .baco_exit = smu_v13_0_7_baco_exit, .mode1_reset_is_support = smu_v13_0_7_is_mode1_reset_supported, .mode1_reset = smu_v13_0_mode1_reset, .set_mp1_state = smu_v13_0_7_set_mp1_state, + .set_df_cstate = smu_v13_0_7_set_df_cstate, }; void smu_v13_0_7_set_ppt_funcs(struct smu_context *smu) diff --git a/drivers/gpu/drm/bridge/parade-ps8640.c b/drivers/gpu/drm/bridge/parade-ps8640.c index d7483c13c569..083337a27966 100644 --- a/drivers/gpu/drm/bridge/parade-ps8640.c +++ b/drivers/gpu/drm/bridge/parade-ps8640.c @@ -105,6 +105,7 @@ struct ps8640 { struct gpio_desc *gpio_powerdown; struct device_link *link; bool pre_enabled; + bool need_post_hpd_delay; }; static const struct regmap_config ps8640_regmap_config[] = { @@ -173,14 +174,31 @@ static int _ps8640_wait_hpd_asserted(struct ps8640 *ps_bridge, unsigned long wai { struct regmap *map = ps_bridge->regmap[PAGE2_TOP_CNTL]; int status; + int ret; /* * Apparently something about the firmware in the chip signals that * HPD goes high by reporting GPIO9 as high (even though HPD isn't * actually connected to GPIO9). */ - return regmap_read_poll_timeout(map, PAGE2_GPIO_H, status, - status & PS_GPIO9, wait_us / 10, wait_us); + ret = regmap_read_poll_timeout(map, PAGE2_GPIO_H, status, + status & PS_GPIO9, wait_us / 10, wait_us); + + /* + * The first time we see HPD go high after a reset we delay an extra + * 50 ms. The best guess is that the MCU is doing "stuff" during this + * time (maybe talking to the panel) and we don't want to interrupt it. + * + * No locking is done around "need_post_hpd_delay". If we're here we + * know we're holding a PM Runtime reference and the only other place + * that touches this is PM Runtime resume. 
+ */ + if (!ret && ps_bridge->need_post_hpd_delay) { + ps_bridge->need_post_hpd_delay = false; + msleep(50); + } + + return ret; } static int ps8640_wait_hpd_asserted(struct drm_dp_aux *aux, unsigned long wait_us) @@ -381,6 +399,9 @@ static int __maybe_unused ps8640_resume(struct device *dev) msleep(50); gpiod_set_value(ps_bridge->gpio_reset, 0); + /* We just reset things, so we need a delay after the first HPD */ + ps_bridge->need_post_hpd_delay = true; + /* * Mystery 200 ms delay for the "MCU to be ready". It's unclear if * this is truly necessary since the MCU will already signal that diff --git a/drivers/gpu/drm/display/drm_dp_dual_mode_helper.c b/drivers/gpu/drm/display/drm_dp_dual_mode_helper.c index 3ea53bb67d3b..bd61e20770a5 100644 --- a/drivers/gpu/drm/display/drm_dp_dual_mode_helper.c +++ b/drivers/gpu/drm/display/drm_dp_dual_mode_helper.c @@ -63,23 +63,45 @@ ssize_t drm_dp_dual_mode_read(struct i2c_adapter *adapter, u8 offset, void *buffer, size_t size) { + u8 zero = 0; + char *tmpbuf = NULL; + /* + * As sub-addressing is not supported by all adaptors, + * always explicitly read from the start and discard + * any bytes that come before the requested offset. + * This way, no matter whether the adaptor supports it + * or not, we'll end up reading the proper data. + */ struct i2c_msg msgs[] = { { .addr = DP_DUAL_MODE_SLAVE_ADDRESS, .flags = 0, .len = 1, - .buf = &offset, + .buf = &zero, }, { .addr = DP_DUAL_MODE_SLAVE_ADDRESS, .flags = I2C_M_RD, - .len = size, + .len = size + offset, .buf = buffer, }, }; int ret; + if (offset) { + tmpbuf = kmalloc(size + offset, GFP_KERNEL); + if (!tmpbuf) + return -ENOMEM; + + msgs[1].buf = tmpbuf; + } + ret = i2c_transfer(adapter, msgs, ARRAY_SIZE(msgs)); + if (tmpbuf) + memcpy(buffer, tmpbuf + offset, size); + + kfree(tmpbuf); + if (ret < 0) return ret; if (ret != ARRAY_SIZE(msgs)) @@ -208,18 +230,6 @@ enum drm_dp_dual_mode_type drm_dp_dual_mode_detect(const struct drm_device *dev, if (ret) return DRM_DP_DUAL_MODE_UNKNOWN; - /* - * Sigh. Some (maybe all?) type 1 adaptors are broken and ack - * the offset but ignore it, and instead they just always return - * data from the start of the HDMI ID buffer. So for a broken - * type 1 HDMI adaptor a single byte read will always give us - * 0x44, and for a type 1 DVI adaptor it should give 0x00 - * (assuming it implements any registers). Fortunately neither - * of those values will match the type 2 signature of the - * DP_DUAL_MODE_ADAPTOR_ID register so we can proceed with - * the type 2 adaptor detection safely even in the presence - * of broken type 1 adaptors. - */ ret = drm_dp_dual_mode_read(adapter, DP_DUAL_MODE_ADAPTOR_ID, &adaptor_id, sizeof(adaptor_id)); drm_dbg_kms(dev, "DP dual mode adaptor ID: %02x (err %zd)\n", adaptor_id, ret); @@ -233,11 +243,10 @@ enum drm_dp_dual_mode_type drm_dp_dual_mode_detect(const struct drm_device *dev, return DRM_DP_DUAL_MODE_TYPE2_DVI; } /* - * If neither a proper type 1 ID nor a broken type 1 adaptor - * as described above, assume type 1, but let the user know - * that we may have misdetected the type. + * If not a proper type 1 ID, still assume type 1, but let + * the user know that we may have misdetected the type. 
*/ - if (!is_type1_adaptor(adaptor_id) && adaptor_id != hdmi_id[0]) + if (!is_type1_adaptor(adaptor_id)) drm_err(dev, "Unexpected DP dual mode adaptor ID %02x\n", adaptor_id); } @@ -343,10 +352,8 @@ EXPORT_SYMBOL(drm_dp_dual_mode_get_tmds_output); * @enable: enable (as opposed to disable) the TMDS output buffers * * Set the state of the TMDS output buffers in the adaptor. For - * type2 this is set via the DP_DUAL_MODE_TMDS_OEN register. As - * some type 1 adaptors have problems with registers (see comments - * in drm_dp_dual_mode_detect()) we avoid touching the register, - * making this function a no-op on type 1 adaptors. + * type2 this is set via the DP_DUAL_MODE_TMDS_OEN register. + * Type1 adaptors do not support any register writes. * * Returns: * 0 on success, negative error code on failure diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c index e3142c8142b3..61c29ce74b03 100644 --- a/drivers/gpu/drm/drm_connector.c +++ b/drivers/gpu/drm/drm_connector.c @@ -435,7 +435,7 @@ int drmm_connector_init(struct drm_device *dev, if (drm_WARN_ON(dev, funcs && funcs->destroy)) return -EINVAL; - ret = __drm_connector_init(dev, connector, funcs, connector_type, NULL); + ret = __drm_connector_init(dev, connector, funcs, connector_type, ddc); if (ret) return ret; diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c index 8214a0b1ab7f..203bf8d6c34c 100644 --- a/drivers/gpu/drm/drm_drv.c +++ b/drivers/gpu/drm/drm_drv.c @@ -615,7 +615,7 @@ static int drm_dev_init(struct drm_device *dev, mutex_init(&dev->clientlist_mutex); mutex_init(&dev->master_mutex); - ret = drmm_add_action(dev, drm_dev_init_release, NULL); + ret = drmm_add_action_or_reset(dev, drm_dev_init_release, NULL); if (ret) return ret; diff --git a/drivers/gpu/drm/drm_format_helper.c b/drivers/gpu/drm/drm_format_helper.c index e2f76621453c..3ee59bae9d2f 100644 --- a/drivers/gpu/drm/drm_format_helper.c +++ b/drivers/gpu/drm/drm_format_helper.c @@ -807,6 +807,38 @@ static bool is_listed_fourcc(const uint32_t *fourccs, size_t nfourccs, uint32_t return false; } +static const uint32_t conv_from_xrgb8888[] = { + DRM_FORMAT_XRGB8888, + DRM_FORMAT_ARGB8888, + DRM_FORMAT_XRGB2101010, + DRM_FORMAT_ARGB2101010, + DRM_FORMAT_RGB565, + DRM_FORMAT_RGB888, +}; + +static const uint32_t conv_from_rgb565_888[] = { + DRM_FORMAT_XRGB8888, + DRM_FORMAT_ARGB8888, +}; + +static bool is_conversion_supported(uint32_t from, uint32_t to) +{ + switch (from) { + case DRM_FORMAT_XRGB8888: + case DRM_FORMAT_ARGB8888: + return is_listed_fourcc(conv_from_xrgb8888, ARRAY_SIZE(conv_from_xrgb8888), to); + case DRM_FORMAT_RGB565: + case DRM_FORMAT_RGB888: + return is_listed_fourcc(conv_from_rgb565_888, ARRAY_SIZE(conv_from_rgb565_888), to); + case DRM_FORMAT_XRGB2101010: + return to == DRM_FORMAT_ARGB2101010; + case DRM_FORMAT_ARGB2101010: + return to == DRM_FORMAT_XRGB2101010; + default: + return false; + } +} + /** * drm_fb_build_fourcc_list - Filters a list of supported color formats against * the device's native formats @@ -827,7 +859,9 @@ static bool is_listed_fourcc(const uint32_t *fourccs, size_t nfourccs, uint32_t * be handed over to drm_universal_plane_init() et al. Native formats * will go before emulated formats. Other heuristics might be applied * to optimize the order. Formats near the beginning of the list are - * usually preferred over formats near the end of the list. + * usually preferred over formats near the end of the list. Formats + * without conversion helpers will be skipped. 
New drivers should only + * pass in XRGB8888 and avoid exposing additional emulated formats. * * Returns: * The number of color-formats 4CC codes returned in @fourccs_out. @@ -839,7 +873,7 @@ size_t drm_fb_build_fourcc_list(struct drm_device *dev, { u32 *fourccs = fourccs_out; const u32 *fourccs_end = fourccs_out + nfourccs_out; - bool found_native = false; + uint32_t native_format = 0; size_t i; /* @@ -858,27 +892,19 @@ size_t drm_fb_build_fourcc_list(struct drm_device *dev, drm_dbg_kms(dev, "adding native format %p4cc\n", &fourcc); - if (!found_native) - found_native = is_listed_fourcc(driver_fourccs, driver_nfourccs, fourcc); + /* + * There should only be one native format with the current API. + * This API needs to be refactored to correctly support arbitrary + * sets of native formats, since it needs to report which native + * format to use for each emulated format. + */ + if (!native_format) + native_format = fourcc; *fourccs = fourcc; ++fourccs; } /* - * The plane's atomic_update helper converts the framebuffer's color format - * to a native format when copying to device memory. - * - * If there is not a single format supported by both, device and - * driver, the native formats are likely not supported by the conversion - * helpers. Therefore *only* support the native formats and add a - * conversion helper ASAP. - */ - if (!found_native) { - drm_warn(dev, "Format conversion helpers required to add extra formats.\n"); - goto out; - } - - /* * The extra formats, emulated by the driver, go second. */ @@ -890,6 +916,9 @@ size_t drm_fb_build_fourcc_list(struct drm_device *dev, } else if (fourccs == fourccs_end) { drm_warn(dev, "Ignoring emulated format %p4cc\n", &fourcc); continue; /* end of available output buffer */ + } else if (!is_conversion_supported(fourcc, native_format)) { + drm_dbg_kms(dev, "Unsupported emulated format %p4cc\n", &fourcc); + continue; /* format is not supported for conversion */ } drm_dbg_kms(dev, "adding emulated format %p4cc\n", &fourcc); @@ -898,7 +927,6 @@ size_t drm_fb_build_fourcc_list(struct drm_device *dev, ++fourccs; } -out: return fourccs - fourccs_out; } EXPORT_SYMBOL(drm_fb_build_fourcc_list); diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h index 7bb98e6a446d..5ea5e260118c 100644 --- a/drivers/gpu/drm/drm_internal.h +++ b/drivers/gpu/drm/drm_internal.h @@ -104,7 +104,8 @@ static inline void drm_vblank_flush_worker(struct drm_vblank_crtc *vblank) static inline void drm_vblank_destroy_worker(struct drm_vblank_crtc *vblank) { - kthread_destroy_worker(vblank->worker); + if (vblank->worker) + kthread_destroy_worker(vblank->worker); } int drm_vblank_worker_init(struct drm_vblank_crtc *vblank); diff --git a/drivers/gpu/drm/drm_mode_config.c b/drivers/gpu/drm/drm_mode_config.c index 939d621c9ad4..688c8afe0bf1 100644 --- a/drivers/gpu/drm/drm_mode_config.c +++ b/drivers/gpu/drm/drm_mode_config.c @@ -151,9 +151,6 @@ int drm_mode_getresources(struct drm_device *dev, void *data, count = 0; connector_id = u64_to_user_ptr(card_res->connector_id_ptr); drm_for_each_connector_iter(connector, &conn_iter) { - if (connector->registration_state != DRM_CONNECTOR_REGISTERED) - continue; - /* only expose writeback connectors if userspace understands them */ if (!file_priv->writeback_connectors && (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)) diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c index 8a0c0e0bb5bd..52d8800a8ab8 100644 --- 
a/drivers/gpu/drm/drm_panel_orientation_quirks.c +++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c @@ -134,6 +134,12 @@ static const struct dmi_system_id orientation_data[] = { DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "One S1003"), }, .driver_data = (void *)&lcd800x1280_rightside_up, + }, { /* Acer Switch V 10 (SW5-017) */ + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Acer"), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "SW5-017"), + }, + .driver_data = (void *)&lcd800x1280_rightside_up, }, { /* Anbernic Win600 */ .matches = { DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "Anbernic"), @@ -319,6 +325,12 @@ static const struct dmi_system_id orientation_data[] = { DMI_MATCH(DMI_BIOS_VERSION, "BLADE_21"), }, .driver_data = (void *)&lcd1200x1920_rightside_up, + }, { /* Nanote UMPC-01 */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "RWC CO.,LTD"), + DMI_MATCH(DMI_PRODUCT_NAME, "UMPC-01"), + }, + .driver_data = (void *)&lcd1200x1920_rightside_up, }, { /* OneGX1 Pro */ .matches = { DMI_EXACT_MATCH(DMI_SYS_VENDOR, "SYSTEM_MANUFACTURER"), diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile index a26edcdadc21..cea00aaca04b 100644 --- a/drivers/gpu/drm/i915/Makefile +++ b/drivers/gpu/drm/i915/Makefile @@ -282,6 +282,7 @@ i915-y += \ display/intel_ddi.o \ display/intel_ddi_buf_trans.o \ display/intel_display_trace.o \ + display/intel_dkl_phy.o \ display/intel_dp.o \ display/intel_dp_aux.o \ display/intel_dp_aux_backlight.o \ diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c index da8472cdc135..69ecf2a3d6c6 100644 --- a/drivers/gpu/drm/i915/display/intel_ddi.c +++ b/drivers/gpu/drm/i915/display/intel_ddi.c @@ -43,6 +43,7 @@ #include "intel_de.h" #include "intel_display_power.h" #include "intel_display_types.h" +#include "intel_dkl_phy.h" #include "intel_dp.h" #include "intel_dp_link_training.h" #include "intel_dp_mst.h" @@ -1262,33 +1263,30 @@ static void tgl_dkl_phy_set_signal_levels(struct intel_encoder *encoder, for (ln = 0; ln < 2; ln++) { int level; - intel_de_write(dev_priv, HIP_INDEX_REG(tc_port), - HIP_INDEX_VAL(tc_port, ln)); - - intel_de_write(dev_priv, DKL_TX_PMD_LANE_SUS(tc_port), 0); + intel_dkl_phy_write(dev_priv, DKL_TX_PMD_LANE_SUS(tc_port), ln, 0); level = intel_ddi_level(encoder, crtc_state, 2*ln+0); - intel_de_rmw(dev_priv, DKL_TX_DPCNTL0(tc_port), - DKL_TX_PRESHOOT_COEFF_MASK | - DKL_TX_DE_EMPAHSIS_COEFF_MASK | - DKL_TX_VSWING_CONTROL_MASK, - DKL_TX_PRESHOOT_COEFF(trans->entries[level].dkl.preshoot) | - DKL_TX_DE_EMPHASIS_COEFF(trans->entries[level].dkl.de_emphasis) | - DKL_TX_VSWING_CONTROL(trans->entries[level].dkl.vswing)); + intel_dkl_phy_rmw(dev_priv, DKL_TX_DPCNTL0(tc_port), ln, + DKL_TX_PRESHOOT_COEFF_MASK | + DKL_TX_DE_EMPAHSIS_COEFF_MASK | + DKL_TX_VSWING_CONTROL_MASK, + DKL_TX_PRESHOOT_COEFF(trans->entries[level].dkl.preshoot) | + DKL_TX_DE_EMPHASIS_COEFF(trans->entries[level].dkl.de_emphasis) | + DKL_TX_VSWING_CONTROL(trans->entries[level].dkl.vswing)); level = intel_ddi_level(encoder, crtc_state, 2*ln+1); - intel_de_rmw(dev_priv, DKL_TX_DPCNTL1(tc_port), - DKL_TX_PRESHOOT_COEFF_MASK | - DKL_TX_DE_EMPAHSIS_COEFF_MASK | - DKL_TX_VSWING_CONTROL_MASK, - DKL_TX_PRESHOOT_COEFF(trans->entries[level].dkl.preshoot) | - DKL_TX_DE_EMPHASIS_COEFF(trans->entries[level].dkl.de_emphasis) | - DKL_TX_VSWING_CONTROL(trans->entries[level].dkl.vswing)); + intel_dkl_phy_rmw(dev_priv, DKL_TX_DPCNTL1(tc_port), ln, + DKL_TX_PRESHOOT_COEFF_MASK | + DKL_TX_DE_EMPAHSIS_COEFF_MASK | + DKL_TX_VSWING_CONTROL_MASK, + 
DKL_TX_PRESHOOT_COEFF(trans->entries[level].dkl.preshoot) | + DKL_TX_DE_EMPHASIS_COEFF(trans->entries[level].dkl.de_emphasis) | + DKL_TX_VSWING_CONTROL(trans->entries[level].dkl.vswing)); - intel_de_rmw(dev_priv, DKL_TX_DPCNTL2(tc_port), - DKL_TX_DP20BITMODE, 0); + intel_dkl_phy_rmw(dev_priv, DKL_TX_DPCNTL2(tc_port), ln, + DKL_TX_DP20BITMODE, 0); if (IS_ALDERLAKE_P(dev_priv)) { u32 val; @@ -1306,10 +1304,10 @@ static void tgl_dkl_phy_set_signal_levels(struct intel_encoder *encoder, val |= DKL_TX_DPCNTL2_CFG_LOADGENSELECT_TX2(0); } - intel_de_rmw(dev_priv, DKL_TX_DPCNTL2(tc_port), - DKL_TX_DPCNTL2_CFG_LOADGENSELECT_TX1_MASK | - DKL_TX_DPCNTL2_CFG_LOADGENSELECT_TX2_MASK, - val); + intel_dkl_phy_rmw(dev_priv, DKL_TX_DPCNTL2(tc_port), ln, + DKL_TX_DPCNTL2_CFG_LOADGENSELECT_TX1_MASK | + DKL_TX_DPCNTL2_CFG_LOADGENSELECT_TX2_MASK, + val); } } } @@ -2019,12 +2017,8 @@ icl_program_mg_dp_mode(struct intel_digital_port *dig_port, return; if (DISPLAY_VER(dev_priv) >= 12) { - intel_de_write(dev_priv, HIP_INDEX_REG(tc_port), - HIP_INDEX_VAL(tc_port, 0x0)); - ln0 = intel_de_read(dev_priv, DKL_DP_MODE(tc_port)); - intel_de_write(dev_priv, HIP_INDEX_REG(tc_port), - HIP_INDEX_VAL(tc_port, 0x1)); - ln1 = intel_de_read(dev_priv, DKL_DP_MODE(tc_port)); + ln0 = intel_dkl_phy_read(dev_priv, DKL_DP_MODE(tc_port), 0); + ln1 = intel_dkl_phy_read(dev_priv, DKL_DP_MODE(tc_port), 1); } else { ln0 = intel_de_read(dev_priv, MG_DP_MODE(0, tc_port)); ln1 = intel_de_read(dev_priv, MG_DP_MODE(1, tc_port)); @@ -2085,12 +2079,8 @@ icl_program_mg_dp_mode(struct intel_digital_port *dig_port, } if (DISPLAY_VER(dev_priv) >= 12) { - intel_de_write(dev_priv, HIP_INDEX_REG(tc_port), - HIP_INDEX_VAL(tc_port, 0x0)); - intel_de_write(dev_priv, DKL_DP_MODE(tc_port), ln0); - intel_de_write(dev_priv, HIP_INDEX_REG(tc_port), - HIP_INDEX_VAL(tc_port, 0x1)); - intel_de_write(dev_priv, DKL_DP_MODE(tc_port), ln1); + intel_dkl_phy_write(dev_priv, DKL_DP_MODE(tc_port), 0, ln0); + intel_dkl_phy_write(dev_priv, DKL_DP_MODE(tc_port), 1, ln1); } else { intel_de_write(dev_priv, MG_DP_MODE(0, tc_port), ln0); intel_de_write(dev_priv, MG_DP_MODE(1, tc_port), ln1); @@ -3094,10 +3084,8 @@ static void adlp_tbt_to_dp_alt_switch_wa(struct intel_encoder *encoder) enum tc_port tc_port = intel_port_to_tc(i915, encoder->port); int ln; - for (ln = 0; ln < 2; ln++) { - intel_de_write(i915, HIP_INDEX_REG(tc_port), HIP_INDEX_VAL(tc_port, ln)); - intel_de_rmw(i915, DKL_PCS_DW5(tc_port), DKL_PCS_DW5_CORE_SOFTRESET, 0); - } + for (ln = 0; ln < 2; ln++) + intel_dkl_phy_rmw(i915, DKL_PCS_DW5(tc_port), ln, DKL_PCS_DW5_CORE_SOFTRESET, 0); } static void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp, diff --git a/drivers/gpu/drm/i915/display/intel_display_core.h b/drivers/gpu/drm/i915/display/intel_display_core.h index 96cf994b0ad1..9b51148e8ba5 100644 --- a/drivers/gpu/drm/i915/display/intel_display_core.h +++ b/drivers/gpu/drm/i915/display/intel_display_core.h @@ -316,6 +316,14 @@ struct intel_display { } dbuf; struct { + /* + * dkl.phy_lock protects against concurrent access of the + * Dekel TypeC PHYs. 
+ */ + spinlock_t phy_lock; + } dkl; + + struct { /* VLV/CHV/BXT/GLK DSI MMIO register base address */ u32 mmio_base; } dsi; diff --git a/drivers/gpu/drm/i915/display/intel_display_power_well.c b/drivers/gpu/drm/i915/display/intel_display_power_well.c index df7ee4969ef1..1d18eee56253 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power_well.c +++ b/drivers/gpu/drm/i915/display/intel_display_power_well.c @@ -12,6 +12,7 @@ #include "intel_de.h" #include "intel_display_power_well.h" #include "intel_display_types.h" +#include "intel_dkl_phy.h" #include "intel_dmc.h" #include "intel_dpio_phy.h" #include "intel_dpll.h" @@ -529,11 +530,9 @@ icl_tc_phy_aux_power_well_enable(struct drm_i915_private *dev_priv, enum tc_port tc_port; tc_port = TGL_AUX_PW_TO_TC_PORT(i915_power_well_instance(power_well)->hsw.idx); - intel_de_write(dev_priv, HIP_INDEX_REG(tc_port), - HIP_INDEX_VAL(tc_port, 0x2)); - if (intel_de_wait_for_set(dev_priv, DKL_CMN_UC_DW_27(tc_port), - DKL_CMN_UC_DW27_UC_HEALTH, 1)) + if (wait_for(intel_dkl_phy_read(dev_priv, DKL_CMN_UC_DW_27(tc_port), 2) & + DKL_CMN_UC_DW27_UC_HEALTH, 1)) drm_warn(&dev_priv->drm, "Timeout waiting TC uC health\n"); } diff --git a/drivers/gpu/drm/i915/display/intel_dkl_phy.c b/drivers/gpu/drm/i915/display/intel_dkl_phy.c new file mode 100644 index 000000000000..710b030c7ed5 --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_dkl_phy.c @@ -0,0 +1,109 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2022 Intel Corporation + */ + +#include "i915_drv.h" +#include "i915_reg.h" + +#include "intel_de.h" +#include "intel_display.h" +#include "intel_dkl_phy.h" + +static void +dkl_phy_set_hip_idx(struct drm_i915_private *i915, i915_reg_t reg, int idx) +{ + enum tc_port tc_port = DKL_REG_TC_PORT(reg); + + drm_WARN_ON(&i915->drm, tc_port < TC_PORT_1 || tc_port >= I915_MAX_TC_PORTS); + + intel_de_write(i915, + HIP_INDEX_REG(tc_port), + HIP_INDEX_VAL(tc_port, idx)); +} + +/** + * intel_dkl_phy_read - read a Dekel PHY register + * @i915: i915 device instance + * @reg: Dekel PHY register + * @ln: lane instance of @reg + * + * Read the @reg Dekel PHY register. + * + * Returns the read value. + */ +u32 +intel_dkl_phy_read(struct drm_i915_private *i915, i915_reg_t reg, int ln) +{ + u32 val; + + spin_lock(&i915->display.dkl.phy_lock); + + dkl_phy_set_hip_idx(i915, reg, ln); + val = intel_de_read(i915, reg); + + spin_unlock(&i915->display.dkl.phy_lock); + + return val; +} + +/** + * intel_dkl_phy_write - write a Dekel PHY register + * @i915: i915 device instance + * @reg: Dekel PHY register + * @ln: lane instance of @reg + * @val: value to write + * + * Write @val to the @reg Dekel PHY register. + */ +void +intel_dkl_phy_write(struct drm_i915_private *i915, i915_reg_t reg, int ln, u32 val) +{ + spin_lock(&i915->display.dkl.phy_lock); + + dkl_phy_set_hip_idx(i915, reg, ln); + intel_de_write(i915, reg, val); + + spin_unlock(&i915->display.dkl.phy_lock); +} + +/** + * intel_dkl_phy_rmw - read-modify-write a Dekel PHY register + * @i915: i915 device instance + * @reg: Dekel PHY register + * @ln: lane instance of @reg + * @clear: mask to clear + * @set: mask to set + * + * Read the @reg Dekel PHY register, clearing then setting the @clear/@set bits in it, and writing + * this value back to the register if the value differs from the read one. 
+ */ +void +intel_dkl_phy_rmw(struct drm_i915_private *i915, i915_reg_t reg, int ln, u32 clear, u32 set) +{ + spin_lock(&i915->display.dkl.phy_lock); + + dkl_phy_set_hip_idx(i915, reg, ln); + intel_de_rmw(i915, reg, clear, set); + + spin_unlock(&i915->display.dkl.phy_lock); +} + +/** + * intel_dkl_phy_posting_read - do a posting read from a Dekel PHY register + * @i915: i915 device instance + * @reg: Dekel PHY register + * @ln: lane instance of @reg + * + * Read the @reg Dekel PHY register without returning the read value. + */ +void +intel_dkl_phy_posting_read(struct drm_i915_private *i915, i915_reg_t reg, int ln) +{ + spin_lock(&i915->display.dkl.phy_lock); + + dkl_phy_set_hip_idx(i915, reg, ln); + intel_de_posting_read(i915, reg); + + spin_unlock(&i915->display.dkl.phy_lock); +} diff --git a/drivers/gpu/drm/i915/display/intel_dkl_phy.h b/drivers/gpu/drm/i915/display/intel_dkl_phy.h new file mode 100644 index 000000000000..260ad121a0b1 --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_dkl_phy.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2022 Intel Corporation + */ + +#ifndef __INTEL_DKL_PHY_H__ +#define __INTEL_DKL_PHY_H__ + +#include <linux/types.h> + +#include "i915_reg_defs.h" + +struct drm_i915_private; + +u32 +intel_dkl_phy_read(struct drm_i915_private *i915, i915_reg_t reg, int ln); +void +intel_dkl_phy_write(struct drm_i915_private *i915, i915_reg_t reg, int ln, u32 val); +void +intel_dkl_phy_rmw(struct drm_i915_private *i915, i915_reg_t reg, int ln, u32 clear, u32 set); +void +intel_dkl_phy_posting_read(struct drm_i915_private *i915, i915_reg_t reg, int ln); + +#endif /* __INTEL_DKL_PHY_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c index c9be61d2348e..2b5bc95a8b0d 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.c +++ b/drivers/gpu/drm/i915/display/intel_dp.c @@ -3957,6 +3957,8 @@ intel_dp_handle_hdmi_link_status_change(struct intel_dp *intel_dp) drm_dp_pcon_hdmi_frl_link_error_count(&intel_dp->aux, &intel_dp->attached_connector->base); + intel_dp->frl.is_trained = false; + /* Restart FRL training or fall back to TMDS mode */ intel_dp_check_frl_training(intel_dp); } @@ -5274,7 +5276,7 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp, encoder->devdata, IS_ERR(edid) ? 
NULL : edid); intel_panel_add_edid_fixed_modes(intel_connector, - intel_connector->panel.vbt.drrs_type != DRRS_TYPE_NONE, + intel_connector->panel.vbt.drrs_type != DRRS_TYPE_NONE || intel_vrr_is_capable(intel_connector)); /* MSO requires information from the EDID */ diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c index e5fb66a5dd02..64dd603dc69a 100644 --- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c +++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c @@ -25,6 +25,7 @@ #include "intel_de.h" #include "intel_display_types.h" +#include "intel_dkl_phy.h" #include "intel_dpio_phy.h" #include "intel_dpll.h" #include "intel_dpll_mgr.h" @@ -3508,15 +3509,12 @@ static bool dkl_pll_get_hw_state(struct drm_i915_private *dev_priv, * All registers read here have the same HIP_INDEX_REG even though * they are on different building blocks */ - intel_de_write(dev_priv, HIP_INDEX_REG(tc_port), - HIP_INDEX_VAL(tc_port, 0x2)); - - hw_state->mg_refclkin_ctl = intel_de_read(dev_priv, - DKL_REFCLKIN_CTL(tc_port)); + hw_state->mg_refclkin_ctl = intel_dkl_phy_read(dev_priv, + DKL_REFCLKIN_CTL(tc_port), 2); hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK; hw_state->mg_clktop2_hsclkctl = - intel_de_read(dev_priv, DKL_CLKTOP2_HSCLKCTL(tc_port)); + intel_dkl_phy_read(dev_priv, DKL_CLKTOP2_HSCLKCTL(tc_port), 2); hw_state->mg_clktop2_hsclkctl &= MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK | MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK | @@ -3524,32 +3522,32 @@ static bool dkl_pll_get_hw_state(struct drm_i915_private *dev_priv, MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK; hw_state->mg_clktop2_coreclkctl1 = - intel_de_read(dev_priv, DKL_CLKTOP2_CORECLKCTL1(tc_port)); + intel_dkl_phy_read(dev_priv, DKL_CLKTOP2_CORECLKCTL1(tc_port), 2); hw_state->mg_clktop2_coreclkctl1 &= MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK; - hw_state->mg_pll_div0 = intel_de_read(dev_priv, DKL_PLL_DIV0(tc_port)); + hw_state->mg_pll_div0 = intel_dkl_phy_read(dev_priv, DKL_PLL_DIV0(tc_port), 2); val = DKL_PLL_DIV0_MASK; if (dev_priv->display.vbt.override_afc_startup) val |= DKL_PLL_DIV0_AFC_STARTUP_MASK; hw_state->mg_pll_div0 &= val; - hw_state->mg_pll_div1 = intel_de_read(dev_priv, DKL_PLL_DIV1(tc_port)); + hw_state->mg_pll_div1 = intel_dkl_phy_read(dev_priv, DKL_PLL_DIV1(tc_port), 2); hw_state->mg_pll_div1 &= (DKL_PLL_DIV1_IREF_TRIM_MASK | DKL_PLL_DIV1_TDC_TARGET_CNT_MASK); - hw_state->mg_pll_ssc = intel_de_read(dev_priv, DKL_PLL_SSC(tc_port)); + hw_state->mg_pll_ssc = intel_dkl_phy_read(dev_priv, DKL_PLL_SSC(tc_port), 2); hw_state->mg_pll_ssc &= (DKL_PLL_SSC_IREF_NDIV_RATIO_MASK | DKL_PLL_SSC_STEP_LEN_MASK | DKL_PLL_SSC_STEP_NUM_MASK | DKL_PLL_SSC_EN); - hw_state->mg_pll_bias = intel_de_read(dev_priv, DKL_PLL_BIAS(tc_port)); + hw_state->mg_pll_bias = intel_dkl_phy_read(dev_priv, DKL_PLL_BIAS(tc_port), 2); hw_state->mg_pll_bias &= (DKL_PLL_BIAS_FRAC_EN_H | DKL_PLL_BIAS_FBDIV_FRAC_MASK); hw_state->mg_pll_tdc_coldst_bias = - intel_de_read(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port)); + intel_dkl_phy_read(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port), 2); hw_state->mg_pll_tdc_coldst_bias &= (DKL_PLL_TDC_SSC_STEP_SIZE_MASK | DKL_PLL_TDC_FEED_FWD_GAIN_MASK); @@ -3737,61 +3735,58 @@ static void dkl_pll_write(struct drm_i915_private *dev_priv, * All registers programmed here have the same HIP_INDEX_REG even * though on different building block */ - intel_de_write(dev_priv, HIP_INDEX_REG(tc_port), - HIP_INDEX_VAL(tc_port, 0x2)); - /* All the registers are RMW */ - val = intel_de_read(dev_priv, 
DKL_REFCLKIN_CTL(tc_port)); + val = intel_dkl_phy_read(dev_priv, DKL_REFCLKIN_CTL(tc_port), 2); val &= ~MG_REFCLKIN_CTL_OD_2_MUX_MASK; val |= hw_state->mg_refclkin_ctl; - intel_de_write(dev_priv, DKL_REFCLKIN_CTL(tc_port), val); + intel_dkl_phy_write(dev_priv, DKL_REFCLKIN_CTL(tc_port), 2, val); - val = intel_de_read(dev_priv, DKL_CLKTOP2_CORECLKCTL1(tc_port)); + val = intel_dkl_phy_read(dev_priv, DKL_CLKTOP2_CORECLKCTL1(tc_port), 2); val &= ~MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK; val |= hw_state->mg_clktop2_coreclkctl1; - intel_de_write(dev_priv, DKL_CLKTOP2_CORECLKCTL1(tc_port), val); + intel_dkl_phy_write(dev_priv, DKL_CLKTOP2_CORECLKCTL1(tc_port), 2, val); - val = intel_de_read(dev_priv, DKL_CLKTOP2_HSCLKCTL(tc_port)); + val = intel_dkl_phy_read(dev_priv, DKL_CLKTOP2_HSCLKCTL(tc_port), 2); val &= ~(MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK | MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK | MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK | MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK); val |= hw_state->mg_clktop2_hsclkctl; - intel_de_write(dev_priv, DKL_CLKTOP2_HSCLKCTL(tc_port), val); + intel_dkl_phy_write(dev_priv, DKL_CLKTOP2_HSCLKCTL(tc_port), 2, val); val = DKL_PLL_DIV0_MASK; if (dev_priv->display.vbt.override_afc_startup) val |= DKL_PLL_DIV0_AFC_STARTUP_MASK; - intel_de_rmw(dev_priv, DKL_PLL_DIV0(tc_port), val, - hw_state->mg_pll_div0); + intel_dkl_phy_rmw(dev_priv, DKL_PLL_DIV0(tc_port), 2, val, + hw_state->mg_pll_div0); - val = intel_de_read(dev_priv, DKL_PLL_DIV1(tc_port)); + val = intel_dkl_phy_read(dev_priv, DKL_PLL_DIV1(tc_port), 2); val &= ~(DKL_PLL_DIV1_IREF_TRIM_MASK | DKL_PLL_DIV1_TDC_TARGET_CNT_MASK); val |= hw_state->mg_pll_div1; - intel_de_write(dev_priv, DKL_PLL_DIV1(tc_port), val); + intel_dkl_phy_write(dev_priv, DKL_PLL_DIV1(tc_port), 2, val); - val = intel_de_read(dev_priv, DKL_PLL_SSC(tc_port)); + val = intel_dkl_phy_read(dev_priv, DKL_PLL_SSC(tc_port), 2); val &= ~(DKL_PLL_SSC_IREF_NDIV_RATIO_MASK | DKL_PLL_SSC_STEP_LEN_MASK | DKL_PLL_SSC_STEP_NUM_MASK | DKL_PLL_SSC_EN); val |= hw_state->mg_pll_ssc; - intel_de_write(dev_priv, DKL_PLL_SSC(tc_port), val); + intel_dkl_phy_write(dev_priv, DKL_PLL_SSC(tc_port), 2, val); - val = intel_de_read(dev_priv, DKL_PLL_BIAS(tc_port)); + val = intel_dkl_phy_read(dev_priv, DKL_PLL_BIAS(tc_port), 2); val &= ~(DKL_PLL_BIAS_FRAC_EN_H | DKL_PLL_BIAS_FBDIV_FRAC_MASK); val |= hw_state->mg_pll_bias; - intel_de_write(dev_priv, DKL_PLL_BIAS(tc_port), val); + intel_dkl_phy_write(dev_priv, DKL_PLL_BIAS(tc_port), 2, val); - val = intel_de_read(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port)); + val = intel_dkl_phy_read(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port), 2); val &= ~(DKL_PLL_TDC_SSC_STEP_SIZE_MASK | DKL_PLL_TDC_FEED_FWD_GAIN_MASK); val |= hw_state->mg_pll_tdc_coldst_bias; - intel_de_write(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port), val); + intel_dkl_phy_write(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port), 2, val); - intel_de_posting_read(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port)); + intel_dkl_phy_posting_read(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port), 2); } static void icl_pll_power_enable(struct drm_i915_private *dev_priv, diff --git a/drivers/gpu/drm/i915/display/intel_lvds.c b/drivers/gpu/drm/i915/display/intel_lvds.c index 9aa38e8141b5..e5352239b2a2 100644 --- a/drivers/gpu/drm/i915/display/intel_lvds.c +++ b/drivers/gpu/drm/i915/display/intel_lvds.c @@ -972,8 +972,7 @@ void intel_lvds_init(struct drm_i915_private *dev_priv) /* Try EDID first */ intel_panel_add_edid_fixed_modes(intel_connector, - intel_connector->panel.vbt.drrs_type != DRRS_TYPE_NONE, - false); 
+ intel_connector->panel.vbt.drrs_type != DRRS_TYPE_NONE); /* Failed to get EDID, what about VBT? */ if (!intel_panel_preferred_fixed_mode(intel_connector)) diff --git a/drivers/gpu/drm/i915/display/intel_panel.c b/drivers/gpu/drm/i915/display/intel_panel.c index a3a3f9fe4342..41cec9dc4223 100644 --- a/drivers/gpu/drm/i915/display/intel_panel.c +++ b/drivers/gpu/drm/i915/display/intel_panel.c @@ -254,10 +254,10 @@ static void intel_panel_destroy_probed_modes(struct intel_connector *connector) } void intel_panel_add_edid_fixed_modes(struct intel_connector *connector, - bool has_drrs, bool has_vrr) + bool use_alt_fixed_modes) { intel_panel_add_edid_preferred_mode(connector); - if (intel_panel_preferred_fixed_mode(connector) && (has_drrs || has_vrr)) + if (intel_panel_preferred_fixed_mode(connector) && use_alt_fixed_modes) intel_panel_add_edid_alt_fixed_modes(connector); intel_panel_destroy_probed_modes(connector); } diff --git a/drivers/gpu/drm/i915/display/intel_panel.h b/drivers/gpu/drm/i915/display/intel_panel.h index eff3ffd3d082..5c5b5b7f95b6 100644 --- a/drivers/gpu/drm/i915/display/intel_panel.h +++ b/drivers/gpu/drm/i915/display/intel_panel.h @@ -44,7 +44,7 @@ int intel_panel_fitting(struct intel_crtc_state *crtc_state, int intel_panel_compute_config(struct intel_connector *connector, struct drm_display_mode *adjusted_mode); void intel_panel_add_edid_fixed_modes(struct intel_connector *connector, - bool has_drrs, bool has_vrr); + bool use_alt_fixed_modes); void intel_panel_add_vbt_lfp_fixed_mode(struct intel_connector *connector); void intel_panel_add_vbt_sdvo_fixed_mode(struct intel_connector *connector); void intel_panel_add_encoder_fixed_mode(struct intel_connector *connector, diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c index d4cce627d7a8..15c3e448aa0e 100644 --- a/drivers/gpu/drm/i915/display/intel_psr.c +++ b/drivers/gpu/drm/i915/display/intel_psr.c @@ -2201,8 +2201,11 @@ static void _psr_invalidate_handle(struct intel_dp *intel_dp) if (intel_dp->psr.psr2_sel_fetch_enabled) { u32 val; - if (intel_dp->psr.psr2_sel_fetch_cff_enabled) + if (intel_dp->psr.psr2_sel_fetch_cff_enabled) { + /* Send one update otherwise lag is observed in screen */ + intel_de_write(dev_priv, CURSURFLIVE(intel_dp->psr.pipe), 0); return; + } val = man_trk_ctl_enable_bit_get(dev_priv) | man_trk_ctl_partial_frame_bit_get(dev_priv) | diff --git a/drivers/gpu/drm/i915/display/intel_sdvo.c b/drivers/gpu/drm/i915/display/intel_sdvo.c index f5b744bef18f..774c1dc31a52 100644 --- a/drivers/gpu/drm/i915/display/intel_sdvo.c +++ b/drivers/gpu/drm/i915/display/intel_sdvo.c @@ -2747,13 +2747,10 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device) if (!intel_sdvo_connector) return false; - if (device == 0) { - intel_sdvo->controlled_output |= SDVO_OUTPUT_TMDS0; + if (device == 0) intel_sdvo_connector->output_flag = SDVO_OUTPUT_TMDS0; - } else if (device == 1) { - intel_sdvo->controlled_output |= SDVO_OUTPUT_TMDS1; + else if (device == 1) intel_sdvo_connector->output_flag = SDVO_OUTPUT_TMDS1; - } intel_connector = &intel_sdvo_connector->base; connector = &intel_connector->base; @@ -2808,7 +2805,6 @@ intel_sdvo_tv_init(struct intel_sdvo *intel_sdvo, int type) encoder->encoder_type = DRM_MODE_ENCODER_TVDAC; connector->connector_type = DRM_MODE_CONNECTOR_SVIDEO; - intel_sdvo->controlled_output |= type; intel_sdvo_connector->output_flag = type; if (intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo) < 0) { @@ -2849,13 +2845,10 @@ 
intel_sdvo_analog_init(struct intel_sdvo *intel_sdvo, int device) encoder->encoder_type = DRM_MODE_ENCODER_DAC; connector->connector_type = DRM_MODE_CONNECTOR_VGA; - if (device == 0) { - intel_sdvo->controlled_output |= SDVO_OUTPUT_RGB0; + if (device == 0) intel_sdvo_connector->output_flag = SDVO_OUTPUT_RGB0; - } else if (device == 1) { - intel_sdvo->controlled_output |= SDVO_OUTPUT_RGB1; + else if (device == 1) intel_sdvo_connector->output_flag = SDVO_OUTPUT_RGB1; - } if (intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo) < 0) { kfree(intel_sdvo_connector); @@ -2885,13 +2878,10 @@ intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, int device) encoder->encoder_type = DRM_MODE_ENCODER_LVDS; connector->connector_type = DRM_MODE_CONNECTOR_LVDS; - if (device == 0) { - intel_sdvo->controlled_output |= SDVO_OUTPUT_LVDS0; + if (device == 0) intel_sdvo_connector->output_flag = SDVO_OUTPUT_LVDS0; - } else if (device == 1) { - intel_sdvo->controlled_output |= SDVO_OUTPUT_LVDS1; + else if (device == 1) intel_sdvo_connector->output_flag = SDVO_OUTPUT_LVDS1; - } if (intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo) < 0) { kfree(intel_sdvo_connector); @@ -2910,8 +2900,12 @@ intel_sdvo_lvds_init(struct intel_sdvo *intel_sdvo, int device) intel_panel_add_vbt_sdvo_fixed_mode(intel_connector); if (!intel_panel_preferred_fixed_mode(intel_connector)) { + mutex_lock(&i915->drm.mode_config.mutex); + intel_ddc_get_modes(connector, &intel_sdvo->ddc); - intel_panel_add_edid_fixed_modes(intel_connector, false, false); + intel_panel_add_edid_fixed_modes(intel_connector, false); + + mutex_unlock(&i915->drm.mode_config.mutex); } intel_panel_init(intel_connector); @@ -2926,16 +2920,39 @@ err: return false; } +static u16 intel_sdvo_filter_output_flags(u16 flags) +{ + flags &= SDVO_OUTPUT_MASK; + + /* SDVO requires XXX1 function may not exist unless it has XXX0 function.*/ + if (!(flags & SDVO_OUTPUT_TMDS0)) + flags &= ~SDVO_OUTPUT_TMDS1; + + if (!(flags & SDVO_OUTPUT_RGB0)) + flags &= ~SDVO_OUTPUT_RGB1; + + if (!(flags & SDVO_OUTPUT_LVDS0)) + flags &= ~SDVO_OUTPUT_LVDS1; + + return flags; +} + static bool intel_sdvo_output_setup(struct intel_sdvo *intel_sdvo, u16 flags) { - /* SDVO requires XXX1 function may not exist unless it has XXX0 function.*/ + struct drm_i915_private *i915 = to_i915(intel_sdvo->base.base.dev); + + flags = intel_sdvo_filter_output_flags(flags); + + intel_sdvo->controlled_output = flags; + + intel_sdvo_select_ddc_bus(i915, intel_sdvo); if (flags & SDVO_OUTPUT_TMDS0) if (!intel_sdvo_dvi_init(intel_sdvo, 0)) return false; - if ((flags & SDVO_TMDS_MASK) == SDVO_TMDS_MASK) + if (flags & SDVO_OUTPUT_TMDS1) if (!intel_sdvo_dvi_init(intel_sdvo, 1)) return false; @@ -2956,7 +2973,7 @@ intel_sdvo_output_setup(struct intel_sdvo *intel_sdvo, u16 flags) if (!intel_sdvo_analog_init(intel_sdvo, 0)) return false; - if ((flags & SDVO_RGB_MASK) == SDVO_RGB_MASK) + if (flags & SDVO_OUTPUT_RGB1) if (!intel_sdvo_analog_init(intel_sdvo, 1)) return false; @@ -2964,14 +2981,13 @@ intel_sdvo_output_setup(struct intel_sdvo *intel_sdvo, u16 flags) if (!intel_sdvo_lvds_init(intel_sdvo, 0)) return false; - if ((flags & SDVO_LVDS_MASK) == SDVO_LVDS_MASK) + if (flags & SDVO_OUTPUT_LVDS1) if (!intel_sdvo_lvds_init(intel_sdvo, 1)) return false; - if ((flags & SDVO_OUTPUT_MASK) == 0) { + if (flags == 0) { unsigned char bytes[2]; - intel_sdvo->controlled_output = 0; memcpy(bytes, &intel_sdvo->caps.output_flags, 2); DRM_DEBUG_KMS("%s: Unknown SDVO output type (0x%02x%02x)\n", SDVO_NAME(intel_sdvo), @@ -3383,8 
+3399,6 @@ bool intel_sdvo_init(struct drm_i915_private *dev_priv, */ intel_sdvo->base.cloneable = 0; - intel_sdvo_select_ddc_bus(dev_priv, intel_sdvo); - /* Set the input timing to the screen. Assume always input 0. */ if (!intel_sdvo_set_target_input(intel_sdvo)) goto err_output; diff --git a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c index f5062d0c6333..824971a1ceec 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c @@ -40,13 +40,13 @@ static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachme goto err; } - ret = sg_alloc_table(st, obj->mm.pages->nents, GFP_KERNEL); + ret = sg_alloc_table(st, obj->mm.pages->orig_nents, GFP_KERNEL); if (ret) goto err_free; src = obj->mm.pages->sgl; dst = st->sgl; - for (i = 0; i < obj->mm.pages->nents; i++) { + for (i = 0; i < obj->mm.pages->orig_nents; i++) { sg_set_page(dst, sg_page(src), src->length, 0); dst = sg_next(dst); src = sg_next(src); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_internal.c b/drivers/gpu/drm/i915/gem/i915_gem_internal.c index c698f95af15f..629acb403a2c 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_internal.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_internal.c @@ -6,7 +6,6 @@ #include <linux/scatterlist.h> #include <linux/slab.h> -#include <linux/swiotlb.h> #include "i915_drv.h" #include "i915_gem.h" @@ -38,22 +37,12 @@ static int i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj) struct scatterlist *sg; unsigned int sg_page_sizes; unsigned int npages; - int max_order; + int max_order = MAX_ORDER; + unsigned int max_segment; gfp_t gfp; - max_order = MAX_ORDER; -#ifdef CONFIG_SWIOTLB - if (is_swiotlb_active(obj->base.dev->dev)) { - unsigned int max_segment; - - max_segment = swiotlb_max_segment(); - if (max_segment) { - max_segment = max_t(unsigned int, max_segment, - PAGE_SIZE) >> PAGE_SHIFT; - max_order = min(max_order, ilog2(max_segment)); - } - } -#endif + max_segment = i915_sg_segment_size(i915->drm.dev) >> PAGE_SHIFT; + max_order = min(max_order, get_order(max_segment)); gfp = GFP_KERNEL | __GFP_HIGHMEM | __GFP_RECLAIMABLE; if (IS_I965GM(i915) || IS_I965G(i915)) { diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c index f42ca1179f37..2f7804492cd5 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c @@ -194,7 +194,7 @@ static int shmem_get_pages(struct drm_i915_gem_object *obj) struct intel_memory_region *mem = obj->mm.region; struct address_space *mapping = obj->base.filp->f_mapping; const unsigned long page_count = obj->base.size / PAGE_SIZE; - unsigned int max_segment = i915_sg_segment_size(); + unsigned int max_segment = i915_sg_segment_size(i915->drm.dev); struct sg_table *st; struct sgt_iter sgt_iter; struct page *page; @@ -369,14 +369,14 @@ __i915_gem_object_release_shmem(struct drm_i915_gem_object *obj, __start_cpu_write(obj); /* - * On non-LLC platforms, force the flush-on-acquire if this is ever + * On non-LLC igfx platforms, force the flush-on-acquire if this is ever * swapped-in. Our async flush path is not trust worthy enough yet(and * happens in the wrong order), and with some tricks it's conceivable * for userspace to change the cache-level to I915_CACHE_NONE after the * pages are swapped-in, and since execbuf binds the object before doing * the async flush, we have a race window. 
*/ - if (!HAS_LLC(i915)) + if (!HAS_LLC(i915) && !IS_DGFX(i915)) obj->cache_dirty = true; } diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c index 4f861782c3e8..3d4305eea1aa 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c @@ -189,7 +189,7 @@ static int i915_ttm_tt_shmem_populate(struct ttm_device *bdev, struct drm_i915_private *i915 = container_of(bdev, typeof(*i915), bdev); struct intel_memory_region *mr = i915->mm.regions[INTEL_MEMORY_SYSTEM]; struct i915_ttm_tt *i915_tt = container_of(ttm, typeof(*i915_tt), ttm); - const unsigned int max_segment = i915_sg_segment_size(); + const unsigned int max_segment = i915_sg_segment_size(i915->drm.dev); const size_t size = (size_t)ttm->num_pages << PAGE_SHIFT; struct file *filp = i915_tt->filp; struct sgt_iter sgt_iter; @@ -538,7 +538,7 @@ static struct i915_refct_sgt *i915_ttm_tt_get_st(struct ttm_tt *ttm) ret = sg_alloc_table_from_pages_segment(st, ttm->pages, ttm->num_pages, 0, (unsigned long)ttm->num_pages << PAGE_SHIFT, - i915_sg_segment_size(), GFP_KERNEL); + i915_sg_segment_size(i915_tt->dev), GFP_KERNEL); if (ret) { st->sgl = NULL; return ERR_PTR(ret); @@ -1013,9 +1013,6 @@ static vm_fault_t vm_fault_ttm(struct vm_fault *vmf) return VM_FAULT_SIGBUS; } - if (i915_ttm_cpu_maps_iomem(bo->resource)) - wakeref = intel_runtime_pm_get(&to_i915(obj->base.dev)->runtime_pm); - if (!i915_ttm_resource_mappable(bo->resource)) { int err = -ENODEV; int i; @@ -1042,6 +1039,9 @@ static vm_fault_t vm_fault_ttm(struct vm_fault *vmf) } } + if (i915_ttm_cpu_maps_iomem(bo->resource)) + wakeref = intel_runtime_pm_get(&to_i915(obj->base.dev)->runtime_pm); + if (drm_dev_enter(dev, &idx)) { ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot, TTM_BO_VM_NUM_PREFAULT); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c index d4398948f016..ba14b18d65f3 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c @@ -129,7 +129,7 @@ static void i915_gem_object_userptr_drop_ref(struct drm_i915_gem_object *obj) static int i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj) { const unsigned long num_pages = obj->base.size >> PAGE_SHIFT; - unsigned int max_segment = i915_sg_segment_size(); + unsigned int max_segment = i915_sg_segment_size(obj->base.dev->dev); struct sg_table *st; unsigned int sg_page_sizes; struct page **pvec; @@ -428,9 +428,10 @@ probe_range(struct mm_struct *mm, unsigned long addr, unsigned long len) { VMA_ITERATOR(vmi, mm, addr); struct vm_area_struct *vma; + unsigned long end = addr + len; mmap_read_lock(mm); - for_each_vma_range(vmi, vma, addr + len) { + for_each_vma_range(vmi, vma, end) { /* Check for holes, note that we also update the addr below */ if (vma->vm_start > addr) break; @@ -442,7 +443,7 @@ probe_range(struct mm_struct *mm, unsigned long addr, unsigned long len) } mmap_read_unlock(mm); - if (vma) + if (vma || addr < end) return -EFAULT; return 0; } diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c index 6d2003d598e6..a821e3d405db 100644 --- a/drivers/gpu/drm/i915/gt/intel_workarounds.c +++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c @@ -2293,11 +2293,11 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal) } if (IS_DG1_GRAPHICS_STEP(i915, STEP_A0, STEP_B0) || - IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915)) { + IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915) || 
IS_ALDERLAKE_P(i915)) { /* * Wa_1607030317:tgl * Wa_1607186500:tgl - * Wa_1607297627:tgl,rkl,dg1[a0] + * Wa_1607297627:tgl,rkl,dg1[a0],adlp * * On TGL and RKL there are multiple entries for this WA in the * BSpec; some indicate this is an A0-only WA, others indicate diff --git a/drivers/gpu/drm/i915/i915_driver.c b/drivers/gpu/drm/i915/i915_driver.c index c459eb362c47..f2a15d8155f4 100644 --- a/drivers/gpu/drm/i915/i915_driver.c +++ b/drivers/gpu/drm/i915/i915_driver.c @@ -353,6 +353,7 @@ static int i915_driver_early_probe(struct drm_i915_private *dev_priv) mutex_init(&dev_priv->display.wm.wm_mutex); mutex_init(&dev_priv->display.pps.mutex); mutex_init(&dev_priv->display.hdcp.comp_mutex); + spin_lock_init(&dev_priv->display.dkl.phy_lock); i915_memcpy_init_early(dev_priv); intel_runtime_pm_init_early(&dev_priv->runtime_pm); diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 0b287a59dc2f..da35bb2db26b 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -7420,6 +7420,9 @@ enum skl_power_gate { #define _DKL_PHY5_BASE 0x16C000 #define _DKL_PHY6_BASE 0x16D000 +#define DKL_REG_TC_PORT(__reg) \ + (TC_PORT_1 + ((__reg).reg - _DKL_PHY1_BASE) / (_DKL_PHY2_BASE - _DKL_PHY1_BASE)) + /* DEKEL PHY MMIO Address = Phy base + (internal address & ~index_mask) */ #define _DKL_PCS_DW5 0x14 #define DKL_PCS_DW5(tc_port) _MMIO(_PORT(tc_port, _DKL_PHY1_BASE, \ diff --git a/drivers/gpu/drm/i915/i915_scatterlist.h b/drivers/gpu/drm/i915/i915_scatterlist.h index 9ddb3e743a3e..b0a1db44f895 100644 --- a/drivers/gpu/drm/i915/i915_scatterlist.h +++ b/drivers/gpu/drm/i915/i915_scatterlist.h @@ -9,7 +9,8 @@ #include <linux/pfn.h> #include <linux/scatterlist.h> -#include <linux/swiotlb.h> +#include <linux/dma-mapping.h> +#include <xen/xen.h> #include "i915_gem.h" @@ -127,19 +128,26 @@ static inline unsigned int i915_sg_dma_sizes(struct scatterlist *sg) return page_sizes; } -static inline unsigned int i915_sg_segment_size(void) +static inline unsigned int i915_sg_segment_size(struct device *dev) { - unsigned int size = swiotlb_max_segment(); - - if (size == 0) - size = UINT_MAX; - - size = rounddown(size, PAGE_SIZE); - /* swiotlb_max_segment_size can return 1 byte when it means one page. */ - if (size < PAGE_SIZE) - size = PAGE_SIZE; - - return size; + size_t max = min_t(size_t, UINT_MAX, dma_max_mapping_size(dev)); + + /* + * For Xen PV guests pages aren't contiguous in DMA (machine) address + * space. The DMA API takes care of that both in dma_alloc_* (by + * calling into the hypervisor to make the pages contiguous) and in + * dma_map_* (by bounce buffering). But i915 abuses ignores the + * coherency aspects of the DMA API and thus can't cope with bounce + * buffering actually happening, so add a hack here to force small + * allocations and mappings when running in PV mode on Xen. + * + * Note this will still break if bounce buffering is required for other + * reasons, like confidential computing hypervisors or PCIe root ports + * with addressing limitations. 
+ */ + if (xen_pv_domain()) + max = PAGE_SIZE; + return round_down(max, PAGE_SIZE); } bool i915_sg_trim(struct sg_table *orig_st); diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c index 6ed5786bcd29..744cca507946 100644 --- a/drivers/gpu/drm/i915/intel_runtime_pm.c +++ b/drivers/gpu/drm/i915/intel_runtime_pm.c @@ -591,8 +591,15 @@ void intel_runtime_pm_enable(struct intel_runtime_pm *rpm) pm_runtime_use_autosuspend(kdev); } - /* Enable by default */ - pm_runtime_allow(kdev); + /* + * FIXME: Temp hammer to keep autosupend disable on lmem supported platforms. + * As per PCIe specs 5.3.1.4.1, all iomem read write request over a PCIe + * function will be unsupported in case PCIe endpoint function is in D3. + * Let's keep i915 autosuspend control 'on' till we fix all known issue + * with lmem access in D3. + */ + if (!IS_DGFX(i915)) + pm_runtime_allow(kdev); /* * The core calls the driver load handler with an RPM reference held. diff --git a/drivers/gpu/drm/imx/Kconfig b/drivers/gpu/drm/imx/Kconfig index 975de4ff7313..fd5b2471fdf0 100644 --- a/drivers/gpu/drm/imx/Kconfig +++ b/drivers/gpu/drm/imx/Kconfig @@ -4,7 +4,6 @@ config DRM_IMX select DRM_KMS_HELPER select VIDEOMODE_HELPERS select DRM_GEM_DMA_HELPER - select DRM_KMS_HELPER depends on DRM && (ARCH_MXC || ARCH_MULTIPLATFORM || COMPILE_TEST) depends on IMX_IPUV3_CORE help diff --git a/drivers/gpu/drm/imx/imx-tve.c b/drivers/gpu/drm/imx/imx-tve.c index 6b34fac3f73a..ab4d1c878fda 100644 --- a/drivers/gpu/drm/imx/imx-tve.c +++ b/drivers/gpu/drm/imx/imx-tve.c @@ -218,8 +218,9 @@ static int imx_tve_connector_get_modes(struct drm_connector *connector) return ret; } -static int imx_tve_connector_mode_valid(struct drm_connector *connector, - struct drm_display_mode *mode) +static enum drm_mode_status +imx_tve_connector_mode_valid(struct drm_connector *connector, + struct drm_display_mode *mode) { struct imx_tve *tve = con_to_tve(connector); unsigned long rate; diff --git a/drivers/gpu/drm/lima/lima_devfreq.c b/drivers/gpu/drm/lima/lima_devfreq.c index 011be7ff51e1..bc8fb4e38d0a 100644 --- a/drivers/gpu/drm/lima/lima_devfreq.c +++ b/drivers/gpu/drm/lima/lima_devfreq.c @@ -112,11 +112,6 @@ int lima_devfreq_init(struct lima_device *ldev) unsigned long cur_freq; int ret; const char *regulator_names[] = { "mali", NULL }; - const char *clk_names[] = { "core", NULL }; - struct dev_pm_opp_config config = { - .regulator_names = regulator_names, - .clk_names = clk_names, - }; if (!device_property_present(dev, "operating-points-v2")) /* Optional, continue without devfreq */ @@ -124,7 +119,15 @@ int lima_devfreq_init(struct lima_device *ldev) spin_lock_init(&ldevfreq->lock); - ret = devm_pm_opp_set_config(dev, &config); + /* + * clkname is set separately so it is not affected by the optional + * regulator setting which may return error. + */ + ret = devm_pm_opp_set_clkname(dev, "core"); + if (ret) + return ret; + + ret = devm_pm_opp_set_regulators(dev, regulator_names); if (ret) { /* Continue if the optional regulator is missing */ if (ret != -ENODEV) diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig index 4e0cbd682725..3c9dfdb0b328 100644 --- a/drivers/gpu/drm/msm/Kconfig +++ b/drivers/gpu/drm/msm/Kconfig @@ -155,7 +155,7 @@ config DRM_MSM_HDMI Compile in support for the HDMI output MSM DRM driver. It can be a primary or a secondary display on device. Note that this is used only for the direct HDMI output. 
If the device outputs HDMI data - throught some kind of DSI-to-HDMI bridge, this option can be disabled. + through some kind of DSI-to-HDMI bridge, this option can be disabled. config DRM_MSM_HDMI_HDCP bool "Enable HDMI HDCP support in MSM DRM driver" diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c index 55f443328d8e..a5c3d1ed255a 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c @@ -91,7 +91,7 @@ struct a6xx_state_memobj { static void *state_kcalloc(struct a6xx_gpu_state *a6xx_state, int nr, size_t objsize) { struct a6xx_state_memobj *obj = - kzalloc((nr * objsize) + sizeof(*obj), GFP_KERNEL); + kvzalloc((nr * objsize) + sizeof(*obj), GFP_KERNEL); if (!obj) return NULL; @@ -813,6 +813,9 @@ static struct msm_gpu_state_bo *a6xx_snapshot_gmu_bo( { struct msm_gpu_state_bo *snapshot; + if (!bo->size) + return NULL; + snapshot = state_kcalloc(a6xx_state, 1, sizeof(*snapshot)); if (!snapshot) return NULL; @@ -1040,8 +1043,13 @@ static void a6xx_gpu_state_destroy(struct kref *kref) if (a6xx_state->gmu_hfi) kvfree(a6xx_state->gmu_hfi->data); - list_for_each_entry_safe(obj, tmp, &a6xx_state->objs, node) - kfree(obj); + if (a6xx_state->gmu_debug) + kvfree(a6xx_state->gmu_debug->data); + + list_for_each_entry_safe(obj, tmp, &a6xx_state->objs, node) { + list_del(&obj->node); + kvfree(obj); + } adreno_gpu_state_destroy(state); kfree(a6xx_state); diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c index 24b489b6129a..628806423f7d 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_device.c +++ b/drivers/gpu/drm/msm/adreno/adreno_device.c @@ -679,6 +679,9 @@ static int adreno_system_suspend(struct device *dev) struct msm_gpu *gpu = dev_to_gpu(dev); int remaining, ret; + if (!gpu) + return 0; + suspend_scheduler(gpu); remaining = wait_event_timeout(gpu->retire_event, @@ -700,7 +703,12 @@ out: static int adreno_system_resume(struct device *dev) { - resume_scheduler(dev_to_gpu(dev)); + struct msm_gpu *gpu = dev_to_gpu(dev); + + if (!gpu) + return 0; + + resume_scheduler(gpu); return pm_runtime_force_resume(dev); } diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c index 382fb7f9e497..5a0e8491cd3a 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c +++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c @@ -729,7 +729,12 @@ static char *adreno_gpu_ascii85_encode(u32 *src, size_t len) return buf; } -/* len is expected to be in bytes */ +/* len is expected to be in bytes + * + * WARNING: *ptr should be allocated with kvmalloc or friends. 
It can be free'd + * with kvfree() and replaced with a newly kvmalloc'd buffer on the first call + * when the unencoded raw data is encoded + */ void adreno_show_object(struct drm_printer *p, void **ptr, int len, bool *encoded) { diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_connector.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_connector.c index 7288041dd86a..7444b75c4215 100644 --- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_connector.c +++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_lvds_connector.c @@ -56,8 +56,9 @@ static int mdp4_lvds_connector_get_modes(struct drm_connector *connector) return ret; } -static int mdp4_lvds_connector_mode_valid(struct drm_connector *connector, - struct drm_display_mode *mode) +static enum drm_mode_status +mdp4_lvds_connector_mode_valid(struct drm_connector *connector, + struct drm_display_mode *mode) { struct mdp4_lvds_connector *mdp4_lvds_connector = to_mdp4_lvds_connector(connector); diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.c b/drivers/gpu/drm/msm/dp/dp_ctrl.c index 3854c9f1f7e9..dd26ca651a05 100644 --- a/drivers/gpu/drm/msm/dp/dp_ctrl.c +++ b/drivers/gpu/drm/msm/dp/dp_ctrl.c @@ -1243,8 +1243,7 @@ static int dp_ctrl_link_train(struct dp_ctrl_private *ctrl, { int ret = 0; const u8 *dpcd = ctrl->panel->dpcd; - u8 encoding = DP_SET_ANSI_8B10B; - u8 ssc; + u8 encoding[] = { 0, DP_SET_ANSI_8B10B }; u8 assr; struct dp_link_info link_info = {0}; @@ -1256,13 +1255,11 @@ static int dp_ctrl_link_train(struct dp_ctrl_private *ctrl, dp_aux_link_configure(ctrl->aux, &link_info); - if (drm_dp_max_downspread(dpcd)) { - ssc = DP_SPREAD_AMP_0_5; - drm_dp_dpcd_write(ctrl->aux, DP_DOWNSPREAD_CTRL, &ssc, 1); - } + if (drm_dp_max_downspread(dpcd)) + encoding[0] |= DP_SPREAD_AMP_0_5; - drm_dp_dpcd_write(ctrl->aux, DP_MAIN_LINK_CHANNEL_CODING_SET, - &encoding, 1); + /* config DOWNSPREAD_CTRL and MAIN_LINK_CHANNEL_CODING_SET */ + drm_dp_dpcd_write(ctrl->aux, DP_DOWNSPREAD_CTRL, encoding, 2); if (drm_dp_alternate_scrambler_reset_cap(dpcd)) { assr = DP_ALTERNATE_SCRAMBLER_RESET_ENABLE; diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c index bfd0aeff3f0d..a49f6dbbe888 100644 --- a/drivers/gpu/drm/msm/dp/dp_display.c +++ b/drivers/gpu/drm/msm/dp/dp_display.c @@ -1249,7 +1249,7 @@ int dp_display_request_irq(struct msm_dp *dp_display) return -EINVAL; } - rc = devm_request_irq(&dp->pdev->dev, dp->irq, + rc = devm_request_irq(dp_display->drm_dev->dev, dp->irq, dp_display_irq_handler, IRQF_TRIGGER_HIGH, "dp_display_isr", dp); if (rc < 0) { @@ -1528,6 +1528,11 @@ void msm_dp_debugfs_init(struct msm_dp *dp_display, struct drm_minor *minor) } } +static void of_dp_aux_depopulate_bus_void(void *data) +{ + of_dp_aux_depopulate_bus(data); +} + static int dp_display_get_next_bridge(struct msm_dp *dp) { int rc; @@ -1552,10 +1557,16 @@ static int dp_display_get_next_bridge(struct msm_dp *dp) * panel driver is probed asynchronously but is the best we * can do without a bigger driver reorganization. */ - rc = devm_of_dp_aux_populate_ep_devices(dp_priv->aux); + rc = of_dp_aux_populate_bus(dp_priv->aux, NULL); of_node_put(aux_bus); if (rc) goto error; + + rc = devm_add_action_or_reset(dp->drm_dev->dev, + of_dp_aux_depopulate_bus_void, + dp_priv->aux); + if (rc) + goto error; } else if (dp->is_edp) { DRM_ERROR("eDP aux_bus not found\n"); return -ENODEV; @@ -1568,7 +1579,7 @@ static int dp_display_get_next_bridge(struct msm_dp *dp) * For DisplayPort interfaces external bridges are optional, so * silently ignore an error if one is not present (-ENODEV). 
*/ - rc = dp_parser_find_next_bridge(dp_priv->parser); + rc = devm_dp_parser_find_next_bridge(dp->drm_dev->dev, dp_priv->parser); if (!dp->is_edp && rc == -ENODEV) return 0; @@ -1597,6 +1608,12 @@ int msm_dp_modeset_init(struct msm_dp *dp_display, struct drm_device *dev, return -EINVAL; priv = dev->dev_private; + + if (priv->num_bridges == ARRAY_SIZE(priv->bridges)) { + DRM_DEV_ERROR(dev->dev, "too many bridges\n"); + return -ENOSPC; + } + dp_display->drm_dev = dev; dp_priv = container_of(dp_display, struct dp_display_private, dp_display); diff --git a/drivers/gpu/drm/msm/dp/dp_drm.c b/drivers/gpu/drm/msm/dp/dp_drm.c index 6df25f7662e7..6db82f9b03af 100644 --- a/drivers/gpu/drm/msm/dp/dp_drm.c +++ b/drivers/gpu/drm/msm/dp/dp_drm.c @@ -31,6 +31,36 @@ static enum drm_connector_status dp_bridge_detect(struct drm_bridge *bridge) connector_status_disconnected; } +static int dp_bridge_atomic_check(struct drm_bridge *bridge, + struct drm_bridge_state *bridge_state, + struct drm_crtc_state *crtc_state, + struct drm_connector_state *conn_state) +{ + struct msm_dp *dp; + + dp = to_dp_bridge(bridge)->dp_display; + + drm_dbg_dp(dp->drm_dev, "is_connected = %s\n", + (dp->is_connected) ? "true" : "false"); + + /* + * There is no protection in the DRM framework to check if the display + * pipeline has been already disabled before trying to disable it again. + * Hence if the sink is unplugged, the pipeline gets disabled, but the + * crtc->active is still true. Any attempt to set the mode or manually + * disable this encoder will result in the crash. + * + * TODO: add support for telling the DRM subsystem that the pipeline is + * disabled by the hardware and thus all access to it should be forbidden. + * After that this piece of code can be removed. + */ + if (bridge->ops & DRM_BRIDGE_OP_HPD) + return (dp->is_connected) ? 
0 : -ENOTCONN; + + return 0; +} + + /** * dp_bridge_get_modes - callback to add drm modes via drm_mode_probed_add() * @bridge: Poiner to drm bridge @@ -61,6 +91,9 @@ static int dp_bridge_get_modes(struct drm_bridge *bridge, struct drm_connector * } static const struct drm_bridge_funcs dp_bridge_ops = { + .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state, + .atomic_reset = drm_atomic_helper_bridge_reset, .enable = dp_bridge_enable, .disable = dp_bridge_disable, .post_disable = dp_bridge_post_disable, @@ -68,6 +101,7 @@ static const struct drm_bridge_funcs dp_bridge_ops = { .mode_valid = dp_bridge_mode_valid, .get_modes = dp_bridge_get_modes, .detect = dp_bridge_detect, + .atomic_check = dp_bridge_atomic_check, }; struct drm_bridge *dp_bridge_init(struct msm_dp *dp_display, struct drm_device *dev, diff --git a/drivers/gpu/drm/msm/dp/dp_parser.c b/drivers/gpu/drm/msm/dp/dp_parser.c index dd732215d55b..dcbe893d66d7 100644 --- a/drivers/gpu/drm/msm/dp/dp_parser.c +++ b/drivers/gpu/drm/msm/dp/dp_parser.c @@ -240,12 +240,12 @@ static int dp_parser_clock(struct dp_parser *parser) return 0; } -int dp_parser_find_next_bridge(struct dp_parser *parser) +int devm_dp_parser_find_next_bridge(struct device *dev, struct dp_parser *parser) { - struct device *dev = &parser->pdev->dev; + struct platform_device *pdev = parser->pdev; struct drm_bridge *bridge; - bridge = devm_drm_of_get_bridge(dev, dev->of_node, 1, 0); + bridge = devm_drm_of_get_bridge(dev, pdev->dev.of_node, 1, 0); if (IS_ERR(bridge)) return PTR_ERR(bridge); diff --git a/drivers/gpu/drm/msm/dp/dp_parser.h b/drivers/gpu/drm/msm/dp/dp_parser.h index 866c1a82bf1a..d30ab773db46 100644 --- a/drivers/gpu/drm/msm/dp/dp_parser.h +++ b/drivers/gpu/drm/msm/dp/dp_parser.h @@ -138,8 +138,9 @@ struct dp_parser { struct dp_parser *dp_parser_get(struct platform_device *pdev); /** - * dp_parser_find_next_bridge() - find an additional bridge to DP + * devm_dp_parser_find_next_bridge() - find an additional bridge to DP * + * @dev: device to tie bridge lifetime to * @parser: dp_parser data from client * * This function is used to find any additional bridge attached to @@ -147,6 +148,6 @@ struct dp_parser *dp_parser_get(struct platform_device *pdev); * * Return: 0 if able to get the bridge, otherwise negative errno for failure. 
*/ -int dp_parser_find_next_bridge(struct dp_parser *parser); +int devm_dp_parser_find_next_bridge(struct device *dev, struct dp_parser *parser); #endif diff --git a/drivers/gpu/drm/msm/dsi/dsi.c b/drivers/gpu/drm/msm/dsi/dsi.c index 39bbabb5daf6..8a95c744972a 100644 --- a/drivers/gpu/drm/msm/dsi/dsi.c +++ b/drivers/gpu/drm/msm/dsi/dsi.c @@ -218,6 +218,12 @@ int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev, return -EINVAL; priv = dev->dev_private; + + if (priv->num_bridges == ARRAY_SIZE(priv->bridges)) { + DRM_DEV_ERROR(dev->dev, "too many bridges\n"); + return -ENOSPC; + } + msm_dsi->dev = dev; ret = msm_dsi_host_modeset_init(msm_dsi->host, dev); diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.c b/drivers/gpu/drm/msm/hdmi/hdmi.c index 93fe61b86967..f28fb21e3891 100644 --- a/drivers/gpu/drm/msm/hdmi/hdmi.c +++ b/drivers/gpu/drm/msm/hdmi/hdmi.c @@ -300,6 +300,11 @@ int msm_hdmi_modeset_init(struct hdmi *hdmi, struct platform_device *pdev = hdmi->pdev; int ret; + if (priv->num_bridges == ARRAY_SIZE(priv->bridges)) { + DRM_DEV_ERROR(dev->dev, "too many bridges\n"); + return -ENOSPC; + } + hdmi->dev = dev; hdmi->encoder = encoder; @@ -339,7 +344,7 @@ int msm_hdmi_modeset_init(struct hdmi *hdmi, goto fail; } - ret = devm_request_irq(&pdev->dev, hdmi->irq, + ret = devm_request_irq(dev->dev, hdmi->irq, msm_hdmi_irq, IRQF_TRIGGER_HIGH, "hdmi_isr", hdmi); if (ret < 0) { diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c index 28034c21f6bc..105b5b48e828 100644 --- a/drivers/gpu/drm/msm/msm_drv.c +++ b/drivers/gpu/drm/msm/msm_drv.c @@ -247,6 +247,7 @@ static int msm_drm_uninit(struct device *dev) for (i = 0; i < priv->num_bridges; i++) drm_bridge_remove(priv->bridges[i]); + priv->num_bridges = 0; pm_runtime_get_sync(dev); msm_irq_uninstall(ddev); diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c index 5599d93ec0d2..45a3e5cadc7d 100644 --- a/drivers/gpu/drm/msm/msm_gem_submit.c +++ b/drivers/gpu/drm/msm/msm_gem_submit.c @@ -501,11 +501,11 @@ out: */ static void submit_cleanup(struct msm_gem_submit *submit, bool error) { - unsigned cleanup_flags = BO_LOCKED | BO_OBJ_PINNED; + unsigned cleanup_flags = BO_LOCKED; unsigned i; if (error) - cleanup_flags |= BO_VMA_PINNED; + cleanup_flags |= BO_VMA_PINNED | BO_OBJ_PINNED; for (i = 0; i < submit->nr_bos; i++) { struct msm_gem_object *msm_obj = submit->bos[i].obj; @@ -706,7 +706,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data, struct msm_drm_private *priv = dev->dev_private; struct drm_msm_gem_submit *args = data; struct msm_file_private *ctx = file->driver_priv; - struct msm_gem_submit *submit = NULL; + struct msm_gem_submit *submit; struct msm_gpu *gpu = priv->gpu; struct msm_gpu_submitqueue *queue; struct msm_ringbuffer *ring; @@ -946,8 +946,7 @@ out_unlock: put_unused_fd(out_fence_fd); mutex_unlock(&queue->lock); out_post_unlock: - if (submit) - msm_gem_submit_put(submit); + msm_gem_submit_put(submit); if (!IS_ERR_OR_NULL(post_deps)) { for (i = 0; i < args->nr_out_syncobjs; ++i) { kfree(post_deps[i].chain); diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c index 0098ee8438aa..021f4e29b613 100644 --- a/drivers/gpu/drm/msm/msm_gpu.c +++ b/drivers/gpu/drm/msm/msm_gpu.c @@ -997,4 +997,6 @@ void msm_gpu_cleanup(struct msm_gpu *gpu) } msm_devfreq_cleanup(gpu); + + platform_set_drvdata(gpu->pdev, NULL); } diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h index ff911e7305ce..58a72e6b1400 100644 --- 
a/drivers/gpu/drm/msm/msm_gpu.h +++ b/drivers/gpu/drm/msm/msm_gpu.h @@ -280,6 +280,10 @@ struct msm_gpu { static inline struct msm_gpu *dev_to_gpu(struct device *dev) { struct adreno_smmu_priv *adreno_smmu = dev_get_drvdata(dev); + + if (!adreno_smmu) + return NULL; + return container_of(adreno_smmu, struct msm_gpu, adreno_smmu); } diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.c b/drivers/gpu/drm/msm/msm_ringbuffer.c index cad4c3525f0b..57a8e9564540 100644 --- a/drivers/gpu/drm/msm/msm_ringbuffer.c +++ b/drivers/gpu/drm/msm/msm_ringbuffer.c @@ -25,7 +25,8 @@ static struct dma_fence *msm_job_run(struct drm_sched_job *job) msm_gem_lock(obj); msm_gem_unpin_vma_fenced(submit->bos[i].vma, fctx); - submit->bos[i].flags &= ~BO_VMA_PINNED; + msm_gem_unpin_locked(obj); + submit->bos[i].flags &= ~(BO_VMA_PINNED | BO_OBJ_PINNED); msm_gem_unlock(obj); } diff --git a/drivers/gpu/drm/nouveau/nouveau_dmem.c b/drivers/gpu/drm/nouveau/nouveau_dmem.c index 5fe209107246..20fe53815b20 100644 --- a/drivers/gpu/drm/nouveau/nouveau_dmem.c +++ b/drivers/gpu/drm/nouveau/nouveau_dmem.c @@ -176,6 +176,7 @@ static vm_fault_t nouveau_dmem_migrate_to_ram(struct vm_fault *vmf) .src = &src, .dst = &dst, .pgmap_owner = drm->dev, + .fault_page = vmf->page, .flags = MIGRATE_VMA_SELECT_DEVICE_PRIVATE, }; diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c index 2944228a8e2c..8a3b685c2fcc 100644 --- a/drivers/gpu/drm/panel/panel-simple.c +++ b/drivers/gpu/drm/panel/panel-simple.c @@ -2500,6 +2500,7 @@ static const struct display_timing logictechno_lt161010_2nh_timing = { static const struct panel_desc logictechno_lt161010_2nh = { .timings = &logictechno_lt161010_2nh_timing, .num_timings = 1, + .bpc = 6, .size = { .width = 154, .height = 86, @@ -2529,6 +2530,7 @@ static const struct display_timing logictechno_lt170410_2whc_timing = { static const struct panel_desc logictechno_lt170410_2whc = { .timings = &logictechno_lt170410_2whc_timing, .num_timings = 1, + .bpc = 8, .size = { .width = 217, .height = 136, diff --git a/drivers/gpu/drm/panfrost/panfrost_dump.c b/drivers/gpu/drm/panfrost/panfrost_dump.c index 89056a1aac7d..6bd0634e2d58 100644 --- a/drivers/gpu/drm/panfrost/panfrost_dump.c +++ b/drivers/gpu/drm/panfrost/panfrost_dump.c @@ -63,13 +63,13 @@ static void panfrost_core_dump_header(struct panfrost_dump_iterator *iter, { struct panfrost_dump_object_header *hdr = iter->hdr; - hdr->magic = cpu_to_le32(PANFROSTDUMP_MAGIC); - hdr->type = cpu_to_le32(type); - hdr->file_offset = cpu_to_le32(iter->data - iter->start); - hdr->file_size = cpu_to_le32(data_end - iter->data); + hdr->magic = PANFROSTDUMP_MAGIC; + hdr->type = type; + hdr->file_offset = iter->data - iter->start; + hdr->file_size = data_end - iter->data; iter->hdr++; - iter->data += le32_to_cpu(hdr->file_size); + iter->data += hdr->file_size; } static void @@ -93,8 +93,8 @@ panfrost_core_dump_registers(struct panfrost_dump_iterator *iter, reg = panfrost_dump_registers[i] + js_as_offset; - dumpreg->reg = cpu_to_le32(reg); - dumpreg->value = cpu_to_le32(gpu_read(pfdev, reg)); + dumpreg->reg = reg; + dumpreg->value = gpu_read(pfdev, reg); } panfrost_core_dump_header(iter, PANFROSTDUMP_BUF_REG, dumpreg); @@ -106,7 +106,7 @@ void panfrost_core_dump(struct panfrost_job *job) struct panfrost_dump_iterator iter; struct drm_gem_object *dbo; unsigned int n_obj, n_bomap_pages; - __le64 *bomap, *bomap_start; + u64 *bomap, *bomap_start; size_t file_size; u32 as_nr; int slot; @@ -177,11 +177,11 @@ void panfrost_core_dump(struct panfrost_job 
*job) * For now, we write the job identifier in the register dump header, * so that we can decode the entire dump later with pandecode */ - iter.hdr->reghdr.jc = cpu_to_le64(job->jc); - iter.hdr->reghdr.major = cpu_to_le32(PANFROSTDUMP_MAJOR); - iter.hdr->reghdr.minor = cpu_to_le32(PANFROSTDUMP_MINOR); - iter.hdr->reghdr.gpu_id = cpu_to_le32(pfdev->features.id); - iter.hdr->reghdr.nbos = cpu_to_le64(job->bo_count); + iter.hdr->reghdr.jc = job->jc; + iter.hdr->reghdr.major = PANFROSTDUMP_MAJOR; + iter.hdr->reghdr.minor = PANFROSTDUMP_MINOR; + iter.hdr->reghdr.gpu_id = pfdev->features.id; + iter.hdr->reghdr.nbos = job->bo_count; panfrost_core_dump_registers(&iter, pfdev, as_nr, slot); @@ -218,27 +218,27 @@ void panfrost_core_dump(struct panfrost_job *job) WARN_ON(!mapping->active); - iter.hdr->bomap.data[0] = cpu_to_le32((bomap - bomap_start)); + iter.hdr->bomap.data[0] = bomap - bomap_start; for_each_sgtable_page(bo->base.sgt, &page_iter, 0) { struct page *page = sg_page_iter_page(&page_iter); if (!IS_ERR(page)) { - *bomap++ = cpu_to_le64(page_to_phys(page)); + *bomap++ = page_to_phys(page); } else { dev_err(pfdev->dev, "Panfrost Dump: wrong page\n"); - *bomap++ = ~cpu_to_le64(0); + *bomap++ = 0; } } - iter.hdr->bomap.iova = cpu_to_le64(mapping->mmnode.start << PAGE_SHIFT); + iter.hdr->bomap.iova = mapping->mmnode.start << PAGE_SHIFT; vaddr = map.vaddr; memcpy(iter.data, vaddr, bo->base.base.size); drm_gem_shmem_vunmap(&bo->base, &map); - iter.hdr->bomap.valid = cpu_to_le32(1); + iter.hdr->bomap.valid = 1; dump_header: panfrost_core_dump_header(&iter, PANFROSTDUMP_BUF_BO, iter.data + bo->base.base.size); diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c index e246d914e7f6..4e83a1891f3e 100644 --- a/drivers/gpu/drm/panfrost/panfrost_mmu.c +++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c @@ -250,13 +250,22 @@ void panfrost_mmu_reset(struct panfrost_device *pfdev) static size_t get_pgsize(u64 addr, size_t size, size_t *count) { + /* + * io-pgtable only operates on multiple pages within a single table + * entry, so we need to split at boundaries of the table size, i.e. + * the next block size up. The distance from address A to the next + * boundary of block size B is logically B - A % B, but in unsigned + * two's complement where B is a power of two we get the equivalence + * B - A % B == (B - A) % B == (n * B - A) % B, and choose n = 0 :) + */ size_t blk_offset = -addr % SZ_2M; if (blk_offset || size < SZ_2M) { *count = min_not_zero(blk_offset, size) / SZ_4K; return SZ_4K; } - *count = size / SZ_2M; + blk_offset = -addr % SZ_1G ?: SZ_1G; + *count = min(blk_offset, size) / SZ_2M; return SZ_2M; } diff --git a/drivers/gpu/drm/rcar-du/Kconfig b/drivers/gpu/drm/rcar-du/Kconfig index c959e8c6be7d..fd2c2eaee26b 100644 --- a/drivers/gpu/drm/rcar-du/Kconfig +++ b/drivers/gpu/drm/rcar-du/Kconfig @@ -44,13 +44,18 @@ config DRM_RCAR_LVDS select OF_FLATTREE select OF_OVERLAY -config DRM_RCAR_MIPI_DSI - tristate "R-Car DU MIPI DSI Encoder Support" - depends on DRM && DRM_BRIDGE && OF - select DRM_MIPI_DSI +config DRM_RCAR_USE_MIPI_DSI + bool "R-Car DU MIPI DSI Encoder Support" + depends on DRM_BRIDGE && OF + default DRM_RCAR_DU help Enable support for the R-Car Display Unit embedded MIPI DSI encoders. 
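
The get_pgsize() comment in the panfrost_mmu.c hunk above leans on a modular-arithmetic identity that is easy to verify in isolation. The following standalone sketch is an editorial illustration, not part of the patch: dist_to_boundary(), main(), and the sample addresses are purely illustrative, and the SZ_* constants are local stand-ins for the kernel's <linux/sizes.h> values. It shows that for a power-of-two block size B, the unsigned expression -addr % B is the distance from addr up to the next B-aligned boundary, and 0 when addr is already aligned (the case the kernel code folds into a full block via min_not_zero() and the ?: fallback).

#include <stdint.h>
#include <stdio.h>

#define SZ_2M 0x200000ULL    /* local stand-in for the kernel macro */
#define SZ_1G 0x40000000ULL  /* local stand-in for the kernel macro */

/* Distance from addr up to the next multiple of blk; blk must be a power of two. */
static uint64_t dist_to_boundary(uint64_t addr, uint64_t blk)
{
	/* -addr wraps modulo 2^64, so -addr % blk == (blk - addr % blk) % blk */
	return -addr % blk;
}

int main(void)
{
	/* 4 KiB past a 2 MiB boundary: 0x1ff000 bytes remain until the next one */
	printf("0x%llx\n", (unsigned long long)dist_to_boundary(0x201000, SZ_2M));

	/* Already 1 GiB aligned: the distance comes out as 0 */
	printf("0x%llx\n", (unsigned long long)dist_to_boundary(SZ_1G, SZ_1G));

	return 0;
}

With that in mind, the new "blk_offset = -addr % SZ_1G ?: SZ_1G" line in the hunk reads as: take the distance to the next 1 GiB table boundary, or a whole 1 GiB when already aligned, and cap the 2 MiB block count at that boundary instead of at the end of the range.
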
+config DRM_RCAR_MIPI_DSI + def_tristate DRM_RCAR_DU + depends on DRM_RCAR_USE_MIPI_DSI + select DRM_MIPI_DSI + config DRM_RCAR_VSP bool "R-Car DU VSP Compositor Support" if ARM default y if ARM64 diff --git a/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c b/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c index bf6948125b84..f4df9820b295 100644 --- a/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c +++ b/drivers/gpu/drm/rockchip/dw-mipi-dsi-rockchip.c @@ -752,7 +752,7 @@ static void dw_mipi_dsi_rockchip_config(struct dw_mipi_dsi_rockchip *dsi) static void dw_mipi_dsi_rockchip_set_lcdsel(struct dw_mipi_dsi_rockchip *dsi, int mux) { - if (dsi->cdata->lcdsel_grf_reg < 0) + if (dsi->cdata->lcdsel_grf_reg) regmap_write(dsi->grf_regmap, dsi->cdata->lcdsel_grf_reg, mux ? dsi->cdata->lcdsel_lit : dsi->cdata->lcdsel_big); } @@ -1051,23 +1051,31 @@ static int dw_mipi_dsi_rockchip_host_attach(void *priv_data, if (ret) { DRM_DEV_ERROR(dsi->dev, "Failed to register component: %d\n", ret); - return ret; + goto out; } second = dw_mipi_dsi_rockchip_find_second(dsi); - if (IS_ERR(second)) - return PTR_ERR(second); + if (IS_ERR(second)) { + ret = PTR_ERR(second); + goto out; + } if (second) { ret = component_add(second, &dw_mipi_dsi_rockchip_ops); if (ret) { DRM_DEV_ERROR(second, "Failed to register component: %d\n", ret); - return ret; + goto out; } } return 0; + +out: + mutex_lock(&dsi->usage_mutex); + dsi->usage_mode = DW_DSI_USAGE_IDLE; + mutex_unlock(&dsi->usage_mutex); + return ret; } static int dw_mipi_dsi_rockchip_host_detach(void *priv_data, @@ -1635,7 +1643,6 @@ static const struct rockchip_dw_dsi_chip_data rk3399_chip_data[] = { static const struct rockchip_dw_dsi_chip_data rk3568_chip_data[] = { { .reg = 0xfe060000, - .lcdsel_grf_reg = -1, .lanecfg1_grf_reg = RK3568_GRF_VO_CON2, .lanecfg1 = HIWORD_UPDATE(0, RK3568_DSI0_SKEWCALHS | RK3568_DSI0_FORCETXSTOPMODE | @@ -1645,7 +1652,6 @@ static const struct rockchip_dw_dsi_chip_data rk3568_chip_data[] = { }, { .reg = 0xfe070000, - .lcdsel_grf_reg = -1, .lanecfg1_grf_reg = RK3568_GRF_VO_CON3, .lanecfg1 = HIWORD_UPDATE(0, RK3568_DSI1_SKEWCALHS | RK3568_DSI1_FORCETXSTOPMODE | @@ -1681,5 +1687,11 @@ struct platform_driver dw_mipi_dsi_rockchip_driver = { .of_match_table = dw_mipi_dsi_rockchip_dt_ids, .pm = &dw_mipi_dsi_rockchip_pm_ops, .name = "dw-mipi-dsi-rockchip", + /* + * For dual-DSI display, one DSI pokes at the other DSI's + * drvdata in dw_mipi_dsi_rockchip_find_second(). This is not + * safe for asynchronous probe. 
+ */ + .probe_type = PROBE_FORCE_SYNCHRONOUS, }, }; diff --git a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c index c14f88893868..2f4b8f64cbad 100644 --- a/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c +++ b/drivers/gpu/drm/rockchip/dw_hdmi-rockchip.c @@ -565,7 +565,8 @@ static int dw_hdmi_rockchip_bind(struct device *dev, struct device *master, ret = rockchip_hdmi_parse_dt(hdmi); if (ret) { - DRM_DEV_ERROR(hdmi->dev, "Unable to parse OF data\n"); + if (ret != -EPROBE_DEFER) + DRM_DEV_ERROR(hdmi->dev, "Unable to parse OF data\n"); return ret; } diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c index 614e97aaac80..da8a69953706 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c @@ -364,9 +364,12 @@ rockchip_gem_create_with_handle(struct drm_file *file_priv, { struct rockchip_gem_object *rk_obj; struct drm_gem_object *obj; + bool is_framebuffer; int ret; - rk_obj = rockchip_gem_create_object(drm, size, false); + is_framebuffer = drm->fb_helper && file_priv == drm->fb_helper->client.file; + + rk_obj = rockchip_gem_create_object(drm, size, is_framebuffer); if (IS_ERR(rk_obj)) return ERR_CAST(rk_obj); diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c index aac20be5ac08..105a548d0abe 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop2.c @@ -877,10 +877,14 @@ static void vop2_crtc_atomic_disable(struct drm_crtc *crtc, { struct vop2_video_port *vp = to_vop2_video_port(crtc); struct vop2 *vop2 = vp->vop2; + struct drm_crtc_state *old_crtc_state; int ret; vop2_lock(vop2); + old_crtc_state = drm_atomic_get_old_crtc_state(state, crtc); + drm_atomic_helper_disable_planes_on_crtc(old_crtc_state, false); + drm_crtc_vblank_off(crtc); /* @@ -996,13 +1000,15 @@ static int vop2_plane_atomic_check(struct drm_plane *plane, static void vop2_plane_atomic_disable(struct drm_plane *plane, struct drm_atomic_state *state) { - struct drm_plane_state *old_pstate = drm_atomic_get_old_plane_state(state, plane); + struct drm_plane_state *old_pstate = NULL; struct vop2_win *win = to_vop2_win(plane); struct vop2 *vop2 = win->vop2; drm_dbg(vop2->drm, "%s disable\n", win->data->name); - if (!old_pstate->crtc) + if (state) + old_pstate = drm_atomic_get_old_plane_state(state, plane); + if (old_pstate && !old_pstate->crtc) return; vop2_win_disable(win); diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c index 6b25b2f4f5a3..4b913dbb7d7b 100644 --- a/drivers/gpu/drm/scheduler/sched_entity.c +++ b/drivers/gpu/drm/scheduler/sched_entity.c @@ -207,6 +207,7 @@ static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f, struct drm_sched_job *job = container_of(cb, struct drm_sched_job, finish_cb); + dma_fence_put(f); INIT_WORK(&job->work, drm_sched_entity_kill_jobs_work); schedule_work(&job->work); } @@ -234,8 +235,10 @@ static void drm_sched_entity_kill_jobs(struct drm_sched_entity *entity) struct drm_sched_fence *s_fence = job->s_fence; /* Wait for all dependencies to avoid data corruptions */ - while ((f = drm_sched_job_dependency(job, entity))) + while ((f = drm_sched_job_dependency(job, entity))) { dma_fence_wait(f, false); + dma_fence_put(f); + } drm_sched_fence_scheduled(s_fence); dma_fence_set_error(&s_fence->finished, -ESRCH); @@ -250,6 +253,7 @@ static void drm_sched_entity_kill_jobs(struct drm_sched_entity *entity) 
continue; } + dma_fence_get(entity->last_scheduled); r = dma_fence_add_callback(entity->last_scheduled, &job->finish_cb, drm_sched_entity_kill_jobs_cb); @@ -385,7 +389,8 @@ static bool drm_sched_entity_add_dependency_cb(struct drm_sched_entity *entity) } s_fence = to_drm_sched_fence(fence); - if (s_fence && s_fence->sched == sched) { + if (s_fence && s_fence->sched == sched && + !test_bit(DRM_SCHED_FENCE_DONT_PIPELINE, &fence->flags)) { /* * Fence is from the same scheduler, only need to wait for diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c index 6748ec1e0005..a1f909dac89a 100644 --- a/drivers/gpu/drm/tegra/drm.c +++ b/drivers/gpu/drm/tegra/drm.c @@ -1093,6 +1093,10 @@ static bool host1x_drm_wants_iommu(struct host1x_device *dev) struct host1x *host1x = dev_get_drvdata(dev->dev.parent); struct iommu_domain *domain; + /* Our IOMMU usage policy doesn't currently play well with GART */ + if (of_machine_is_compatible("nvidia,tegra20")) + return false; + /* * If the Tegra DRM clients are backed by an IOMMU, push buffers are * likely to be allocated beyond the 32-bit boundary if sufficient diff --git a/drivers/gpu/drm/tests/drm_format_helper_test.c b/drivers/gpu/drm/tests/drm_format_helper_test.c index 8d86c250c2ec..2191e57f2297 100644 --- a/drivers/gpu/drm/tests/drm_format_helper_test.c +++ b/drivers/gpu/drm/tests/drm_format_helper_test.c @@ -438,7 +438,7 @@ static void drm_test_fb_xrgb8888_to_xrgb2101010(struct kunit *test) iosys_map_set_vaddr(&src, xrgb8888); drm_fb_xrgb8888_to_xrgb2101010(&dst, &result->dst_pitch, &src, &fb, &params->clip); - buf = le32buf_to_cpu(test, buf, TEST_BUF_SIZE); + buf = le32buf_to_cpu(test, buf, dst_size / sizeof(u32)); KUNIT_EXPECT_EQ(test, memcmp(buf, result->expected, dst_size), 0); } diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c index ffbbb454c9e8..8c329c071c62 100644 --- a/drivers/gpu/drm/vc4/vc4_drv.c +++ b/drivers/gpu/drm/vc4/vc4_drv.c @@ -476,7 +476,12 @@ static int __init vc4_drm_register(void) if (ret) return ret; - return platform_driver_register(&vc4_platform_driver); + ret = platform_driver_register(&vc4_platform_driver); + if (ret) + platform_unregister_drivers(component_drivers, + ARRAY_SIZE(component_drivers)); + + return ret; } static void __exit vc4_drm_unregister(void) @@ -490,6 +495,7 @@ module_init(vc4_drm_register); module_exit(vc4_drm_unregister); MODULE_ALIAS("platform:vc4-drm"); +MODULE_SOFTDEP("pre: snd-soc-hdmi-codec"); MODULE_DESCRIPTION("Broadcom VC4 DRM Driver"); MODULE_AUTHOR("Eric Anholt <eric@anholt.net>"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c index 64f9feabf43e..470432c8fd70 100644 --- a/drivers/gpu/drm/vc4/vc4_hdmi.c +++ b/drivers/gpu/drm/vc4/vc4_hdmi.c @@ -349,27 +349,40 @@ static int vc4_hdmi_reset_link(struct drm_connector *connector, if (!crtc_state->active) return 0; - if (!vc4_hdmi_supports_scrambling(encoder)) + mutex_lock(&vc4_hdmi->mutex); + + if (!vc4_hdmi_supports_scrambling(encoder)) { + mutex_unlock(&vc4_hdmi->mutex); return 0; + } scrambling_needed = vc4_hdmi_mode_needs_scrambling(&vc4_hdmi->saved_adjusted_mode, vc4_hdmi->output_bpc, vc4_hdmi->output_format); - if (!scrambling_needed) + if (!scrambling_needed) { + mutex_unlock(&vc4_hdmi->mutex); return 0; + } if (conn_state->commit && - !try_wait_for_completion(&conn_state->commit->hw_done) + !try_wait_for_completion(&conn_state->commit->hw_done)) { + mutex_unlock(&vc4_hdmi->mutex); return 0; + } ret = drm_scdc_readb(connector->ddc, SCDC_TMDS_CONFIG,
&config); if (ret < 0) { drm_err(drm, "Failed to read TMDS config: %d\n", ret); + mutex_unlock(&vc4_hdmi->mutex); return 0; } - if (!!(config & SCDC_SCRAMBLING_ENABLE) == scrambling_needed) + if (!!(config & SCDC_SCRAMBLING_ENABLE) == scrambling_needed) { + mutex_unlock(&vc4_hdmi->mutex); return 0; + } + + mutex_unlock(&vc4_hdmi->mutex); /* * HDMI 2.0 says that one should not send scrambled data @@ -397,9 +410,8 @@ static void vc4_hdmi_handle_hotplug(struct vc4_hdmi *vc4_hdmi, * .adap_enable, which leads to that funtion being called with * our mutex held. * - * A similar situation occurs with - * drm_atomic_helper_connector_hdmi_reset_link() that will call - * into our KMS hooks if the scrambling was enabled. + * A similar situation occurs with vc4_hdmi_reset_link() that + * will call into our KMS hooks if the scrambling was enabled. * * Concurrency isn't an issue at the moment since we don't share * any state with any of the other frameworks so we can ignore @@ -3160,9 +3172,16 @@ static int vc4_hdmi_init_resources(struct drm_device *drm, DRM_ERROR("Failed to get HDMI state machine clock\n"); return PTR_ERR(vc4_hdmi->hsm_clock); } + vc4_hdmi->audio_clock = vc4_hdmi->hsm_clock; vc4_hdmi->cec_clock = vc4_hdmi->hsm_clock; + vc4_hdmi->hsm_rpm_clock = devm_clk_get(dev, "hdmi"); + if (IS_ERR(vc4_hdmi->hsm_rpm_clock)) { + DRM_ERROR("Failed to get HDMI state machine clock\n"); + return PTR_ERR(vc4_hdmi->hsm_rpm_clock); + } + return 0; } @@ -3245,6 +3264,12 @@ static int vc5_hdmi_init_resources(struct drm_device *drm, return PTR_ERR(vc4_hdmi->hsm_clock); } + vc4_hdmi->hsm_rpm_clock = devm_clk_get(dev, "hdmi"); + if (IS_ERR(vc4_hdmi->hsm_rpm_clock)) { + DRM_ERROR("Failed to get HDMI state machine clock\n"); + return PTR_ERR(vc4_hdmi->hsm_rpm_clock); + } + vc4_hdmi->pixel_bvb_clock = devm_clk_get(dev, "bvb"); if (IS_ERR(vc4_hdmi->pixel_bvb_clock)) { DRM_ERROR("Failed to get pixel bvb clock\n"); @@ -3308,7 +3333,7 @@ static int vc4_hdmi_runtime_suspend(struct device *dev) { struct vc4_hdmi *vc4_hdmi = dev_get_drvdata(dev); - clk_disable_unprepare(vc4_hdmi->hsm_clock); + clk_disable_unprepare(vc4_hdmi->hsm_rpm_clock); return 0; } @@ -3318,12 +3343,37 @@ static int vc4_hdmi_runtime_resume(struct device *dev) struct vc4_hdmi *vc4_hdmi = dev_get_drvdata(dev); unsigned long __maybe_unused flags; u32 __maybe_unused value; + unsigned long rate; int ret; - ret = clk_prepare_enable(vc4_hdmi->hsm_clock); + /* + * The HSM clock is in the HDMI power domain, so we need to set + * its frequency while the power domain is active so that it + * keeps its rate. + */ + ret = clk_set_min_rate(vc4_hdmi->hsm_rpm_clock, HSM_MIN_CLOCK_FREQ); + if (ret) + return ret; + + ret = clk_prepare_enable(vc4_hdmi->hsm_rpm_clock); if (ret) return ret; + /* + * Whenever the RaspberryPi boots without an HDMI monitor + * plugged in, the firmware won't have initialized the HSM clock + * rate and it will be reported as 0. + * + * If we try to access a register of the controller in such a + * case, it will lead to a silent CPU stall. Let's make sure we + * prevent such a case. 
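The comment closes just below, and the code that follows implements what it describes: after enabling the clock, read the rate back and refuse to touch the controller if the firmware left it at zero. A rough sketch of that enable-then-verify pattern for a generic clk consumer (the demo_* helper is hypothetical):

```c
#include <linux/clk.h>
#include <linux/errno.h>

/* Illustrative only: enable a clock and verify it actually has a rate. */
static int demo_enable_clk_checked(struct clk *clk, unsigned long min_rate)
{
	int ret;

	ret = clk_set_min_rate(clk, min_rate);
	if (ret)
		return ret;

	ret = clk_prepare_enable(clk);
	if (ret)
		return ret;

	/* A rate of 0 means nothing ever programmed the clock. */
	if (!clk_get_rate(clk)) {
		clk_disable_unprepare(clk);
		return -EINVAL;
	}

	return 0;
}
```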
+ */ + rate = clk_get_rate(vc4_hdmi->hsm_rpm_clock); + if (!rate) { + ret = -EINVAL; + goto err_disable_clk; + } + if (vc4_hdmi->variant->reset) vc4_hdmi->variant->reset(vc4_hdmi); @@ -3345,6 +3395,10 @@ static int vc4_hdmi_runtime_resume(struct device *dev) #endif return 0; + +err_disable_clk: + clk_disable_unprepare(vc4_hdmi->hsm_clock); + return ret; } static void vc4_hdmi_put_ddc_device(void *ptr) diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.h b/drivers/gpu/drm/vc4/vc4_hdmi.h index db823efb2563..1ad8e8c377e2 100644 --- a/drivers/gpu/drm/vc4/vc4_hdmi.h +++ b/drivers/gpu/drm/vc4/vc4_hdmi.h @@ -172,6 +172,7 @@ struct vc4_hdmi { struct clk *cec_clock; struct clk *pixel_clock; struct clk *hsm_clock; + struct clk *hsm_rpm_clock; struct clk *audio_clock; struct clk *pixel_bvb_clock; diff --git a/drivers/gpu/drm/vc4/vc4_kms.c b/drivers/gpu/drm/vc4/vc4_kms.c index 4419e810103d..0a6347c05df4 100644 --- a/drivers/gpu/drm/vc4/vc4_kms.c +++ b/drivers/gpu/drm/vc4/vc4_kms.c @@ -197,8 +197,8 @@ vc4_hvs_get_new_global_state(struct drm_atomic_state *state) struct drm_private_state *priv_state; priv_state = drm_atomic_get_new_private_obj_state(state, &vc4->hvs_channels); - if (IS_ERR(priv_state)) - return ERR_CAST(priv_state); + if (!priv_state) + return ERR_PTR(-EINVAL); return to_vc4_hvs_state(priv_state); } @@ -210,8 +210,8 @@ vc4_hvs_get_old_global_state(struct drm_atomic_state *state) struct drm_private_state *priv_state; priv_state = drm_atomic_get_old_private_obj_state(state, &vc4->hvs_channels); - if (IS_ERR(priv_state)) - return ERR_CAST(priv_state); + if (!priv_state) + return ERR_PTR(-EINVAL); return to_vc4_hvs_state(priv_state); } diff --git a/drivers/gpu/host1x/dev.c b/drivers/gpu/host1x/dev.c index 0cd3f97e7e49..f60ea24db0ec 100644 --- a/drivers/gpu/host1x/dev.c +++ b/drivers/gpu/host1x/dev.c @@ -292,6 +292,10 @@ static void host1x_setup_virtualization_tables(struct host1x *host) static bool host1x_wants_iommu(struct host1x *host1x) { + /* Our IOMMU usage policy doesn't currently play well with GART */ + if (of_machine_is_compatible("nvidia,tegra20")) + return false; + /* * If we support addressing a maximum of 32 bits of physical memory * and if the host1x firewall is enabled, there's no need to enable diff --git a/drivers/hid/hid-asus.c b/drivers/hid/hid-asus.c index b59c3dafa6a4..f99752b998f3 100644 --- a/drivers/hid/hid-asus.c +++ b/drivers/hid/hid-asus.c @@ -219,14 +219,13 @@ static void asus_report_tool_width(struct asus_drvdata *drvdat) { struct input_mt *mt = drvdat->input->mt; struct input_mt_slot *oldest; - int oldid, count, i; + int oldid, i; if (drvdat->tp->contact_size < 5) return; oldest = NULL; oldid = mt->trkid; - count = 0; for (i = 0; i < mt->num_slots; ++i) { struct input_mt_slot *ps = &mt->slots[i]; @@ -238,7 +237,6 @@ static void asus_report_tool_width(struct asus_drvdata *drvdat) oldest = ps; oldid = id; } - count++; } if (oldest) { diff --git a/drivers/hid/hid-hyperv.c b/drivers/hid/hid-hyperv.c index e0bc73124196..ab57b49a44ed 100644 --- a/drivers/hid/hid-hyperv.c +++ b/drivers/hid/hid-hyperv.c @@ -499,7 +499,7 @@ static int mousevsc_probe(struct hv_device *device, ret = hid_add_device(hid_dev); if (ret) - goto probe_err1; + goto probe_err2; ret = hid_parse(hid_dev); diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index da86565f04d4..dad953f66996 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h @@ -867,6 +867,7 @@ #define USB_DEVICE_ID_MADCATZ_BEATPAD 0x4540 #define USB_DEVICE_ID_MADCATZ_RAT5 0x1705 #define USB_DEVICE_ID_MADCATZ_RAT9 0x1709 
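Earlier in this chunk, the vc4_kms.c hunk swaps IS_ERR() checks for NULL checks because drm_atomic_get_new_private_obj_state() and drm_atomic_get_old_private_obj_state() return NULL, never an ERR_PTR, when the private object has no state in the commit. A short sketch of the corrected lookup (the wrapper name is hypothetical):

```c
#include <linux/err.h>
#include <drm/drm_atomic.h>

/* Illustrative only: translate a missing private state into an error. */
static struct drm_private_state *
demo_get_new_priv_state(struct drm_atomic_state *state,
			struct drm_private_obj *obj)
{
	struct drm_private_state *priv_state;

	priv_state = drm_atomic_get_new_private_obj_state(state, obj);
	if (!priv_state)
		return ERR_PTR(-EINVAL);

	return priv_state;
}
```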
+#define USB_DEVICE_ID_MADCATZ_MMO7 0x1713 #define USB_VENDOR_ID_MCC 0x09db #define USB_DEVICE_ID_MCC_PMD1024LS 0x0076 @@ -1142,6 +1143,7 @@ #define USB_DEVICE_ID_SONY_PS4_CONTROLLER_2 0x09cc #define USB_DEVICE_ID_SONY_PS4_CONTROLLER_DONGLE 0x0ba0 #define USB_DEVICE_ID_SONY_PS5_CONTROLLER 0x0ce6 +#define USB_DEVICE_ID_SONY_PS5_CONTROLLER_2 0x0df2 #define USB_DEVICE_ID_SONY_MOTION_CONTROLLER 0x03d5 #define USB_DEVICE_ID_SONY_NAVIGATION_CONTROLLER 0x042f #define USB_DEVICE_ID_SONY_BUZZ_CONTROLLER 0x0002 diff --git a/drivers/hid/hid-lenovo.c b/drivers/hid/hid-lenovo.c index 9dabd6323234..44763c0da444 100644 --- a/drivers/hid/hid-lenovo.c +++ b/drivers/hid/hid-lenovo.c @@ -985,7 +985,7 @@ static int lenovo_led_brightness_set(struct led_classdev *led_cdev, struct device *dev = led_cdev->dev->parent; struct hid_device *hdev = to_hid_device(dev); struct lenovo_drvdata *data_pointer = hid_get_drvdata(hdev); - u8 tp10ubkbd_led[] = { TP10UBKBD_MUTE_LED, TP10UBKBD_MICMUTE_LED }; + static const u8 tp10ubkbd_led[] = { TP10UBKBD_MUTE_LED, TP10UBKBD_MICMUTE_LED }; int led_nr = 0; int ret = 0; diff --git a/drivers/hid/hid-magicmouse.c b/drivers/hid/hid-magicmouse.c index 664a624a363d..c9c968d4b36a 100644 --- a/drivers/hid/hid-magicmouse.c +++ b/drivers/hid/hid-magicmouse.c @@ -480,7 +480,7 @@ static int magicmouse_raw_event(struct hid_device *hdev, magicmouse_raw_event(hdev, report, data + 2, data[1]); magicmouse_raw_event(hdev, report, data + 2 + data[1], size - 2 - data[1]); - break; + return 0; default: return 0; } diff --git a/drivers/hid/hid-playstation.c b/drivers/hid/hid-playstation.c index 40050eb85c0a..0b58763bfd30 100644 --- a/drivers/hid/hid-playstation.c +++ b/drivers/hid/hid-playstation.c @@ -46,6 +46,7 @@ struct ps_device { uint32_t fw_version; int (*parse_report)(struct ps_device *dev, struct hid_report *report, u8 *data, int size); + void (*remove)(struct ps_device *dev); }; /* Calibration data for playstation motion sensors. */ @@ -107,6 +108,9 @@ struct ps_led_info { #define DS_STATUS_CHARGING GENMASK(7, 4) #define DS_STATUS_CHARGING_SHIFT 4 +/* Feature version from DualSense Firmware Info report. */ +#define DS_FEATURE_VERSION(major, minor) ((major & 0xff) << 8 | (minor & 0xff)) + /* * Status of a DualSense touch point contact. * Contact IDs, with highest bit set are 'inactive' @@ -125,6 +129,7 @@ struct ps_led_info { #define DS_OUTPUT_VALID_FLAG1_RELEASE_LEDS BIT(3) #define DS_OUTPUT_VALID_FLAG1_PLAYER_INDICATOR_CONTROL_ENABLE BIT(4) #define DS_OUTPUT_VALID_FLAG2_LIGHTBAR_SETUP_CONTROL_ENABLE BIT(1) +#define DS_OUTPUT_VALID_FLAG2_COMPATIBLE_VIBRATION2 BIT(2) #define DS_OUTPUT_POWER_SAVE_CONTROL_MIC_MUTE BIT(4) #define DS_OUTPUT_LIGHTBAR_SETUP_LIGHT_OUT BIT(1) @@ -142,6 +147,9 @@ struct dualsense { struct input_dev *sensors; struct input_dev *touchpad; + /* Update version is used as a feature/capability version. */ + uint16_t update_version; + /* Calibration data for accelerometer and gyroscope. */ struct ps_calibration_data accel_calib_data[3]; struct ps_calibration_data gyro_calib_data[3]; @@ -152,6 +160,7 @@ struct dualsense { uint32_t sensor_timestamp_us; /* Compatible rumble state */ + bool use_vibration_v2; bool update_rumble; uint8_t motor_left; uint8_t motor_right; @@ -174,6 +183,7 @@ struct dualsense { struct led_classdev player_leds[5]; struct work_struct output_worker; + bool output_worker_initialized; void *output_report_dmabuf; uint8_t output_seq; /* Sequence number for output report. 
*/ }; @@ -299,6 +309,7 @@ static const struct {int x; int y; } ps_gamepad_hat_mapping[] = { {0, 0}, }; +static inline void dualsense_schedule_work(struct dualsense *ds); static void dualsense_set_lightbar(struct dualsense *ds, uint8_t red, uint8_t green, uint8_t blue); /* @@ -789,6 +800,7 @@ err_free: return ret; } + static int dualsense_get_firmware_info(struct dualsense *ds) { uint8_t *buf; @@ -808,6 +820,15 @@ static int dualsense_get_firmware_info(struct dualsense *ds) ds->base.hw_version = get_unaligned_le32(&buf[24]); ds->base.fw_version = get_unaligned_le32(&buf[28]); + /* Update version is some kind of feature version. It is distinct from + * the firmware version as there can be many different variations of a + * controller over time with the same physical shell, but with different + * PCBs and other internal changes. The update version (internal name) is + * used as a means to detect what features are available and change behavior. + * Note: the version is different between DualSense and DualSense Edge. + */ + ds->update_version = get_unaligned_le16(&buf[44]); + err_free: kfree(buf); return ret; @@ -878,7 +899,7 @@ static int dualsense_player_led_set_brightness(struct led_classdev *led, enum le ds->update_player_leds = true; spin_unlock_irqrestore(&ds->base.lock, flags); - schedule_work(&ds->output_worker); + dualsense_schedule_work(ds); return 0; } @@ -922,6 +943,16 @@ static void dualsense_init_output_report(struct dualsense *ds, struct dualsense_ } } +static inline void dualsense_schedule_work(struct dualsense *ds) +{ + unsigned long flags; + + spin_lock_irqsave(&ds->base.lock, flags); + if (ds->output_worker_initialized) + schedule_work(&ds->output_worker); + spin_unlock_irqrestore(&ds->base.lock, flags); +} + /* * Helper function to send DualSense output reports. Applies a CRC at the end of a report * for Bluetooth reports. @@ -960,7 +991,10 @@ static void dualsense_output_worker(struct work_struct *work) if (ds->update_rumble) { /* Select classic rumble style haptics and enable it. */ common->valid_flag0 |= DS_OUTPUT_VALID_FLAG0_HAPTICS_SELECT; - common->valid_flag0 |= DS_OUTPUT_VALID_FLAG0_COMPATIBLE_VIBRATION; + if (ds->use_vibration_v2) + common->valid_flag2 |= DS_OUTPUT_VALID_FLAG2_COMPATIBLE_VIBRATION2; + else + common->valid_flag0 |= DS_OUTPUT_VALID_FLAG0_COMPATIBLE_VIBRATION; common->motor_left = ds->motor_left; common->motor_right = ds->motor_right; ds->update_rumble = false; @@ -1082,7 +1116,7 @@ static int dualsense_parse_report(struct ps_device *ps_dev, struct hid_report *r spin_unlock_irqrestore(&ps_dev->lock, flags); /* Schedule updating of microphone state at hardware level. 
*/ - schedule_work(&ds->output_worker); + dualsense_schedule_work(ds); } ds->last_btn_mic_state = btn_mic_state; @@ -1197,10 +1231,22 @@ static int dualsense_play_effect(struct input_dev *dev, void *data, struct ff_ef ds->motor_right = effect->u.rumble.weak_magnitude / 256; spin_unlock_irqrestore(&ds->base.lock, flags); - schedule_work(&ds->output_worker); + dualsense_schedule_work(ds); return 0; } +static void dualsense_remove(struct ps_device *ps_dev) +{ + struct dualsense *ds = container_of(ps_dev, struct dualsense, base); + unsigned long flags; + + spin_lock_irqsave(&ds->base.lock, flags); + ds->output_worker_initialized = false; + spin_unlock_irqrestore(&ds->base.lock, flags); + + cancel_work_sync(&ds->output_worker); +} + static int dualsense_reset_leds(struct dualsense *ds) { struct dualsense_output_report report; @@ -1237,7 +1283,7 @@ static void dualsense_set_lightbar(struct dualsense *ds, uint8_t red, uint8_t gr ds->lightbar_blue = blue; spin_unlock_irqrestore(&ds->base.lock, flags); - schedule_work(&ds->output_worker); + dualsense_schedule_work(ds); } static void dualsense_set_player_leds(struct dualsense *ds) @@ -1260,7 +1306,7 @@ static void dualsense_set_player_leds(struct dualsense *ds) ds->update_player_leds = true; ds->player_leds_state = player_ids[player_id]; - schedule_work(&ds->output_worker); + dualsense_schedule_work(ds); } static struct ps_device *dualsense_create(struct hid_device *hdev) @@ -1299,7 +1345,9 @@ static struct ps_device *dualsense_create(struct hid_device *hdev) ps_dev->battery_capacity = 100; /* initial value until parse_report. */ ps_dev->battery_status = POWER_SUPPLY_STATUS_UNKNOWN; ps_dev->parse_report = dualsense_parse_report; + ps_dev->remove = dualsense_remove; INIT_WORK(&ds->output_worker, dualsense_output_worker); + ds->output_worker_initialized = true; hid_set_drvdata(hdev, ds); max_output_report_size = sizeof(struct dualsense_output_report_bt); @@ -1320,6 +1368,21 @@ static struct ps_device *dualsense_create(struct hid_device *hdev) return ERR_PTR(ret); } + /* Original DualSense firmware simulated classic controller rumble through + * its new haptics hardware. It felt different from classic rumble users + * were used to. Since then new firmwares were introduced to change behavior + * and make this new 'v2' behavior default on PlayStation and other platforms. + * The original DualSense requires a new enough firmware as bundled with PS5 + * software released in 2021. DualSense edge supports it out of the box. + * Both devices also support the old mode, but it is not really used. + */ + if (hdev->product == USB_DEVICE_ID_SONY_PS5_CONTROLLER) { + /* Feature version 2.21 introduced new vibration method. 
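The DualSense changes above gate the new rumble behaviour on a packed firmware feature version: DS_FEATURE_VERSION() folds major and minor into a 16-bit value so that the plain integer compare continued just below answers "is the firmware at least 2.21?". A tiny sketch of the same packing idea, with hypothetical names:

```c
#include <linux/types.h>

/* Same layout as DS_FEATURE_VERSION() above: major in the high byte. */
#define DEMO_FEATURE_VERSION(major, minor) \
	((((major) & 0xff) << 8) | ((minor) & 0xff))

static bool demo_has_vibration_v2(u16 update_version)
{
	return update_version >= DEMO_FEATURE_VERSION(2, 21);
}
```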
*/ + ds->use_vibration_v2 = ds->update_version >= DS_FEATURE_VERSION(2, 21); + } else if (hdev->product == USB_DEVICE_ID_SONY_PS5_CONTROLLER_2) { + ds->use_vibration_v2 = true; + } + ret = ps_devices_list_add(ps_dev); if (ret) return ERR_PTR(ret); @@ -1436,7 +1499,8 @@ static int ps_probe(struct hid_device *hdev, const struct hid_device_id *id) goto err_stop; } - if (hdev->product == USB_DEVICE_ID_SONY_PS5_CONTROLLER) { + if (hdev->product == USB_DEVICE_ID_SONY_PS5_CONTROLLER || + hdev->product == USB_DEVICE_ID_SONY_PS5_CONTROLLER_2) { dev = dualsense_create(hdev); if (IS_ERR(dev)) { hid_err(hdev, "Failed to create dualsense.\n"); @@ -1461,6 +1525,9 @@ static void ps_remove(struct hid_device *hdev) ps_devices_list_remove(dev); ps_device_release_player_id(dev); + if (dev->remove) + dev->remove(dev); + hid_hw_close(hdev); hid_hw_stop(hdev); } @@ -1468,6 +1535,8 @@ static void ps_remove(struct hid_device *hdev) static const struct hid_device_id ps_devices[] = { { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS5_CONTROLLER) }, { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS5_CONTROLLER) }, + { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS5_CONTROLLER_2) }, + { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS5_CONTROLLER_2) }, { } }; MODULE_DEVICE_TABLE(hid, ps_devices); diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c index 70f602c64fd1..50e1c717fc0a 100644 --- a/drivers/hid/hid-quirks.c +++ b/drivers/hid/hid-quirks.c @@ -620,6 +620,7 @@ static const struct hid_device_id hid_have_special_driver[] = { { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_MMO7) }, { HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_RAT5) }, { HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_RAT9) }, + { HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_MMO7) }, #endif #if IS_ENABLED(CONFIG_HID_SAMSUNG) { HID_USB_DEVICE(USB_VENDOR_ID_SAMSUNG, USB_DEVICE_ID_SAMSUNG_IR_REMOTE) }, diff --git a/drivers/hid/hid-saitek.c b/drivers/hid/hid-saitek.c index c7bf14c01960..b84e975977c4 100644 --- a/drivers/hid/hid-saitek.c +++ b/drivers/hid/hid-saitek.c @@ -187,6 +187,8 @@ static const struct hid_device_id saitek_devices[] = { .driver_data = SAITEK_RELEASE_MODE_RAT7 }, { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_MMO7), .driver_data = SAITEK_RELEASE_MODE_MMO7 }, + { HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_MMO7), + .driver_data = SAITEK_RELEASE_MODE_MMO7 }, { } }; diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c index 77486962a773..0f3d57b42684 100644 --- a/drivers/hid/wacom_wac.c +++ b/drivers/hid/wacom_wac.c @@ -2520,11 +2520,12 @@ static void wacom_wac_pen_report(struct hid_device *hdev, if (!delay_pen_events(wacom_wac) && wacom_wac->tool[0]) { int id = wacom_wac->id[0]; - if (wacom_wac->features.quirks & WACOM_QUIRK_PEN_BUTTON3 && - wacom_wac->hid_data.barrelswitch & wacom_wac->hid_data.barrelswitch2) { - wacom_wac->hid_data.barrelswitch = 0; - wacom_wac->hid_data.barrelswitch2 = 0; - wacom_wac->hid_data.barrelswitch3 = 1; + if (wacom_wac->features.quirks & WACOM_QUIRK_PEN_BUTTON3) { + int sw_state = wacom_wac->hid_data.barrelswitch | + (wacom_wac->hid_data.barrelswitch2 << 1); + wacom_wac->hid_data.barrelswitch = sw_state == 1; + wacom_wac->hid_data.barrelswitch2 = sw_state == 2; + wacom_wac->hid_data.barrelswitch3 = sw_state == 3; } input_report_key(input, BTN_STYLUS, wacom_wac->hid_data.barrelswitch); input_report_key(input, BTN_STYLUS2, 
wacom_wac->hid_data.barrelswitch2); diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c index fdf6decacf06..6c127f061f06 100644 --- a/drivers/hv/hv_balloon.c +++ b/drivers/hv/hv_balloon.c @@ -905,7 +905,7 @@ static unsigned long handle_pg_range(unsigned long pg_start, * We have some residual hot add range * that needs to be hot added; hot add * it now. Hot add a multiple of - * of HA_CHUNK that fully covers the pages + * HA_CHUNK that fully covers the pages * we have. */ size = (has->end_pfn - has->ha_end_pfn); diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c index ccf0af5b988a..8bf32c6c85d9 100644 --- a/drivers/hwmon/coretemp.c +++ b/drivers/hwmon/coretemp.c @@ -46,9 +46,6 @@ MODULE_PARM_DESC(tjmax, "TjMax value in degrees Celsius"); #define TOTAL_ATTRS (MAX_CORE_ATTRS + 1) #define MAX_CORE_DATA (NUM_REAL_CORES + BASE_SYSFS_ATTR_NO) -#define TO_CORE_ID(cpu) (cpu_data(cpu).cpu_core_id) -#define TO_ATTR_NO(cpu) (TO_CORE_ID(cpu) + BASE_SYSFS_ATTR_NO) - #ifdef CONFIG_SMP #define for_each_sibling(i, cpu) \ for_each_cpu(i, topology_sibling_cpumask(cpu)) @@ -91,6 +88,8 @@ struct temp_data { struct platform_data { struct device *hwmon_dev; u16 pkg_id; + u16 cpu_map[NUM_REAL_CORES]; + struct ida ida; struct cpumask cpumask; struct temp_data *core_data[MAX_CORE_DATA]; struct device_attribute name_attr; @@ -441,7 +440,7 @@ static struct temp_data *init_temp_data(unsigned int cpu, int pkg_flag) MSR_IA32_THERM_STATUS; tdata->is_pkg_data = pkg_flag; tdata->cpu = cpu; - tdata->cpu_core_id = TO_CORE_ID(cpu); + tdata->cpu_core_id = topology_core_id(cpu); tdata->attr_size = MAX_CORE_ATTRS; mutex_init(&tdata->update_lock); return tdata; @@ -454,7 +453,7 @@ static int create_core_data(struct platform_device *pdev, unsigned int cpu, struct platform_data *pdata = platform_get_drvdata(pdev); struct cpuinfo_x86 *c = &cpu_data(cpu); u32 eax, edx; - int err, attr_no; + int err, index, attr_no; /* * Find attr number for sysfs: @@ -462,14 +461,26 @@ static int create_core_data(struct platform_device *pdev, unsigned int cpu, * The attr number is always core id + 2 * The Pkgtemp will always show up as temp1_*, if available */ - attr_no = pkg_flag ? 
PKG_SYSFS_ATTR_NO : TO_ATTR_NO(cpu); + if (pkg_flag) { + attr_no = PKG_SYSFS_ATTR_NO; + } else { + index = ida_alloc(&pdata->ida, GFP_KERNEL); + if (index < 0) + return index; + pdata->cpu_map[index] = topology_core_id(cpu); + attr_no = index + BASE_SYSFS_ATTR_NO; + } - if (attr_no > MAX_CORE_DATA - 1) - return -ERANGE; + if (attr_no > MAX_CORE_DATA - 1) { + err = -ERANGE; + goto ida_free; + } tdata = init_temp_data(cpu, pkg_flag); - if (!tdata) - return -ENOMEM; + if (!tdata) { + err = -ENOMEM; + goto ida_free; + } /* Test if we can access the status register */ err = rdmsr_safe_on_cpu(cpu, tdata->status_reg, &eax, &edx); @@ -505,6 +516,9 @@ static int create_core_data(struct platform_device *pdev, unsigned int cpu, exit_free: pdata->core_data[attr_no] = NULL; kfree(tdata); +ida_free: + if (!pkg_flag) + ida_free(&pdata->ida, index); return err; } @@ -524,6 +538,9 @@ static void coretemp_remove_core(struct platform_data *pdata, int indx) kfree(pdata->core_data[indx]); pdata->core_data[indx] = NULL; + + if (indx >= BASE_SYSFS_ATTR_NO) + ida_free(&pdata->ida, indx - BASE_SYSFS_ATTR_NO); } static int coretemp_probe(struct platform_device *pdev) @@ -537,6 +554,7 @@ static int coretemp_probe(struct platform_device *pdev) return -ENOMEM; pdata->pkg_id = pdev->id; + ida_init(&pdata->ida); platform_set_drvdata(pdev, pdata); pdata->hwmon_dev = devm_hwmon_device_register_with_groups(dev, DRVNAME, @@ -553,6 +571,7 @@ static int coretemp_remove(struct platform_device *pdev) if (pdata->core_data[i]) coretemp_remove_core(pdata, i); + ida_destroy(&pdata->ida); return 0; } @@ -647,7 +666,7 @@ static int coretemp_cpu_offline(unsigned int cpu) struct platform_device *pdev = coretemp_get_pdev(cpu); struct platform_data *pd; struct temp_data *tdata; - int indx, target; + int i, indx = -1, target; /* * Don't execute this on suspend as the device remove locks @@ -660,12 +679,19 @@ static int coretemp_cpu_offline(unsigned int cpu) if (!pdev) return 0; - /* The core id is too big, just return */ - indx = TO_ATTR_NO(cpu); - if (indx > MAX_CORE_DATA - 1) + pd = platform_get_drvdata(pdev); + + for (i = 0; i < NUM_REAL_CORES; i++) { + if (pd->cpu_map[i] == topology_core_id(cpu)) { + indx = i + BASE_SYSFS_ATTR_NO; + break; + } + } + + /* Too many cores and this core is not populated, just return */ + if (indx < 0) return 0; - pd = platform_get_drvdata(pdev); tdata = pd->core_data[indx]; cpumask_clear_cpu(cpu, &pd->cpumask); diff --git a/drivers/hwmon/corsair-psu.c b/drivers/hwmon/corsair-psu.c index 345d883ab044..2210aa62e3d0 100644 --- a/drivers/hwmon/corsair-psu.c +++ b/drivers/hwmon/corsair-psu.c @@ -820,7 +820,8 @@ static const struct hid_device_id corsairpsu_idtable[] = { { HID_USB_DEVICE(0x1b1c, 0x1c0b) }, /* Corsair RM750i */ { HID_USB_DEVICE(0x1b1c, 0x1c0c) }, /* Corsair RM850i */ { HID_USB_DEVICE(0x1b1c, 0x1c0d) }, /* Corsair RM1000i */ - { HID_USB_DEVICE(0x1b1c, 0x1c1e) }, /* Corsaur HX1000i revision 2 */ + { HID_USB_DEVICE(0x1b1c, 0x1c1e) }, /* Corsair HX1000i revision 2 */ + { HID_USB_DEVICE(0x1b1c, 0x1c1f) }, /* Corsair HX1500i */ { }, }; MODULE_DEVICE_TABLE(hid, corsairpsu_idtable); diff --git a/drivers/hwmon/pmbus/pmbus.h b/drivers/hwmon/pmbus/pmbus.h index 7daaf0caf4d3..10fb17879f8e 100644 --- a/drivers/hwmon/pmbus/pmbus.h +++ b/drivers/hwmon/pmbus/pmbus.h @@ -467,7 +467,6 @@ extern const struct regulator_ops pmbus_regulator_ops; #define PMBUS_REGULATOR_STEP(_name, _id, _voltages, _step) \ [_id] = { \ .name = (_name # _id), \ - .supply_name = "vin", \ .id = (_id), \ .of_match = of_match_ptr(_name # 
_id), \ .regulators_node = of_match_ptr("regulators"), \ diff --git a/drivers/hwmon/pwm-fan.c b/drivers/hwmon/pwm-fan.c index dc3d9a22d917..83a347ca35da 100644 --- a/drivers/hwmon/pwm-fan.c +++ b/drivers/hwmon/pwm-fan.c @@ -257,7 +257,10 @@ static int pwm_fan_update_enable(struct pwm_fan_ctx *ctx, long val) if (val == 0) { /* Disable pwm-fan unconditionally */ - ret = __set_pwm(ctx, 0); + if (ctx->enabled) + ret = __set_pwm(ctx, 0); + else + ret = pwm_fan_switch_power(ctx, false); if (ret) ctx->enable_mode = old_val; pwm_fan_update_state(ctx, 0); diff --git a/drivers/hwmon/scmi-hwmon.c b/drivers/hwmon/scmi-hwmon.c index b1329a58ce40..e192f0c67146 100644 --- a/drivers/hwmon/scmi-hwmon.c +++ b/drivers/hwmon/scmi-hwmon.c @@ -20,6 +20,11 @@ struct scmi_sensors { const struct scmi_sensor_info **info[hwmon_max]; }; +struct scmi_thermal_sensor { + const struct scmi_protocol_handle *ph; + const struct scmi_sensor_info *info; +}; + static inline u64 __pow10(u8 x) { u64 r = 1; @@ -64,16 +69,14 @@ static int scmi_hwmon_scale(const struct scmi_sensor_info *sensor, u64 *value) return 0; } -static int scmi_hwmon_read(struct device *dev, enum hwmon_sensor_types type, - u32 attr, int channel, long *val) +static int scmi_hwmon_read_scaled_value(const struct scmi_protocol_handle *ph, + const struct scmi_sensor_info *sensor, + long *val) { int ret; u64 value; - const struct scmi_sensor_info *sensor; - struct scmi_sensors *scmi_sensors = dev_get_drvdata(dev); - sensor = *(scmi_sensors->info[type] + channel); - ret = sensor_ops->reading_get(scmi_sensors->ph, sensor->id, &value); + ret = sensor_ops->reading_get(ph, sensor->id, &value); if (ret) return ret; @@ -84,6 +87,17 @@ static int scmi_hwmon_read(struct device *dev, enum hwmon_sensor_types type, return ret; } +static int scmi_hwmon_read(struct device *dev, enum hwmon_sensor_types type, + u32 attr, int channel, long *val) +{ + const struct scmi_sensor_info *sensor; + struct scmi_sensors *scmi_sensors = dev_get_drvdata(dev); + + sensor = *(scmi_sensors->info[type] + channel); + + return scmi_hwmon_read_scaled_value(scmi_sensors->ph, sensor, val); +} + static int scmi_hwmon_read_string(struct device *dev, enum hwmon_sensor_types type, u32 attr, int channel, const char **str) @@ -122,6 +136,25 @@ static struct hwmon_chip_info scmi_chip_info = { .info = NULL, }; +static int scmi_hwmon_thermal_get_temp(struct thermal_zone_device *tz, + int *temp) +{ + int ret; + long value; + struct scmi_thermal_sensor *th_sensor = tz->devdata; + + ret = scmi_hwmon_read_scaled_value(th_sensor->ph, th_sensor->info, + &value); + if (!ret) + *temp = value; + + return ret; +} + +static const struct thermal_zone_device_ops scmi_hwmon_thermal_ops = { + .get_temp = scmi_hwmon_thermal_get_temp, +}; + static int scmi_hwmon_add_chan_info(struct hwmon_channel_info *scmi_hwmon_chan, struct device *dev, int num, enum hwmon_sensor_types type, u32 config) @@ -149,7 +182,6 @@ static enum hwmon_sensor_types scmi_types[] = { }; static u32 hwmon_attributes[hwmon_max] = { - [hwmon_chip] = HWMON_C_REGISTER_TZ, [hwmon_temp] = HWMON_T_INPUT | HWMON_T_LABEL, [hwmon_in] = HWMON_I_INPUT | HWMON_I_LABEL, [hwmon_curr] = HWMON_C_INPUT | HWMON_C_LABEL, @@ -157,6 +189,43 @@ static u32 hwmon_attributes[hwmon_max] = { [hwmon_energy] = HWMON_E_INPUT | HWMON_E_LABEL, }; +static int scmi_thermal_sensor_register(struct device *dev, + const struct scmi_protocol_handle *ph, + const struct scmi_sensor_info *sensor) +{ + struct scmi_thermal_sensor *th_sensor; + struct thermal_zone_device *tzd; + + th_sensor = 
devm_kzalloc(dev, sizeof(*th_sensor), GFP_KERNEL); + if (!th_sensor) + return -ENOMEM; + + th_sensor->ph = ph; + th_sensor->info = sensor; + + /* + * Try to register a temperature sensor with the Thermal Framework: + * skip sensors not defined as part of any thermal zone (-ENODEV) but + * report any other errors related to misconfigured zones/sensors. + */ + tzd = devm_thermal_of_zone_register(dev, th_sensor->info->id, th_sensor, + &scmi_hwmon_thermal_ops); + if (IS_ERR(tzd)) { + devm_kfree(dev, th_sensor); + + if (PTR_ERR(tzd) != -ENODEV) + return PTR_ERR(tzd); + + dev_dbg(dev, "Sensor '%s' not attached to any thermal zone.\n", + sensor->name); + } else { + dev_dbg(dev, "Sensor '%s' attached to thermal zone ID:%d\n", + sensor->name, tzd->id); + } + + return 0; +} + static int scmi_hwmon_probe(struct scmi_device *sdev) { int i, idx; @@ -164,7 +233,7 @@ static int scmi_hwmon_probe(struct scmi_device *sdev) enum hwmon_sensor_types type; struct scmi_sensors *scmi_sensors; const struct scmi_sensor_info *sensor; - int nr_count[hwmon_max] = {0}, nr_types = 0; + int nr_count[hwmon_max] = {0}, nr_types = 0, nr_count_temp = 0; const struct hwmon_chip_info *chip_info; struct device *hwdev, *dev = &sdev->dev; struct hwmon_channel_info *scmi_hwmon_chan; @@ -208,10 +277,8 @@ static int scmi_hwmon_probe(struct scmi_device *sdev) } } - if (nr_count[hwmon_temp]) { - nr_count[hwmon_chip]++; - nr_types++; - } + if (nr_count[hwmon_temp]) + nr_count_temp = nr_count[hwmon_temp]; scmi_hwmon_chan = devm_kcalloc(dev, nr_types, sizeof(*scmi_hwmon_chan), GFP_KERNEL); @@ -262,8 +329,31 @@ static int scmi_hwmon_probe(struct scmi_device *sdev) hwdev = devm_hwmon_device_register_with_info(dev, "scmi_sensors", scmi_sensors, chip_info, NULL); + if (IS_ERR(hwdev)) + return PTR_ERR(hwdev); - return PTR_ERR_OR_ZERO(hwdev); + for (i = 0; i < nr_count_temp; i++) { + int ret; + + sensor = *(scmi_sensors->info[hwmon_temp] + i); + if (!sensor) + continue; + + /* + * Warn on any misconfiguration related to thermal zones but + * bail out of probing only on memory errors. + */ + ret = scmi_thermal_sensor_register(dev, ph, sensor); + if (ret) { + if (ret == -ENOMEM) + return ret; + dev_warn(dev, + "Thermal zone misconfigured for %s. 
err=%d\n", + sensor->name, ret); + } + } + + return 0; } static const struct scmi_device_id scmi_id_table[] = { diff --git a/drivers/hwspinlock/qcom_hwspinlock.c b/drivers/hwspinlock/qcom_hwspinlock.c index 80ea45b3a815..9cf186362ae2 100644 --- a/drivers/hwspinlock/qcom_hwspinlock.c +++ b/drivers/hwspinlock/qcom_hwspinlock.c @@ -22,6 +22,7 @@ struct qcom_hwspinlock_of_data { u32 offset; u32 stride; + const struct regmap_config *regmap_config; }; static int qcom_hwspinlock_trylock(struct hwspinlock *lock) @@ -73,15 +74,42 @@ static const struct qcom_hwspinlock_of_data of_sfpb_mutex = { .stride = 0x4, }; -/* All modern platform has offset 0 and stride of 4k */ +static const struct regmap_config tcsr_msm8226_mutex_config = { + .reg_bits = 32, + .reg_stride = 4, + .val_bits = 32, + .max_register = 0x1000, + .fast_io = true, +}; + +static const struct qcom_hwspinlock_of_data of_msm8226_tcsr_mutex = { + .offset = 0, + .stride = 0x80, + .regmap_config = &tcsr_msm8226_mutex_config, +}; + +static const struct regmap_config tcsr_mutex_config = { + .reg_bits = 32, + .reg_stride = 4, + .val_bits = 32, + .max_register = 0x20000, + .fast_io = true, +}; + static const struct qcom_hwspinlock_of_data of_tcsr_mutex = { .offset = 0, .stride = 0x1000, + .regmap_config = &tcsr_mutex_config, }; static const struct of_device_id qcom_hwspinlock_of_match[] = { { .compatible = "qcom,sfpb-mutex", .data = &of_sfpb_mutex }, { .compatible = "qcom,tcsr-mutex", .data = &of_tcsr_mutex }, + { .compatible = "qcom,apq8084-tcsr-mutex", .data = &of_msm8226_tcsr_mutex }, + { .compatible = "qcom,ipq6018-tcsr-mutex", .data = &of_msm8226_tcsr_mutex }, + { .compatible = "qcom,msm8226-tcsr-mutex", .data = &of_msm8226_tcsr_mutex }, + { .compatible = "qcom,msm8974-tcsr-mutex", .data = &of_msm8226_tcsr_mutex }, + { .compatible = "qcom,msm8994-tcsr-mutex", .data = &of_msm8226_tcsr_mutex }, { } }; MODULE_DEVICE_TABLE(of, qcom_hwspinlock_of_match); @@ -117,14 +145,6 @@ static struct regmap *qcom_hwspinlock_probe_syscon(struct platform_device *pdev, return regmap; } -static const struct regmap_config tcsr_mutex_config = { - .reg_bits = 32, - .reg_stride = 4, - .val_bits = 32, - .max_register = 0x40000, - .fast_io = true, -}; - static struct regmap *qcom_hwspinlock_probe_mmio(struct platform_device *pdev, u32 *offset, u32 *stride) { @@ -133,6 +153,8 @@ static struct regmap *qcom_hwspinlock_probe_mmio(struct platform_device *pdev, void __iomem *base; data = of_device_get_match_data(dev); + if (!data->regmap_config) + return ERR_PTR(-EINVAL); *offset = data->offset; *stride = data->stride; @@ -141,7 +163,7 @@ static struct regmap *qcom_hwspinlock_probe_mmio(struct platform_device *pdev, if (IS_ERR(base)) return ERR_CAST(base); - return devm_regmap_init_mmio(dev, base, &tcsr_mutex_config); + return devm_regmap_init_mmio(dev, base, data->regmap_config); } static int qcom_hwspinlock_probe(struct platform_device *pdev) diff --git a/drivers/hwtracing/coresight/coresight-core.c b/drivers/hwtracing/coresight/coresight-core.c index d5dbc67bacb4..f3068175ca9d 100644 --- a/drivers/hwtracing/coresight/coresight-core.c +++ b/drivers/hwtracing/coresight/coresight-core.c @@ -1687,14 +1687,15 @@ struct coresight_device *coresight_register(struct coresight_desc *desc) ret = coresight_fixup_device_conns(csdev); if (!ret) ret = coresight_fixup_orphan_conns(csdev); - if (!ret && cti_assoc_ops && cti_assoc_ops->add) - cti_assoc_ops->add(csdev); out_unlock: mutex_unlock(&coresight_mutex); /* Success */ - if (!ret) + if (!ret) { + if (cti_assoc_ops && 
cti_assoc_ops->add) + cti_assoc_ops->add(csdev); return csdev; + } /* Unregister the device if needed */ if (registered) { diff --git a/drivers/hwtracing/coresight/coresight-cti-core.c b/drivers/hwtracing/coresight/coresight-cti-core.c index 8988b2ed2ea6..c6e8c6542f24 100644 --- a/drivers/hwtracing/coresight/coresight-cti-core.c +++ b/drivers/hwtracing/coresight/coresight-cti-core.c @@ -90,11 +90,9 @@ void cti_write_all_hw_regs(struct cti_drvdata *drvdata) static int cti_enable_hw(struct cti_drvdata *drvdata) { struct cti_config *config = &drvdata->config; - struct device *dev = &drvdata->csdev->dev; unsigned long flags; int rc = 0; - pm_runtime_get_sync(dev->parent); spin_lock_irqsave(&drvdata->spinlock, flags); /* no need to do anything if enabled or unpowered*/ @@ -119,7 +117,6 @@ cti_state_unchanged: /* cannot enable due to error */ cti_err_not_enabled: spin_unlock_irqrestore(&drvdata->spinlock, flags); - pm_runtime_put(dev->parent); return rc; } @@ -153,7 +150,6 @@ cti_hp_not_enabled: static int cti_disable_hw(struct cti_drvdata *drvdata) { struct cti_config *config = &drvdata->config; - struct device *dev = &drvdata->csdev->dev; struct coresight_device *csdev = drvdata->csdev; spin_lock(&drvdata->spinlock); @@ -175,7 +171,6 @@ static int cti_disable_hw(struct cti_drvdata *drvdata) coresight_disclaim_device_unlocked(csdev); CS_LOCK(drvdata->base); spin_unlock(&drvdata->spinlock); - pm_runtime_put(dev->parent); return 0; /* not disabled this call */ @@ -541,7 +536,7 @@ cti_match_fixup_csdev(struct cti_device *ctidev, const char *node_name, /* * Search the cti list to add an associated CTI into the supplied CS device * This will set the association if CTI declared before the CS device. - * (called from coresight_register() with coresight_mutex locked). + * (called from coresight_register() without coresight_mutex locked). */ static void cti_add_assoc_to_csdev(struct coresight_device *csdev) { @@ -569,7 +564,8 @@ static void cti_add_assoc_to_csdev(struct coresight_device *csdev) * if we found a matching csdev then update the ECT * association pointer for the device with this CTI. 
*/ - csdev->ect_dev = ect_item->csdev; + coresight_set_assoc_ectdev_mutex(csdev->ect_dev, + ect_item->csdev); break; } } diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig index 264e780ae32e..e50f9603d189 100644 --- a/drivers/i2c/busses/Kconfig +++ b/drivers/i2c/busses/Kconfig @@ -764,6 +764,7 @@ config I2C_LPC2K config I2C_MLXBF tristate "Mellanox BlueField I2C controller" depends on MELLANOX_PLATFORM && ARM64 + depends on ACPI select I2C_SLAVE help Enabling this option will add I2C SMBus support for Mellanox BlueField diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c index e06509edc5f3..1fda1eaa6d6a 100644 --- a/drivers/i2c/busses/i2c-i801.c +++ b/drivers/i2c/busses/i2c-i801.c @@ -1243,6 +1243,7 @@ static const struct { */ { "Latitude 5480", 0x29 }, { "Vostro V131", 0x1d }, + { "Vostro 5568", 0x29 }, }; static void register_dell_lis3lv02d_i2c_device(struct i801_priv *priv) diff --git a/drivers/i2c/busses/i2c-mlxbf.c b/drivers/i2c/busses/i2c-mlxbf.c index e68e775f187e..1810d5791b3d 100644 --- a/drivers/i2c/busses/i2c-mlxbf.c +++ b/drivers/i2c/busses/i2c-mlxbf.c @@ -2247,7 +2247,6 @@ static struct i2c_adapter_quirks mlxbf_i2c_quirks = { .max_write_len = MLXBF_I2C_MASTER_DATA_W_LENGTH, }; -#ifdef CONFIG_ACPI static const struct acpi_device_id mlxbf_i2c_acpi_ids[] = { { "MLNXBF03", (kernel_ulong_t)&mlxbf_i2c_chip[MLXBF_I2C_CHIP_TYPE_1] }, { "MLNXBF23", (kernel_ulong_t)&mlxbf_i2c_chip[MLXBF_I2C_CHIP_TYPE_2] }, @@ -2282,12 +2281,6 @@ static int mlxbf_i2c_acpi_probe(struct device *dev, struct mlxbf_i2c_priv *priv) return 0; } -#else -static int mlxbf_i2c_acpi_probe(struct device *dev, struct mlxbf_i2c_priv *priv) -{ - return -ENOENT; -} -#endif /* CONFIG_ACPI */ static int mlxbf_i2c_probe(struct platform_device *pdev) { @@ -2490,9 +2483,7 @@ static struct platform_driver mlxbf_i2c_driver = { .remove = mlxbf_i2c_remove, .driver = { .name = "i2c-mlxbf", -#ifdef CONFIG_ACPI .acpi_match_table = ACPI_PTR(mlxbf_i2c_acpi_ids), -#endif /* CONFIG_ACPI */ }, }; diff --git a/drivers/i2c/busses/i2c-mlxcpld.c b/drivers/i2c/busses/i2c-mlxcpld.c index 72fcfb17dd67..081f51ef0551 100644 --- a/drivers/i2c/busses/i2c-mlxcpld.c +++ b/drivers/i2c/busses/i2c-mlxcpld.c @@ -40,7 +40,7 @@ #define MLXCPLD_LPCI2C_STATUS_REG 0x9 #define MLXCPLD_LPCI2C_DATA_REG 0xa -/* LPC I2C masks and parametres */ +/* LPC I2C masks and parameters */ #define MLXCPLD_LPCI2C_RST_SEL_MASK 0x1 #define MLXCPLD_LPCI2C_TRANS_END 0x1 #define MLXCPLD_LPCI2C_STATUS_NACK 0x10 diff --git a/drivers/i2c/busses/i2c-piix4.c b/drivers/i2c/busses/i2c-piix4.c index 39cb1b7bb865..809fbd014cd6 100644 --- a/drivers/i2c/busses/i2c-piix4.c +++ b/drivers/i2c/busses/i2c-piix4.c @@ -1080,6 +1080,7 @@ static int piix4_probe(struct pci_dev *dev, const struct pci_device_id *id) "", &piix4_main_adapters[0]); if (retval < 0) return retval; + piix4_adapter_count = 1; } /* Check for auxiliary SMBus on some AMD chipsets */ diff --git a/drivers/i2c/busses/i2c-qcom-cci.c b/drivers/i2c/busses/i2c-qcom-cci.c index 87739fb4388b..a4b97fe3c3a5 100644 --- a/drivers/i2c/busses/i2c-qcom-cci.c +++ b/drivers/i2c/busses/i2c-qcom-cci.c @@ -639,6 +639,11 @@ static int cci_probe(struct platform_device *pdev) if (ret < 0) goto error; + pm_runtime_set_autosuspend_delay(dev, MSEC_PER_SEC); + pm_runtime_use_autosuspend(dev); + pm_runtime_set_active(dev); + pm_runtime_enable(dev); + for (i = 0; i < cci->data->num_masters; i++) { if (!cci->master[i].cci) continue; @@ -650,14 +655,12 @@ static int cci_probe(struct platform_device *pdev) } } - 
pm_runtime_set_autosuspend_delay(dev, MSEC_PER_SEC); - pm_runtime_use_autosuspend(dev); - pm_runtime_set_active(dev); - pm_runtime_enable(dev); - return 0; error_i2c: + pm_runtime_disable(dev); + pm_runtime_dont_use_autosuspend(dev); + for (--i ; i >= 0; i--) { if (cci->master[i].cci) { i2c_del_adapter(&cci->master[i].adap); diff --git a/drivers/i2c/busses/i2c-sis630.c b/drivers/i2c/busses/i2c-sis630.c index cfb8e04a2a83..87d56250d78a 100644 --- a/drivers/i2c/busses/i2c-sis630.c +++ b/drivers/i2c/busses/i2c-sis630.c @@ -97,7 +97,7 @@ MODULE_PARM_DESC(high_clock, module_param(force, bool, 0); MODULE_PARM_DESC(force, "Forcibly enable the SIS630. DANGEROUS!"); -/* SMBus base adress */ +/* SMBus base address */ static unsigned short smbus_base; /* supported chips */ diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c index 954022c04cc4..3869c258a529 100644 --- a/drivers/i2c/busses/i2c-tegra.c +++ b/drivers/i2c/busses/i2c-tegra.c @@ -284,6 +284,7 @@ struct tegra_i2c_dev { struct dma_chan *tx_dma_chan; struct dma_chan *rx_dma_chan; unsigned int dma_buf_size; + struct device *dma_dev; dma_addr_t dma_phys; void *dma_buf; @@ -420,7 +421,7 @@ static int tegra_i2c_dma_submit(struct tegra_i2c_dev *i2c_dev, size_t len) static void tegra_i2c_release_dma(struct tegra_i2c_dev *i2c_dev) { if (i2c_dev->dma_buf) { - dma_free_coherent(i2c_dev->dev, i2c_dev->dma_buf_size, + dma_free_coherent(i2c_dev->dma_dev, i2c_dev->dma_buf_size, i2c_dev->dma_buf, i2c_dev->dma_phys); i2c_dev->dma_buf = NULL; } @@ -472,10 +473,13 @@ static int tegra_i2c_init_dma(struct tegra_i2c_dev *i2c_dev) i2c_dev->tx_dma_chan = chan; + WARN_ON(i2c_dev->tx_dma_chan->device != i2c_dev->rx_dma_chan->device); + i2c_dev->dma_dev = chan->device->dev; + i2c_dev->dma_buf_size = i2c_dev->hw->quirks->max_write_len + I2C_PACKET_HEADER_SIZE; - dma_buf = dma_alloc_coherent(i2c_dev->dev, i2c_dev->dma_buf_size, + dma_buf = dma_alloc_coherent(i2c_dev->dma_dev, i2c_dev->dma_buf_size, &dma_phys, GFP_KERNEL | __GFP_NOWARN); if (!dma_buf) { dev_err(i2c_dev->dev, "failed to allocate DMA buffer\n"); @@ -1272,7 +1276,7 @@ static int tegra_i2c_xfer_msg(struct tegra_i2c_dev *i2c_dev, if (i2c_dev->dma_mode) { if (i2c_dev->msg_read) { - dma_sync_single_for_device(i2c_dev->dev, + dma_sync_single_for_device(i2c_dev->dma_dev, i2c_dev->dma_phys, xfer_size, DMA_FROM_DEVICE); @@ -1280,7 +1284,7 @@ static int tegra_i2c_xfer_msg(struct tegra_i2c_dev *i2c_dev, if (err) return err; } else { - dma_sync_single_for_cpu(i2c_dev->dev, + dma_sync_single_for_cpu(i2c_dev->dma_dev, i2c_dev->dma_phys, xfer_size, DMA_TO_DEVICE); } @@ -1293,7 +1297,7 @@ static int tegra_i2c_xfer_msg(struct tegra_i2c_dev *i2c_dev, memcpy(i2c_dev->dma_buf + I2C_PACKET_HEADER_SIZE, msg->buf, msg->len); - dma_sync_single_for_device(i2c_dev->dev, + dma_sync_single_for_device(i2c_dev->dma_dev, i2c_dev->dma_phys, xfer_size, DMA_TO_DEVICE); @@ -1344,7 +1348,7 @@ static int tegra_i2c_xfer_msg(struct tegra_i2c_dev *i2c_dev, } if (i2c_dev->msg_read && i2c_dev->msg_err == I2C_ERR_NONE) { - dma_sync_single_for_cpu(i2c_dev->dev, + dma_sync_single_for_cpu(i2c_dev->dma_dev, i2c_dev->dma_phys, xfer_size, DMA_FROM_DEVICE); diff --git a/drivers/i2c/busses/i2c-xiic.c b/drivers/i2c/busses/i2c-xiic.c index b3fe6b2aa3ca..277a02455cdd 100644 --- a/drivers/i2c/busses/i2c-xiic.c +++ b/drivers/i2c/busses/i2c-xiic.c @@ -920,6 +920,7 @@ static struct platform_driver xiic_i2c_driver = { module_platform_driver(xiic_i2c_driver); +MODULE_ALIAS("platform:" DRIVER_NAME); MODULE_AUTHOR("info@mocean-labs.com"); 
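The xiic hunk above adds a platform MODULE_ALIAS so the module can be autoloaded: when a matching platform device is created, its uevent carries MODALIAS=platform:&lt;name&gt;, and udev loads whichever module advertises that alias. A minimal sketch of the convention (all demo names are hypothetical):

```c
#include <linux/module.h>
#include <linux/platform_device.h>

#define DEMO_DRIVER_NAME "demo-i2c"

static int demo_probe(struct platform_device *pdev)
{
	return 0;
}

static struct platform_driver demo_driver = {
	.probe = demo_probe,
	.driver = {
		.name = DEMO_DRIVER_NAME,
	},
};
module_platform_driver(demo_driver);

/* Matches the MODALIAS=platform:demo-i2c uevent of the device. */
MODULE_ALIAS("platform:" DEMO_DRIVER_NAME);
MODULE_LICENSE("GPL");
```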
MODULE_DESCRIPTION("Xilinx I2C bus driver"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/iio/accel/adxl367.c b/drivers/iio/accel/adxl367.c index 47feb375b70b..7c7d78040793 100644 --- a/drivers/iio/accel/adxl367.c +++ b/drivers/iio/accel/adxl367.c @@ -1185,17 +1185,30 @@ static ssize_t adxl367_get_fifo_watermark(struct device *dev, return sysfs_emit(buf, "%d\n", fifo_watermark); } -static IIO_CONST_ATTR(hwfifo_watermark_min, "1"); -static IIO_CONST_ATTR(hwfifo_watermark_max, - __stringify(ADXL367_FIFO_MAX_WATERMARK)); +static ssize_t hwfifo_watermark_min_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + return sysfs_emit(buf, "%s\n", "1"); +} + +static ssize_t hwfifo_watermark_max_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + return sysfs_emit(buf, "%s\n", __stringify(ADXL367_FIFO_MAX_WATERMARK)); +} + +static IIO_DEVICE_ATTR_RO(hwfifo_watermark_min, 0); +static IIO_DEVICE_ATTR_RO(hwfifo_watermark_max, 0); static IIO_DEVICE_ATTR(hwfifo_watermark, 0444, adxl367_get_fifo_watermark, NULL, 0); static IIO_DEVICE_ATTR(hwfifo_enabled, 0444, adxl367_get_fifo_enabled, NULL, 0); static const struct attribute *adxl367_fifo_attributes[] = { - &iio_const_attr_hwfifo_watermark_min.dev_attr.attr, - &iio_const_attr_hwfifo_watermark_max.dev_attr.attr, + &iio_dev_attr_hwfifo_watermark_min.dev_attr.attr, + &iio_dev_attr_hwfifo_watermark_max.dev_attr.attr, &iio_dev_attr_hwfifo_watermark.dev_attr.attr, &iio_dev_attr_hwfifo_enabled.dev_attr.attr, NULL, diff --git a/drivers/iio/accel/adxl372.c b/drivers/iio/accel/adxl372.c index e3ecbaee61f7..bc53af809d5d 100644 --- a/drivers/iio/accel/adxl372.c +++ b/drivers/iio/accel/adxl372.c @@ -998,17 +998,30 @@ static ssize_t adxl372_get_fifo_watermark(struct device *dev, return sprintf(buf, "%d\n", st->watermark); } -static IIO_CONST_ATTR(hwfifo_watermark_min, "1"); -static IIO_CONST_ATTR(hwfifo_watermark_max, - __stringify(ADXL372_FIFO_SIZE)); +static ssize_t hwfifo_watermark_min_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + return sysfs_emit(buf, "%s\n", "1"); +} + +static ssize_t hwfifo_watermark_max_show(struct device *dev, + struct device_attribute *attr, + char *buf) +{ + return sysfs_emit(buf, "%s\n", __stringify(ADXL372_FIFO_SIZE)); +} + +static IIO_DEVICE_ATTR_RO(hwfifo_watermark_min, 0); +static IIO_DEVICE_ATTR_RO(hwfifo_watermark_max, 0); static IIO_DEVICE_ATTR(hwfifo_watermark, 0444, adxl372_get_fifo_watermark, NULL, 0); static IIO_DEVICE_ATTR(hwfifo_enabled, 0444, adxl372_get_fifo_enabled, NULL, 0); static const struct attribute *adxl372_fifo_attributes[] = { - &iio_const_attr_hwfifo_watermark_min.dev_attr.attr, - &iio_const_attr_hwfifo_watermark_max.dev_attr.attr, + &iio_dev_attr_hwfifo_watermark_min.dev_attr.attr, + &iio_dev_attr_hwfifo_watermark_max.dev_attr.attr, &iio_dev_attr_hwfifo_watermark.dev_attr.attr, &iio_dev_attr_hwfifo_enabled.dev_attr.attr, NULL, diff --git a/drivers/iio/accel/bma400_core.c b/drivers/iio/accel/bma400_core.c index ad8fce3e08cd..490c342ef72a 100644 --- a/drivers/iio/accel/bma400_core.c +++ b/drivers/iio/accel/bma400_core.c @@ -869,18 +869,6 @@ static int bma400_init(struct bma400_data *data) unsigned int val; int ret; - /* Try to read chip_id register. It must return 0x90. 
*/
-	ret = regmap_read(data->regmap, BMA400_CHIP_ID_REG, &val);
-	if (ret) {
-		dev_err(data->dev, "Failed to read chip id register\n");
-		return ret;
-	}
-
-	if (val != BMA400_ID_REG_VAL) {
-		dev_err(data->dev, "Chip ID mismatch\n");
-		return -ENODEV;
-	}
-
 	data->regulators[BMA400_VDD_REGULATOR].supply = "vdd";
 	data->regulators[BMA400_VDDIO_REGULATOR].supply = "vddio";
 	ret = devm_regulator_bulk_get(data->dev,
@@ -906,6 +894,18 @@ static int bma400_init(struct bma400_data *data)
 	if (ret)
 		return ret;
 
+	/* Try to read chip_id register. It must return 0x90. */
+	ret = regmap_read(data->regmap, BMA400_CHIP_ID_REG, &val);
+	if (ret) {
+		dev_err(data->dev, "Failed to read chip id register\n");
+		return ret;
+	}
+
+	if (val != BMA400_ID_REG_VAL) {
+		dev_err(data->dev, "Chip ID mismatch\n");
+		return -ENODEV;
+	}
+
 	ret = bma400_get_power_mode(data);
 	if (ret) {
 		dev_err(data->dev, "Failed to get the initial power-mode\n");
diff --git a/drivers/iio/accel/bmc150-accel-core.c b/drivers/iio/accel/bmc150-accel-core.c
index 57e8a8350cd1..92f8b139acce 100644
--- a/drivers/iio/accel/bmc150-accel-core.c
+++ b/drivers/iio/accel/bmc150-accel-core.c
@@ -925,17 +925,30 @@ static const struct iio_chan_spec_ext_info bmc150_accel_ext_info[] = {
 	{ }
 };
 
-static IIO_CONST_ATTR(hwfifo_watermark_min, "1");
-static IIO_CONST_ATTR(hwfifo_watermark_max,
-		      __stringify(BMC150_ACCEL_FIFO_LENGTH));
+static ssize_t hwfifo_watermark_min_show(struct device *dev,
+					 struct device_attribute *attr,
+					 char *buf)
+{
+	return sysfs_emit(buf, "%s\n", "1");
+}
+
+static ssize_t hwfifo_watermark_max_show(struct device *dev,
+					 struct device_attribute *attr,
+					 char *buf)
+{
+	return sysfs_emit(buf, "%s\n", __stringify(BMC150_ACCEL_FIFO_LENGTH));
+}
+
+static IIO_DEVICE_ATTR_RO(hwfifo_watermark_min, 0);
+static IIO_DEVICE_ATTR_RO(hwfifo_watermark_max, 0);
 static IIO_DEVICE_ATTR(hwfifo_enabled, S_IRUGO,
 		       bmc150_accel_get_fifo_state, NULL, 0);
 static IIO_DEVICE_ATTR(hwfifo_watermark, S_IRUGO,
 		       bmc150_accel_get_fifo_watermark, NULL, 0);
 
 static const struct attribute *bmc150_accel_fifo_attributes[] = {
-	&iio_const_attr_hwfifo_watermark_min.dev_attr.attr,
-	&iio_const_attr_hwfifo_watermark_max.dev_attr.attr,
+	&iio_dev_attr_hwfifo_watermark_min.dev_attr.attr,
+	&iio_dev_attr_hwfifo_watermark_max.dev_attr.attr,
 	&iio_dev_attr_hwfifo_watermark.dev_attr.attr,
 	&iio_dev_attr_hwfifo_enabled.dev_attr.attr,
 	NULL,
diff --git a/drivers/iio/adc/at91-sama5d2_adc.c b/drivers/iio/adc/at91-sama5d2_adc.c
index 4294d6539cdb..870f4cb60923 100644
--- a/drivers/iio/adc/at91-sama5d2_adc.c
+++ b/drivers/iio/adc/at91-sama5d2_adc.c
@@ -2193,17 +2193,30 @@ static ssize_t at91_adc_get_watermark(struct device *dev,
 	return scnprintf(buf, PAGE_SIZE, "%d\n", st->dma_st.watermark);
 }
 
+static ssize_t hwfifo_watermark_min_show(struct device *dev,
+					 struct device_attribute *attr,
+					 char *buf)
+{
+	return sysfs_emit(buf, "%s\n", "2");
+}
+
+static ssize_t hwfifo_watermark_max_show(struct device *dev,
+					 struct device_attribute *attr,
+					 char *buf)
+{
+	return sysfs_emit(buf, "%s\n", AT91_HWFIFO_MAX_SIZE_STR);
+}
+
 static IIO_DEVICE_ATTR(hwfifo_enabled, 0444,
 		       at91_adc_get_fifo_state, NULL, 0);
 static IIO_DEVICE_ATTR(hwfifo_watermark, 0444,
 		       at91_adc_get_watermark, NULL, 0);
-
-static IIO_CONST_ATTR(hwfifo_watermark_min, "2");
-static IIO_CONST_ATTR(hwfifo_watermark_max, AT91_HWFIFO_MAX_SIZE_STR);
+static IIO_DEVICE_ATTR_RO(hwfifo_watermark_min, 0);
+static IIO_DEVICE_ATTR_RO(hwfifo_watermark_max, 0);
 
 static const struct attribute *at91_adc_fifo_attributes[] = {
-	&iio_const_attr_hwfifo_watermark_min.dev_attr.attr,
-	&iio_const_attr_hwfifo_watermark_max.dev_attr.attr,
+	&iio_dev_attr_hwfifo_watermark_min.dev_attr.attr,
+	&iio_dev_attr_hwfifo_watermark_max.dev_attr.attr,
 	&iio_dev_attr_hwfifo_watermark.dev_attr.attr,
 	&iio_dev_attr_hwfifo_enabled.dev_attr.attr,
 	NULL,
@@ -2294,11 +2307,9 @@ static int at91_adc_temp_sensor_init(struct at91_adc_state *st,
 	clb->p6 = buf[AT91_ADC_TS_CLB_IDX_P6];
 
 	/*
-	 * We prepare here the conversion to milli and also add constant
-	 * factor (5 degrees Celsius) to p1 here to avoid doing it on
-	 * hotpath.
+	 * We prepare here the conversion to milli to avoid doing it on hotpath.
 	 */
-	clb->p1 = clb->p1 * 1000 + 5000;
+	clb->p1 = clb->p1 * 1000;
 
 free_buf:
 	kfree(buf);
diff --git a/drivers/iio/adc/at91_adc.c b/drivers/iio/adc/at91_adc.c
index 532daaa6f943..366e252ebeb0 100644
--- a/drivers/iio/adc/at91_adc.c
+++ b/drivers/iio/adc/at91_adc.c
@@ -634,8 +634,10 @@ static struct iio_trigger *at91_adc_allocate_trigger(struct iio_dev *idev,
 	trig->ops = &at91_adc_trigger_ops;
 
 	ret = iio_trigger_register(trig);
-	if (ret)
+	if (ret) {
+		iio_trigger_free(trig);
 		return NULL;
+	}
 
 	return trig;
 }
diff --git a/drivers/iio/adc/mcp3911.c b/drivers/iio/adc/mcp3911.c
index b35fd2c9c3c0..76b334f5ac61 100644
--- a/drivers/iio/adc/mcp3911.c
+++ b/drivers/iio/adc/mcp3911.c
@@ -55,8 +55,9 @@
 /* Internal voltage reference in mV */
 #define MCP3911_INT_VREF_MV	1200
 
-#define MCP3911_REG_READ(reg, id)	((((reg) << 1) | ((id) << 5) | (1 << 0)) & 0xff)
-#define MCP3911_REG_WRITE(reg, id)	((((reg) << 1) | ((id) << 5) | (0 << 0)) & 0xff)
+#define MCP3911_REG_READ(reg, id)	((((reg) << 1) | ((id) << 6) | (1 << 0)) & 0xff)
+#define MCP3911_REG_WRITE(reg, id)	((((reg) << 1) | ((id) << 6) | (0 << 0)) & 0xff)
+#define MCP3911_REG_MASK	GENMASK(4, 1)
 
 #define MCP3911_NUM_CHANNELS	2
 
@@ -89,8 +90,8 @@ static int mcp3911_read(struct mcp3911 *adc, u8 reg, u32 *val, u8 len)
 	be32_to_cpus(val);
 	*val >>= ((4 - len) * 8);
 
-	dev_dbg(&adc->spi->dev, "reading 0x%x from register 0x%x\n", *val,
-		reg >> 1);
+	dev_dbg(&adc->spi->dev, "reading 0x%x from register 0x%lx\n", *val,
+		FIELD_GET(MCP3911_REG_MASK, reg));
 	return ret;
 }
 
@@ -248,7 +249,7 @@ static int mcp3911_write_raw(struct iio_dev *indio_dev,
 		break;
 
 	case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
-		for (int i = 0; i < sizeof(mcp3911_osr_table); i++) {
+		for (int i = 0; i < ARRAY_SIZE(mcp3911_osr_table); i++) {
 			if (val == mcp3911_osr_table[i]) {
 				val = FIELD_PREP(MCP3911_CONFIG_OSR, i);
 				ret = mcp3911_update(adc, MCP3911_REG_CONFIG, MCP3911_CONFIG_OSR,
@@ -496,7 +497,7 @@ static int mcp3911_probe(struct spi_device *spi)
 					    indio_dev->name,
 					    iio_device_id(indio_dev));
 		if (!adc->trig)
-			return PTR_ERR(adc->trig);
+			return -ENOMEM;
 
 		adc->trig->ops = &mcp3911_trigger_ops;
 		iio_trigger_set_drvdata(adc->trig, adc);
diff --git a/drivers/iio/adc/mp2629_adc.c b/drivers/iio/adc/mp2629_adc.c
index 30a31f185d08..88e947f300cf 100644
--- a/drivers/iio/adc/mp2629_adc.c
+++ b/drivers/iio/adc/mp2629_adc.c
@@ -57,7 +57,8 @@ static struct iio_map mp2629_adc_maps[] = {
 	MP2629_MAP(SYSTEM_VOLT, "system-volt"),
 	MP2629_MAP(INPUT_VOLT, "input-volt"),
 	MP2629_MAP(BATT_CURRENT, "batt-current"),
-	MP2629_MAP(INPUT_CURRENT, "input-current")
+	MP2629_MAP(INPUT_CURRENT, "input-current"),
+	{ }
 };
 
 static int mp2629_read_raw(struct iio_dev *indio_dev,
@@ -74,7 +75,7 @@ static int mp2629_read_raw(struct iio_dev *indio_dev,
 		if (ret)
 			return ret;
 
-		if (chan->address == MP2629_INPUT_VOLT)
+		if (chan->channel == MP2629_INPUT_VOLT)
 			rval &= GENMASK(6, 0);
 		*val = rval;
 		return IIO_VAL_INT;
diff --git a/drivers/iio/adc/stm32-adc.c b/drivers/iio/adc/stm32-adc.c
index 6256977eb7f7..3cda529f081d 100644
--- a/drivers/iio/adc/stm32-adc.c
+++ b/drivers/iio/adc/stm32-adc.c
@@ -2086,18 +2086,19 @@ static int stm32_adc_generic_chan_init(struct iio_dev *indio_dev,
 		stm32_adc_chan_init_one(indio_dev, &channels[scan_index], val,
 					vin[1], scan_index, differential);
 
+		val = 0;
 		ret = fwnode_property_read_u32(child, "st,min-sample-time-ns", &val);
 		/* st,min-sample-time-ns is optional */
-		if (!ret) {
-			stm32_adc_smpr_init(adc, channels[scan_index].channel, val);
-			if (differential)
-				stm32_adc_smpr_init(adc, vin[1], val);
-		} else if (ret != -EINVAL) {
+		if (ret && ret != -EINVAL) {
 			dev_err(&indio_dev->dev, "Invalid st,min-sample-time-ns property %d\n",
 				ret);
 			goto err;
 		}
 
+		stm32_adc_smpr_init(adc, channels[scan_index].channel, val);
+		if (differential)
+			stm32_adc_smpr_init(adc, vin[1], val);
+
 		scan_index++;
 	}
 
diff --git a/drivers/iio/imu/bno055/bno055.c b/drivers/iio/imu/bno055/bno055.c
index 307557a609e3..52744dd98e65 100644
--- a/drivers/iio/imu/bno055/bno055.c
+++ b/drivers/iio/imu/bno055/bno055.c
@@ -632,7 +632,7 @@ static int bno055_set_regmask(struct bno055_priv *priv, int val, int val2,
 			return -EINVAL;
 		}
 		delta = abs(tbl_val - req_val);
-		if (delta < best_delta || first) {
+		if (first || delta < best_delta) {
 			best_delta = delta;
 			hwval = i;
 			first = false;
diff --git a/drivers/iio/light/tsl2583.c b/drivers/iio/light/tsl2583.c
index 0a2ca1a8146d..7bcb5c718922 100644
--- a/drivers/iio/light/tsl2583.c
+++ b/drivers/iio/light/tsl2583.c
@@ -858,7 +858,7 @@ static int tsl2583_probe(struct i2c_client *clientp,
 					 TSL2583_POWER_OFF_DELAY_MS);
 	pm_runtime_use_autosuspend(&clientp->dev);
 
-	ret = devm_iio_device_register(indio_dev->dev.parent, indio_dev);
+	ret = iio_device_register(indio_dev);
 	if (ret) {
 		dev_err(&clientp->dev, "%s: iio registration failed\n",
 			__func__);
diff --git a/drivers/iio/pressure/ms5611.h b/drivers/iio/pressure/ms5611.h
index cbc9349c342a..550b75b7186f 100644
--- a/drivers/iio/pressure/ms5611.h
+++ b/drivers/iio/pressure/ms5611.h
@@ -25,13 +25,6 @@ enum {
 	MS5607,
 };
 
-struct ms5611_chip_info {
-	u16 prom[MS5611_PROM_WORDS_NB];
-
-	int (*temp_and_pressure_compensate)(struct ms5611_chip_info *chip_info,
-					    s32 *temp, s32 *pressure);
-};
-
 /*
  * OverSampling Rate descriptor.
* Warning: cmd MUST be kept aligned on a word boundary (see @@ -50,12 +43,15 @@ struct ms5611_state { const struct ms5611_osr *pressure_osr; const struct ms5611_osr *temp_osr; + u16 prom[MS5611_PROM_WORDS_NB]; + int (*reset)(struct ms5611_state *st); int (*read_prom_word)(struct ms5611_state *st, int index, u16 *word); int (*read_adc_temp_and_pressure)(struct ms5611_state *st, s32 *temp, s32 *pressure); - struct ms5611_chip_info *chip_info; + int (*compensate_temp_and_pressure)(struct ms5611_state *st, s32 *temp, + s32 *pressure); struct regulator *vdd; }; diff --git a/drivers/iio/pressure/ms5611_core.c b/drivers/iio/pressure/ms5611_core.c index 717521de66c4..c564a1d6cafe 100644 --- a/drivers/iio/pressure/ms5611_core.c +++ b/drivers/iio/pressure/ms5611_core.c @@ -85,7 +85,7 @@ static int ms5611_read_prom(struct iio_dev *indio_dev) struct ms5611_state *st = iio_priv(indio_dev); for (i = 0; i < MS5611_PROM_WORDS_NB; i++) { - ret = st->read_prom_word(st, i, &st->chip_info->prom[i]); + ret = st->read_prom_word(st, i, &st->prom[i]); if (ret < 0) { dev_err(&indio_dev->dev, "failed to read prom at %d\n", i); @@ -93,7 +93,7 @@ static int ms5611_read_prom(struct iio_dev *indio_dev) } } - if (!ms5611_prom_is_valid(st->chip_info->prom, MS5611_PROM_WORDS_NB)) { + if (!ms5611_prom_is_valid(st->prom, MS5611_PROM_WORDS_NB)) { dev_err(&indio_dev->dev, "PROM integrity check failed\n"); return -ENODEV; } @@ -114,21 +114,20 @@ static int ms5611_read_temp_and_pressure(struct iio_dev *indio_dev, return ret; } - return st->chip_info->temp_and_pressure_compensate(st->chip_info, - temp, pressure); + return st->compensate_temp_and_pressure(st, temp, pressure); } -static int ms5611_temp_and_pressure_compensate(struct ms5611_chip_info *chip_info, +static int ms5611_temp_and_pressure_compensate(struct ms5611_state *st, s32 *temp, s32 *pressure) { s32 t = *temp, p = *pressure; s64 off, sens, dt; - dt = t - (chip_info->prom[5] << 8); - off = ((s64)chip_info->prom[2] << 16) + ((chip_info->prom[4] * dt) >> 7); - sens = ((s64)chip_info->prom[1] << 15) + ((chip_info->prom[3] * dt) >> 8); + dt = t - (st->prom[5] << 8); + off = ((s64)st->prom[2] << 16) + ((st->prom[4] * dt) >> 7); + sens = ((s64)st->prom[1] << 15) + ((st->prom[3] * dt) >> 8); - t = 2000 + ((chip_info->prom[6] * dt) >> 23); + t = 2000 + ((st->prom[6] * dt) >> 23); if (t < 2000) { s64 off2, sens2, t2; @@ -154,17 +153,17 @@ static int ms5611_temp_and_pressure_compensate(struct ms5611_chip_info *chip_inf return 0; } -static int ms5607_temp_and_pressure_compensate(struct ms5611_chip_info *chip_info, +static int ms5607_temp_and_pressure_compensate(struct ms5611_state *st, s32 *temp, s32 *pressure) { s32 t = *temp, p = *pressure; s64 off, sens, dt; - dt = t - (chip_info->prom[5] << 8); - off = ((s64)chip_info->prom[2] << 17) + ((chip_info->prom[4] * dt) >> 6); - sens = ((s64)chip_info->prom[1] << 16) + ((chip_info->prom[3] * dt) >> 7); + dt = t - (st->prom[5] << 8); + off = ((s64)st->prom[2] << 17) + ((st->prom[4] * dt) >> 6); + sens = ((s64)st->prom[1] << 16) + ((st->prom[3] * dt) >> 7); - t = 2000 + ((chip_info->prom[6] * dt) >> 23); + t = 2000 + ((st->prom[6] * dt) >> 23); if (t < 2000) { s64 off2, sens2, t2, tmp; @@ -342,15 +341,6 @@ static int ms5611_write_raw(struct iio_dev *indio_dev, static const unsigned long ms5611_scan_masks[] = {0x3, 0}; -static struct ms5611_chip_info chip_info_tbl[] = { - [MS5611] = { - .temp_and_pressure_compensate = ms5611_temp_and_pressure_compensate, - }, - [MS5607] = { - .temp_and_pressure_compensate = 
ms5607_temp_and_pressure_compensate, - } -}; - static const struct iio_chan_spec ms5611_channels[] = { { .type = IIO_PRESSURE, @@ -433,7 +423,20 @@ int ms5611_probe(struct iio_dev *indio_dev, struct device *dev, struct ms5611_state *st = iio_priv(indio_dev); mutex_init(&st->lock); - st->chip_info = &chip_info_tbl[type]; + + switch (type) { + case MS5611: + st->compensate_temp_and_pressure = + ms5611_temp_and_pressure_compensate; + break; + case MS5607: + st->compensate_temp_and_pressure = + ms5607_temp_and_pressure_compensate; + break; + default: + return -EINVAL; + } + st->temp_osr = &ms5611_avail_temp_osr[ARRAY_SIZE(ms5611_avail_temp_osr) - 1]; st->pressure_osr = diff --git a/drivers/iio/pressure/ms5611_spi.c b/drivers/iio/pressure/ms5611_spi.c index 432e912096f4..a0a7205c9c3a 100644 --- a/drivers/iio/pressure/ms5611_spi.c +++ b/drivers/iio/pressure/ms5611_spi.c @@ -91,7 +91,7 @@ static int ms5611_spi_probe(struct spi_device *spi) spi_set_drvdata(spi, indio_dev); spi->mode = SPI_MODE_0; - spi->max_speed_hz = 20000000; + spi->max_speed_hz = min(spi->max_speed_hz, 20000000U); spi->bits_per_word = 8; ret = spi_setup(spi); if (ret < 0) diff --git a/drivers/iio/temperature/ltc2983.c b/drivers/iio/temperature/ltc2983.c index b652d2b39bcf..a60ccf183687 100644 --- a/drivers/iio/temperature/ltc2983.c +++ b/drivers/iio/temperature/ltc2983.c @@ -1385,13 +1385,6 @@ static int ltc2983_setup(struct ltc2983_data *st, bool assign_iio) return ret; } - st->iio_chan = devm_kzalloc(&st->spi->dev, - st->iio_channels * sizeof(*st->iio_chan), - GFP_KERNEL); - - if (!st->iio_chan) - return -ENOMEM; - ret = regmap_update_bits(st->regmap, LTC2983_GLOBAL_CONFIG_REG, LTC2983_NOTCH_FREQ_MASK, LTC2983_NOTCH_FREQ(st->filter_notch_freq)); @@ -1514,6 +1507,12 @@ static int ltc2983_probe(struct spi_device *spi) gpiod_set_value_cansleep(gpio, 0); } + st->iio_chan = devm_kzalloc(&spi->dev, + st->iio_channels * sizeof(*st->iio_chan), + GFP_KERNEL); + if (!st->iio_chan) + return -ENOMEM; + ret = ltc2983_setup(st, true); if (ret) return ret; diff --git a/drivers/iio/trigger/iio-trig-sysfs.c b/drivers/iio/trigger/iio-trig-sysfs.c index d6c5e9644738..6b05eed41612 100644 --- a/drivers/iio/trigger/iio-trig-sysfs.c +++ b/drivers/iio/trigger/iio-trig-sysfs.c @@ -203,9 +203,13 @@ static int iio_sysfs_trigger_remove(int id) static int __init iio_sysfs_trig_init(void) { + int ret; device_initialize(&iio_sysfs_trig_dev); dev_set_name(&iio_sysfs_trig_dev, "iio_sysfs_trigger"); - return device_add(&iio_sysfs_trig_dev); + ret = device_add(&iio_sysfs_trig_dev); + if (ret) + put_device(&iio_sysfs_trig_dev); + return ret; } module_init(iio_sysfs_trig_init); diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index cc2222b85c88..26d1772179b8 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c @@ -1556,7 +1556,7 @@ static bool validate_ipv4_net_dev(struct net_device *net_dev, return false; memset(&fl4, 0, sizeof(fl4)); - fl4.flowi4_iif = net_dev->ifindex; + fl4.flowi4_oif = net_dev->ifindex; fl4.daddr = daddr; fl4.saddr = saddr; diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c index 3893b6517421..fa65c5d3d395 100644 --- a/drivers/infiniband/core/device.c +++ b/drivers/infiniband/core/device.c @@ -2815,10 +2815,18 @@ static int __init ib_core_init(void) nldev_init(); rdma_nl_register(RDMA_NL_LS, ibnl_ls_cb_table); - roce_gid_mgmt_init(); + ret = roce_gid_mgmt_init(); + if (ret) { + pr_warn("Couldn't init RoCE GID management\n"); + goto err_parent; + } return 0; 
+err_parent: + rdma_nl_unregister(RDMA_NL_LS); + nldev_exit(); + unregister_pernet_device(&rdma_dev_net_ops); err_compat: unregister_blocking_lsm_notifier(&ibdev_lsm_nb); err_sa: diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c index b92358f606d0..12dc97067ed2 100644 --- a/drivers/infiniband/core/nldev.c +++ b/drivers/infiniband/core/nldev.c @@ -2537,7 +2537,7 @@ void __init nldev_init(void) rdma_nl_register(RDMA_NL_NLDEV, nldev_cb_table); } -void __exit nldev_exit(void) +void nldev_exit(void) { rdma_nl_unregister(RDMA_NL_NLDEV); } diff --git a/drivers/infiniband/hw/efa/efa_main.c b/drivers/infiniband/hw/efa/efa_main.c index 94b94cca4870..15ee92081118 100644 --- a/drivers/infiniband/hw/efa/efa_main.c +++ b/drivers/infiniband/hw/efa/efa_main.c @@ -1,6 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause /* - * Copyright 2018-2021 Amazon.com, Inc. or its affiliates. All rights reserved. + * Copyright 2018-2022 Amazon.com, Inc. or its affiliates. All rights reserved. */ #include <linux/module.h> @@ -14,10 +14,12 @@ #define PCI_DEV_ID_EFA0_VF 0xefa0 #define PCI_DEV_ID_EFA1_VF 0xefa1 +#define PCI_DEV_ID_EFA2_VF 0xefa2 static const struct pci_device_id efa_pci_tbl[] = { { PCI_VDEVICE(AMAZON, PCI_DEV_ID_EFA0_VF) }, { PCI_VDEVICE(AMAZON, PCI_DEV_ID_EFA1_VF) }, + { PCI_VDEVICE(AMAZON, PCI_DEV_ID_EFA2_VF) }, { } }; diff --git a/drivers/infiniband/hw/hfi1/pio.c b/drivers/infiniband/hw/hfi1/pio.c index 3d42bd2b36bd..51ae58c02b15 100644 --- a/drivers/infiniband/hw/hfi1/pio.c +++ b/drivers/infiniband/hw/hfi1/pio.c @@ -913,8 +913,7 @@ void sc_disable(struct send_context *sc) spin_unlock(&sc->release_lock); write_seqlock(&sc->waitlock); - if (!list_empty(&sc->piowait)) - list_move(&sc->piowait, &wake_list); + list_splice_init(&sc->piowait, &wake_list); write_sequnlock(&sc->waitlock); while (!list_empty(&wake_list)) { struct iowait *wait; diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c index 1ead35fb031b..1435fe2ea176 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c @@ -118,7 +118,6 @@ static const u32 hns_roce_op_code[] = { HR_OPC_MAP(ATOMIC_CMP_AND_SWP, ATOM_CMP_AND_SWAP), HR_OPC_MAP(ATOMIC_FETCH_AND_ADD, ATOM_FETCH_AND_ADD), HR_OPC_MAP(SEND_WITH_INV, SEND_WITH_INV), - HR_OPC_MAP(LOCAL_INV, LOCAL_INV), HR_OPC_MAP(MASKED_ATOMIC_CMP_AND_SWP, ATOM_MSK_CMP_AND_SWAP), HR_OPC_MAP(MASKED_ATOMIC_FETCH_AND_ADD, ATOM_MSK_FETCH_AND_ADD), HR_OPC_MAP(REG_MR, FAST_REG_PMR), @@ -559,9 +558,6 @@ static int set_rc_opcode(struct hns_roce_dev *hr_dev, else ret = -EOPNOTSUPP; break; - case IB_WR_LOCAL_INV: - hr_reg_enable(rc_sq_wqe, RC_SEND_WQE_SO); - fallthrough; case IB_WR_SEND_WITH_INV: rc_sq_wqe->inv_key = cpu_to_le32(wr->ex.invalidate_rkey); break; @@ -2805,8 +2801,12 @@ static int free_mr_modify_qp(struct hns_roce_dev *hr_dev) static int free_mr_init(struct hns_roce_dev *hr_dev) { + struct hns_roce_v2_priv *priv = hr_dev->priv; + struct hns_roce_v2_free_mr *free_mr = &priv->free_mr; int ret; + mutex_init(&free_mr->mutex); + ret = free_mr_alloc_res(hr_dev); if (ret) return ret; @@ -3222,7 +3222,6 @@ static int hns_roce_v2_write_mtpt(struct hns_roce_dev *hr_dev, hr_reg_write(mpt_entry, MPT_ST, V2_MPT_ST_VALID); hr_reg_write(mpt_entry, MPT_PD, mr->pd); - hr_reg_enable(mpt_entry, MPT_L_INV_EN); hr_reg_write_bool(mpt_entry, MPT_BIND_EN, mr->access & IB_ACCESS_MW_BIND); @@ -3313,7 +3312,6 @@ static int hns_roce_v2_frmr_write_mtpt(struct hns_roce_dev *hr_dev, 
hr_reg_enable(mpt_entry, MPT_RA_EN); hr_reg_enable(mpt_entry, MPT_R_INV_EN); - hr_reg_enable(mpt_entry, MPT_L_INV_EN); hr_reg_enable(mpt_entry, MPT_FRE); hr_reg_clear(mpt_entry, MPT_MR_MW); @@ -3345,7 +3343,6 @@ static int hns_roce_v2_mw_write_mtpt(void *mb_buf, struct hns_roce_mw *mw) hr_reg_write(mpt_entry, MPT_PD, mw->pdn); hr_reg_enable(mpt_entry, MPT_R_INV_EN); - hr_reg_enable(mpt_entry, MPT_L_INV_EN); hr_reg_enable(mpt_entry, MPT_LW_EN); hr_reg_enable(mpt_entry, MPT_MR_MW); @@ -3794,7 +3791,6 @@ static const u32 wc_send_op_map[] = { HR_WC_OP_MAP(RDMA_READ, RDMA_READ), HR_WC_OP_MAP(RDMA_WRITE, RDMA_WRITE), HR_WC_OP_MAP(RDMA_WRITE_WITH_IMM, RDMA_WRITE), - HR_WC_OP_MAP(LOCAL_INV, LOCAL_INV), HR_WC_OP_MAP(ATOM_CMP_AND_SWAP, COMP_SWAP), HR_WC_OP_MAP(ATOM_FETCH_AND_ADD, FETCH_ADD), HR_WC_OP_MAP(ATOM_MSK_CMP_AND_SWAP, MASKED_COMP_SWAP), @@ -3844,9 +3840,6 @@ static void fill_send_wc(struct ib_wc *wc, struct hns_roce_v2_cqe *cqe) case HNS_ROCE_V2_WQE_OP_RDMA_WRITE_WITH_IMM: wc->wc_flags |= IB_WC_WITH_IMM; break; - case HNS_ROCE_V2_WQE_OP_LOCAL_INV: - wc->wc_flags |= IB_WC_WITH_INVALIDATE; - break; case HNS_ROCE_V2_WQE_OP_ATOM_CMP_AND_SWAP: case HNS_ROCE_V2_WQE_OP_ATOM_FETCH_AND_ADD: case HNS_ROCE_V2_WQE_OP_ATOM_MSK_CMP_AND_SWAP: diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h index b11579027e82..c7bf2d52c1cd 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h @@ -179,7 +179,6 @@ enum { HNS_ROCE_V2_WQE_OP_ATOM_MSK_CMP_AND_SWAP = 0x8, HNS_ROCE_V2_WQE_OP_ATOM_MSK_FETCH_AND_ADD = 0x9, HNS_ROCE_V2_WQE_OP_FAST_REG_PMR = 0xa, - HNS_ROCE_V2_WQE_OP_LOCAL_INV = 0xb, HNS_ROCE_V2_WQE_OP_BIND_MW = 0xc, HNS_ROCE_V2_WQE_OP_MASK = 0x1f, }; @@ -915,7 +914,6 @@ struct hns_roce_v2_rc_send_wqe { #define RC_SEND_WQE_OWNER RC_SEND_WQE_FIELD_LOC(7, 7) #define RC_SEND_WQE_CQE RC_SEND_WQE_FIELD_LOC(8, 8) #define RC_SEND_WQE_FENCE RC_SEND_WQE_FIELD_LOC(9, 9) -#define RC_SEND_WQE_SO RC_SEND_WQE_FIELD_LOC(10, 10) #define RC_SEND_WQE_SE RC_SEND_WQE_FIELD_LOC(11, 11) #define RC_SEND_WQE_INLINE RC_SEND_WQE_FIELD_LOC(12, 12) #define RC_SEND_WQE_WQE_INDEX RC_SEND_WQE_FIELD_LOC(30, 15) diff --git a/drivers/infiniband/hw/qedr/main.c b/drivers/infiniband/hw/qedr/main.c index 5152f10d2e6d..ba0c3e4c07d8 100644 --- a/drivers/infiniband/hw/qedr/main.c +++ b/drivers/infiniband/hw/qedr/main.c @@ -344,6 +344,10 @@ static int qedr_alloc_resources(struct qedr_dev *dev) if (IS_IWARP(dev)) { xa_init(&dev->qps); dev->iwarp_wq = create_singlethread_workqueue("qedr_iwarpq"); + if (!dev->iwarp_wq) { + rc = -ENOMEM; + goto err1; + } } /* Allocate Status blocks for CNQ */ @@ -351,7 +355,7 @@ static int qedr_alloc_resources(struct qedr_dev *dev) GFP_KERNEL); if (!dev->sb_array) { rc = -ENOMEM; - goto err1; + goto err_destroy_wq; } dev->cnq_array = kcalloc(dev->num_cnq, @@ -402,6 +406,9 @@ err3: kfree(dev->cnq_array); err2: kfree(dev->sb_array); +err_destroy_wq: + if (IS_IWARP(dev)) + destroy_workqueue(dev->iwarp_wq); err1: kfree(dev->sgid_tbl); return rc; diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c index ed5a09e86417..693081e813ec 100644 --- a/drivers/infiniband/sw/rxe/rxe_resp.c +++ b/drivers/infiniband/sw/rxe/rxe_resp.c @@ -806,8 +806,10 @@ static enum resp_states read_reply(struct rxe_qp *qp, skb = prepare_ack_packet(qp, &ack_pkt, opcode, payload, res->cur_psn, AETH_ACK_UNLIMITED); - if (!skb) + if (!skb) { + rxe_put(mr); return RESPST_ERR_RNR; + } rxe_mr_copy(mr, res->read.va, 
payload_addr(&ack_pkt), payload, RXE_FROM_MR_OBJ); diff --git a/drivers/input/joystick/iforce/iforce-main.c b/drivers/input/joystick/iforce/iforce-main.c index b86de1312512..84b87526b7ba 100644 --- a/drivers/input/joystick/iforce/iforce-main.c +++ b/drivers/input/joystick/iforce/iforce-main.c @@ -273,22 +273,22 @@ int iforce_init_device(struct device *parent, u16 bustype, * Get device info. */ - if (!iforce_get_id_packet(iforce, 'M', buf, &len) || len < 3) + if (!iforce_get_id_packet(iforce, 'M', buf, &len) && len >= 3) input_dev->id.vendor = get_unaligned_le16(buf + 1); else dev_warn(&iforce->dev->dev, "Device does not respond to id packet M\n"); - if (!iforce_get_id_packet(iforce, 'P', buf, &len) || len < 3) + if (!iforce_get_id_packet(iforce, 'P', buf, &len) && len >= 3) input_dev->id.product = get_unaligned_le16(buf + 1); else dev_warn(&iforce->dev->dev, "Device does not respond to id packet P\n"); - if (!iforce_get_id_packet(iforce, 'B', buf, &len) || len < 3) + if (!iforce_get_id_packet(iforce, 'B', buf, &len) && len >= 3) iforce->device_memory.end = get_unaligned_le16(buf + 1); else dev_warn(&iforce->dev->dev, "Device does not respond to id packet B\n"); - if (!iforce_get_id_packet(iforce, 'N', buf, &len) || len < 2) + if (!iforce_get_id_packet(iforce, 'N', buf, &len) && len >= 2) ff_effects = buf[1]; else dev_warn(&iforce->dev->dev, "Device does not respond to id packet N\n"); diff --git a/drivers/input/misc/soc_button_array.c b/drivers/input/misc/soc_button_array.c index 480476121c01..09489380afda 100644 --- a/drivers/input/misc/soc_button_array.c +++ b/drivers/input/misc/soc_button_array.c @@ -18,6 +18,10 @@ #include <linux/gpio.h> #include <linux/platform_device.h> +static bool use_low_level_irq; +module_param(use_low_level_irq, bool, 0444); +MODULE_PARM_DESC(use_low_level_irq, "Use low-level triggered IRQ instead of edge triggered"); + struct soc_button_info { const char *name; int acpi_index; @@ -74,6 +78,13 @@ static const struct dmi_system_id dmi_use_low_level_irq[] = { }, }, { + /* Acer Switch V 10 SW5-017, same issue as Acer Switch 10 SW5-012. */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Acer"), + DMI_MATCH(DMI_PRODUCT_NAME, "SW5-017"), + }, + }, + { /* * Acer One S1003. _LID method messes with power-button GPIO * IRQ settings, leading to a non working power-button. 
@@ -164,7 +175,8 @@ soc_button_device_create(struct platform_device *pdev, } /* See dmi_use_low_level_irq[] comment */ - if (!autorepeat && dmi_check_system(dmi_use_low_level_irq)) { + if (!autorepeat && (use_low_level_irq || + dmi_check_system(dmi_use_low_level_irq))) { irq_set_irq_type(irq, IRQ_TYPE_LEVEL_LOW); gpio_keys[n_buttons].irq = irq; gpio_keys[n_buttons].gpio = -ENOENT; diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c index fa021af8506e..b0f776448a1c 100644 --- a/drivers/input/mouse/synaptics.c +++ b/drivers/input/mouse/synaptics.c @@ -192,6 +192,7 @@ static const char * const smbus_pnp_ids[] = { "SYN3221", /* HP 15-ay000 */ "SYN323d", /* HP Spectre X360 13-w013dx */ "SYN3257", /* HP Envy 13-ad105ng */ + "SYN3286", /* HP Laptop 15-da3001TU */ NULL }; diff --git a/drivers/input/serio/i8042-acpipnpio.h b/drivers/input/serio/i8042-acpipnpio.h index 0778dc03cd9e..46f8a694291e 100644 --- a/drivers/input/serio/i8042-acpipnpio.h +++ b/drivers/input/serio/i8042-acpipnpio.h @@ -115,18 +115,18 @@ static const struct dmi_system_id i8042_dmi_quirk_table[] __initconst = { .driver_data = (void *)(SERIO_QUIRK_NOMUX | SERIO_QUIRK_RESET_NEVER) }, { - /* ASUS ZenBook UX425UA */ + /* ASUS ZenBook UX425UA/QA */ .matches = { DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), - DMI_MATCH(DMI_PRODUCT_NAME, "ZenBook UX425UA"), + DMI_MATCH(DMI_PRODUCT_NAME, "ZenBook UX425"), }, .driver_data = (void *)(SERIO_QUIRK_PROBE_DEFER | SERIO_QUIRK_RESET_NEVER) }, { - /* ASUS ZenBook UM325UA */ + /* ASUS ZenBook UM325UA/QA */ .matches = { DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), - DMI_MATCH(DMI_PRODUCT_NAME, "ZenBook UX325UA_UM325UA"), + DMI_MATCH(DMI_PRODUCT_NAME, "ZenBook UX325"), }, .driver_data = (void *)(SERIO_QUIRK_PROBE_DEFER | SERIO_QUIRK_RESET_NEVER) }, diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c index f9486495baef..6dac7c1853a5 100644 --- a/drivers/input/serio/i8042.c +++ b/drivers/input/serio/i8042.c @@ -1543,8 +1543,6 @@ static int i8042_probe(struct platform_device *dev) { int error; - i8042_platform_device = dev; - if (i8042_reset == I8042_RESET_ALWAYS) { error = i8042_controller_selftest(); if (error) @@ -1582,7 +1580,6 @@ static int i8042_probe(struct platform_device *dev) i8042_free_aux_ports(); /* in case KBD failed but AUX not */ i8042_free_irqs(); i8042_controller_reset(false); - i8042_platform_device = NULL; return error; } @@ -1592,7 +1589,6 @@ static int i8042_remove(struct platform_device *dev) i8042_unregister_ports(); i8042_free_irqs(); i8042_controller_reset(false); - i8042_platform_device = NULL; return 0; } diff --git a/drivers/input/touchscreen/goodix.c b/drivers/input/touchscreen/goodix.c index a33cc7950cf5..c281e49826c2 100644 --- a/drivers/input/touchscreen/goodix.c +++ b/drivers/input/touchscreen/goodix.c @@ -1158,6 +1158,7 @@ static int goodix_configure_dev(struct goodix_ts_data *ts) input_set_abs_params(ts->input_dev, ABS_MT_WIDTH_MAJOR, 0, 255, 0, 0); input_set_abs_params(ts->input_dev, ABS_MT_TOUCH_MAJOR, 0, 255, 0, 0); +retry_read_config: /* Read configuration and apply touchscreen parameters */ goodix_read_config(ts); @@ -1165,6 +1166,16 @@ static int goodix_configure_dev(struct goodix_ts_data *ts) touchscreen_parse_properties(ts->input_dev, true, &ts->prop); if (!ts->prop.max_x || !ts->prop.max_y || !ts->max_touch_num) { + if (!ts->reset_controller_at_probe && + ts->irq_pin_access_method != IRQ_PIN_ACCESS_NONE) { + dev_info(&ts->client->dev, "Config not set, resetting controller\n"); + /* Retry after a 
controller reset */ + ts->reset_controller_at_probe = true; + error = goodix_reset(ts); + if (error) + return error; + goto retry_read_config; + } dev_err(&ts->client->dev, "Invalid config (%d, %d, %d), using defaults\n", ts->prop.max_x, ts->prop.max_y, ts->max_touch_num); diff --git a/drivers/iommu/amd/iommu.c b/drivers/iommu/amd/iommu.c index 65856e401949..d3b39d0416fa 100644 --- a/drivers/iommu/amd/iommu.c +++ b/drivers/iommu/amd/iommu.c @@ -2330,7 +2330,8 @@ static void amd_iommu_get_resv_regions(struct device *dev, type = IOMMU_RESV_RESERVED; region = iommu_alloc_resv_region(entry->address_start, - length, prot, type); + length, prot, type, + GFP_KERNEL); if (!region) { dev_err(dev, "Out of memory allocating dm-regions\n"); return; @@ -2340,14 +2341,14 @@ static void amd_iommu_get_resv_regions(struct device *dev, region = iommu_alloc_resv_region(MSI_RANGE_START, MSI_RANGE_END - MSI_RANGE_START + 1, - 0, IOMMU_RESV_MSI); + 0, IOMMU_RESV_MSI, GFP_KERNEL); if (!region) return; list_add_tail(®ion->list, head); region = iommu_alloc_resv_region(HT_RANGE_START, HT_RANGE_END - HT_RANGE_START + 1, - 0, IOMMU_RESV_RESERVED); + 0, IOMMU_RESV_RESERVED, GFP_KERNEL); if (!region) return; list_add_tail(®ion->list, head); diff --git a/drivers/iommu/apple-dart.c b/drivers/iommu/apple-dart.c index 4526575b999e..4f4a323be0d0 100644 --- a/drivers/iommu/apple-dart.c +++ b/drivers/iommu/apple-dart.c @@ -758,7 +758,7 @@ static void apple_dart_get_resv_regions(struct device *dev, region = iommu_alloc_resv_region(DOORBELL_ADDR, PAGE_SIZE, prot, - IOMMU_RESV_MSI); + IOMMU_RESV_MSI, GFP_KERNEL); if (!region) return; diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c index ba47c73f5b8c..6d5df91c5c46 100644 --- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c +++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c @@ -2757,7 +2757,7 @@ static void arm_smmu_get_resv_regions(struct device *dev, int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO; region = iommu_alloc_resv_region(MSI_IOVA_BASE, MSI_IOVA_LENGTH, - prot, IOMMU_RESV_SW_MSI); + prot, IOMMU_RESV_SW_MSI, GFP_KERNEL); if (!region) return; diff --git a/drivers/iommu/arm/arm-smmu/arm-smmu.c b/drivers/iommu/arm/arm-smmu/arm-smmu.c index 6c1114a4d6cc..30dab1418e3f 100644 --- a/drivers/iommu/arm/arm-smmu/arm-smmu.c +++ b/drivers/iommu/arm/arm-smmu/arm-smmu.c @@ -1534,7 +1534,7 @@ static void arm_smmu_get_resv_regions(struct device *dev, int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO; region = iommu_alloc_resv_region(MSI_IOVA_BASE, MSI_IOVA_LENGTH, - prot, IOMMU_RESV_SW_MSI); + prot, IOMMU_RESV_SW_MSI, GFP_KERNEL); if (!region) return; diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c index a8b36c3fddf1..996a8b5ee5ee 100644 --- a/drivers/iommu/intel/iommu.c +++ b/drivers/iommu/intel/iommu.c @@ -959,11 +959,9 @@ static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain, domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE); pteval = ((uint64_t)virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE; - if (domain_use_first_level(domain)) { - pteval |= DMA_FL_PTE_XD | DMA_FL_PTE_US; - if (iommu_is_dma_domain(&domain->domain)) - pteval |= DMA_FL_PTE_ACCESS; - } + if (domain_use_first_level(domain)) + pteval |= DMA_FL_PTE_XD | DMA_FL_PTE_US | DMA_FL_PTE_ACCESS; + if (cmpxchg64(&pte->val, 0ULL, pteval)) /* Someone else set it while we were thinking; use theirs. 
*/ free_pgtable_page(tmp_page); @@ -2410,6 +2408,7 @@ static int __init si_domain_init(int hw) if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) { domain_exit(si_domain); + si_domain = NULL; return -EFAULT; } @@ -3052,6 +3051,10 @@ free_iommu: disable_dmar_iommu(iommu); free_dmar_iommu(iommu); } + if (si_domain) { + domain_exit(si_domain); + si_domain = NULL; + } return ret; } @@ -4534,7 +4537,7 @@ static void intel_iommu_get_resv_regions(struct device *device, struct device *i_dev; int i; - down_read(&dmar_global_lock); + rcu_read_lock(); for_each_rmrr_units(rmrr) { for_each_active_dev_scope(rmrr->devices, rmrr->devices_cnt, i, i_dev) { @@ -4552,14 +4555,15 @@ static void intel_iommu_get_resv_regions(struct device *device, IOMMU_RESV_DIRECT_RELAXABLE : IOMMU_RESV_DIRECT; resv = iommu_alloc_resv_region(rmrr->base_address, - length, prot, type); + length, prot, type, + GFP_ATOMIC); if (!resv) break; list_add_tail(&resv->list, head); } } - up_read(&dmar_global_lock); + rcu_read_unlock(); #ifdef CONFIG_INTEL_IOMMU_FLOPPY_WA if (dev_is_pci(device)) { @@ -4567,7 +4571,8 @@ static void intel_iommu_get_resv_regions(struct device *device, if ((pdev->class >> 8) == PCI_CLASS_BRIDGE_ISA) { reg = iommu_alloc_resv_region(0, 1UL << 24, prot, - IOMMU_RESV_DIRECT_RELAXABLE); + IOMMU_RESV_DIRECT_RELAXABLE, + GFP_KERNEL); if (reg) list_add_tail(®->list, head); } @@ -4576,7 +4581,7 @@ static void intel_iommu_get_resv_regions(struct device *device, reg = iommu_alloc_resv_region(IOAPIC_RANGE_START, IOAPIC_RANGE_END - IOAPIC_RANGE_START + 1, - 0, IOMMU_RESV_MSI); + 0, IOMMU_RESV_MSI, GFP_KERNEL); if (!reg) return; list_add_tail(®->list, head); diff --git a/drivers/iommu/intel/pasid.c b/drivers/iommu/intel/pasid.c index c30ddac40ee5..e13d7e5273e1 100644 --- a/drivers/iommu/intel/pasid.c +++ b/drivers/iommu/intel/pasid.c @@ -642,7 +642,7 @@ int intel_pasid_setup_second_level(struct intel_iommu *iommu, * Since it is a second level only translation setup, we should * set SRE bit as well (addresses are expected to be GPAs). */ - if (pasid != PASID_RID2PASID) + if (pasid != PASID_RID2PASID && ecap_srs(iommu->ecap)) pasid_set_sre(pte); pasid_set_present(pte); spin_unlock(&iommu->lock); @@ -685,7 +685,8 @@ int intel_pasid_setup_pass_through(struct intel_iommu *iommu, * We should set SRE bit as well since the addresses are expected * to be GPAs. 
*/ - pasid_set_sre(pte); + if (ecap_srs(iommu->ecap)) + pasid_set_sre(pte); pasid_set_present(pte); spin_unlock(&iommu->lock); diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c index 4893c2429ca5..65a3b3d886dc 100644 --- a/drivers/iommu/iommu.c +++ b/drivers/iommu/iommu.c @@ -504,7 +504,7 @@ static int iommu_insert_resv_region(struct iommu_resv_region *new, LIST_HEAD(stack); nr = iommu_alloc_resv_region(new->start, new->length, - new->prot, new->type); + new->prot, new->type, GFP_KERNEL); if (!nr) return -ENOMEM; @@ -2579,11 +2579,12 @@ EXPORT_SYMBOL(iommu_put_resv_regions); struct iommu_resv_region *iommu_alloc_resv_region(phys_addr_t start, size_t length, int prot, - enum iommu_resv_type type) + enum iommu_resv_type type, + gfp_t gfp) { struct iommu_resv_region *region; - region = kzalloc(sizeof(*region), GFP_KERNEL); + region = kzalloc(sizeof(*region), gfp); if (!region) return NULL; diff --git a/drivers/iommu/mtk_iommu.c b/drivers/iommu/mtk_iommu.c index 5a4e00e4bbbc..2ab2ecfe01f8 100644 --- a/drivers/iommu/mtk_iommu.c +++ b/drivers/iommu/mtk_iommu.c @@ -917,7 +917,8 @@ static void mtk_iommu_get_resv_regions(struct device *dev, continue; region = iommu_alloc_resv_region(resv->iova_base, resv->size, - prot, IOMMU_RESV_RESERVED); + prot, IOMMU_RESV_RESERVED, + GFP_KERNEL); if (!region) return; diff --git a/drivers/iommu/virtio-iommu.c b/drivers/iommu/virtio-iommu.c index b7c22802f57c..8b1b5c270e50 100644 --- a/drivers/iommu/virtio-iommu.c +++ b/drivers/iommu/virtio-iommu.c @@ -490,11 +490,13 @@ static int viommu_add_resv_mem(struct viommu_endpoint *vdev, fallthrough; case VIRTIO_IOMMU_RESV_MEM_T_RESERVED: region = iommu_alloc_resv_region(start, size, 0, - IOMMU_RESV_RESERVED); + IOMMU_RESV_RESERVED, + GFP_KERNEL); break; case VIRTIO_IOMMU_RESV_MEM_T_MSI: region = iommu_alloc_resv_region(start, size, prot, - IOMMU_RESV_MSI); + IOMMU_RESV_MSI, + GFP_KERNEL); break; } if (!region) @@ -909,7 +911,8 @@ static void viommu_get_resv_regions(struct device *dev, struct list_head *head) */ if (!msi) { msi = iommu_alloc_resv_region(MSI_IOVA_BASE, MSI_IOVA_LENGTH, - prot, IOMMU_RESV_SW_MSI); + prot, IOMMU_RESV_SW_MSI, + GFP_KERNEL); if (!msi) return; diff --git a/drivers/isdn/hardware/mISDN/netjet.c b/drivers/isdn/hardware/mISDN/netjet.c index a52f275f8263..f8447135a902 100644 --- a/drivers/isdn/hardware/mISDN/netjet.c +++ b/drivers/isdn/hardware/mISDN/netjet.c @@ -956,7 +956,7 @@ nj_release(struct tiger_hw *card) } if (card->irq > 0) free_irq(card->irq, card); - if (card->isac.dch.dev.dev.class) + if (device_is_registered(&card->isac.dch.dev.dev)) mISDN_unregister_device(&card->isac.dch.dev); for (i = 0; i < 2; i++) { diff --git a/drivers/isdn/mISDN/core.c b/drivers/isdn/mISDN/core.c index a41b4b264594..90ee56d07a6e 100644 --- a/drivers/isdn/mISDN/core.c +++ b/drivers/isdn/mISDN/core.c @@ -222,7 +222,7 @@ mISDN_register_device(struct mISDNdevice *dev, err = get_free_devid(); if (err < 0) - goto error1; + return err; dev->id = err; device_initialize(&dev->dev); @@ -233,11 +233,12 @@ mISDN_register_device(struct mISDNdevice *dev, if (debug & DEBUG_CORE) printk(KERN_DEBUG "mISDN_register %s %d\n", dev_name(&dev->dev), dev->id); + dev->dev.class = &mISDN_class; + err = create_stack(dev); if (err) goto error1; - dev->dev.class = &mISDN_class; dev->dev.platform_data = dev; dev->dev.parent = parent; dev_set_drvdata(&dev->dev, dev); @@ -249,8 +250,8 @@ mISDN_register_device(struct mISDNdevice *dev, error3: delete_stack(dev); - return err; error1: + put_device(&dev->dev); return err; } diff --git 
a/drivers/isdn/mISDN/dsp_pipeline.c b/drivers/isdn/mISDN/dsp_pipeline.c index c3b2c99b5cd5..cfbcd9e973c2 100644 --- a/drivers/isdn/mISDN/dsp_pipeline.c +++ b/drivers/isdn/mISDN/dsp_pipeline.c @@ -77,6 +77,7 @@ int mISDN_dsp_element_register(struct mISDN_dsp_element *elem) if (!entry) return -ENOMEM; + INIT_LIST_HEAD(&entry->list); entry->elem = elem; entry->dev.class = elements_class; @@ -107,7 +108,7 @@ err2: device_unregister(&entry->dev); return ret; err1: - kfree(entry); + put_device(&entry->dev); return ret; } EXPORT_SYMBOL(mISDN_dsp_element_register); diff --git a/drivers/leds/simple/simatic-ipc-leds-gpio.c b/drivers/leds/simple/simatic-ipc-leds-gpio.c index b9eeb8702df0..07f0d79d604d 100644 --- a/drivers/leds/simple/simatic-ipc-leds-gpio.c +++ b/drivers/leds/simple/simatic-ipc-leds-gpio.c @@ -20,12 +20,12 @@ static struct gpiod_lookup_table *simatic_ipc_led_gpio_table; static struct gpiod_lookup_table simatic_ipc_led_gpio_table_127e = { .dev_id = "leds-gpio", .table = { - GPIO_LOOKUP_IDX("apollolake-pinctrl.0", 52, NULL, 1, GPIO_ACTIVE_LOW), - GPIO_LOOKUP_IDX("apollolake-pinctrl.0", 53, NULL, 2, GPIO_ACTIVE_LOW), - GPIO_LOOKUP_IDX("apollolake-pinctrl.0", 57, NULL, 3, GPIO_ACTIVE_LOW), - GPIO_LOOKUP_IDX("apollolake-pinctrl.0", 58, NULL, 4, GPIO_ACTIVE_LOW), - GPIO_LOOKUP_IDX("apollolake-pinctrl.0", 60, NULL, 5, GPIO_ACTIVE_LOW), - GPIO_LOOKUP_IDX("apollolake-pinctrl.0", 51, NULL, 0, GPIO_ACTIVE_LOW), + GPIO_LOOKUP_IDX("apollolake-pinctrl.0", 52, NULL, 0, GPIO_ACTIVE_LOW), + GPIO_LOOKUP_IDX("apollolake-pinctrl.0", 53, NULL, 1, GPIO_ACTIVE_LOW), + GPIO_LOOKUP_IDX("apollolake-pinctrl.0", 57, NULL, 2, GPIO_ACTIVE_LOW), + GPIO_LOOKUP_IDX("apollolake-pinctrl.0", 58, NULL, 3, GPIO_ACTIVE_LOW), + GPIO_LOOKUP_IDX("apollolake-pinctrl.0", 60, NULL, 4, GPIO_ACTIVE_LOW), + GPIO_LOOKUP_IDX("apollolake-pinctrl.0", 51, NULL, 5, GPIO_ACTIVE_LOW), GPIO_LOOKUP_IDX("apollolake-pinctrl.0", 56, NULL, 6, GPIO_ACTIVE_LOW), GPIO_LOOKUP_IDX("apollolake-pinctrl.0", 59, NULL, 7, GPIO_ACTIVE_HIGH), }, diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c index 09c7ed2650ca..bb786c39545e 100644 --- a/drivers/md/dm-bufio.c +++ b/drivers/md/dm-bufio.c @@ -795,7 +795,8 @@ static void __make_buffer_clean(struct dm_buffer *b) { BUG_ON(b->hold_count); - if (!b->state) /* fast case */ + /* smp_load_acquire() pairs with read_endio()'s smp_mb__before_atomic() */ + if (!smp_load_acquire(&b->state)) /* fast case */ return; wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE); @@ -816,7 +817,7 @@ static struct dm_buffer *__get_unclaimed_buffer(struct dm_bufio_client *c) BUG_ON(test_bit(B_DIRTY, &b->state)); if (static_branch_unlikely(&no_sleep_enabled) && c->no_sleep && - unlikely(test_bit(B_READING, &b->state))) + unlikely(test_bit_acquire(B_READING, &b->state))) continue; if (!b->hold_count) { @@ -1058,7 +1059,7 @@ found_buffer: * If the user called both dm_bufio_prefetch and dm_bufio_get on * the same buffer, it would deadlock if we waited. */ - if (nf == NF_GET && unlikely(test_bit(B_READING, &b->state))) + if (nf == NF_GET && unlikely(test_bit_acquire(B_READING, &b->state))) return NULL; b->hold_count++; @@ -1218,7 +1219,7 @@ void dm_bufio_release(struct dm_buffer *b) * invalid buffer. 
*/ if ((b->read_error || b->write_error) && - !test_bit(B_READING, &b->state) && + !test_bit_acquire(B_READING, &b->state) && !test_bit(B_WRITING, &b->state) && !test_bit(B_DIRTY, &b->state)) { __unlink_buffer(b); @@ -1479,7 +1480,7 @@ EXPORT_SYMBOL_GPL(dm_bufio_release_move); static void forget_buffer_locked(struct dm_buffer *b) { - if (likely(!b->hold_count) && likely(!b->state)) { + if (likely(!b->hold_count) && likely(!smp_load_acquire(&b->state))) { __unlink_buffer(b); __free_buffer_wake(b); } @@ -1639,7 +1640,7 @@ static bool __try_evict_buffer(struct dm_buffer *b, gfp_t gfp) { if (!(gfp & __GFP_FS) || (static_branch_unlikely(&no_sleep_enabled) && b->c->no_sleep)) { - if (test_bit(B_READING, &b->state) || + if (test_bit_acquire(B_READING, &b->state) || test_bit(B_WRITING, &b->state) || test_bit(B_DIRTY, &b->state)) return false; @@ -1857,6 +1858,8 @@ bad: dm_io_client_destroy(c->dm_io); bad_dm_io: mutex_destroy(&c->lock); + if (c->no_sleep) + static_branch_dec(&no_sleep_enabled); kfree(c); bad_client: return ERR_PTR(r); diff --git a/drivers/md/dm-cache-policy.h b/drivers/md/dm-cache-policy.h index c05fc3436cef..06eb31af626f 100644 --- a/drivers/md/dm-cache-policy.h +++ b/drivers/md/dm-cache-policy.h @@ -166,7 +166,7 @@ struct dm_cache_policy_type { struct dm_cache_policy_type *real; /* - * Policies may store a hint for each each cache block. + * Policies may store a hint for each cache block. * Currently the size of this hint must be 0 or 4 bytes but we * expect to relax this in future. */ diff --git a/drivers/md/dm-clone-target.c b/drivers/md/dm-clone-target.c index 811b0a5379d0..2f1cc66d2641 100644 --- a/drivers/md/dm-clone-target.c +++ b/drivers/md/dm-clone-target.c @@ -2035,7 +2035,7 @@ static void disable_passdown_if_not_supported(struct clone *clone) reason = "max discard sectors smaller than a region"; if (reason) { - DMWARN("Destination device (%pd) %s: Disabling discard passdown.", + DMWARN("Destination device (%pg) %s: Disabling discard passdown.", dest_dev, reason); clear_bit(DM_CLONE_DISCARD_PASSDOWN, &clone->flags); } diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index 159c6806c19b..2653516bcdef 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c @@ -3630,6 +3630,7 @@ static void crypt_io_hints(struct dm_target *ti, struct queue_limits *limits) limits->physical_block_size = max_t(unsigned, limits->physical_block_size, cc->sector_size); limits->io_min = max_t(unsigned, limits->io_min, cc->sector_size); + limits->dma_alignment = limits->logical_block_size - 1; } static struct target_type crypt_target = { diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c index aaf2472df6e5..e97e9f97456d 100644 --- a/drivers/md/dm-integrity.c +++ b/drivers/md/dm-integrity.c @@ -263,6 +263,7 @@ struct dm_integrity_c { struct completion crypto_backoff; + bool wrote_to_journal; bool journal_uptodate; bool just_formatted; bool recalculate_flag; @@ -2375,6 +2376,8 @@ static void integrity_commit(struct work_struct *w) if (!commit_sections) goto release_flush_bios; + ic->wrote_to_journal = true; + i = commit_start; for (n = 0; n < commit_sections; n++) { for (j = 0; j < ic->journal_section_entries; j++) { @@ -2591,10 +2594,6 @@ static void integrity_writer(struct work_struct *w) unsigned prev_free_sectors; - /* the following test is not needed, but it tests the replay code */ - if (unlikely(dm_post_suspending(ic->ti)) && !ic->meta_dev) - return; - spin_lock_irq(&ic->endio_wait.lock); write_start = ic->committed_section; write_sections = 
ic->n_committed_sections; @@ -3101,10 +3100,17 @@ static void dm_integrity_postsuspend(struct dm_target *ti) drain_workqueue(ic->commit_wq); if (ic->mode == 'J') { - if (ic->meta_dev) - queue_work(ic->writer_wq, &ic->writer_work); + queue_work(ic->writer_wq, &ic->writer_work); drain_workqueue(ic->writer_wq); dm_integrity_flush_buffers(ic, true); + if (ic->wrote_to_journal) { + init_journal(ic, ic->free_section, + ic->journal_sections - ic->free_section, ic->commit_seq); + if (ic->free_section) { + init_journal(ic, 0, ic->free_section, + next_commit_seq(ic->commit_seq)); + } + } } if (ic->mode == 'B') { @@ -3132,6 +3138,8 @@ static void dm_integrity_resume(struct dm_target *ti) DEBUG_print("resume\n"); + ic->wrote_to_journal = false; + if (ic->provided_data_sectors != old_provided_data_sectors) { if (ic->provided_data_sectors > old_provided_data_sectors && ic->mode == 'B' && @@ -3370,6 +3378,7 @@ static void dm_integrity_io_hints(struct dm_target *ti, struct queue_limits *lim limits->logical_block_size = ic->sectors_per_block << SECTOR_SHIFT; limits->physical_block_size = ic->sectors_per_block << SECTOR_SHIFT; blk_limits_io_min(limits, ic->sectors_per_block << SECTOR_SHIFT); + limits->dma_alignment = limits->logical_block_size - 1; } } diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c index 98976aaa9db9..3bfc1583c20a 100644 --- a/drivers/md/dm-ioctl.c +++ b/drivers/md/dm-ioctl.c @@ -434,10 +434,10 @@ static struct mapped_device *dm_hash_rename(struct dm_ioctl *param, hc = __get_name_cell(new); if (hc) { - DMWARN("Unable to change %s on mapped device %s to one that " - "already exists: %s", - change_uuid ? "uuid" : "name", - param->name, new); + DMERR("Unable to change %s on mapped device %s to one that " + "already exists: %s", + change_uuid ? "uuid" : "name", + param->name, new); dm_put(hc->md); up_write(&_hash_lock); kfree(new_data); @@ -449,8 +449,8 @@ static struct mapped_device *dm_hash_rename(struct dm_ioctl *param, */ hc = __get_name_cell(param->name); if (!hc) { - DMWARN("Unable to rename non-existent device, %s to %s%s", - param->name, change_uuid ? "uuid " : "", new); + DMERR("Unable to rename non-existent device, %s to %s%s", + param->name, change_uuid ? "uuid " : "", new); up_write(&_hash_lock); kfree(new_data); return ERR_PTR(-ENXIO); @@ -460,9 +460,9 @@ static struct mapped_device *dm_hash_rename(struct dm_ioctl *param, * Does this device already have a uuid? */ if (change_uuid && hc->uuid) { - DMWARN("Unable to change uuid of mapped device %s to %s " - "because uuid is already set to %s", - param->name, new, hc->uuid); + DMERR("Unable to change uuid of mapped device %s to %s " + "because uuid is already set to %s", + param->name, new, hc->uuid); dm_put(hc->md); up_write(&_hash_lock); kfree(new_data); @@ -655,7 +655,7 @@ static void list_version_get_needed(struct target_type *tt, void *needed_param) size_t *needed = needed_param; *needed += sizeof(struct dm_target_versions); - *needed += strlen(tt->name); + *needed += strlen(tt->name) + 1; *needed += ALIGN_MASK; } @@ -720,7 +720,7 @@ static int __list_versions(struct dm_ioctl *param, size_t param_size, const char iter_info.old_vers = NULL; iter_info.vers = vers; iter_info.flags = 0; - iter_info.end = (char *)vers+len; + iter_info.end = (char *)vers + needed; /* * Now loop through filling out the names & versions. 
@@ -750,7 +750,7 @@ static int get_target_version(struct file *filp, struct dm_ioctl *param, size_t static int check_name(const char *name) { if (strchr(name, '/')) { - DMWARN("invalid device name"); + DMERR("invalid device name"); return -EINVAL; } @@ -773,7 +773,7 @@ static struct dm_table *dm_get_inactive_table(struct mapped_device *md, int *src down_read(&_hash_lock); hc = dm_get_mdptr(md); if (!hc || hc->md != md) { - DMWARN("device has been removed from the dev hash table."); + DMERR("device has been removed from the dev hash table."); goto out; } @@ -1026,7 +1026,7 @@ static int dev_rename(struct file *filp, struct dm_ioctl *param, size_t param_si if (new_data < param->data || invalid_str(new_data, (void *) param + param_size) || !*new_data || strlen(new_data) > (change_uuid ? DM_UUID_LEN - 1 : DM_NAME_LEN - 1)) { - DMWARN("Invalid new mapped device name or uuid string supplied."); + DMERR("Invalid new mapped device name or uuid string supplied."); return -EINVAL; } @@ -1061,7 +1061,7 @@ static int dev_set_geometry(struct file *filp, struct dm_ioctl *param, size_t pa if (geostr < param->data || invalid_str(geostr, (void *) param + param_size)) { - DMWARN("Invalid geometry supplied."); + DMERR("Invalid geometry supplied."); goto out; } @@ -1069,13 +1069,13 @@ static int dev_set_geometry(struct file *filp, struct dm_ioctl *param, size_t pa indata + 1, indata + 2, indata + 3, &dummy); if (x != 4) { - DMWARN("Unable to interpret geometry settings."); + DMERR("Unable to interpret geometry settings."); goto out; } if (indata[0] > 65535 || indata[1] > 255 || indata[2] > 255 || indata[3] > ULONG_MAX) { - DMWARN("Geometry exceeds range limits."); + DMERR("Geometry exceeds range limits."); goto out; } @@ -1387,7 +1387,7 @@ static int populate_table(struct dm_table *table, char *target_params; if (!param->target_count) { - DMWARN("populate_table: no targets specified"); + DMERR("populate_table: no targets specified"); return -EINVAL; } @@ -1395,7 +1395,7 @@ static int populate_table(struct dm_table *table, r = next_target(spec, next, end, &spec, &target_params); if (r) { - DMWARN("unable to find target"); + DMERR("unable to find target"); return r; } @@ -1404,7 +1404,7 @@ static int populate_table(struct dm_table *table, (sector_t) spec->length, target_params); if (r) { - DMWARN("error adding target to table"); + DMERR("error adding target to table"); return r; } @@ -1451,8 +1451,8 @@ static int table_load(struct file *filp, struct dm_ioctl *param, size_t param_si if (immutable_target_type && (immutable_target_type != dm_table_get_immutable_target_type(t)) && !dm_table_get_wildcard_target(t)) { - DMWARN("can't replace immutable target type %s", - immutable_target_type->name); + DMERR("can't replace immutable target type %s", + immutable_target_type->name); r = -EINVAL; goto err_unlock_md_type; } @@ -1461,12 +1461,12 @@ static int table_load(struct file *filp, struct dm_ioctl *param, size_t param_si /* setup md->queue to reflect md's type (may block) */ r = dm_setup_md_queue(md, t); if (r) { - DMWARN("unable to set up device queue for new table."); + DMERR("unable to set up device queue for new table."); goto err_unlock_md_type; } } else if (!is_valid_type(dm_get_md_type(md), dm_table_get_type(t))) { - DMWARN("can't change device type (old=%u vs new=%u) after initial table load.", - dm_get_md_type(md), dm_table_get_type(t)); + DMERR("can't change device type (old=%u vs new=%u) after initial table load.", + dm_get_md_type(md), dm_table_get_type(t)); r = -EINVAL; goto err_unlock_md_type; } @@ 
-1477,7 +1477,7 @@ static int table_load(struct file *filp, struct dm_ioctl *param, size_t param_si down_write(&_hash_lock); hc = dm_get_mdptr(md); if (!hc || hc->md != md) { - DMWARN("device has been removed from the dev hash table."); + DMERR("device has been removed from the dev hash table."); up_write(&_hash_lock); r = -ENXIO; goto err_destroy_table; @@ -1686,19 +1686,19 @@ static int target_message(struct file *filp, struct dm_ioctl *param, size_t para if (tmsg < (struct dm_target_msg *) param->data || invalid_str(tmsg->message, (void *) param + param_size)) { - DMWARN("Invalid target message parameters."); + DMERR("Invalid target message parameters."); r = -EINVAL; goto out; } r = dm_split_args(&argc, &argv, tmsg->message); if (r) { - DMWARN("Failed to split target message parameters"); + DMERR("Failed to split target message parameters"); goto out; } if (!argc) { - DMWARN("Empty message received."); + DMERR("Empty message received."); r = -EINVAL; goto out_argv; } @@ -1718,12 +1718,12 @@ static int target_message(struct file *filp, struct dm_ioctl *param, size_t para ti = dm_table_find_target(table, tmsg->sector); if (!ti) { - DMWARN("Target message sector outside device."); + DMERR("Target message sector outside device."); r = -EINVAL; } else if (ti->type->message) r = ti->type->message(ti, argc, argv, result, maxlen); else { - DMWARN("Target type does not support messages"); + DMERR("Target type does not support messages"); r = -EINVAL; } @@ -1814,11 +1814,11 @@ static int check_version(unsigned int cmd, struct dm_ioctl __user *user) if ((DM_VERSION_MAJOR != version[0]) || (DM_VERSION_MINOR < version[1])) { - DMWARN("ioctl interface mismatch: " - "kernel(%u.%u.%u), user(%u.%u.%u), cmd(%d)", - DM_VERSION_MAJOR, DM_VERSION_MINOR, - DM_VERSION_PATCHLEVEL, - version[0], version[1], version[2], cmd); + DMERR("ioctl interface mismatch: " + "kernel(%u.%u.%u), user(%u.%u.%u), cmd(%d)", + DM_VERSION_MAJOR, DM_VERSION_MINOR, + DM_VERSION_PATCHLEVEL, + version[0], version[1], version[2], cmd); r = -EINVAL; } @@ -1927,11 +1927,11 @@ static int validate_params(uint cmd, struct dm_ioctl *param) if (cmd == DM_DEV_CREATE_CMD) { if (!*param->name) { - DMWARN("name not supplied when creating device"); + DMERR("name not supplied when creating device"); return -EINVAL; } } else if (*param->uuid && *param->name) { - DMWARN("only supply one of name or uuid, cmd(%u)", cmd); + DMERR("only supply one of name or uuid, cmd(%u)", cmd); return -EINVAL; } @@ -1978,7 +1978,7 @@ static int ctl_ioctl(struct file *file, uint command, struct dm_ioctl __user *us fn = lookup_ioctl(cmd, &ioctl_flags); if (!fn) { - DMWARN("dm_ctl_ioctl: unknown command 0x%x", command); + DMERR("dm_ctl_ioctl: unknown command 0x%x", command); return -ENOTTY; } @@ -2203,7 +2203,7 @@ int __init dm_early_create(struct dm_ioctl *dmi, (sector_t) spec_array[i]->length, target_params_array[i]); if (r) { - DMWARN("error adding target to table"); + DMERR("error adding target to table"); goto err_destroy_table; } } @@ -2216,7 +2216,7 @@ int __init dm_early_create(struct dm_ioctl *dmi, /* setup md->queue to reflect md's type (may block) */ r = dm_setup_md_queue(md, t); if (r) { - DMWARN("unable to set up device queue for new table."); + DMERR("unable to set up device queue for new table."); goto err_destroy_table; } diff --git a/drivers/md/dm-log-writes.c b/drivers/md/dm-log-writes.c index 20fd688f72e7..178e13a5b059 100644 --- a/drivers/md/dm-log-writes.c +++ b/drivers/md/dm-log-writes.c @@ -875,6 +875,7 @@ static void log_writes_io_hints(struct 
dm_target *ti, struct queue_limits *limit limits->logical_block_size = bdev_logical_block_size(lc->dev->bdev); limits->physical_block_size = bdev_physical_block_size(lc->dev->bdev); limits->io_min = limits->physical_block_size; + limits->dma_alignment = limits->logical_block_size - 1; } #if IS_ENABLED(CONFIG_FS_DAX) diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c index c640be453313..54263679a7b1 100644 --- a/drivers/md/dm-raid.c +++ b/drivers/md/dm-raid.c @@ -2529,7 +2529,7 @@ static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs) * of the "sync" directive. * * With reshaping capability added, we must ensure that - * that the "sync" directive is disallowed during the reshape. + * the "sync" directive is disallowed during the reshape. */ if (test_bit(__CTR_FLAG_SYNC, &rs->ctr_flags)) continue; @@ -2590,7 +2590,7 @@ static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs) /* * Adjust data_offset and new_data_offset on all disk members of @rs - * for out of place reshaping if requested by contructor + * for out of place reshaping if requested by constructor * * We need free space at the beginning of each raid disk for forward * and at the end for backward reshapes which userspace has to provide diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c index 3001b10a3fbf..a41209a43506 100644 --- a/drivers/md/dm-rq.c +++ b/drivers/md/dm-rq.c @@ -238,7 +238,7 @@ static void dm_done(struct request *clone, blk_status_t error, bool mapped) dm_requeue_original_request(tio, true); break; default: - DMWARN("unimplemented target endio return value: %d", r); + DMCRIT("unimplemented target endio return value: %d", r); BUG(); } } @@ -409,7 +409,7 @@ static int map_request(struct dm_rq_target_io *tio) dm_kill_unmapped_request(rq, BLK_STS_IOERR); break; default: - DMWARN("unimplemented target map return value: %d", r); + DMCRIT("unimplemented target map return value: %d", r); BUG(); } diff --git a/drivers/md/dm-stats.c b/drivers/md/dm-stats.c index 8326f9fe0e91..f105a71915ab 100644 --- a/drivers/md/dm-stats.c +++ b/drivers/md/dm-stats.c @@ -1220,7 +1220,7 @@ int dm_stats_message(struct mapped_device *md, unsigned argc, char **argv, return 2; /* this wasn't a stats message */ if (r == -EINVAL) - DMWARN("Invalid parameters for message %s", argv[0]); + DMCRIT("Invalid parameters for message %s", argv[0]); return r; } diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index d8034ff0cb24..078da18bb86d 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c @@ -234,12 +234,12 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev, return 0; if ((start >= dev_size) || (start + len > dev_size)) { - DMWARN("%s: %pg too small for target: " - "start=%llu, len=%llu, dev_size=%llu", - dm_device_name(ti->table->md), bdev, - (unsigned long long)start, - (unsigned long long)len, - (unsigned long long)dev_size); + DMERR("%s: %pg too small for target: " + "start=%llu, len=%llu, dev_size=%llu", + dm_device_name(ti->table->md), bdev, + (unsigned long long)start, + (unsigned long long)len, + (unsigned long long)dev_size); return 1; } @@ -251,10 +251,10 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev, unsigned int zone_sectors = bdev_zone_sectors(bdev); if (start & (zone_sectors - 1)) { - DMWARN("%s: start=%llu not aligned to h/w zone size %u of %pg", - dm_device_name(ti->table->md), - (unsigned long long)start, - zone_sectors, bdev); + DMERR("%s: start=%llu not aligned to h/w zone size %u of %pg", + 
dm_device_name(ti->table->md), + (unsigned long long)start, + zone_sectors, bdev); return 1; } @@ -268,10 +268,10 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev, * the sector range. */ if (len & (zone_sectors - 1)) { - DMWARN("%s: len=%llu not aligned to h/w zone size %u of %pg", - dm_device_name(ti->table->md), - (unsigned long long)len, - zone_sectors, bdev); + DMERR("%s: len=%llu not aligned to h/w zone size %u of %pg", + dm_device_name(ti->table->md), + (unsigned long long)len, + zone_sectors, bdev); return 1; } } @@ -280,20 +280,20 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev, return 0; if (start & (logical_block_size_sectors - 1)) { - DMWARN("%s: start=%llu not aligned to h/w " - "logical block size %u of %pg", - dm_device_name(ti->table->md), - (unsigned long long)start, - limits->logical_block_size, bdev); + DMERR("%s: start=%llu not aligned to h/w " + "logical block size %u of %pg", + dm_device_name(ti->table->md), + (unsigned long long)start, + limits->logical_block_size, bdev); return 1; } if (len & (logical_block_size_sectors - 1)) { - DMWARN("%s: len=%llu not aligned to h/w " - "logical block size %u of %pg", - dm_device_name(ti->table->md), - (unsigned long long)len, - limits->logical_block_size, bdev); + DMERR("%s: len=%llu not aligned to h/w " + "logical block size %u of %pg", + dm_device_name(ti->table->md), + (unsigned long long)len, + limits->logical_block_size, bdev); return 1; } @@ -434,8 +434,8 @@ void dm_put_device(struct dm_target *ti, struct dm_dev *d) } } if (!found) { - DMWARN("%s: device %s not in table devices list", - dm_device_name(ti->table->md), d->name); + DMERR("%s: device %s not in table devices list", + dm_device_name(ti->table->md), d->name); return; } if (refcount_dec_and_test(&dd->count)) { @@ -618,12 +618,12 @@ static int validate_hardware_logical_block_alignment(struct dm_table *t, } if (remaining) { - DMWARN("%s: table line %u (start sect %llu len %llu) " - "not aligned to h/w logical block size %u", - dm_device_name(t->md), i, - (unsigned long long) ti->begin, - (unsigned long long) ti->len, - limits->logical_block_size); + DMERR("%s: table line %u (start sect %llu len %llu) " + "not aligned to h/w logical block size %u", + dm_device_name(t->md), i, + (unsigned long long) ti->begin, + (unsigned long long) ti->len, + limits->logical_block_size); return -EINVAL; } @@ -1008,7 +1008,7 @@ static int dm_table_alloc_md_mempools(struct dm_table *t, struct mapped_device * struct dm_md_mempools *pools; if (unlikely(type == DM_TYPE_NONE)) { - DMWARN("no table type is set, can't allocate mempools"); + DMERR("no table type is set, can't allocate mempools"); return -EINVAL; } @@ -1112,7 +1112,7 @@ static bool integrity_profile_exists(struct gendisk *disk) * Get a disk whose integrity profile reflects the table's profile. * Returns NULL if integrity support was inconsistent or unavailable. */ -static struct gendisk * dm_table_get_integrity_disk(struct dm_table *t) +static struct gendisk *dm_table_get_integrity_disk(struct dm_table *t) { struct list_head *devices = dm_table_get_devices(t); struct dm_dev_internal *dd = NULL; @@ -1185,10 +1185,10 @@ static int dm_table_register_integrity(struct dm_table *t) * profile the new profile should not conflict. 
*/ if (blk_integrity_compare(dm_disk(md), template_disk) < 0) { - DMWARN("%s: conflict with existing integrity profile: " - "%s profile mismatch", - dm_device_name(t->md), - template_disk->disk_name); + DMERR("%s: conflict with existing integrity profile: " + "%s profile mismatch", + dm_device_name(t->md), + template_disk->disk_name); return 1; } @@ -1327,7 +1327,7 @@ static int dm_table_construct_crypto_profile(struct dm_table *t) if (t->md->queue && !blk_crypto_has_capabilities(profile, t->md->queue->crypto_profile)) { - DMWARN("Inline encryption capabilities of new DM table were more restrictive than the old table's. This is not supported!"); + DMERR("Inline encryption capabilities of new DM table were more restrictive than the old table's. This is not supported!"); dm_destroy_crypto_profile(profile); return -EINVAL; } diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c index 8a00cc42e498..ccf5b852fbf7 100644 --- a/drivers/md/dm-verity-target.c +++ b/drivers/md/dm-verity-target.c @@ -1401,14 +1401,16 @@ static int verity_ctr(struct dm_target *ti, unsigned argc, char **argv) /* WQ_UNBOUND greatly improves performance when running on ramdisk */ wq_flags = WQ_MEM_RECLAIM | WQ_UNBOUND; - if (v->use_tasklet) { - /* - * Allow verify_wq to preempt softirq since verification in - * tasklet will fall-back to using it for error handling - * (or if the bufio cache doesn't have required hashes). - */ - wq_flags |= WQ_HIGHPRI; - } + /* + * Using WQ_HIGHPRI improves throughput and completion latency by + * reducing wait times when reading from a dm-verity device. + * + * Also as required for the "try_verify_in_tasklet" feature: WQ_HIGHPRI + * allows verify_wq to preempt softirq since verification in tasklet + * will fall-back to using it for error handling (or if the bufio cache + * doesn't have required hashes). 
+ */ + wq_flags |= WQ_HIGHPRI; v->verify_wq = alloc_workqueue("kverityd", wq_flags, num_online_cpus()); if (!v->verify_wq) { ti->error = "Cannot allocate workqueue"; diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 60549b65c799..95a1ee3d314e 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -864,7 +864,7 @@ int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo) sector_t sz = (sector_t)geo->cylinders * geo->heads * geo->sectors; if (geo->start > sz) { - DMWARN("Start sector is beyond the geometry limits."); + DMERR("Start sector is beyond the geometry limits."); return -EINVAL; } @@ -1149,7 +1149,7 @@ static void clone_endio(struct bio *bio) /* The target will handle the io */ return; default: - DMWARN("unimplemented target endio return value: %d", r); + DMCRIT("unimplemented target endio return value: %d", r); BUG(); } } @@ -1455,7 +1455,7 @@ static void __map_bio(struct bio *clone) dm_io_dec_pending(io, BLK_STS_DM_REQUEUE); break; default: - DMWARN("unimplemented target map return value: %d", r); + DMCRIT("unimplemented target map return value: %d", r); BUG(); } } @@ -2005,7 +2005,7 @@ static struct mapped_device *alloc_dev(int minor) md = kvzalloc_node(sizeof(*md), GFP_KERNEL, numa_node_id); if (!md) { - DMWARN("unable to allocate device, out of memory."); + DMERR("unable to allocate device, out of memory."); return NULL; } @@ -2065,7 +2065,6 @@ static struct mapped_device *alloc_dev(int minor) md->disk->minors = 1; md->disk->flags |= GENHD_FL_NO_PART; md->disk->fops = &dm_blk_dops; - md->disk->queue = md->queue; md->disk->private_data = md; sprintf(md->disk->disk_name, "dm-%d", minor); diff --git a/drivers/media/Kconfig b/drivers/media/Kconfig index ba6592b3dab2..283b78b5766e 100644 --- a/drivers/media/Kconfig +++ b/drivers/media/Kconfig @@ -24,7 +24,7 @@ if MEDIA_SUPPORT config MEDIA_SUPPORT_FILTER bool "Filter media drivers" - default y if !EMBEDDED && !EXPERT + default y if !EXPERT help Configuring the media subsystem can be complex, as there are hundreds of drivers and other config options. 
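For context on the dm-verity change above: alloc_workqueue() takes a single flags mask, so WQ_HIGHPRI is simply OR'd into the same mask as WQ_MEM_RECLAIM and WQ_UNBOUND. A minimal sketch of allocating and using such a workqueue outside of dm-verity (all names below are illustrative, not taken from the patch):

        #include <linux/cpumask.h>
        #include <linux/errno.h>
        #include <linux/workqueue.h>

        static struct workqueue_struct *demo_wq;        /* hypothetical workqueue */

        static void demo_work_fn(struct work_struct *work)
        {
                /* verification-style work would run here */
        }

        static DECLARE_WORK(demo_work, demo_work_fn);

        static int demo_init(void)
        {
                /* Unbound, high-priority, usable from memory-reclaim paths. */
                demo_wq = alloc_workqueue("demo_wq",
                                          WQ_MEM_RECLAIM | WQ_UNBOUND | WQ_HIGHPRI,
                                          num_online_cpus());
                if (!demo_wq)
                        return -ENOMEM;

                queue_work(demo_wq, &demo_work);
                return 0;
        }

        static void demo_exit(void)
        {
                destroy_workqueue(demo_wq);     /* flushes pending work first */
        }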
diff --git a/drivers/media/cec/core/cec-adap.c b/drivers/media/cec/core/cec-adap.c index 41a79293ee02..4f5ab3cae8a7 100644 --- a/drivers/media/cec/core/cec-adap.c +++ b/drivers/media/cec/core/cec-adap.c @@ -1027,6 +1027,7 @@ static const u8 cec_msg_size[256] = { [CEC_MSG_REPORT_SHORT_AUDIO_DESCRIPTOR] = 2 | DIRECTED, [CEC_MSG_REQUEST_SHORT_AUDIO_DESCRIPTOR] = 2 | DIRECTED, [CEC_MSG_SET_SYSTEM_AUDIO_MODE] = 3 | BOTH, + [CEC_MSG_SET_AUDIO_VOLUME_LEVEL] = 3 | DIRECTED, [CEC_MSG_SYSTEM_AUDIO_MODE_REQUEST] = 2 | DIRECTED, [CEC_MSG_SYSTEM_AUDIO_MODE_STATUS] = 3 | DIRECTED, [CEC_MSG_SET_AUDIO_RATE] = 3 | DIRECTED, diff --git a/drivers/media/cec/platform/cros-ec/cros-ec-cec.c b/drivers/media/cec/platform/cros-ec/cros-ec-cec.c index 3b583ed4da9d..6ebedc71d67d 100644 --- a/drivers/media/cec/platform/cros-ec/cros-ec-cec.c +++ b/drivers/media/cec/platform/cros-ec/cros-ec-cec.c @@ -44,6 +44,8 @@ static void handle_cec_message(struct cros_ec_cec *cros_ec_cec) uint8_t *cec_message = cros_ec->event_data.data.cec_message; unsigned int len = cros_ec->event_size; + if (len > CEC_MAX_MSG_SIZE) + len = CEC_MAX_MSG_SIZE; cros_ec_cec->rx_msg.len = len; memcpy(cros_ec_cec->rx_msg.msg, cec_message, len); @@ -221,6 +223,8 @@ static const struct cec_dmi_match cec_dmi_match_table[] = { { "Google", "Moli", "0000:00:02.0", "Port B" }, /* Google Kinox */ { "Google", "Kinox", "0000:00:02.0", "Port B" }, + /* Google Kuldax */ + { "Google", "Kuldax", "0000:00:02.0", "Port B" }, }; static struct device *cros_ec_cec_find_hdmi_dev(struct device *dev, diff --git a/drivers/media/cec/platform/s5p/s5p_cec.c b/drivers/media/cec/platform/s5p/s5p_cec.c index ce9a9d922f11..0a30e7acdc10 100644 --- a/drivers/media/cec/platform/s5p/s5p_cec.c +++ b/drivers/media/cec/platform/s5p/s5p_cec.c @@ -115,6 +115,8 @@ static irqreturn_t s5p_cec_irq_handler(int irq, void *priv) dev_dbg(cec->dev, "Buffer overrun (worker did not process previous message)\n"); cec->rx = STATE_BUSY; cec->msg.len = status >> 24; + if (cec->msg.len > CEC_MAX_MSG_SIZE) + cec->msg.len = CEC_MAX_MSG_SIZE; cec->msg.rx_status = CEC_RX_STATUS_OK; s5p_cec_get_rx_buf(cec, cec->msg.len, cec->msg.msg); diff --git a/drivers/media/dvb-frontends/drxk_hard.c b/drivers/media/dvb-frontends/drxk_hard.c index 47d83e0a470c..9807f5411996 100644 --- a/drivers/media/dvb-frontends/drxk_hard.c +++ b/drivers/media/dvb-frontends/drxk_hard.c @@ -6660,7 +6660,7 @@ static int drxk_read_snr(struct dvb_frontend *fe, u16 *snr) static int drxk_read_ucblocks(struct dvb_frontend *fe, u32 *ucblocks) { struct drxk_state *state = fe->demodulator_priv; - u16 err; + u16 err = 0; dprintk(1, "\n"); diff --git a/drivers/media/i2c/ar0521.c b/drivers/media/i2c/ar0521.c index c6ab531532be..e408049f6312 100644 --- a/drivers/media/i2c/ar0521.c +++ b/drivers/media/i2c/ar0521.c @@ -406,7 +406,6 @@ static int ar0521_set_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_format *format) { struct ar0521_dev *sensor = to_ar0521_dev(sd); - int ret = 0; ar0521_adj_fmt(&format->format); @@ -423,7 +422,7 @@ static int ar0521_set_fmt(struct v4l2_subdev *sd, } mutex_unlock(&sensor->lock); - return ret; + return 0; } static int ar0521_s_ctrl(struct v4l2_ctrl *ctrl) @@ -756,10 +755,12 @@ static int ar0521_power_on(struct device *dev) gpiod_set_value(sensor->reset_gpio, 0); usleep_range(4500, 5000); /* min 45000 clocks */ - for (cnt = 0; cnt < ARRAY_SIZE(initial_regs); cnt++) - if (ar0521_write_regs(sensor, initial_regs[cnt].data, - initial_regs[cnt].count)) + for (cnt = 0; cnt < ARRAY_SIZE(initial_regs); cnt++) { + ret = 
ar0521_write_regs(sensor, initial_regs[cnt].data, + initial_regs[cnt].count); + if (ret) goto off; + } ret = ar0521_write_reg(sensor, AR0521_REG_SERIAL_FORMAT, AR0521_REG_SERIAL_FORMAT_MIPI | diff --git a/drivers/media/i2c/ir-kbd-i2c.c b/drivers/media/i2c/ir-kbd-i2c.c index ee6bbbb977f7..25bf1132dbff 100644 --- a/drivers/media/i2c/ir-kbd-i2c.c +++ b/drivers/media/i2c/ir-kbd-i2c.c @@ -238,6 +238,43 @@ static int get_key_knc1(struct IR_i2c *ir, enum rc_proto *protocol, return 1; } +static int get_key_geniatech(struct IR_i2c *ir, enum rc_proto *protocol, + u32 *scancode, u8 *toggle) +{ + int i, rc; + unsigned char b; + + /* poll IR chip */ + for (i = 0; i < 4; i++) { + rc = i2c_master_recv(ir->c, &b, 1); + if (rc == 1) + break; + msleep(20); + } + if (rc != 1) { + dev_dbg(&ir->rc->dev, "read error\n"); + if (rc < 0) + return rc; + return -EIO; + } + + /* don't repeat the key */ + if (ir->old == b) + return 0; + ir->old = b; + + /* decode to RC5 */ + b &= 0x7f; + b = (b - 1) / 2; + + dev_dbg(&ir->rc->dev, "key %02x\n", b); + + *protocol = RC_PROTO_RC5; + *scancode = b; + *toggle = ir->old >> 7; + return 1; +} + static int get_key_avermedia_cardbus(struct IR_i2c *ir, enum rc_proto *protocol, u32 *scancode, u8 *toggle) { @@ -766,6 +803,13 @@ static int ir_probe(struct i2c_client *client, const struct i2c_device_id *id) rc_proto = RC_PROTO_BIT_OTHER; ir_codes = RC_MAP_EMPTY; break; + case 0x33: + name = "Geniatech"; + ir->get_key = get_key_geniatech; + rc_proto = RC_PROTO_BIT_RC5; + ir_codes = RC_MAP_TOTAL_MEDIA_IN_HAND_02; + ir->old = 0xfc; + break; case 0x6b: name = "FusionHDTV"; ir->get_key = get_key_fusionhdtv; @@ -825,6 +869,9 @@ static int ir_probe(struct i2c_client *client, const struct i2c_device_id *id) case IR_KBD_GET_KEY_KNC1: ir->get_key = get_key_knc1; break; + case IR_KBD_GET_KEY_GENIATECH: + ir->get_key = get_key_geniatech; + break; case IR_KBD_GET_KEY_FUSIONHDTV: ir->get_key = get_key_fusionhdtv; break; diff --git a/drivers/media/i2c/isl7998x.c b/drivers/media/i2c/isl7998x.c index 246d8d182a8e..20f548a8a054 100644 --- a/drivers/media/i2c/isl7998x.c +++ b/drivers/media/i2c/isl7998x.c @@ -8,7 +8,7 @@ #include <linux/bitfield.h> #include <linux/delay.h> -#include <linux/gpio.h> +#include <linux/gpio/consumer.h> #include <linux/i2c.h> #include <linux/module.h> #include <linux/of_graph.h> diff --git a/drivers/media/i2c/mt9v111.c b/drivers/media/i2c/mt9v111.c index fe18e5258d7a..46d91cd0870c 100644 --- a/drivers/media/i2c/mt9v111.c +++ b/drivers/media/i2c/mt9v111.c @@ -633,7 +633,7 @@ static int mt9v111_hw_config(struct mt9v111_dev *mt9v111) /* * Set pixel integration time to the whole frame time. - * This value controls the the shutter delay when running with AE + * This value controls the shutter delay when running with AE * disabled. If longer than frame time, it affects the output * frame rate. 
*/ diff --git a/drivers/media/i2c/ov5640.c b/drivers/media/i2c/ov5640.c index 1852e1cfc7df..2d740397a5d4 100644 --- a/drivers/media/i2c/ov5640.c +++ b/drivers/media/i2c/ov5640.c @@ -15,6 +15,7 @@ #include <linux/init.h> #include <linux/module.h> #include <linux/of_device.h> +#include <linux/pm_runtime.h> #include <linux/regulator/consumer.h> #include <linux/slab.h> #include <linux/types.h> @@ -447,8 +448,6 @@ struct ov5640_dev { /* lock to protect all members below */ struct mutex lock; - int power_count; - struct v4l2_mbus_framefmt fmt; bool pending_fmt_change; @@ -2696,39 +2695,24 @@ power_off: return ret; } -/* --------------- Subdev Operations --------------- */ - -static int ov5640_s_power(struct v4l2_subdev *sd, int on) +static int ov5640_sensor_suspend(struct device *dev) { - struct ov5640_dev *sensor = to_ov5640_dev(sd); - int ret = 0; - - mutex_lock(&sensor->lock); - - /* - * If the power count is modified from 0 to != 0 or from != 0 to 0, - * update the power state. - */ - if (sensor->power_count == !on) { - ret = ov5640_set_power(sensor, !!on); - if (ret) - goto out; - } + struct v4l2_subdev *sd = dev_get_drvdata(dev); + struct ov5640_dev *ov5640 = to_ov5640_dev(sd); - /* Update the power count. */ - sensor->power_count += on ? 1 : -1; - WARN_ON(sensor->power_count < 0); -out: - mutex_unlock(&sensor->lock); + return ov5640_set_power(ov5640, false); +} - if (on && !ret && sensor->power_count == 1) { - /* restore controls */ - ret = v4l2_ctrl_handler_setup(&sensor->ctrls.handler); - } +static int ov5640_sensor_resume(struct device *dev) +{ + struct v4l2_subdev *sd = dev_get_drvdata(dev); + struct ov5640_dev *ov5640 = to_ov5640_dev(sd); - return ret; + return ov5640_set_power(ov5640, true); } +/* --------------- Subdev Operations --------------- */ + static int ov5640_try_frame_interval(struct ov5640_dev *sensor, struct v4l2_fract *fi, u32 width, u32 height) @@ -3314,6 +3298,9 @@ static int ov5640_g_volatile_ctrl(struct v4l2_ctrl *ctrl) /* v4l2_ctrl_lock() locks our own mutex */ + if (!pm_runtime_get_if_in_use(&sensor->i2c_client->dev)) + return 0; + switch (ctrl->id) { case V4L2_CID_AUTOGAIN: val = ov5640_get_gain(sensor); @@ -3329,6 +3316,8 @@ static int ov5640_g_volatile_ctrl(struct v4l2_ctrl *ctrl) break; } + pm_runtime_put_autosuspend(&sensor->i2c_client->dev); + return 0; } @@ -3358,9 +3347,9 @@ static int ov5640_s_ctrl(struct v4l2_ctrl *ctrl) /* * If the device is not powered up by the host driver do * not apply any controls to H/W at this time. Instead - * the controls will be restored right after power-up. + * the controls will be restored at start streaming time. 
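The ov5640 conversion above replaces the hand-rolled power_count bookkeeping with runtime PM: control writes only reach the hardware if the device is already runtime-active, otherwise the cached values are applied when streaming starts. A rough sketch of that gating pattern in a generic s_ctrl handler, assuming a driver that has already enabled runtime PM (all names below are illustrative, not from the patch):

        #include <linux/i2c.h>
        #include <linux/pm_runtime.h>
        #include <media/v4l2-ctrls.h>

        struct demo_sensor {                            /* hypothetical driver state */
                struct i2c_client *client;
                struct v4l2_ctrl_handler hdl;
        };

        static inline struct demo_sensor *ctrl_to_demo(struct v4l2_ctrl *ctrl)
        {
                return container_of(ctrl->handler, struct demo_sensor, hdl);
        }

        static int demo_write_ctrl_to_hw(struct demo_sensor *sensor,
                                         struct v4l2_ctrl *ctrl)
        {
                return 0;                               /* register writes would go here */
        }

        static int demo_s_ctrl(struct v4l2_ctrl *ctrl)
        {
                struct demo_sensor *sensor = ctrl_to_demo(ctrl);
                struct device *dev = &sensor->client->dev;
                int ret;

                /*
                 * Only touch the hardware if it is already runtime-active;
                 * otherwise the value stays cached in the control framework
                 * and is applied at stream-on time via
                 * v4l2_ctrl_handler_setup().
                 */
                if (!pm_runtime_get_if_in_use(dev))
                        return 0;

                ret = demo_write_ctrl_to_hw(sensor, ctrl);

                pm_runtime_put_autosuspend(dev);
                return ret;
        }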
*/ - if (sensor->power_count == 0) + if (!pm_runtime_get_if_in_use(&sensor->i2c_client->dev)) return 0; switch (ctrl->id) { @@ -3402,6 +3391,8 @@ static int ov5640_s_ctrl(struct v4l2_ctrl *ctrl) break; } + pm_runtime_put_autosuspend(&sensor->i2c_client->dev); + return ret; } @@ -3677,6 +3668,18 @@ static int ov5640_s_stream(struct v4l2_subdev *sd, int enable) struct ov5640_dev *sensor = to_ov5640_dev(sd); int ret = 0; + if (enable) { + ret = pm_runtime_resume_and_get(&sensor->i2c_client->dev); + if (ret < 0) + return ret; + + ret = v4l2_ctrl_handler_setup(&sensor->ctrls.handler); + if (ret) { + pm_runtime_put(&sensor->i2c_client->dev); + return ret; + } + } + mutex_lock(&sensor->lock); if (sensor->streaming == !enable) { @@ -3701,8 +3704,13 @@ static int ov5640_s_stream(struct v4l2_subdev *sd, int enable) if (!ret) sensor->streaming = enable; } + out: mutex_unlock(&sensor->lock); + + if (!enable || ret) + pm_runtime_put_autosuspend(&sensor->i2c_client->dev); + return ret; } @@ -3724,7 +3732,6 @@ static int ov5640_init_cfg(struct v4l2_subdev *sd, } static const struct v4l2_subdev_core_ops ov5640_core_ops = { - .s_power = ov5640_s_power, .log_status = v4l2_ctrl_subdev_log_status, .subscribe_event = v4l2_ctrl_subdev_subscribe_event, .unsubscribe_event = v4l2_event_subdev_unsubscribe, @@ -3770,26 +3777,20 @@ static int ov5640_check_chip_id(struct ov5640_dev *sensor) int ret = 0; u16 chip_id; - ret = ov5640_set_power_on(sensor); - if (ret) - return ret; - ret = ov5640_read_reg16(sensor, OV5640_REG_CHIP_ID, &chip_id); if (ret) { dev_err(&client->dev, "%s: failed to read chip identifier\n", __func__); - goto power_off; + return ret; } if (chip_id != 0x5640) { dev_err(&client->dev, "%s: wrong chip identifier, expected 0x5640, got 0x%x\n", __func__, chip_id); - ret = -ENXIO; + return -ENXIO; } -power_off: - ov5640_set_power_off(sensor); - return ret; + return 0; } static int ov5640_probe(struct i2c_client *client) @@ -3880,26 +3881,43 @@ static int ov5640_probe(struct i2c_client *client) ret = ov5640_get_regulators(sensor); if (ret) - return ret; + goto entity_cleanup; mutex_init(&sensor->lock); - ret = ov5640_check_chip_id(sensor); + ret = ov5640_init_controls(sensor); if (ret) goto entity_cleanup; - ret = ov5640_init_controls(sensor); - if (ret) + ret = ov5640_sensor_resume(dev); + if (ret) { + dev_err(dev, "failed to power on\n"); goto entity_cleanup; + } + + pm_runtime_set_active(dev); + pm_runtime_get_noresume(dev); + pm_runtime_enable(dev); + + ret = ov5640_check_chip_id(sensor); + if (ret) + goto err_pm_runtime; ret = v4l2_async_register_subdev_sensor(&sensor->sd); if (ret) - goto free_ctrls; + goto err_pm_runtime; + + pm_runtime_set_autosuspend_delay(dev, 1000); + pm_runtime_use_autosuspend(dev); + pm_runtime_put_autosuspend(dev); return 0; -free_ctrls: +err_pm_runtime: + pm_runtime_put_noidle(dev); + pm_runtime_disable(dev); v4l2_ctrl_handler_free(&sensor->ctrls.handler); + ov5640_sensor_suspend(dev); entity_cleanup: media_entity_cleanup(&sensor->sd.entity); mutex_destroy(&sensor->lock); @@ -3910,6 +3928,12 @@ static void ov5640_remove(struct i2c_client *client) { struct v4l2_subdev *sd = i2c_get_clientdata(client); struct ov5640_dev *sensor = to_ov5640_dev(sd); + struct device *dev = &client->dev; + + pm_runtime_disable(dev); + if (!pm_runtime_status_suspended(dev)) + ov5640_sensor_suspend(dev); + pm_runtime_set_suspended(dev); v4l2_async_unregister_subdev(&sensor->sd); media_entity_cleanup(&sensor->sd.entity); @@ -3917,6 +3941,10 @@ static void ov5640_remove(struct i2c_client *client) 
mutex_destroy(&sensor->lock); } +static const struct dev_pm_ops ov5640_pm_ops = { + SET_RUNTIME_PM_OPS(ov5640_sensor_suspend, ov5640_sensor_resume, NULL) +}; + static const struct i2c_device_id ov5640_id[] = { {"ov5640", 0}, {}, @@ -3933,6 +3961,7 @@ static struct i2c_driver ov5640_i2c_driver = { .driver = { .name = "ov5640", .of_match_table = ov5640_dt_ids, + .pm = &ov5640_pm_ops, }, .id_table = ov5640_id, .probe_new = ov5640_probe, diff --git a/drivers/media/i2c/ov8865.c b/drivers/media/i2c/ov8865.c index a233c34b168e..cae1866134a0 100644 --- a/drivers/media/i2c/ov8865.c +++ b/drivers/media/i2c/ov8865.c @@ -3034,11 +3034,13 @@ static int ov8865_probe(struct i2c_client *client) &rate); if (!ret && sensor->extclk) { ret = clk_set_rate(sensor->extclk, rate); - if (ret) - return dev_err_probe(dev, ret, - "failed to set clock rate\n"); + if (ret) { + dev_err_probe(dev, ret, "failed to set clock rate\n"); + goto error_endpoint; + } } else if (ret && !sensor->extclk) { - return dev_err_probe(dev, ret, "invalid clock config\n"); + dev_err_probe(dev, ret, "invalid clock config\n"); + goto error_endpoint; } sensor->extclk_rate = rate ? rate : clk_get_rate(sensor->extclk); diff --git a/drivers/media/mc/mc-device.c b/drivers/media/mc/mc-device.c index b8176a3b76d3..25020d58eb06 100644 --- a/drivers/media/mc/mc-device.c +++ b/drivers/media/mc/mc-device.c @@ -581,7 +581,7 @@ static void __media_device_unregister_entity(struct media_entity *entity) struct media_device *mdev = entity->graph_obj.mdev; struct media_link *link, *tmp; struct media_interface *intf; - unsigned int i; + struct media_pad *iter; ida_free(&mdev->entity_internal_idx, entity->internal_idx); @@ -597,8 +597,8 @@ static void __media_device_unregister_entity(struct media_entity *entity) __media_entity_remove_links(entity); /* Remove all pads that belong to this entity */ - for (i = 0; i < entity->num_pads; i++) - media_gobj_destroy(&entity->pads[i].graph_obj); + media_entity_for_each_pad(entity, iter) + media_gobj_destroy(&iter->graph_obj); /* Remove the entity */ media_gobj_destroy(&entity->graph_obj); @@ -610,7 +610,7 @@ int __must_check media_device_register_entity(struct media_device *mdev, struct media_entity *entity) { struct media_entity_notify *notify, *next; - unsigned int i; + struct media_pad *iter; int ret; if (entity->function == MEDIA_ENT_F_V4L2_SUBDEV_UNKNOWN || @@ -639,9 +639,8 @@ int __must_check media_device_register_entity(struct media_device *mdev, media_gobj_create(mdev, MEDIA_GRAPH_ENTITY, &entity->graph_obj); /* Initialize objects at the pads */ - for (i = 0; i < entity->num_pads; i++) - media_gobj_create(mdev, MEDIA_GRAPH_PAD, - &entity->pads[i].graph_obj); + media_entity_for_each_pad(entity, iter) + media_gobj_create(mdev, MEDIA_GRAPH_PAD, &iter->graph_obj); /* invoke entity_notify callbacks */ list_for_each_entry_safe(notify, next, &mdev->entity_notify, list) diff --git a/drivers/media/mc/mc-entity.c b/drivers/media/mc/mc-entity.c index afd1bd7ff7b6..b8bcbc734eaf 100644 --- a/drivers/media/mc/mc-entity.c +++ b/drivers/media/mc/mc-entity.c @@ -59,10 +59,12 @@ static inline const char *link_type_name(struct media_link *link) } } -__must_check int __media_entity_enum_init(struct media_entity_enum *ent_enum, - int idx_max) +__must_check int media_entity_enum_init(struct media_entity_enum *ent_enum, + struct media_device *mdev) { - idx_max = ALIGN(idx_max, BITS_PER_LONG); + int idx_max; + + idx_max = ALIGN(mdev->entity_internal_idx_max + 1, BITS_PER_LONG); ent_enum->bmap = bitmap_zalloc(idx_max, GFP_KERNEL); if 
(!ent_enum->bmap) return -ENOMEM; @@ -71,7 +73,7 @@ __must_check int __media_entity_enum_init(struct media_entity_enum *ent_enum, return 0; } -EXPORT_SYMBOL_GPL(__media_entity_enum_init); +EXPORT_SYMBOL_GPL(media_entity_enum_init); void media_entity_enum_cleanup(struct media_entity_enum *ent_enum) { @@ -193,7 +195,8 @@ int media_entity_pads_init(struct media_entity *entity, u16 num_pads, struct media_pad *pads) { struct media_device *mdev = entity->graph_obj.mdev; - unsigned int i; + struct media_pad *iter; + unsigned int i = 0; if (num_pads >= MEDIA_ENTITY_MAX_PADS) return -E2BIG; @@ -204,12 +207,12 @@ int media_entity_pads_init(struct media_entity *entity, u16 num_pads, if (mdev) mutex_lock(&mdev->graph_mutex); - for (i = 0; i < num_pads; i++) { - pads[i].entity = entity; - pads[i].index = i; + media_entity_for_each_pad(entity, iter) { + iter->entity = entity; + iter->index = i++; if (mdev) media_gobj_create(mdev, MEDIA_GRAPH_PAD, - &entity->pads[i].graph_obj); + &iter->graph_obj); } if (mdev) @@ -223,6 +226,33 @@ EXPORT_SYMBOL_GPL(media_entity_pads_init); * Graph traversal */ +/* + * This function checks the interdependency inside the entity between @pad0 + * and @pad1. If two pads are interdependent they are part of the same pipeline + * and enabling one of the pads means that the other pad will become "locked" + * and doesn't allow configuration changes. + * + * This function uses the &media_entity_operations.has_pad_interdep() operation + * to check the dependency inside the entity between @pad0 and @pad1. If the + * has_pad_interdep operation is not implemented, all pads of the entity are + * considered to be interdependent. + */ +static bool media_entity_has_pad_interdep(struct media_entity *entity, + unsigned int pad0, unsigned int pad1) +{ + if (pad0 >= entity->num_pads || pad1 >= entity->num_pads) + return false; + + if (entity->pads[pad0].flags & entity->pads[pad1].flags & + (MEDIA_PAD_FL_SINK | MEDIA_PAD_FL_SOURCE)) + return false; + + if (!entity->ops || !entity->ops->has_pad_interdep) + return true; + + return entity->ops->has_pad_interdep(entity, pad0, pad1); +} + static struct media_entity * media_entity_other(struct media_entity *entity, struct media_link *link) { @@ -367,139 +397,435 @@ struct media_entity *media_graph_walk_next(struct media_graph *graph) } EXPORT_SYMBOL_GPL(media_graph_walk_next); -int media_entity_get_fwnode_pad(struct media_entity *entity, - struct fwnode_handle *fwnode, - unsigned long direction_flags) +/* ----------------------------------------------------------------------------- + * Pipeline management + */ + +/* + * The pipeline traversal stack stores pads that are reached during graph + * traversal, with a list of links to be visited to continue the traversal. + * When a new pad is reached, an entry is pushed on the top of the stack and + * points to the incoming pad and the first link of the entity. + * + * To find further pads in the pipeline, the traversal algorithm follows + * internal pad dependencies in the entity, and then links in the graph. It + * does so by iterating over all links of the entity, and following enabled + * links that originate from a pad that is internally connected to the incoming + * pad, as reported by the media_entity_has_pad_interdep() function. 
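The media_entity_has_pad_interdep() helper introduced above treats all pads of an entity as interdependent unless the entity implements the has_pad_interdep operation added by this series. A sketch of how an entity with independent internal routes (say, a hypothetical device whose sink pads 0/1 feed source pads 2/3 respectively) might implement the hook so that only pads on the same route end up in one pipeline (names are illustrative):

        #include <linux/minmax.h>
        #include <media/media-entity.h>

        /*
         * Hypothetical routing: sink pad 0 -> source pad 2, sink pad 1 ->
         * source pad 3. Only those pairs are interdependent.
         */
        static bool demo_has_pad_interdep(struct media_entity *entity,
                                          unsigned int pad0, unsigned int pad1)
        {
                unsigned int sink = min(pad0, pad1);
                unsigned int source = max(pad0, pad1);

                return source == sink + 2;
        }

        static const struct media_entity_operations demo_entity_ops = {
                .has_pad_interdep = demo_has_pad_interdep,
        };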
+ */ + +/** + * struct media_pipeline_walk_entry - Entry in the pipeline traversal stack + * + * @pad: The media pad being visited + * @links: Links left to be visited + */ +struct media_pipeline_walk_entry { + struct media_pad *pad; + struct list_head *links; +}; + +/** + * struct media_pipeline_walk - State used by the media pipeline traversal + * algorithm + * + * @mdev: The media device + * @stack: Depth-first search stack + * @stack.size: Number of allocated entries in @stack.entries + * @stack.top: Index of the top stack entry (-1 if the stack is empty) + * @stack.entries: Stack entries + */ +struct media_pipeline_walk { + struct media_device *mdev; + + struct { + unsigned int size; + int top; + struct media_pipeline_walk_entry *entries; + } stack; +}; + +#define MEDIA_PIPELINE_STACK_GROW_STEP 16 + +static struct media_pipeline_walk_entry * +media_pipeline_walk_top(struct media_pipeline_walk *walk) { - struct fwnode_endpoint endpoint; - unsigned int i; + return &walk->stack.entries[walk->stack.top]; +} + +static bool media_pipeline_walk_empty(struct media_pipeline_walk *walk) +{ + return walk->stack.top == -1; +} + +/* Increase the stack size by MEDIA_PIPELINE_STACK_GROW_STEP elements. */ +static int media_pipeline_walk_resize(struct media_pipeline_walk *walk) +{ + struct media_pipeline_walk_entry *entries; + unsigned int new_size; + + /* Safety check, to avoid stack overflows in case of bugs. */ + if (walk->stack.size >= 256) + return -E2BIG; + + new_size = walk->stack.size + MEDIA_PIPELINE_STACK_GROW_STEP; + + entries = krealloc(walk->stack.entries, + new_size * sizeof(*walk->stack.entries), + GFP_KERNEL); + if (!entries) + return -ENOMEM; + + walk->stack.entries = entries; + walk->stack.size = new_size; + + return 0; +} + +/* Push a new entry on the stack. */ +static int media_pipeline_walk_push(struct media_pipeline_walk *walk, + struct media_pad *pad) +{ + struct media_pipeline_walk_entry *entry; int ret; - if (!entity->ops || !entity->ops->get_fwnode_pad) { - for (i = 0; i < entity->num_pads; i++) { - if (entity->pads[i].flags & direction_flags) - return i; + if (walk->stack.top + 1 >= walk->stack.size) { + ret = media_pipeline_walk_resize(walk); + if (ret) + return ret; + } + + walk->stack.top++; + entry = media_pipeline_walk_top(walk); + entry->pad = pad; + entry->links = pad->entity->links.next; + + dev_dbg(walk->mdev->dev, + "media pipeline: pushed entry %u: '%s':%u\n", + walk->stack.top, pad->entity->name, pad->index); + + return 0; +} + +/* + * Move the top entry link cursor to the next link. If all links of the entry + * have been visited, pop the entry itself. + */ +static void media_pipeline_walk_pop(struct media_pipeline_walk *walk) +{ + struct media_pipeline_walk_entry *entry; + + if (WARN_ON(walk->stack.top < 0)) + return; + + entry = media_pipeline_walk_top(walk); + + if (entry->links->next == &entry->pad->entity->links) { + dev_dbg(walk->mdev->dev, + "media pipeline: entry %u has no more links, popping\n", + walk->stack.top); + + walk->stack.top--; + return; + } + + entry->links = entry->links->next; + + dev_dbg(walk->mdev->dev, + "media pipeline: moved entry %u to next link\n", + walk->stack.top); +} + +/* Free all memory allocated while walking the pipeline. */ +static void media_pipeline_walk_destroy(struct media_pipeline_walk *walk) +{ + kfree(walk->stack.entries); +} + +/* Add a pad to the pipeline and push it to the stack. 
*/ +static int media_pipeline_add_pad(struct media_pipeline *pipe, + struct media_pipeline_walk *walk, + struct media_pad *pad) +{ + struct media_pipeline_pad *ppad; + + list_for_each_entry(ppad, &pipe->pads, list) { + if (ppad->pad == pad) { + dev_dbg(pad->graph_obj.mdev->dev, + "media pipeline: already contains pad '%s':%u\n", + pad->entity->name, pad->index); + return 0; } + } - return -ENXIO; + ppad = kzalloc(sizeof(*ppad), GFP_KERNEL); + if (!ppad) + return -ENOMEM; + + ppad->pipe = pipe; + ppad->pad = pad; + + list_add_tail(&ppad->list, &pipe->pads); + + dev_dbg(pad->graph_obj.mdev->dev, + "media pipeline: added pad '%s':%u\n", + pad->entity->name, pad->index); + + return media_pipeline_walk_push(walk, pad); +} + +/* Explore the next link of the entity at the top of the stack. */ +static int media_pipeline_explore_next_link(struct media_pipeline *pipe, + struct media_pipeline_walk *walk) +{ + struct media_pipeline_walk_entry *entry = media_pipeline_walk_top(walk); + struct media_pad *pad; + struct media_link *link; + struct media_pad *local; + struct media_pad *remote; + int ret; + + pad = entry->pad; + link = list_entry(entry->links, typeof(*link), list); + media_pipeline_walk_pop(walk); + + dev_dbg(walk->mdev->dev, + "media pipeline: exploring link '%s':%u -> '%s':%u\n", + link->source->entity->name, link->source->index, + link->sink->entity->name, link->sink->index); + + /* Skip links that are not enabled. */ + if (!(link->flags & MEDIA_LNK_FL_ENABLED)) { + dev_dbg(walk->mdev->dev, + "media pipeline: skipping link (disabled)\n"); + return 0; } - ret = fwnode_graph_parse_endpoint(fwnode, &endpoint); + /* Get the local pad and remote pad. */ + if (link->source->entity == pad->entity) { + local = link->source; + remote = link->sink; + } else { + local = link->sink; + remote = link->source; + } + + /* + * Skip links that originate from a different pad than the incoming pad + * that is not connected internally in the entity to the incoming pad. + */ + if (pad != local && + !media_entity_has_pad_interdep(pad->entity, pad->index, local->index)) { + dev_dbg(walk->mdev->dev, + "media pipeline: skipping link (no route)\n"); + return 0; + } + + /* + * Add the local and remote pads of the link to the pipeline and push + * them to the stack, if they're not already present. + */ + ret = media_pipeline_add_pad(pipe, walk, local); if (ret) return ret; - ret = entity->ops->get_fwnode_pad(entity, &endpoint); - if (ret < 0) + ret = media_pipeline_add_pad(pipe, walk, remote); + if (ret) return ret; - if (ret >= entity->num_pads) - return -ENXIO; + return 0; +} - if (!(entity->pads[ret].flags & direction_flags)) - return -ENXIO; +static void media_pipeline_cleanup(struct media_pipeline *pipe) +{ + while (!list_empty(&pipe->pads)) { + struct media_pipeline_pad *ppad; - return ret; + ppad = list_first_entry(&pipe->pads, typeof(*ppad), list); + list_del(&ppad->list); + kfree(ppad); + } } -EXPORT_SYMBOL_GPL(media_entity_get_fwnode_pad); -/* ----------------------------------------------------------------------------- - * Pipeline management - */ +static int media_pipeline_populate(struct media_pipeline *pipe, + struct media_pad *pad) +{ + struct media_pipeline_walk walk = { }; + struct media_pipeline_pad *ppad; + int ret; + + /* + * Populate the media pipeline by walking the media graph, starting + * from @pad. 
+ */ + INIT_LIST_HEAD(&pipe->pads); + pipe->mdev = pad->graph_obj.mdev; + + walk.mdev = pipe->mdev; + walk.stack.top = -1; + ret = media_pipeline_add_pad(pipe, &walk, pad); + if (ret) + goto done; + + /* + * Use a depth-first search algorithm: as long as the stack is not + * empty, explore the next link of the top entry. The + * media_pipeline_explore_next_link() function will either move to the + * next link, pop the entry if fully visited, or add new entries on + * top. + */ + while (!media_pipeline_walk_empty(&walk)) { + ret = media_pipeline_explore_next_link(pipe, &walk); + if (ret) + goto done; + } + + dev_dbg(pad->graph_obj.mdev->dev, + "media pipeline populated, found pads:\n"); + + list_for_each_entry(ppad, &pipe->pads, list) + dev_dbg(pad->graph_obj.mdev->dev, "- '%s':%u\n", + ppad->pad->entity->name, ppad->pad->index); + + WARN_ON(walk.stack.top != -1); -__must_check int __media_pipeline_start(struct media_entity *entity, + ret = 0; + +done: + media_pipeline_walk_destroy(&walk); + + if (ret) + media_pipeline_cleanup(pipe); + + return ret; +} + +__must_check int __media_pipeline_start(struct media_pad *pad, struct media_pipeline *pipe) { - struct media_device *mdev = entity->graph_obj.mdev; - struct media_graph *graph = &pipe->graph; - struct media_entity *entity_err = entity; - struct media_link *link; + struct media_device *mdev = pad->entity->graph_obj.mdev; + struct media_pipeline_pad *err_ppad; + struct media_pipeline_pad *ppad; int ret; - if (pipe->streaming_count) { - pipe->streaming_count++; + lockdep_assert_held(&mdev->graph_mutex); + + /* + * If the entity is already part of a pipeline, that pipeline must + * be the same as the pipe given to media_pipeline_start(). + */ + if (WARN_ON(pad->pipe && pad->pipe != pipe)) + return -EINVAL; + + /* + * If the pipeline has already been started, it is guaranteed to be + * valid, so just increase the start count. + */ + if (pipe->start_count) { + pipe->start_count++; return 0; } - ret = media_graph_walk_init(&pipe->graph, mdev); + /* + * Populate the pipeline. This populates the media_pipeline pads list + * with media_pipeline_pad instances for each pad found during graph + * walk. + */ + ret = media_pipeline_populate(pipe, pad); if (ret) return ret; - media_graph_walk_start(&pipe->graph, entity); + /* + * Now that all the pads in the pipeline have been gathered, perform + * the validation steps. + */ + + list_for_each_entry(ppad, &pipe->pads, list) { + struct media_pad *pad = ppad->pad; + struct media_entity *entity = pad->entity; + bool has_enabled_link = false; + bool has_link = false; + struct media_link *link; - while ((entity = media_graph_walk_next(graph))) { - DECLARE_BITMAP(active, MEDIA_ENTITY_MAX_PADS); - DECLARE_BITMAP(has_no_links, MEDIA_ENTITY_MAX_PADS); + dev_dbg(mdev->dev, "Validating pad '%s':%u\n", pad->entity->name, + pad->index); - if (entity->pipe && entity->pipe != pipe) { - pr_err("Pipe active for %s. Can't start for %s\n", - entity->name, - entity_err->name); + /* + * 1. Ensure that the pad doesn't already belong to a different + * pipeline. + */ + if (pad->pipe) { + dev_dbg(mdev->dev, "Failed to start pipeline: pad '%s':%u busy\n", + pad->entity->name, pad->index); ret = -EBUSY; goto error; } - /* Already streaming --- no need to check. */ - if (entity->pipe) - continue; - - entity->pipe = pipe; - - if (!entity->ops || !entity->ops->link_validate) - continue; - - bitmap_zero(active, entity->num_pads); - bitmap_fill(has_no_links, entity->num_pads); - + /* + * 2. 
Validate all active links whose sink is the current pad. + * Validation of the source pads is performed in the context of + * the connected sink pad to avoid duplicating checks. + */ for_each_media_entity_data_link(entity, link) { - struct media_pad *pad = link->sink->entity == entity - ? link->sink : link->source; + /* Skip links unrelated to the current pad. */ + if (link->sink != pad && link->source != pad) + continue; - /* Mark that a pad is connected by a link. */ - bitmap_clear(has_no_links, pad->index, 1); + /* Record if the pad has links and enabled links. */ + if (link->flags & MEDIA_LNK_FL_ENABLED) + has_enabled_link = true; + has_link = true; /* - * Pads that either do not need to connect or - * are connected through an enabled link are - * fine. + * Validate the link if it's enabled and has the + * current pad as its sink. */ - if (!(pad->flags & MEDIA_PAD_FL_MUST_CONNECT) || - link->flags & MEDIA_LNK_FL_ENABLED) - bitmap_set(active, pad->index, 1); + if (!(link->flags & MEDIA_LNK_FL_ENABLED)) + continue; - /* - * Link validation will only take place for - * sink ends of the link that are enabled. - */ - if (link->sink != pad || - !(link->flags & MEDIA_LNK_FL_ENABLED)) + if (link->sink != pad) + continue; + + if (!entity->ops || !entity->ops->link_validate) continue; ret = entity->ops->link_validate(link); - if (ret < 0 && ret != -ENOIOCTLCMD) { - dev_dbg(entity->graph_obj.mdev->dev, - "link validation failed for '%s':%u -> '%s':%u, error %d\n", + if (ret) { + dev_dbg(mdev->dev, + "Link '%s':%u -> '%s':%u failed validation: %d\n", link->source->entity->name, link->source->index, - entity->name, link->sink->index, ret); + link->sink->entity->name, + link->sink->index, ret); goto error; } - } - /* Either no links or validated links are fine. */ - bitmap_or(active, active, has_no_links, entity->num_pads); + dev_dbg(mdev->dev, + "Link '%s':%u -> '%s':%u is valid\n", + link->source->entity->name, + link->source->index, + link->sink->entity->name, + link->sink->index); + } - if (!bitmap_full(active, entity->num_pads)) { + /* + * 3. If the pad has the MEDIA_PAD_FL_MUST_CONNECT flag set, + * ensure that it has either no link or an enabled link. + */ + if ((pad->flags & MEDIA_PAD_FL_MUST_CONNECT) && has_link && + !has_enabled_link) { + dev_dbg(mdev->dev, + "Pad '%s':%u must be connected by an enabled link\n", + pad->entity->name, pad->index); ret = -ENOLINK; - dev_dbg(entity->graph_obj.mdev->dev, - "'%s':%u must be connected by an enabled link\n", - entity->name, - (unsigned)find_first_zero_bit( - active, entity->num_pads)); goto error; } + + /* Validation passed, store the pipe pointer in the pad. */ + pad->pipe = pipe; } - pipe->streaming_count++; + pipe->start_count++; return 0; @@ -508,42 +834,37 @@ error: * Link validation on graph failed. We revert what we did and * return the error. */ - media_graph_walk_start(graph, entity_err); - while ((entity_err = media_graph_walk_next(graph))) { - entity_err->pipe = NULL; - - /* - * We haven't started entities further than this so we quit - * here. 
- */ - if (entity_err == entity) + list_for_each_entry(err_ppad, &pipe->pads, list) { + if (err_ppad == ppad) break; + + err_ppad->pad->pipe = NULL; } - media_graph_walk_cleanup(graph); + media_pipeline_cleanup(pipe); return ret; } EXPORT_SYMBOL_GPL(__media_pipeline_start); -__must_check int media_pipeline_start(struct media_entity *entity, +__must_check int media_pipeline_start(struct media_pad *pad, struct media_pipeline *pipe) { - struct media_device *mdev = entity->graph_obj.mdev; + struct media_device *mdev = pad->entity->graph_obj.mdev; int ret; mutex_lock(&mdev->graph_mutex); - ret = __media_pipeline_start(entity, pipe); + ret = __media_pipeline_start(pad, pipe); mutex_unlock(&mdev->graph_mutex); return ret; } EXPORT_SYMBOL_GPL(media_pipeline_start); -void __media_pipeline_stop(struct media_entity *entity) +void __media_pipeline_stop(struct media_pad *pad) { - struct media_graph *graph = &entity->pipe->graph; - struct media_pipeline *pipe = entity->pipe; + struct media_pipeline *pipe = pad->pipe; + struct media_pipeline_pad *ppad; /* * If the following check fails, the driver has performed an @@ -552,29 +873,65 @@ void __media_pipeline_stop(struct media_entity *entity) if (WARN_ON(!pipe)) return; - if (--pipe->streaming_count) + if (--pipe->start_count) return; - media_graph_walk_start(graph, entity); - - while ((entity = media_graph_walk_next(graph))) - entity->pipe = NULL; + list_for_each_entry(ppad, &pipe->pads, list) + ppad->pad->pipe = NULL; - media_graph_walk_cleanup(graph); + media_pipeline_cleanup(pipe); + if (pipe->allocated) + kfree(pipe); } EXPORT_SYMBOL_GPL(__media_pipeline_stop); -void media_pipeline_stop(struct media_entity *entity) +void media_pipeline_stop(struct media_pad *pad) { - struct media_device *mdev = entity->graph_obj.mdev; + struct media_device *mdev = pad->entity->graph_obj.mdev; mutex_lock(&mdev->graph_mutex); - __media_pipeline_stop(entity); + __media_pipeline_stop(pad); mutex_unlock(&mdev->graph_mutex); } EXPORT_SYMBOL_GPL(media_pipeline_stop); +__must_check int media_pipeline_alloc_start(struct media_pad *pad) +{ + struct media_device *mdev = pad->entity->graph_obj.mdev; + struct media_pipeline *new_pipe = NULL; + struct media_pipeline *pipe; + int ret; + + mutex_lock(&mdev->graph_mutex); + + /* + * Is the entity already part of a pipeline? If not, we need to allocate + * a pipe. 
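Stepping back from the core code for a moment: from a driver's point of view, this series moves pipeline start/stop from entities to pads, and media_pipeline_alloc_start() allocates the pipeline on demand when none exists yet. A minimal usage sketch under those assumptions (the demo_video structure and hooks are hypothetical, not from the patch):

        #include <media/media-entity.h>
        #include <media/v4l2-dev.h>

        struct demo_video {                     /* hypothetical per-video state */
                struct video_device vdev;
        };

        static int demo_start_streaming(struct demo_video *video)
        {
                struct media_pad *pad = &video->vdev.entity.pads[0];

                /* Builds and validates the pipeline reachable from this pad. */
                return media_pipeline_alloc_start(pad);
        }

        static void demo_stop_streaming(struct demo_video *video)
        {
                /* Drops the start count and frees the pipe if it was allocated. */
                media_pipeline_stop(&video->vdev.entity.pads[0]);
        }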
+ */ + pipe = media_pad_pipeline(pad); + if (!pipe) { + new_pipe = kzalloc(sizeof(*new_pipe), GFP_KERNEL); + if (!new_pipe) { + ret = -ENOMEM; + goto out; + } + + pipe = new_pipe; + pipe->allocated = true; + } + + ret = __media_pipeline_start(pad, pipe); + if (ret) + kfree(new_pipe); + +out: + mutex_unlock(&mdev->graph_mutex); + + return ret; +} +EXPORT_SYMBOL_GPL(media_pipeline_alloc_start); + /* ----------------------------------------------------------------------------- * Links management */ @@ -829,7 +1186,7 @@ int __media_entity_setup_link(struct media_link *link, u32 flags) { const u32 mask = MEDIA_LNK_FL_ENABLED; struct media_device *mdev; - struct media_entity *source, *sink; + struct media_pad *source, *sink; int ret = -EBUSY; if (link == NULL) @@ -845,12 +1202,11 @@ int __media_entity_setup_link(struct media_link *link, u32 flags) if (link->flags == flags) return 0; - source = link->source->entity; - sink = link->sink->entity; + source = link->source; + sink = link->sink; if (!(link->flags & MEDIA_LNK_FL_DYNAMIC) && - (media_entity_is_streaming(source) || - media_entity_is_streaming(sink))) + (media_pad_is_streaming(source) || media_pad_is_streaming(sink))) return -EBUSY; mdev = source->graph_obj.mdev; @@ -991,6 +1347,60 @@ struct media_pad *media_pad_remote_pad_unique(const struct media_pad *pad) } EXPORT_SYMBOL_GPL(media_pad_remote_pad_unique); +int media_entity_get_fwnode_pad(struct media_entity *entity, + struct fwnode_handle *fwnode, + unsigned long direction_flags) +{ + struct fwnode_endpoint endpoint; + unsigned int i; + int ret; + + if (!entity->ops || !entity->ops->get_fwnode_pad) { + for (i = 0; i < entity->num_pads; i++) { + if (entity->pads[i].flags & direction_flags) + return i; + } + + return -ENXIO; + } + + ret = fwnode_graph_parse_endpoint(fwnode, &endpoint); + if (ret) + return ret; + + ret = entity->ops->get_fwnode_pad(entity, &endpoint); + if (ret < 0) + return ret; + + if (ret >= entity->num_pads) + return -ENXIO; + + if (!(entity->pads[ret].flags & direction_flags)) + return -ENXIO; + + return ret; +} +EXPORT_SYMBOL_GPL(media_entity_get_fwnode_pad); + +struct media_pipeline *media_entity_pipeline(struct media_entity *entity) +{ + struct media_pad *pad; + + media_entity_for_each_pad(entity, pad) { + if (pad->pipe) + return pad->pipe; + } + + return NULL; +} +EXPORT_SYMBOL_GPL(media_entity_pipeline); + +struct media_pipeline *media_pad_pipeline(struct media_pad *pad) +{ + return pad->pipe; +} +EXPORT_SYMBOL_GPL(media_pad_pipeline); + static void media_interface_init(struct media_device *mdev, struct media_interface *intf, u32 gobj_type, diff --git a/drivers/media/pci/cx18/cx18-av-core.c b/drivers/media/pci/cx18/cx18-av-core.c index d3358643fb7d..ee6e71157786 100644 --- a/drivers/media/pci/cx18/cx18-av-core.c +++ b/drivers/media/pci/cx18/cx18-av-core.c @@ -339,7 +339,7 @@ void cx18_av_std_setup(struct cx18 *cx) /* * For a 13.5 Mpps clock and 15,625 Hz line rate, a line is - * is 864 pixels = 720 active + 144 blanking. ITU-R BT.601 + * 864 pixels = 720 active + 144 blanking. ITU-R BT.601 * specifies 12 luma clock periods or ~ 0.9 * 13.5 Mpps after * the end of active video to start a horizontal line, so that * leaves 132 pixels of hblank to ignore. @@ -399,7 +399,7 @@ void cx18_av_std_setup(struct cx18 *cx) /* * For a 13.5 Mpps clock and 15,734.26 Hz line rate, a line is - * is 858 pixels = 720 active + 138 blanking. The Hsync leading + * 858 pixels = 720 active + 138 blanking. 
The Hsync leading * edge should happen 1.2 us * 13.5 Mpps ~= 16 pixels after the * end of active video, leaving 122 pixels of hblank to ignore * before active video starts. diff --git a/drivers/media/pci/cx88/cx88-input.c b/drivers/media/pci/cx88/cx88-input.c index ce0ef0b8186f..a04a1d33fadb 100644 --- a/drivers/media/pci/cx88/cx88-input.c +++ b/drivers/media/pci/cx88/cx88-input.c @@ -586,7 +586,7 @@ void cx88_i2c_init_ir(struct cx88_core *core) { struct i2c_board_info info; static const unsigned short default_addr_list[] = { - 0x18, 0x6b, 0x71, + 0x18, 0x33, 0x6b, 0x71, I2C_CLIENT_END }; static const unsigned short pvr2000_addr_list[] = { diff --git a/drivers/media/pci/cx88/cx88-video.c b/drivers/media/pci/cx88/cx88-video.c index b509c2a03852..c0ef03ed74f9 100644 --- a/drivers/media/pci/cx88/cx88-video.c +++ b/drivers/media/pci/cx88/cx88-video.c @@ -1388,6 +1388,7 @@ static int cx8800_initdev(struct pci_dev *pci_dev, } fallthrough; case CX88_BOARD_DVICO_FUSIONHDTV_5_PCI_NANO: + case CX88_BOARD_NOTONLYTV_LV3H: request_module("ir-kbd-i2c"); } diff --git a/drivers/media/pci/intel/ipu3/ipu3-cio2-main.c b/drivers/media/pci/intel/ipu3/ipu3-cio2-main.c index a3fe547b7fce..390bd5ea3472 100644 --- a/drivers/media/pci/intel/ipu3/ipu3-cio2-main.c +++ b/drivers/media/pci/intel/ipu3/ipu3-cio2-main.c @@ -989,7 +989,7 @@ static int cio2_vb2_start_streaming(struct vb2_queue *vq, unsigned int count) return r; } - r = media_pipeline_start(&q->vdev.entity, &q->pipe); + r = video_device_pipeline_start(&q->vdev, &q->pipe); if (r) goto fail_pipeline; @@ -1009,7 +1009,7 @@ static int cio2_vb2_start_streaming(struct vb2_queue *vq, unsigned int count) fail_csi2_subdev: cio2_hw_exit(cio2, q); fail_hw: - media_pipeline_stop(&q->vdev.entity); + video_device_pipeline_stop(&q->vdev); fail_pipeline: dev_dbg(dev, "failed to start streaming (%d)\n", r); cio2_vb2_return_all_buffers(q, VB2_BUF_STATE_QUEUED); @@ -1030,7 +1030,7 @@ static void cio2_vb2_stop_streaming(struct vb2_queue *vq) cio2_hw_exit(cio2, q); synchronize_irq(cio2->pci_dev->irq); cio2_vb2_return_all_buffers(q, VB2_BUF_STATE_ERROR); - media_pipeline_stop(&q->vdev.entity); + video_device_pipeline_stop(&q->vdev); pm_runtime_put(dev); cio2->streaming = false; } diff --git a/drivers/media/platform/amphion/vpu_v4l2.c b/drivers/media/platform/amphion/vpu_v4l2.c index 8a3eed957ae6..b779e0ba916c 100644 --- a/drivers/media/platform/amphion/vpu_v4l2.c +++ b/drivers/media/platform/amphion/vpu_v4l2.c @@ -603,6 +603,10 @@ static int vpu_v4l2_release(struct vpu_inst *inst) inst->workqueue = NULL; } + if (inst->fh.m2m_ctx) { + v4l2_m2m_ctx_release(inst->fh.m2m_ctx); + inst->fh.m2m_ctx = NULL; + } v4l2_ctrl_handler_free(&inst->ctrl_handler); mutex_destroy(&inst->lock); v4l2_fh_del(&inst->fh); @@ -685,13 +689,6 @@ int vpu_v4l2_close(struct file *file) vpu_trace(vpu->dev, "tgid = %d, pid = %d, inst = %p\n", inst->tgid, inst->pid, inst); - vpu_inst_lock(inst); - if (inst->fh.m2m_ctx) { - v4l2_m2m_ctx_release(inst->fh.m2m_ctx); - inst->fh.m2m_ctx = NULL; - } - vpu_inst_unlock(inst); - call_void_vop(inst, release); vpu_inst_unregister(inst); vpu_inst_put(inst); diff --git a/drivers/media/platform/chips-media/coda-jpeg.c b/drivers/media/platform/chips-media/coda-jpeg.c index a0b22b07f69a..435e7030fc2a 100644 --- a/drivers/media/platform/chips-media/coda-jpeg.c +++ b/drivers/media/platform/chips-media/coda-jpeg.c @@ -421,7 +421,7 @@ static inline void coda9_jpeg_write_huff_values(struct coda_dev *dev, u8 *bits, coda_write(dev, (s32)values[i], CODA9_REG_JPEG_HUFF_DATA); } -static 
int coda9_jpeg_dec_huff_setup(struct coda_ctx *ctx) +static void coda9_jpeg_dec_huff_setup(struct coda_ctx *ctx) { struct coda_huff_tab *huff_tab = ctx->params.jpeg_huff_tab; struct coda_dev *dev = ctx->dev; @@ -455,7 +455,6 @@ static int coda9_jpeg_dec_huff_setup(struct coda_ctx *ctx) coda9_jpeg_write_huff_values(dev, huff_tab->luma_ac, 162); coda9_jpeg_write_huff_values(dev, huff_tab->chroma_ac, 162); coda_write(dev, 0x000, CODA9_REG_JPEG_HUFF_CTRL); - return 0; } static inline void coda9_jpeg_write_qmat_tab(struct coda_dev *dev, @@ -1394,14 +1393,8 @@ static int coda9_jpeg_prepare_decode(struct coda_ctx *ctx) coda_write(dev, ctx->params.jpeg_restart_interval, CODA9_REG_JPEG_RST_INTVAL); - if (ctx->params.jpeg_huff_tab) { - ret = coda9_jpeg_dec_huff_setup(ctx); - if (ret < 0) { - v4l2_err(&dev->v4l2_dev, - "failed to set up Huffman tables: %d\n", ret); - return ret; - } - } + if (ctx->params.jpeg_huff_tab) + coda9_jpeg_dec_huff_setup(ctx); coda9_jpeg_qmat_setup(ctx); diff --git a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-cmdq.c b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-cmdq.c index 29f6c1cd3de7..86c054600a08 100644 --- a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-cmdq.c +++ b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-cmdq.c @@ -457,7 +457,7 @@ err_cmdq_data: kfree(path); atomic_dec(&mdp->job_count); wake_up(&mdp->callback_wq); - if (cmd->pkt.buf_size > 0) + if (cmd && cmd->pkt.buf_size > 0) mdp_cmdq_pkt_destroy(&cmd->pkt); kfree(comps); kfree(cmd); diff --git a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-comp.c b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-comp.c index e62abf3587bf..d3eaf8884412 100644 --- a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-comp.c +++ b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-comp.c @@ -682,7 +682,7 @@ int mdp_comp_clock_on(struct device *dev, struct mdp_comp *comp) int i, ret; if (comp->comp_dev) { - ret = pm_runtime_get_sync(comp->comp_dev); + ret = pm_runtime_resume_and_get(comp->comp_dev); if (ret < 0) { dev_err(dev, "Failed to get power, err %d. type:%d id:%d\n", @@ -699,6 +699,7 @@ int mdp_comp_clock_on(struct device *dev, struct mdp_comp *comp) dev_err(dev, "Failed to enable clk %d. 
type:%d id:%d\n", i, comp->type, comp->id); + pm_runtime_put(comp->comp_dev); return ret; } } @@ -869,7 +870,7 @@ static struct mdp_comp *mdp_comp_create(struct mdp_dev *mdp, ret = mdp_comp_init(mdp, node, comp, id); if (ret) { - kfree(comp); + devm_kfree(dev, comp); return ERR_PTR(ret); } mdp->comp[id] = comp; @@ -930,7 +931,7 @@ void mdp_comp_destroy(struct mdp_dev *mdp) if (mdp->comp[i]) { pm_runtime_disable(mdp->comp[i]->comp_dev); mdp_comp_deinit(mdp->comp[i]); - kfree(mdp->comp[i]); + devm_kfree(mdp->comp[i]->comp_dev, mdp->comp[i]); mdp->comp[i] = NULL; } } diff --git a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-core.c b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-core.c index cde59579b7ae..c413e59d4286 100644 --- a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-core.c +++ b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-core.c @@ -289,7 +289,8 @@ err_deinit_comp: mdp_comp_destroy(mdp); err_return: for (i = 0; i < MDP_PIPE_MAX; i++) - mtk_mutex_put(mdp->mdp_mutex[i]); + if (mdp) + mtk_mutex_put(mdp->mdp_mutex[i]); kfree(mdp); dev_dbg(dev, "Errno %d\n", ret); return ret; diff --git a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-vpu.c b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-vpu.c index 9f5844385c8f..a72bed927bb6 100644 --- a/drivers/media/platform/mediatek/mdp3/mtk-mdp3-vpu.c +++ b/drivers/media/platform/mediatek/mdp3/mtk-mdp3-vpu.c @@ -173,7 +173,8 @@ int mdp_vpu_dev_init(struct mdp_vpu_dev *vpu, struct mtk_scp *scp, /* vpu work_size was set in mdp_vpu_ipi_handle_init_ack */ mem_size = vpu_alloc_size; - if (mdp_vpu_shared_mem_alloc(vpu)) { + err = mdp_vpu_shared_mem_alloc(vpu); + if (err) { dev_err(&mdp->pdev->dev, "VPU memory alloc fail!"); goto err_mem_alloc; } diff --git a/drivers/media/platform/nxp/dw100/dw100.c b/drivers/media/platform/nxp/dw100/dw100.c index b3b057798ab6..f6d48c36f386 100644 --- a/drivers/media/platform/nxp/dw100/dw100.c +++ b/drivers/media/platform/nxp/dw100/dw100.c @@ -373,7 +373,7 @@ static const struct v4l2_ctrl_ops dw100_ctrl_ops = { * The coordinates are saved in UQ12.4 fixed point format. 
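On the dw100 comment above: UQ12.4 is an unsigned fixed-point format with 12 integer bits and 4 fractional bits, i.e. the stored value is the coordinate multiplied by 16. A small sketch of converting and packing an (x, y) pair into one 32-bit map entry; the low/high half-word layout and the helper names are assumptions for illustration, not taken from the driver:

        #include <linux/types.h>

        /* Convert a pixel coordinate to UQ12.4: value = pixels * 16. */
        static u16 demo_to_uq12_4(u32 pixels)
        {
                return (u16)(pixels << 4);      /* fractional bits left at zero */
        }

        /*
         * Pack an (x, y) pair, both already in UQ12.4, into one 32-bit
         * dewarping map entry (layout assumed for the example).
         */
        static u32 demo_pack_coordinates(u16 qx, u16 qy)
        {
                return ((u32)qy << 16) | qx;
        }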
*/ static void dw100_ctrl_dewarping_map_init(const struct v4l2_ctrl *ctrl, - u32 from_idx, u32 elems, + u32 from_idx, union v4l2_ctrl_ptr ptr) { struct dw100_ctx *ctx = @@ -398,7 +398,7 @@ static void dw100_ctrl_dewarping_map_init(const struct v4l2_ctrl *ctrl, ctx->map_height = mh; ctx->map_size = mh * mw * sizeof(u32); - for (idx = from_idx; idx < elems; idx++) { + for (idx = from_idx; idx < ctrl->elems; idx++) { qy = min_t(u32, (idx / mw) * qdy, qsh); qx = min_t(u32, (idx % mw) * qdx, qsw); map[idx] = dw100_map_format_coordinates(qx, qy); diff --git a/drivers/media/platform/qcom/camss/camss-video.c b/drivers/media/platform/qcom/camss/camss-video.c index 290df04c4d02..81fb3a5bc1d5 100644 --- a/drivers/media/platform/qcom/camss/camss-video.c +++ b/drivers/media/platform/qcom/camss/camss-video.c @@ -493,7 +493,7 @@ static int video_start_streaming(struct vb2_queue *q, unsigned int count) struct v4l2_subdev *subdev; int ret; - ret = media_pipeline_start(&vdev->entity, &video->pipe); + ret = video_device_pipeline_start(vdev, &video->pipe); if (ret < 0) return ret; @@ -522,7 +522,7 @@ static int video_start_streaming(struct vb2_queue *q, unsigned int count) return 0; error: - media_pipeline_stop(&vdev->entity); + video_device_pipeline_stop(vdev); video->ops->flush_buffers(video, VB2_BUF_STATE_QUEUED); @@ -553,7 +553,7 @@ static void video_stop_streaming(struct vb2_queue *q) v4l2_subdev_call(subdev, video, s_stream, 0); } - media_pipeline_stop(&vdev->entity); + video_device_pipeline_stop(vdev); video->ops->flush_buffers(video, VB2_BUF_STATE_ERROR); } diff --git a/drivers/media/platform/qcom/venus/helpers.c b/drivers/media/platform/qcom/venus/helpers.c index 60de4200375d..ab6a29ffc81e 100644 --- a/drivers/media/platform/qcom/venus/helpers.c +++ b/drivers/media/platform/qcom/venus/helpers.c @@ -1800,7 +1800,7 @@ bool venus_helper_check_format(struct venus_inst *inst, u32 v4l2_pixfmt) struct venus_core *core = inst->core; u32 fmt = to_hfi_raw_fmt(v4l2_pixfmt); struct hfi_plat_caps *caps; - u32 buftype; + bool found; if (!fmt) return false; @@ -1809,12 +1809,13 @@ bool venus_helper_check_format(struct venus_inst *inst, u32 v4l2_pixfmt) if (!caps) return false; - if (inst->session_type == VIDC_SESSION_TYPE_DEC) - buftype = HFI_BUFFER_OUTPUT2; - else - buftype = HFI_BUFFER_OUTPUT; + found = find_fmt_from_caps(caps, HFI_BUFFER_OUTPUT, fmt); + if (found) + goto done; - return find_fmt_from_caps(caps, buftype, fmt); + found = find_fmt_from_caps(caps, HFI_BUFFER_OUTPUT2, fmt); +done: + return found; } EXPORT_SYMBOL_GPL(venus_helper_check_format); diff --git a/drivers/media/platform/qcom/venus/hfi.c b/drivers/media/platform/qcom/venus/hfi.c index 1968f09ad177..e00aedb41d16 100644 --- a/drivers/media/platform/qcom/venus/hfi.c +++ b/drivers/media/platform/qcom/venus/hfi.c @@ -569,8 +569,6 @@ irqreturn_t hfi_isr(int irq, void *dev) int hfi_create(struct venus_core *core, const struct hfi_core_ops *ops) { - int ret; - if (!ops) return -EINVAL; @@ -579,9 +577,8 @@ int hfi_create(struct venus_core *core, const struct hfi_core_ops *ops) core->state = CORE_UNINIT; init_completion(&core->done); pkt_set_version(core->res->hfi_version); - ret = venus_hfi_create(core); - return ret; + return venus_hfi_create(core); } void hfi_destroy(struct venus_core *core) diff --git a/drivers/media/platform/qcom/venus/vdec.c b/drivers/media/platform/qcom/venus/vdec.c index ac0bb45d07f4..4ceaba37e2e5 100644 --- a/drivers/media/platform/qcom/venus/vdec.c +++ b/drivers/media/platform/qcom/venus/vdec.c @@ -183,6 +183,8 @@ 
vdec_try_fmt_common(struct venus_inst *inst, struct v4l2_format *f) else return NULL; fmt = find_format(inst, pixmp->pixelformat, f->type); + if (!fmt) + return NULL; } pixmp->width = clamp(pixmp->width, frame_width_min(inst), diff --git a/drivers/media/platform/qcom/venus/venc.c b/drivers/media/platform/qcom/venus/venc.c index 86918aea1d24..cdb12546c4fa 100644 --- a/drivers/media/platform/qcom/venus/venc.c +++ b/drivers/media/platform/qcom/venus/venc.c @@ -192,10 +192,8 @@ venc_try_fmt_common(struct venus_inst *inst, struct v4l2_format *f) pixmp->height = clamp(pixmp->height, frame_height_min(inst), frame_height_max(inst)); - if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) { - pixmp->width = ALIGN(pixmp->width, 128); - pixmp->height = ALIGN(pixmp->height, 32); - } + pixmp->width = ALIGN(pixmp->width, 128); + pixmp->height = ALIGN(pixmp->height, 32); pixmp->width = ALIGN(pixmp->width, 2); pixmp->height = ALIGN(pixmp->height, 2); @@ -392,7 +390,7 @@ static int venc_s_parm(struct file *file, void *fh, struct v4l2_streamparm *a) struct v4l2_fract *timeperframe = &out->timeperframe; u64 us_per_frame, fps; - if (a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE && + if (a->type != V4L2_BUF_TYPE_VIDEO_OUTPUT && a->type != V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) return -EINVAL; @@ -424,7 +422,7 @@ static int venc_g_parm(struct file *file, void *fh, struct v4l2_streamparm *a) { struct venus_inst *inst = to_inst(file); - if (a->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE && + if (a->type != V4L2_BUF_TYPE_VIDEO_OUTPUT && a->type != V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) return -EINVAL; @@ -509,6 +507,19 @@ static int venc_enum_frameintervals(struct file *file, void *fh, return 0; } +static int venc_subscribe_event(struct v4l2_fh *fh, + const struct v4l2_event_subscription *sub) +{ + switch (sub->type) { + case V4L2_EVENT_EOS: + return v4l2_event_subscribe(fh, sub, 2, NULL); + case V4L2_EVENT_CTRL: + return v4l2_ctrl_subscribe_event(fh, sub); + default: + return -EINVAL; + } +} + static const struct v4l2_ioctl_ops venc_ioctl_ops = { .vidioc_querycap = venc_querycap, .vidioc_enum_fmt_vid_cap = venc_enum_fmt, @@ -534,8 +545,9 @@ static const struct v4l2_ioctl_ops venc_ioctl_ops = { .vidioc_g_parm = venc_g_parm, .vidioc_enum_framesizes = venc_enum_framesizes, .vidioc_enum_frameintervals = venc_enum_frameintervals, - .vidioc_subscribe_event = v4l2_ctrl_subscribe_event, + .vidioc_subscribe_event = venc_subscribe_event, .vidioc_unsubscribe_event = v4l2_event_unsubscribe, + .vidioc_try_encoder_cmd = v4l2_m2m_ioctl_try_encoder_cmd, }; static int venc_pm_get(struct venus_inst *inst) @@ -686,7 +698,8 @@ static int venc_set_properties(struct venus_inst *inst) return ret; } - if (inst->fmt_cap->pixfmt == V4L2_PIX_FMT_HEVC) { + if (inst->fmt_cap->pixfmt == V4L2_PIX_FMT_HEVC && + ctr->profile.hevc == V4L2_MPEG_VIDEO_HEVC_PROFILE_MAIN_10) { struct hfi_hdr10_pq_sei hdr10; unsigned int c; diff --git a/drivers/media/platform/qcom/venus/venc_ctrls.c b/drivers/media/platform/qcom/venus/venc_ctrls.c index ed44e5800759..7468e43800a9 100644 --- a/drivers/media/platform/qcom/venus/venc_ctrls.c +++ b/drivers/media/platform/qcom/venus/venc_ctrls.c @@ -8,6 +8,7 @@ #include "core.h" #include "venc.h" +#include "helpers.h" #define BITRATE_MIN 32000 #define BITRATE_MAX 160000000 @@ -336,8 +337,6 @@ static int venc_op_s_ctrl(struct v4l2_ctrl *ctrl) * if we disable 8x8 transform for HP. 
*/ - if (ctrl->val == 0) - return -EINVAL; ctr->h264_8x8_transform = ctrl->val; break; @@ -348,15 +347,41 @@ static int venc_op_s_ctrl(struct v4l2_ctrl *ctrl) return 0; } +static int venc_op_g_volatile_ctrl(struct v4l2_ctrl *ctrl) +{ + struct venus_inst *inst = ctrl_to_inst(ctrl); + struct hfi_buffer_requirements bufreq; + enum hfi_version ver = inst->core->res->hfi_version; + int ret; + + switch (ctrl->id) { + case V4L2_CID_MIN_BUFFERS_FOR_OUTPUT: + ret = venus_helper_get_bufreq(inst, HFI_BUFFER_INPUT, &bufreq); + if (!ret) + ctrl->val = HFI_BUFREQ_COUNT_MIN(&bufreq, ver); + break; + default: + return -EINVAL; + } + + return 0; +} + static const struct v4l2_ctrl_ops venc_ctrl_ops = { .s_ctrl = venc_op_s_ctrl, + .g_volatile_ctrl = venc_op_g_volatile_ctrl, }; int venc_ctrl_init(struct venus_inst *inst) { int ret; + struct v4l2_ctrl_hdr10_mastering_display p_hdr10_mastering = { + { 34000, 13250, 7500 }, + { 16000, 34500, 3000 }, 15635, 16450, 10000000, 500, + }; + struct v4l2_ctrl_hdr10_cll_info p_hdr10_cll = { 1000, 400 }; - ret = v4l2_ctrl_handler_init(&inst->ctrl_handler, 58); + ret = v4l2_ctrl_handler_init(&inst->ctrl_handler, 59); if (ret) return ret; @@ -437,6 +462,9 @@ int venc_ctrl_init(struct venus_inst *inst) 0, V4L2_MPEG_VIDEO_VP8_PROFILE_0); v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops, + V4L2_CID_MIN_BUFFERS_FOR_OUTPUT, 4, 11, 1, 4); + + v4l2_ctrl_new_std(&inst->ctrl_handler, &venc_ctrl_ops, V4L2_CID_MPEG_VIDEO_BITRATE, BITRATE_MIN, BITRATE_MAX, BITRATE_STEP, BITRATE_DEFAULT); @@ -579,11 +607,11 @@ int venc_ctrl_init(struct venus_inst *inst) v4l2_ctrl_new_std_compound(&inst->ctrl_handler, &venc_ctrl_ops, V4L2_CID_COLORIMETRY_HDR10_CLL_INFO, - v4l2_ctrl_ptr_create(NULL)); + v4l2_ctrl_ptr_create(&p_hdr10_cll)); v4l2_ctrl_new_std_compound(&inst->ctrl_handler, &venc_ctrl_ops, V4L2_CID_COLORIMETRY_HDR10_MASTERING_DISPLAY, - v4l2_ctrl_ptr_create(NULL)); + v4l2_ctrl_ptr_create((void *)&p_hdr10_mastering)); v4l2_ctrl_new_std_menu(&inst->ctrl_handler, &venc_ctrl_ops, V4L2_CID_MPEG_VIDEO_INTRA_REFRESH_PERIOD_TYPE, diff --git a/drivers/media/platform/renesas/rcar-vin/rcar-core.c b/drivers/media/platform/renesas/rcar-vin/rcar-core.c index 968a74234e92..2f7daa853ed8 100644 --- a/drivers/media/platform/renesas/rcar-vin/rcar-core.c +++ b/drivers/media/platform/renesas/rcar-vin/rcar-core.c @@ -786,9 +786,8 @@ static int rvin_csi2_link_notify(struct media_link *link, u32 flags, return 0; /* - * Don't allow link changes if any entity in the graph is - * streaming, modifying the CHSEL register fields can disrupt - * running streams. + * Don't allow link changes if any stream in the graph is active as + * modifying the CHSEL register fields can disrupt running streams. 
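The venus encoder change above exposes V4L2_CID_MIN_BUFFERS_FOR_OUTPUT through a g_volatile_ctrl hook, which only gets called if the control is marked volatile so that reads always go back to the driver. A generic sketch of that pattern outside of venus, with illustrative names and a hard-coded value standing in for a firmware query:

        #include <media/v4l2-ctrls.h>

        static int demo_g_volatile_ctrl(struct v4l2_ctrl *ctrl)
        {
                switch (ctrl->id) {
                case V4L2_CID_MIN_BUFFERS_FOR_OUTPUT:
                        ctrl->val = 4;          /* would be queried from firmware */
                        return 0;
                default:
                        return -EINVAL;
                }
        }

        static const struct v4l2_ctrl_ops demo_ctrl_ops = {
                .g_volatile_ctrl = demo_g_volatile_ctrl,
        };

        static int demo_init_controls(struct v4l2_ctrl_handler *hdl)
        {
                struct v4l2_ctrl *ctrl;

                v4l2_ctrl_handler_init(hdl, 1);
                ctrl = v4l2_ctrl_new_std(hdl, &demo_ctrl_ops,
                                         V4L2_CID_MIN_BUFFERS_FOR_OUTPUT,
                                         1, 32, 1, 4);
                if (ctrl)
                        ctrl->flags |= V4L2_CTRL_FLAG_VOLATILE;

                return hdl->error;
        }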
*/ media_device_for_each_entity(entity, &group->mdev) if (media_entity_is_streaming(entity)) diff --git a/drivers/media/platform/renesas/rcar-vin/rcar-dma.c b/drivers/media/platform/renesas/rcar-vin/rcar-dma.c index 8d37fbdc266a..3aea96d85165 100644 --- a/drivers/media/platform/renesas/rcar-vin/rcar-dma.c +++ b/drivers/media/platform/renesas/rcar-vin/rcar-dma.c @@ -1244,8 +1244,6 @@ static int rvin_mc_validate_format(struct rvin_dev *vin, struct v4l2_subdev *sd, static int rvin_set_stream(struct rvin_dev *vin, int on) { - struct media_pipeline *pipe; - struct media_device *mdev; struct v4l2_subdev *sd; struct media_pad *pad; int ret; @@ -1265,7 +1263,7 @@ static int rvin_set_stream(struct rvin_dev *vin, int on) sd = media_entity_to_v4l2_subdev(pad->entity); if (!on) { - media_pipeline_stop(&vin->vdev.entity); + video_device_pipeline_stop(&vin->vdev); return v4l2_subdev_call(sd, video, s_stream, 0); } @@ -1273,17 +1271,7 @@ static int rvin_set_stream(struct rvin_dev *vin, int on) if (ret) return ret; - /* - * The graph lock needs to be taken to protect concurrent - * starts of multiple VIN instances as they might share - * a common subdevice down the line and then should use - * the same pipe. - */ - mdev = vin->vdev.entity.graph_obj.mdev; - mutex_lock(&mdev->graph_mutex); - pipe = sd->entity.pipe ? sd->entity.pipe : &vin->vdev.pipe; - ret = __media_pipeline_start(&vin->vdev.entity, pipe); - mutex_unlock(&mdev->graph_mutex); + ret = video_device_pipeline_alloc_start(&vin->vdev); if (ret) return ret; @@ -1291,7 +1279,7 @@ static int rvin_set_stream(struct rvin_dev *vin, int on) if (ret == -ENOIOCTLCMD) ret = 0; if (ret) - media_pipeline_stop(&vin->vdev.entity); + video_device_pipeline_stop(&vin->vdev); return ret; } diff --git a/drivers/media/platform/renesas/vsp1/vsp1_video.c b/drivers/media/platform/renesas/vsp1/vsp1_video.c index df1606b49d77..9d24647c8f32 100644 --- a/drivers/media/platform/renesas/vsp1/vsp1_video.c +++ b/drivers/media/platform/renesas/vsp1/vsp1_video.c @@ -927,7 +927,7 @@ static void vsp1_video_stop_streaming(struct vb2_queue *vq) } mutex_unlock(&pipe->lock); - media_pipeline_stop(&video->video.entity); + video_device_pipeline_stop(&video->video); vsp1_video_release_buffers(video); vsp1_video_pipeline_put(pipe); } @@ -1046,7 +1046,7 @@ vsp1_video_streamon(struct file *file, void *fh, enum v4l2_buf_type type) return PTR_ERR(pipe); } - ret = __media_pipeline_start(&video->video.entity, &pipe->pipe); + ret = __video_device_pipeline_start(&video->video, &pipe->pipe); if (ret < 0) { mutex_unlock(&mdev->graph_mutex); goto err_pipe; @@ -1070,7 +1070,7 @@ vsp1_video_streamon(struct file *file, void *fh, enum v4l2_buf_type type) return 0; err_stop: - media_pipeline_stop(&video->video.entity); + video_device_pipeline_stop(&video->video); err_pipe: vsp1_video_pipeline_put(pipe); return ret; diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-capture.c b/drivers/media/platform/rockchip/rkisp1/rkisp1-capture.c index d5904c96ff3f..d4540684ea9a 100644 --- a/drivers/media/platform/rockchip/rkisp1/rkisp1-capture.c +++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-capture.c @@ -913,7 +913,7 @@ static void rkisp1_cap_stream_disable(struct rkisp1_capture *cap) * * Call s_stream(false) in the reverse order from * rkisp1_pipeline_stream_enable() and disable the DMA engine. 
- * Should be called before media_pipeline_stop() + * Should be called before video_device_pipeline_stop() */ static void rkisp1_pipeline_stream_disable(struct rkisp1_capture *cap) __must_hold(&cap->rkisp1->stream_lock) @@ -926,7 +926,7 @@ static void rkisp1_pipeline_stream_disable(struct rkisp1_capture *cap) * If the other capture is streaming, isp and sensor nodes shouldn't * be disabled, skip them. */ - if (rkisp1->pipe.streaming_count < 2) + if (rkisp1->pipe.start_count < 2) v4l2_subdev_call(&rkisp1->isp.sd, video, s_stream, false); v4l2_subdev_call(&rkisp1->resizer_devs[cap->id].sd, video, s_stream, @@ -937,7 +937,7 @@ static void rkisp1_pipeline_stream_disable(struct rkisp1_capture *cap) * rkisp1_pipeline_stream_enable - enable nodes in the pipeline * * Enable the DMA Engine and call s_stream(true) through the pipeline. - * Should be called after media_pipeline_start() + * Should be called after video_device_pipeline_start() */ static int rkisp1_pipeline_stream_enable(struct rkisp1_capture *cap) __must_hold(&cap->rkisp1->stream_lock) @@ -956,7 +956,7 @@ static int rkisp1_pipeline_stream_enable(struct rkisp1_capture *cap) * If the other capture is streaming, isp and sensor nodes are already * enabled, skip them. */ - if (rkisp1->pipe.streaming_count > 1) + if (rkisp1->pipe.start_count > 1) return 0; ret = v4l2_subdev_call(&rkisp1->isp.sd, video, s_stream, true); @@ -994,7 +994,7 @@ static void rkisp1_vb2_stop_streaming(struct vb2_queue *queue) rkisp1_dummy_buf_destroy(cap); - media_pipeline_stop(&node->vdev.entity); + video_device_pipeline_stop(&node->vdev); mutex_unlock(&cap->rkisp1->stream_lock); } @@ -1008,7 +1008,7 @@ rkisp1_vb2_start_streaming(struct vb2_queue *queue, unsigned int count) mutex_lock(&cap->rkisp1->stream_lock); - ret = media_pipeline_start(entity, &cap->rkisp1->pipe); + ret = video_device_pipeline_start(&cap->vnode.vdev, &cap->rkisp1->pipe); if (ret) { dev_err(cap->rkisp1->dev, "start pipeline failed %d\n", ret); goto err_ret_buffers; @@ -1044,7 +1044,7 @@ err_pipe_pm_put: err_destroy_dummy: rkisp1_dummy_buf_destroy(cap); err_pipeline_stop: - media_pipeline_stop(entity); + video_device_pipeline_stop(&cap->vnode.vdev); err_ret_buffers: rkisp1_return_all_buffers(cap, VB2_BUF_STATE_QUEUED); mutex_unlock(&cap->rkisp1->stream_lock); @@ -1273,11 +1273,12 @@ static int rkisp1_capture_link_validate(struct media_link *link) struct rkisp1_capture *cap = video_get_drvdata(vdev); const struct rkisp1_capture_fmt_cfg *fmt = rkisp1_find_fmt_cfg(cap, cap->pix.fmt.pixelformat); - struct v4l2_subdev_format sd_fmt; + struct v4l2_subdev_format sd_fmt = { + .which = V4L2_SUBDEV_FORMAT_ACTIVE, + .pad = link->source->index, + }; int ret; - sd_fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE; - sd_fmt.pad = link->source->index; ret = v4l2_subdev_call(sd, pad, get_fmt, NULL, &sd_fmt); if (ret) return ret; diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-common.h b/drivers/media/platform/rockchip/rkisp1/rkisp1-common.h index 8056997d5c29..a1293c45aae1 100644 --- a/drivers/media/platform/rockchip/rkisp1/rkisp1-common.h +++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-common.h @@ -378,6 +378,7 @@ struct rkisp1_params { struct v4l2_format vdev_fmt; enum v4l2_quantization quantization; + enum v4l2_ycbcr_encoding ycbcr_encoding; enum rkisp1_fmt_raw_pat_type raw_type; }; @@ -556,17 +557,32 @@ void rkisp1_sd_adjust_crop(struct v4l2_rect *crop, */ const struct rkisp1_mbus_info *rkisp1_mbus_info_get_by_code(u32 mbus_code); -/* rkisp1_params_configure - configure the params when stream starts. 
- * This function is called by the isp entity upon stream starts. - * The function applies the initial configuration of the parameters. +/* + * rkisp1_params_pre_configure - Configure the params before stream start * - * @params: pointer to rkisp1_params. + * @params: pointer to rkisp1_params * @bayer_pat: the bayer pattern on the isp video sink pad * @quantization: the quantization configured on the isp's src pad + * @ycbcr_encoding: the ycbcr_encoding configured on the isp's src pad + * + * This function is called by the ISP entity just before the ISP gets started. + * It applies the initial ISP parameters from the first params buffer, but + * skips LSC as it needs to be configured after the ISP is started. + */ +void rkisp1_params_pre_configure(struct rkisp1_params *params, + enum rkisp1_fmt_raw_pat_type bayer_pat, + enum v4l2_quantization quantization, + enum v4l2_ycbcr_encoding ycbcr_encoding); + +/* + * rkisp1_params_post_configure - Configure the params after stream start + * + * @params: pointer to rkisp1_params + * + * This function is called by the ISP entity just after the ISP gets started. + * It applies the initial ISP LSC parameters from the first params buffer. */ -void rkisp1_params_configure(struct rkisp1_params *params, - enum rkisp1_fmt_raw_pat_type bayer_pat, - enum v4l2_quantization quantization); +void rkisp1_params_post_configure(struct rkisp1_params *params); /* rkisp1_params_disable - disable all parameters. * This function is called by the isp entity upon stream start diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-isp.c b/drivers/media/platform/rockchip/rkisp1/rkisp1-isp.c index 383a3ec83ca9..585cf3f53469 100644 --- a/drivers/media/platform/rockchip/rkisp1/rkisp1-isp.c +++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-isp.c @@ -231,10 +231,11 @@ static int rkisp1_config_isp(struct rkisp1_isp *isp, struct v4l2_mbus_framefmt *src_frm; src_frm = rkisp1_isp_get_pad_fmt(isp, NULL, - RKISP1_ISP_PAD_SINK_VIDEO, + RKISP1_ISP_PAD_SOURCE_VIDEO, V4L2_SUBDEV_FORMAT_ACTIVE); - rkisp1_params_configure(&rkisp1->params, sink_fmt->bayer_pat, - src_frm->quantization); + rkisp1_params_pre_configure(&rkisp1->params, sink_fmt->bayer_pat, + src_frm->quantization, + src_frm->ycbcr_enc); } return 0; @@ -340,6 +341,9 @@ static void rkisp1_isp_start(struct rkisp1_isp *isp) RKISP1_CIF_ISP_CTRL_ISP_ENABLE | RKISP1_CIF_ISP_CTRL_ISP_INFORM_ENABLE; rkisp1_write(rkisp1, RKISP1_CIF_ISP_CTRL, val); + + if (isp->src_fmt->pixel_enc != V4L2_PIXEL_ENC_BAYER) + rkisp1_params_post_configure(&rkisp1->params); } /* ---------------------------------------------------------------------------- @@ -431,12 +435,17 @@ static int rkisp1_isp_init_config(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *sink_fmt, *src_fmt; struct v4l2_rect *sink_crop, *src_crop; + /* Video. 
*/ sink_fmt = v4l2_subdev_get_try_format(sd, sd_state, RKISP1_ISP_PAD_SINK_VIDEO); sink_fmt->width = RKISP1_DEFAULT_WIDTH; sink_fmt->height = RKISP1_DEFAULT_HEIGHT; sink_fmt->field = V4L2_FIELD_NONE; sink_fmt->code = RKISP1_DEF_SINK_PAD_FMT; + sink_fmt->colorspace = V4L2_COLORSPACE_RAW; + sink_fmt->xfer_func = V4L2_XFER_FUNC_NONE; + sink_fmt->ycbcr_enc = V4L2_YCBCR_ENC_601; + sink_fmt->quantization = V4L2_QUANTIZATION_FULL_RANGE; sink_crop = v4l2_subdev_get_try_crop(sd, sd_state, RKISP1_ISP_PAD_SINK_VIDEO); @@ -449,11 +458,16 @@ static int rkisp1_isp_init_config(struct v4l2_subdev *sd, RKISP1_ISP_PAD_SOURCE_VIDEO); *src_fmt = *sink_fmt; src_fmt->code = RKISP1_DEF_SRC_PAD_FMT; + src_fmt->colorspace = V4L2_COLORSPACE_SRGB; + src_fmt->xfer_func = V4L2_XFER_FUNC_SRGB; + src_fmt->ycbcr_enc = V4L2_YCBCR_ENC_601; + src_fmt->quantization = V4L2_QUANTIZATION_LIM_RANGE; src_crop = v4l2_subdev_get_try_crop(sd, sd_state, RKISP1_ISP_PAD_SOURCE_VIDEO); *src_crop = *sink_crop; + /* Parameters and statistics. */ sink_fmt = v4l2_subdev_get_try_format(sd, sd_state, RKISP1_ISP_PAD_SINK_PARAMS); src_fmt = v4l2_subdev_get_try_format(sd, sd_state, @@ -472,40 +486,105 @@ static void rkisp1_isp_set_src_fmt(struct rkisp1_isp *isp, struct v4l2_mbus_framefmt *format, unsigned int which) { - const struct rkisp1_mbus_info *mbus_info; + const struct rkisp1_mbus_info *sink_info; + const struct rkisp1_mbus_info *src_info; + struct v4l2_mbus_framefmt *sink_fmt; struct v4l2_mbus_framefmt *src_fmt; const struct v4l2_rect *src_crop; + bool set_csc; + sink_fmt = rkisp1_isp_get_pad_fmt(isp, sd_state, + RKISP1_ISP_PAD_SINK_VIDEO, which); src_fmt = rkisp1_isp_get_pad_fmt(isp, sd_state, RKISP1_ISP_PAD_SOURCE_VIDEO, which); src_crop = rkisp1_isp_get_pad_crop(isp, sd_state, RKISP1_ISP_PAD_SOURCE_VIDEO, which); + /* + * Media bus code. The ISP can operate in pass-through mode (Bayer in, + * Bayer out or YUV in, YUV out) or process Bayer data to YUV, but + * can't convert from YUV to Bayer. + */ + sink_info = rkisp1_mbus_info_get_by_code(sink_fmt->code); + src_fmt->code = format->code; - mbus_info = rkisp1_mbus_info_get_by_code(src_fmt->code); - if (!mbus_info || !(mbus_info->direction & RKISP1_ISP_SD_SRC)) { + src_info = rkisp1_mbus_info_get_by_code(src_fmt->code); + if (!src_info || !(src_info->direction & RKISP1_ISP_SD_SRC)) { src_fmt->code = RKISP1_DEF_SRC_PAD_FMT; - mbus_info = rkisp1_mbus_info_get_by_code(src_fmt->code); + src_info = rkisp1_mbus_info_get_by_code(src_fmt->code); } - if (which == V4L2_SUBDEV_FORMAT_ACTIVE) - isp->src_fmt = mbus_info; + + if (sink_info->pixel_enc == V4L2_PIXEL_ENC_YUV && + src_info->pixel_enc == V4L2_PIXEL_ENC_BAYER) { + src_fmt->code = sink_fmt->code; + src_info = sink_info; + } + + /* + * The source width and height must be identical to the source crop + * size. + */ src_fmt->width = src_crop->width; src_fmt->height = src_crop->height; /* - * The CSC API is used to allow userspace to force full - * quantization on YUV formats. + * Copy the color space for the sink pad. When converting from Bayer to + * YUV, default to a limited quantization range. 
*/ - if (format->flags & V4L2_MBUS_FRAMEFMT_SET_CSC && - format->quantization == V4L2_QUANTIZATION_FULL_RANGE && - mbus_info->pixel_enc == V4L2_PIXEL_ENC_YUV) - src_fmt->quantization = V4L2_QUANTIZATION_FULL_RANGE; - else if (mbus_info->pixel_enc == V4L2_PIXEL_ENC_YUV) + src_fmt->colorspace = sink_fmt->colorspace; + src_fmt->xfer_func = sink_fmt->xfer_func; + src_fmt->ycbcr_enc = sink_fmt->ycbcr_enc; + + if (sink_info->pixel_enc == V4L2_PIXEL_ENC_BAYER && + src_info->pixel_enc == V4L2_PIXEL_ENC_YUV) src_fmt->quantization = V4L2_QUANTIZATION_LIM_RANGE; else - src_fmt->quantization = V4L2_QUANTIZATION_FULL_RANGE; + src_fmt->quantization = sink_fmt->quantization; + + /* + * Allow setting the source color space fields when the SET_CSC flag is + * set and the source format is YUV. If the sink format is YUV, don't + * set the color primaries, transfer function or YCbCr encoding as the + * ISP is bypassed in that case and passes YUV data through without + * modifications. + * + * The color primaries and transfer function are configured through the + * cross-talk matrix and tone curve respectively. Settings for those + * hardware blocks are conveyed through the ISP parameters buffer, as + * they need to combine color space information with other image tuning + * characteristics and can't thus be computed by the kernel based on the + * color space. The source pad colorspace and xfer_func fields are thus + * ignored by the driver, but can be set by userspace to propagate + * accurate color space information down the pipeline. + */ + set_csc = format->flags & V4L2_MBUS_FRAMEFMT_SET_CSC; + + if (set_csc && src_info->pixel_enc == V4L2_PIXEL_ENC_YUV) { + if (sink_info->pixel_enc == V4L2_PIXEL_ENC_BAYER) { + if (format->colorspace != V4L2_COLORSPACE_DEFAULT) + src_fmt->colorspace = format->colorspace; + if (format->xfer_func != V4L2_XFER_FUNC_DEFAULT) + src_fmt->xfer_func = format->xfer_func; + if (format->ycbcr_enc != V4L2_YCBCR_ENC_DEFAULT) + src_fmt->ycbcr_enc = format->ycbcr_enc; + } + + if (format->quantization != V4L2_QUANTIZATION_DEFAULT) + src_fmt->quantization = format->quantization; + } *format = *src_fmt; + + /* + * Restore the SET_CSC flag if it was set to indicate support for the + * CSC setting API. + */ + if (set_csc) + format->flags |= V4L2_MBUS_FRAMEFMT_SET_CSC; + + /* Store the source format info when setting the active format. */ + if (which == V4L2_SUBDEV_FORMAT_ACTIVE) + isp->src_fmt = src_info; } static void rkisp1_isp_set_src_crop(struct rkisp1_isp *isp, @@ -573,6 +652,7 @@ static void rkisp1_isp_set_sink_fmt(struct rkisp1_isp *isp, const struct rkisp1_mbus_info *mbus_info; struct v4l2_mbus_framefmt *sink_fmt; struct v4l2_rect *sink_crop; + bool is_yuv; sink_fmt = rkisp1_isp_get_pad_fmt(isp, sd_state, RKISP1_ISP_PAD_SINK_VIDEO, @@ -593,6 +673,36 @@ static void rkisp1_isp_set_sink_fmt(struct rkisp1_isp *isp, RKISP1_ISP_MIN_HEIGHT, RKISP1_ISP_MAX_HEIGHT); + /* + * Adjust the color space fields. Accept any color primaries and + * transfer function for both YUV and Bayer. For YUV any YCbCr encoding + * and quantization range is also accepted. For Bayer formats, the YCbCr + * encoding isn't applicable, and the quantization range can only be + * full. + */ + is_yuv = mbus_info->pixel_enc == V4L2_PIXEL_ENC_YUV; + + sink_fmt->colorspace = format->colorspace ? : + (is_yuv ? V4L2_COLORSPACE_SRGB : + V4L2_COLORSPACE_RAW); + sink_fmt->xfer_func = format->xfer_func ? : + V4L2_MAP_XFER_FUNC_DEFAULT(sink_fmt->colorspace); + if (is_yuv) { + sink_fmt->ycbcr_enc = format->ycbcr_enc ? 
: + V4L2_MAP_YCBCR_ENC_DEFAULT(sink_fmt->colorspace); + sink_fmt->quantization = format->quantization ? : + V4L2_MAP_QUANTIZATION_DEFAULT(false, sink_fmt->colorspace, + sink_fmt->ycbcr_enc); + } else { + /* + * The YCbCr encoding isn't applicable for non-YUV formats, but + * V4L2 has no "no encoding" value. Hardcode it to Rec. 601, it + * should be ignored by userspace. + */ + sink_fmt->ycbcr_enc = V4L2_YCBCR_ENC_601; + sink_fmt->quantization = V4L2_QUANTIZATION_FULL_RANGE; + } + *format = *sink_fmt; /* Propagate to in crop */ diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-params.c b/drivers/media/platform/rockchip/rkisp1/rkisp1-params.c index 9da7dc1bc690..d8731ebbf479 100644 --- a/drivers/media/platform/rockchip/rkisp1/rkisp1-params.c +++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-params.c @@ -18,6 +18,8 @@ #define RKISP1_ISP_PARAMS_REQ_BUFS_MIN 2 #define RKISP1_ISP_PARAMS_REQ_BUFS_MAX 8 +#define RKISP1_ISP_DPCC_METHODS_SET(n) \ + (RKISP1_CIF_ISP_DPCC_METHODS_SET_1 + 0x4 * (n)) #define RKISP1_ISP_DPCC_LINE_THRESH(n) \ (RKISP1_CIF_ISP_DPCC_LINE_THRESH_1 + 0x14 * (n)) #define RKISP1_ISP_DPCC_LINE_MAD_FAC(n) \ @@ -56,39 +58,47 @@ static void rkisp1_dpcc_config(struct rkisp1_params *params, unsigned int i; u32 mode; - /* avoid to override the old enable value */ + /* + * The enable bit is controlled in rkisp1_isp_isr_other_config() and + * must be preserved. The grayscale mode should be configured + * automatically based on the media bus code on the ISP sink pad, so + * only the STAGE1_ENABLE bit can be set by userspace. + */ mode = rkisp1_read(params->rkisp1, RKISP1_CIF_ISP_DPCC_MODE); - mode &= RKISP1_CIF_ISP_DPCC_ENA; - mode |= arg->mode & ~RKISP1_CIF_ISP_DPCC_ENA; + mode &= RKISP1_CIF_ISP_DPCC_MODE_DPCC_ENABLE; + mode |= arg->mode & RKISP1_CIF_ISP_DPCC_MODE_STAGE1_ENABLE; rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_DPCC_MODE, mode); + rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_DPCC_OUTPUT_MODE, - arg->output_mode); + arg->output_mode & RKISP1_CIF_ISP_DPCC_OUTPUT_MODE_MASK); rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_DPCC_SET_USE, - arg->set_use); - - rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_DPCC_METHODS_SET_1, - arg->methods[0].method); - rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_DPCC_METHODS_SET_2, - arg->methods[1].method); - rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_DPCC_METHODS_SET_3, - arg->methods[2].method); + arg->set_use & RKISP1_CIF_ISP_DPCC_SET_USE_MASK); + for (i = 0; i < RKISP1_CIF_ISP_DPCC_METHODS_MAX; i++) { + rkisp1_write(params->rkisp1, RKISP1_ISP_DPCC_METHODS_SET(i), + arg->methods[i].method & + RKISP1_CIF_ISP_DPCC_METHODS_SET_MASK); rkisp1_write(params->rkisp1, RKISP1_ISP_DPCC_LINE_THRESH(i), - arg->methods[i].line_thresh); + arg->methods[i].line_thresh & + RKISP1_CIF_ISP_DPCC_LINE_THRESH_MASK); rkisp1_write(params->rkisp1, RKISP1_ISP_DPCC_LINE_MAD_FAC(i), - arg->methods[i].line_mad_fac); + arg->methods[i].line_mad_fac & + RKISP1_CIF_ISP_DPCC_LINE_MAD_FAC_MASK); rkisp1_write(params->rkisp1, RKISP1_ISP_DPCC_PG_FAC(i), - arg->methods[i].pg_fac); + arg->methods[i].pg_fac & + RKISP1_CIF_ISP_DPCC_PG_FAC_MASK); rkisp1_write(params->rkisp1, RKISP1_ISP_DPCC_RND_THRESH(i), - arg->methods[i].rnd_thresh); + arg->methods[i].rnd_thresh & + RKISP1_CIF_ISP_DPCC_RND_THRESH_MASK); rkisp1_write(params->rkisp1, RKISP1_ISP_DPCC_RG_FAC(i), - arg->methods[i].rg_fac); + arg->methods[i].rg_fac & + RKISP1_CIF_ISP_DPCC_RG_FAC_MASK); } rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_DPCC_RND_OFFS, - arg->rnd_offs); + arg->rnd_offs & RKISP1_CIF_ISP_DPCC_RND_OFFS_MASK); 
rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_DPCC_RO_LIMITS, - arg->ro_limits); + arg->ro_limits & RKISP1_CIF_ISP_DPCC_RO_LIMIT_MASK); } /* ISP black level subtraction interface function */ @@ -188,149 +198,131 @@ static void rkisp1_lsc_matrix_config_v10(struct rkisp1_params *params, const struct rkisp1_cif_isp_lsc_config *pconfig) { - unsigned int isp_lsc_status, sram_addr, isp_lsc_table_sel, i, j, data; + struct rkisp1_device *rkisp1 = params->rkisp1; + u32 lsc_status, sram_addr, lsc_table_sel; + unsigned int i, j; - isp_lsc_status = rkisp1_read(params->rkisp1, RKISP1_CIF_ISP_LSC_STATUS); + lsc_status = rkisp1_read(rkisp1, RKISP1_CIF_ISP_LSC_STATUS); /* RKISP1_CIF_ISP_LSC_TABLE_ADDRESS_153 = ( 17 * 18 ) >> 1 */ - sram_addr = (isp_lsc_status & RKISP1_CIF_ISP_LSC_ACTIVE_TABLE) ? + sram_addr = lsc_status & RKISP1_CIF_ISP_LSC_ACTIVE_TABLE ? RKISP1_CIF_ISP_LSC_TABLE_ADDRESS_0 : RKISP1_CIF_ISP_LSC_TABLE_ADDRESS_153; - rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_LSC_R_TABLE_ADDR, sram_addr); - rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_LSC_GR_TABLE_ADDR, sram_addr); - rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_LSC_GB_TABLE_ADDR, sram_addr); - rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_LSC_B_TABLE_ADDR, sram_addr); + rkisp1_write(rkisp1, RKISP1_CIF_ISP_LSC_R_TABLE_ADDR, sram_addr); + rkisp1_write(rkisp1, RKISP1_CIF_ISP_LSC_GR_TABLE_ADDR, sram_addr); + rkisp1_write(rkisp1, RKISP1_CIF_ISP_LSC_GB_TABLE_ADDR, sram_addr); + rkisp1_write(rkisp1, RKISP1_CIF_ISP_LSC_B_TABLE_ADDR, sram_addr); /* program data tables (table size is 9 * 17 = 153) */ for (i = 0; i < RKISP1_CIF_ISP_LSC_SAMPLES_MAX; i++) { + const __u16 *r_tbl = pconfig->r_data_tbl[i]; + const __u16 *gr_tbl = pconfig->gr_data_tbl[i]; + const __u16 *gb_tbl = pconfig->gb_data_tbl[i]; + const __u16 *b_tbl = pconfig->b_data_tbl[i]; + /* * 17 sectors with 2 values in one DWORD = 9 * DWORDs (2nd value of last DWORD unused) */ for (j = 0; j < RKISP1_CIF_ISP_LSC_SAMPLES_MAX - 1; j += 2) { - data = RKISP1_CIF_ISP_LSC_TABLE_DATA_V10(pconfig->r_data_tbl[i][j], - pconfig->r_data_tbl[i][j + 1]); - rkisp1_write(params->rkisp1, - RKISP1_CIF_ISP_LSC_R_TABLE_DATA, data); - - data = RKISP1_CIF_ISP_LSC_TABLE_DATA_V10(pconfig->gr_data_tbl[i][j], - pconfig->gr_data_tbl[i][j + 1]); - rkisp1_write(params->rkisp1, - RKISP1_CIF_ISP_LSC_GR_TABLE_DATA, data); - - data = RKISP1_CIF_ISP_LSC_TABLE_DATA_V10(pconfig->gb_data_tbl[i][j], - pconfig->gb_data_tbl[i][j + 1]); - rkisp1_write(params->rkisp1, - RKISP1_CIF_ISP_LSC_GB_TABLE_DATA, data); - - data = RKISP1_CIF_ISP_LSC_TABLE_DATA_V10(pconfig->b_data_tbl[i][j], - pconfig->b_data_tbl[i][j + 1]); - rkisp1_write(params->rkisp1, - RKISP1_CIF_ISP_LSC_B_TABLE_DATA, data); + rkisp1_write(rkisp1, RKISP1_CIF_ISP_LSC_R_TABLE_DATA, + RKISP1_CIF_ISP_LSC_TABLE_DATA_V10( + r_tbl[j], r_tbl[j + 1])); + rkisp1_write(rkisp1, RKISP1_CIF_ISP_LSC_GR_TABLE_DATA, + RKISP1_CIF_ISP_LSC_TABLE_DATA_V10( + gr_tbl[j], gr_tbl[j + 1])); + rkisp1_write(rkisp1, RKISP1_CIF_ISP_LSC_GB_TABLE_DATA, + RKISP1_CIF_ISP_LSC_TABLE_DATA_V10( + gb_tbl[j], gb_tbl[j + 1])); + rkisp1_write(rkisp1, RKISP1_CIF_ISP_LSC_B_TABLE_DATA, + RKISP1_CIF_ISP_LSC_TABLE_DATA_V10( + b_tbl[j], b_tbl[j + 1])); } - data = RKISP1_CIF_ISP_LSC_TABLE_DATA_V10(pconfig->r_data_tbl[i][j], 0); - rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_LSC_R_TABLE_DATA, - data); - data = RKISP1_CIF_ISP_LSC_TABLE_DATA_V10(pconfig->gr_data_tbl[i][j], 0); - rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_LSC_GR_TABLE_DATA, - data); - - data = RKISP1_CIF_ISP_LSC_TABLE_DATA_V10(pconfig->gb_data_tbl[i][j], 0); - 
rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_LSC_GB_TABLE_DATA, - data); - - data = RKISP1_CIF_ISP_LSC_TABLE_DATA_V10(pconfig->b_data_tbl[i][j], 0); - rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_LSC_B_TABLE_DATA, - data); + rkisp1_write(rkisp1, RKISP1_CIF_ISP_LSC_R_TABLE_DATA, + RKISP1_CIF_ISP_LSC_TABLE_DATA_V10(r_tbl[j], 0)); + rkisp1_write(rkisp1, RKISP1_CIF_ISP_LSC_GR_TABLE_DATA, + RKISP1_CIF_ISP_LSC_TABLE_DATA_V10(gr_tbl[j], 0)); + rkisp1_write(rkisp1, RKISP1_CIF_ISP_LSC_GB_TABLE_DATA, + RKISP1_CIF_ISP_LSC_TABLE_DATA_V10(gb_tbl[j], 0)); + rkisp1_write(rkisp1, RKISP1_CIF_ISP_LSC_B_TABLE_DATA, + RKISP1_CIF_ISP_LSC_TABLE_DATA_V10(b_tbl[j], 0)); } - isp_lsc_table_sel = (isp_lsc_status & RKISP1_CIF_ISP_LSC_ACTIVE_TABLE) ? - RKISP1_CIF_ISP_LSC_TABLE_0 : - RKISP1_CIF_ISP_LSC_TABLE_1; - rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_LSC_TABLE_SEL, - isp_lsc_table_sel); + + lsc_table_sel = lsc_status & RKISP1_CIF_ISP_LSC_ACTIVE_TABLE ? + RKISP1_CIF_ISP_LSC_TABLE_0 : RKISP1_CIF_ISP_LSC_TABLE_1; + rkisp1_write(rkisp1, RKISP1_CIF_ISP_LSC_TABLE_SEL, lsc_table_sel); } static void rkisp1_lsc_matrix_config_v12(struct rkisp1_params *params, const struct rkisp1_cif_isp_lsc_config *pconfig) { - unsigned int isp_lsc_status, sram_addr, isp_lsc_table_sel, i, j, data; + struct rkisp1_device *rkisp1 = params->rkisp1; + u32 lsc_status, sram_addr, lsc_table_sel; + unsigned int i, j; - isp_lsc_status = rkisp1_read(params->rkisp1, RKISP1_CIF_ISP_LSC_STATUS); + lsc_status = rkisp1_read(rkisp1, RKISP1_CIF_ISP_LSC_STATUS); /* RKISP1_CIF_ISP_LSC_TABLE_ADDRESS_153 = ( 17 * 18 ) >> 1 */ - sram_addr = (isp_lsc_status & RKISP1_CIF_ISP_LSC_ACTIVE_TABLE) ? - RKISP1_CIF_ISP_LSC_TABLE_ADDRESS_0 : - RKISP1_CIF_ISP_LSC_TABLE_ADDRESS_153; - rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_LSC_R_TABLE_ADDR, sram_addr); - rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_LSC_GR_TABLE_ADDR, sram_addr); - rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_LSC_GB_TABLE_ADDR, sram_addr); - rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_LSC_B_TABLE_ADDR, sram_addr); + sram_addr = lsc_status & RKISP1_CIF_ISP_LSC_ACTIVE_TABLE ? 
+ RKISP1_CIF_ISP_LSC_TABLE_ADDRESS_0 : + RKISP1_CIF_ISP_LSC_TABLE_ADDRESS_153; + rkisp1_write(rkisp1, RKISP1_CIF_ISP_LSC_R_TABLE_ADDR, sram_addr); + rkisp1_write(rkisp1, RKISP1_CIF_ISP_LSC_GR_TABLE_ADDR, sram_addr); + rkisp1_write(rkisp1, RKISP1_CIF_ISP_LSC_GB_TABLE_ADDR, sram_addr); + rkisp1_write(rkisp1, RKISP1_CIF_ISP_LSC_B_TABLE_ADDR, sram_addr); /* program data tables (table size is 9 * 17 = 153) */ for (i = 0; i < RKISP1_CIF_ISP_LSC_SAMPLES_MAX; i++) { + const __u16 *r_tbl = pconfig->r_data_tbl[i]; + const __u16 *gr_tbl = pconfig->gr_data_tbl[i]; + const __u16 *gb_tbl = pconfig->gb_data_tbl[i]; + const __u16 *b_tbl = pconfig->b_data_tbl[i]; + /* * 17 sectors with 2 values in one DWORD = 9 * DWORDs (2nd value of last DWORD unused) */ for (j = 0; j < RKISP1_CIF_ISP_LSC_SAMPLES_MAX - 1; j += 2) { - data = RKISP1_CIF_ISP_LSC_TABLE_DATA_V12( - pconfig->r_data_tbl[i][j], - pconfig->r_data_tbl[i][j + 1]); - rkisp1_write(params->rkisp1, - RKISP1_CIF_ISP_LSC_R_TABLE_DATA, data); - - data = RKISP1_CIF_ISP_LSC_TABLE_DATA_V12( - pconfig->gr_data_tbl[i][j], - pconfig->gr_data_tbl[i][j + 1]); - rkisp1_write(params->rkisp1, - RKISP1_CIF_ISP_LSC_GR_TABLE_DATA, data); - - data = RKISP1_CIF_ISP_LSC_TABLE_DATA_V12( - pconfig->gb_data_tbl[i][j], - pconfig->gb_data_tbl[i][j + 1]); - rkisp1_write(params->rkisp1, - RKISP1_CIF_ISP_LSC_GB_TABLE_DATA, data); - - data = RKISP1_CIF_ISP_LSC_TABLE_DATA_V12( - pconfig->b_data_tbl[i][j], - pconfig->b_data_tbl[i][j + 1]); - rkisp1_write(params->rkisp1, - RKISP1_CIF_ISP_LSC_B_TABLE_DATA, data); + rkisp1_write(rkisp1, RKISP1_CIF_ISP_LSC_R_TABLE_DATA, + RKISP1_CIF_ISP_LSC_TABLE_DATA_V12( + r_tbl[j], r_tbl[j + 1])); + rkisp1_write(rkisp1, RKISP1_CIF_ISP_LSC_GR_TABLE_DATA, + RKISP1_CIF_ISP_LSC_TABLE_DATA_V12( + gr_tbl[j], gr_tbl[j + 1])); + rkisp1_write(rkisp1, RKISP1_CIF_ISP_LSC_GB_TABLE_DATA, + RKISP1_CIF_ISP_LSC_TABLE_DATA_V12( + gb_tbl[j], gb_tbl[j + 1])); + rkisp1_write(rkisp1, RKISP1_CIF_ISP_LSC_B_TABLE_DATA, + RKISP1_CIF_ISP_LSC_TABLE_DATA_V12( + b_tbl[j], b_tbl[j + 1])); } - data = RKISP1_CIF_ISP_LSC_TABLE_DATA_V12(pconfig->r_data_tbl[i][j], 0); - rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_LSC_R_TABLE_DATA, - data); - - data = RKISP1_CIF_ISP_LSC_TABLE_DATA_V12(pconfig->gr_data_tbl[i][j], 0); - rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_LSC_GR_TABLE_DATA, - data); - - data = RKISP1_CIF_ISP_LSC_TABLE_DATA_V12(pconfig->gb_data_tbl[i][j], 0); - rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_LSC_GB_TABLE_DATA, - data); - - data = RKISP1_CIF_ISP_LSC_TABLE_DATA_V12(pconfig->b_data_tbl[i][j], 0); - rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_LSC_B_TABLE_DATA, - data); + rkisp1_write(rkisp1, RKISP1_CIF_ISP_LSC_R_TABLE_DATA, + RKISP1_CIF_ISP_LSC_TABLE_DATA_V12(r_tbl[j], 0)); + rkisp1_write(rkisp1, RKISP1_CIF_ISP_LSC_GR_TABLE_DATA, + RKISP1_CIF_ISP_LSC_TABLE_DATA_V12(gr_tbl[j], 0)); + rkisp1_write(rkisp1, RKISP1_CIF_ISP_LSC_GB_TABLE_DATA, + RKISP1_CIF_ISP_LSC_TABLE_DATA_V12(gb_tbl[j], 0)); + rkisp1_write(rkisp1, RKISP1_CIF_ISP_LSC_B_TABLE_DATA, + RKISP1_CIF_ISP_LSC_TABLE_DATA_V12(b_tbl[j], 0)); } - isp_lsc_table_sel = (isp_lsc_status & RKISP1_CIF_ISP_LSC_ACTIVE_TABLE) ? - RKISP1_CIF_ISP_LSC_TABLE_0 : - RKISP1_CIF_ISP_LSC_TABLE_1; - rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_LSC_TABLE_SEL, - isp_lsc_table_sel); + + lsc_table_sel = lsc_status & RKISP1_CIF_ISP_LSC_ACTIVE_TABLE ? 
+ RKISP1_CIF_ISP_LSC_TABLE_0 : RKISP1_CIF_ISP_LSC_TABLE_1; + rkisp1_write(rkisp1, RKISP1_CIF_ISP_LSC_TABLE_SEL, lsc_table_sel); } static void rkisp1_lsc_config(struct rkisp1_params *params, const struct rkisp1_cif_isp_lsc_config *arg) { - unsigned int i, data; - u32 lsc_ctrl; + struct rkisp1_device *rkisp1 = params->rkisp1; + u32 lsc_ctrl, data; + unsigned int i; /* To config must be off , store the current status firstly */ - lsc_ctrl = rkisp1_read(params->rkisp1, RKISP1_CIF_ISP_LSC_CTRL); + lsc_ctrl = rkisp1_read(rkisp1, RKISP1_CIF_ISP_LSC_CTRL); rkisp1_param_clear_bits(params, RKISP1_CIF_ISP_LSC_CTRL, RKISP1_CIF_ISP_LSC_CTRL_ENA); params->ops->lsc_matrix_config(params, arg); @@ -339,38 +331,31 @@ static void rkisp1_lsc_config(struct rkisp1_params *params, /* program x size tables */ data = RKISP1_CIF_ISP_LSC_SECT_SIZE(arg->x_size_tbl[i * 2], arg->x_size_tbl[i * 2 + 1]); - rkisp1_write(params->rkisp1, - RKISP1_CIF_ISP_LSC_XSIZE_01 + i * 4, data); + rkisp1_write(rkisp1, RKISP1_CIF_ISP_LSC_XSIZE(i), data); /* program x grad tables */ - data = RKISP1_CIF_ISP_LSC_SECT_SIZE(arg->x_grad_tbl[i * 2], + data = RKISP1_CIF_ISP_LSC_SECT_GRAD(arg->x_grad_tbl[i * 2], arg->x_grad_tbl[i * 2 + 1]); - rkisp1_write(params->rkisp1, - RKISP1_CIF_ISP_LSC_XGRAD_01 + i * 4, data); + rkisp1_write(rkisp1, RKISP1_CIF_ISP_LSC_XGRAD(i), data); /* program y size tables */ data = RKISP1_CIF_ISP_LSC_SECT_SIZE(arg->y_size_tbl[i * 2], arg->y_size_tbl[i * 2 + 1]); - rkisp1_write(params->rkisp1, - RKISP1_CIF_ISP_LSC_YSIZE_01 + i * 4, data); + rkisp1_write(rkisp1, RKISP1_CIF_ISP_LSC_YSIZE(i), data); /* program y grad tables */ - data = RKISP1_CIF_ISP_LSC_SECT_SIZE(arg->y_grad_tbl[i * 2], + data = RKISP1_CIF_ISP_LSC_SECT_GRAD(arg->y_grad_tbl[i * 2], arg->y_grad_tbl[i * 2 + 1]); - rkisp1_write(params->rkisp1, - RKISP1_CIF_ISP_LSC_YGRAD_01 + i * 4, data); + rkisp1_write(rkisp1, RKISP1_CIF_ISP_LSC_YGRAD(i), data); } /* restore the lsc ctrl status */ - if (lsc_ctrl & RKISP1_CIF_ISP_LSC_CTRL_ENA) { - rkisp1_param_set_bits(params, - RKISP1_CIF_ISP_LSC_CTRL, + if (lsc_ctrl & RKISP1_CIF_ISP_LSC_CTRL_ENA) + rkisp1_param_set_bits(params, RKISP1_CIF_ISP_LSC_CTRL, RKISP1_CIF_ISP_LSC_CTRL_ENA); - } else { - rkisp1_param_clear_bits(params, - RKISP1_CIF_ISP_LSC_CTRL, + else + rkisp1_param_clear_bits(params, RKISP1_CIF_ISP_LSC_CTRL, RKISP1_CIF_ISP_LSC_CTRL_ENA); - } } /* ISP Filtering function */ @@ -1066,39 +1051,96 @@ static void rkisp1_ie_enable(struct rkisp1_params *params, bool en) } } -static void rkisp1_csm_config(struct rkisp1_params *params, bool full_range) +static void rkisp1_csm_config(struct rkisp1_params *params) { - static const u16 full_range_coeff[] = { - 0x0026, 0x004b, 0x000f, - 0x01ea, 0x01d6, 0x0040, - 0x0040, 0x01ca, 0x01f6 + struct csm_coeffs { + u16 limited[9]; + u16 full[9]; + }; + static const struct csm_coeffs rec601_coeffs = { + .limited = { + 0x0021, 0x0042, 0x000d, + 0x01ed, 0x01db, 0x0038, + 0x0038, 0x01d1, 0x01f7, + }, + .full = { + 0x0026, 0x004b, 0x000f, + 0x01ea, 0x01d6, 0x0040, + 0x0040, 0x01ca, 0x01f6, + }, }; - static const u16 limited_range_coeff[] = { - 0x0021, 0x0040, 0x000d, - 0x01ed, 0x01db, 0x0038, - 0x0038, 0x01d1, 0x01f7, + static const struct csm_coeffs rec709_coeffs = { + .limited = { + 0x0018, 0x0050, 0x0008, + 0x01f3, 0x01d5, 0x0038, + 0x0038, 0x01cd, 0x01fb, + }, + .full = { + 0x001b, 0x005c, 0x0009, + 0x01f1, 0x01cf, 0x0040, + 0x0040, 0x01c6, 0x01fa, + }, }; + static const struct csm_coeffs rec2020_coeffs = { + .limited = { + 0x001d, 0x004c, 0x0007, + 0x01f0, 0x01d8, 0x0038, + 0x0038, 
0x01cd, 0x01fb, + }, + .full = { + 0x0022, 0x0057, 0x0008, + 0x01ee, 0x01d2, 0x0040, + 0x0040, 0x01c5, 0x01fb, + }, + }; + static const struct csm_coeffs smpte240m_coeffs = { + .limited = { + 0x0018, 0x004f, 0x000a, + 0x01f3, 0x01d5, 0x0038, + 0x0038, 0x01ce, 0x01fa, + }, + .full = { + 0x001b, 0x005a, 0x000b, + 0x01f1, 0x01cf, 0x0040, + 0x0040, 0x01c7, 0x01f9, + }, + }; + + const struct csm_coeffs *coeffs; + const u16 *csm; unsigned int i; - if (full_range) { - for (i = 0; i < ARRAY_SIZE(full_range_coeff); i++) - rkisp1_write(params->rkisp1, - RKISP1_CIF_ISP_CC_COEFF_0 + i * 4, - full_range_coeff[i]); + switch (params->ycbcr_encoding) { + case V4L2_YCBCR_ENC_601: + default: + coeffs = &rec601_coeffs; + break; + case V4L2_YCBCR_ENC_709: + coeffs = &rec709_coeffs; + break; + case V4L2_YCBCR_ENC_BT2020: + coeffs = &rec2020_coeffs; + break; + case V4L2_YCBCR_ENC_SMPTE240M: + coeffs = &smpte240m_coeffs; + break; + } + if (params->quantization == V4L2_QUANTIZATION_FULL_RANGE) { + csm = coeffs->full; rkisp1_param_set_bits(params, RKISP1_CIF_ISP_CTRL, RKISP1_CIF_ISP_CTRL_ISP_CSM_Y_FULL_ENA | RKISP1_CIF_ISP_CTRL_ISP_CSM_C_FULL_ENA); } else { - for (i = 0; i < ARRAY_SIZE(limited_range_coeff); i++) - rkisp1_write(params->rkisp1, - RKISP1_CIF_ISP_CC_COEFF_0 + i * 4, - limited_range_coeff[i]); - + csm = coeffs->limited; rkisp1_param_clear_bits(params, RKISP1_CIF_ISP_CTRL, RKISP1_CIF_ISP_CTRL_ISP_CSM_Y_FULL_ENA | RKISP1_CIF_ISP_CTRL_ISP_CSM_C_FULL_ENA); } + + for (i = 0; i < 9; i++) + rkisp1_write(params->rkisp1, RKISP1_CIF_ISP_CC_COEFF_0 + i * 4, + csm[i]); } /* ISP De-noise Pre-Filter(DPF) function */ @@ -1216,11 +1258,11 @@ rkisp1_isp_isr_other_config(struct rkisp1_params *params, if (module_ens & RKISP1_CIF_ISP_MODULE_DPCC) rkisp1_param_set_bits(params, RKISP1_CIF_ISP_DPCC_MODE, - RKISP1_CIF_ISP_DPCC_ENA); + RKISP1_CIF_ISP_DPCC_MODE_DPCC_ENABLE); else rkisp1_param_clear_bits(params, RKISP1_CIF_ISP_DPCC_MODE, - RKISP1_CIF_ISP_DPCC_ENA); + RKISP1_CIF_ISP_DPCC_MODE_DPCC_ENABLE); } /* update bls config */ @@ -1255,22 +1297,6 @@ rkisp1_isp_isr_other_config(struct rkisp1_params *params, RKISP1_CIF_ISP_CTRL_ISP_GAMMA_IN_ENA); } - /* update lsc config */ - if (module_cfg_update & RKISP1_CIF_ISP_MODULE_LSC) - rkisp1_lsc_config(params, - &new_params->others.lsc_config); - - if (module_en_update & RKISP1_CIF_ISP_MODULE_LSC) { - if (module_ens & RKISP1_CIF_ISP_MODULE_LSC) - rkisp1_param_set_bits(params, - RKISP1_CIF_ISP_LSC_CTRL, - RKISP1_CIF_ISP_LSC_CTRL_ENA); - else - rkisp1_param_clear_bits(params, - RKISP1_CIF_ISP_LSC_CTRL, - RKISP1_CIF_ISP_LSC_CTRL_ENA); - } - /* update awb gains */ if (module_cfg_update & RKISP1_CIF_ISP_MODULE_AWB_GAIN) params->ops->awb_gain_config(params, &new_params->others.awb_gain_config); @@ -1387,6 +1413,33 @@ rkisp1_isp_isr_other_config(struct rkisp1_params *params, } } +static void +rkisp1_isp_isr_lsc_config(struct rkisp1_params *params, + const struct rkisp1_params_cfg *new_params) +{ + unsigned int module_en_update, module_cfg_update, module_ens; + + module_en_update = new_params->module_en_update; + module_cfg_update = new_params->module_cfg_update; + module_ens = new_params->module_ens; + + /* update lsc config */ + if (module_cfg_update & RKISP1_CIF_ISP_MODULE_LSC) + rkisp1_lsc_config(params, + &new_params->others.lsc_config); + + if (module_en_update & RKISP1_CIF_ISP_MODULE_LSC) { + if (module_ens & RKISP1_CIF_ISP_MODULE_LSC) + rkisp1_param_set_bits(params, + RKISP1_CIF_ISP_LSC_CTRL, + RKISP1_CIF_ISP_LSC_CTRL_ENA); + else + rkisp1_param_clear_bits(params, + 
RKISP1_CIF_ISP_LSC_CTRL, + RKISP1_CIF_ISP_LSC_CTRL_ENA); + } +} + static void rkisp1_isp_isr_meas_config(struct rkisp1_params *params, struct rkisp1_params_cfg *new_params) { @@ -1448,47 +1501,60 @@ static void rkisp1_isp_isr_meas_config(struct rkisp1_params *params, } } -static void rkisp1_params_apply_params_cfg(struct rkisp1_params *params, - unsigned int frame_sequence) +static bool rkisp1_params_get_buffer(struct rkisp1_params *params, + struct rkisp1_buffer **buf, + struct rkisp1_params_cfg **cfg) { - struct rkisp1_params_cfg *new_params; - struct rkisp1_buffer *cur_buf = NULL; - if (list_empty(¶ms->params)) - return; - - cur_buf = list_first_entry(¶ms->params, - struct rkisp1_buffer, queue); + return false; - new_params = (struct rkisp1_params_cfg *)vb2_plane_vaddr(&cur_buf->vb.vb2_buf, 0); + *buf = list_first_entry(¶ms->params, struct rkisp1_buffer, queue); + *cfg = vb2_plane_vaddr(&(*buf)->vb.vb2_buf, 0); - rkisp1_isp_isr_other_config(params, new_params); - rkisp1_isp_isr_meas_config(params, new_params); - - /* update shadow register immediately */ - rkisp1_param_set_bits(params, RKISP1_CIF_ISP_CTRL, RKISP1_CIF_ISP_CTRL_ISP_CFG_UPD); + return true; +} - list_del(&cur_buf->queue); +static void rkisp1_params_complete_buffer(struct rkisp1_params *params, + struct rkisp1_buffer *buf, + unsigned int frame_sequence) +{ + list_del(&buf->queue); - cur_buf->vb.sequence = frame_sequence; - vb2_buffer_done(&cur_buf->vb.vb2_buf, VB2_BUF_STATE_DONE); + buf->vb.sequence = frame_sequence; + vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_DONE); } void rkisp1_params_isr(struct rkisp1_device *rkisp1) { - /* - * This isr is called when the ISR finishes processing a frame (RKISP1_CIF_ISP_FRAME). - * Configurations performed here will be applied on the next frame. - * Since frame_sequence is updated on the vertical sync signal, we should use - * frame_sequence + 1 here to indicate to userspace on which frame these parameters - * are being applied. - */ - unsigned int frame_sequence = rkisp1->isp.frame_sequence + 1; struct rkisp1_params *params = &rkisp1->params; + struct rkisp1_params_cfg *new_params; + struct rkisp1_buffer *cur_buf; spin_lock(¶ms->config_lock); - rkisp1_params_apply_params_cfg(params, frame_sequence); + if (!rkisp1_params_get_buffer(params, &cur_buf, &new_params)) + goto unlock; + + rkisp1_isp_isr_other_config(params, new_params); + rkisp1_isp_isr_lsc_config(params, new_params); + rkisp1_isp_isr_meas_config(params, new_params); + + /* update shadow register immediately */ + rkisp1_param_set_bits(params, RKISP1_CIF_ISP_CTRL, + RKISP1_CIF_ISP_CTRL_ISP_CFG_UPD); + + /* + * This isr is called when the ISR finishes processing a frame + * (RKISP1_CIF_ISP_FRAME). Configurations performed here will be + * applied on the next frame. Since frame_sequence is updated on the + * vertical sync signal, we should use frame_sequence + 1 here to + * indicate to userspace on which frame these parameters are being + * applied. 
+ */ + rkisp1_params_complete_buffer(params, cur_buf, + rkisp1->isp.frame_sequence + 1); + +unlock: spin_unlock(¶ms->config_lock); } @@ -1531,9 +1597,18 @@ static const struct rkisp1_cif_isp_afc_config rkisp1_afc_params_default_config = 14 }; -static void rkisp1_params_config_parameter(struct rkisp1_params *params) +void rkisp1_params_pre_configure(struct rkisp1_params *params, + enum rkisp1_fmt_raw_pat_type bayer_pat, + enum v4l2_quantization quantization, + enum v4l2_ycbcr_encoding ycbcr_encoding) { struct rkisp1_cif_isp_hst_config hst = rkisp1_hst_params_default_config; + struct rkisp1_params_cfg *new_params; + struct rkisp1_buffer *cur_buf; + + params->quantization = quantization; + params->ycbcr_encoding = ycbcr_encoding; + params->raw_type = bayer_pat; params->ops->awb_meas_config(params, &rkisp1_awb_params_default_config); params->ops->awb_meas_enable(params, &rkisp1_awb_params_default_config, @@ -1552,27 +1627,55 @@ static void rkisp1_params_config_parameter(struct rkisp1_params *params) rkisp1_param_set_bits(params, RKISP1_CIF_ISP_HIST_PROP_V10, rkisp1_hst_params_default_config.mode); - /* set the range */ - if (params->quantization == V4L2_QUANTIZATION_FULL_RANGE) - rkisp1_csm_config(params, true); - else - rkisp1_csm_config(params, false); + rkisp1_csm_config(params); spin_lock_irq(¶ms->config_lock); /* apply the first buffer if there is one already */ - rkisp1_params_apply_params_cfg(params, 0); + if (!rkisp1_params_get_buffer(params, &cur_buf, &new_params)) + goto unlock; + + rkisp1_isp_isr_other_config(params, new_params); + rkisp1_isp_isr_meas_config(params, new_params); + + /* update shadow register immediately */ + rkisp1_param_set_bits(params, RKISP1_CIF_ISP_CTRL, + RKISP1_CIF_ISP_CTRL_ISP_CFG_UPD); + +unlock: spin_unlock_irq(¶ms->config_lock); } -void rkisp1_params_configure(struct rkisp1_params *params, - enum rkisp1_fmt_raw_pat_type bayer_pat, - enum v4l2_quantization quantization) +void rkisp1_params_post_configure(struct rkisp1_params *params) { - params->quantization = quantization; - params->raw_type = bayer_pat; - rkisp1_params_config_parameter(params); + struct rkisp1_params_cfg *new_params; + struct rkisp1_buffer *cur_buf; + + spin_lock_irq(¶ms->config_lock); + + /* + * Apply LSC parameters from the first buffer (if any is already + * available. This must be done after the ISP gets started in the + * ISP8000Nano v18.02 (found in the i.MX8MP) as access to the LSC RAM + * is gated by the ISP_CTRL.ISP_ENABLE bit. As this initialization + * ordering doesn't affect other ISP versions negatively, do so + * unconditionally. 
+ */ + + if (!rkisp1_params_get_buffer(params, &cur_buf, &new_params)) + goto unlock; + + rkisp1_isp_isr_lsc_config(params, new_params); + + /* update shadow register immediately */ + rkisp1_param_set_bits(params, RKISP1_CIF_ISP_CTRL, + RKISP1_CIF_ISP_CTRL_ISP_CFG_UPD); + + rkisp1_params_complete_buffer(params, cur_buf, 0); + +unlock: + spin_unlock_irq(¶ms->config_lock); } /* @@ -1582,7 +1685,7 @@ void rkisp1_params_configure(struct rkisp1_params *params, void rkisp1_params_disable(struct rkisp1_params *params) { rkisp1_param_clear_bits(params, RKISP1_CIF_ISP_DPCC_MODE, - RKISP1_CIF_ISP_DPCC_ENA); + RKISP1_CIF_ISP_DPCC_MODE_DPCC_ENABLE); rkisp1_param_clear_bits(params, RKISP1_CIF_ISP_LSC_CTRL, RKISP1_CIF_ISP_LSC_CTRL_ENA); rkisp1_param_clear_bits(params, RKISP1_CIF_ISP_BLS_CTRL, diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-regs.h b/drivers/media/platform/rockchip/rkisp1/rkisp1-regs.h index dd3e6c38be67..421cc73355db 100644 --- a/drivers/media/platform/rockchip/rkisp1/rkisp1-regs.h +++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-regs.h @@ -576,7 +576,7 @@ (((v0) & 0x1FFF) | (((v1) & 0x1FFF) << 13)) #define RKISP1_CIF_ISP_LSC_SECT_SIZE(v0, v1) \ (((v0) & 0xFFF) | (((v1) & 0xFFF) << 16)) -#define RKISP1_CIF_ISP_LSC_GRAD_SIZE(v0, v1) \ +#define RKISP1_CIF_ISP_LSC_SECT_GRAD(v0, v1) \ (((v0) & 0xFFF) | (((v1) & 0xFFF) << 16)) /* LSC: ISP_LSC_TABLE_SEL */ @@ -618,19 +618,18 @@ #define RKISP1_CIF_ISP_CTRL_ISP_GAMMA_OUT_ENA_READ(x) (((x) >> 11) & 1) /* DPCC */ -/* ISP_DPCC_MODE */ -#define RKISP1_CIF_ISP_DPCC_ENA BIT(0) -#define RKISP1_CIF_ISP_DPCC_MODE_MAX 0x07 -#define RKISP1_CIF_ISP_DPCC_OUTPUTMODE_MAX 0x0F -#define RKISP1_CIF_ISP_DPCC_SETUSE_MAX 0x0F -#define RKISP1_CIF_ISP_DPCC_METHODS_SET_RESERVED 0xFFFFE000 -#define RKISP1_CIF_ISP_DPCC_LINE_THRESH_RESERVED 0xFFFF0000 -#define RKISP1_CIF_ISP_DPCC_LINE_MAD_FAC_RESERVED 0xFFFFC0C0 -#define RKISP1_CIF_ISP_DPCC_PG_FAC_RESERVED 0xFFFFC0C0 -#define RKISP1_CIF_ISP_DPCC_RND_THRESH_RESERVED 0xFFFF0000 -#define RKISP1_CIF_ISP_DPCC_RG_FAC_RESERVED 0xFFFFC0C0 -#define RKISP1_CIF_ISP_DPCC_RO_LIMIT_RESERVED 0xFFFFF000 -#define RKISP1_CIF_ISP_DPCC_RND_OFFS_RESERVED 0xFFFFF000 +#define RKISP1_CIF_ISP_DPCC_MODE_DPCC_ENABLE BIT(0) +#define RKISP1_CIF_ISP_DPCC_MODE_GRAYSCALE_MODE BIT(1) +#define RKISP1_CIF_ISP_DPCC_OUTPUT_MODE_MASK GENMASK(3, 0) +#define RKISP1_CIF_ISP_DPCC_SET_USE_MASK GENMASK(3, 0) +#define RKISP1_CIF_ISP_DPCC_METHODS_SET_MASK 0x00001f1f +#define RKISP1_CIF_ISP_DPCC_LINE_THRESH_MASK 0x0000ffff +#define RKISP1_CIF_ISP_DPCC_LINE_MAD_FAC_MASK 0x00003f3f +#define RKISP1_CIF_ISP_DPCC_PG_FAC_MASK 0x00003f3f +#define RKISP1_CIF_ISP_DPCC_RND_THRESH_MASK 0x0000ffff +#define RKISP1_CIF_ISP_DPCC_RG_FAC_MASK 0x00003f3f +#define RKISP1_CIF_ISP_DPCC_RO_LIMIT_MASK 0x00000fff +#define RKISP1_CIF_ISP_DPCC_RND_OFFS_MASK 0x00000fff /* BLS */ /* ISP_BLS_CTRL */ @@ -1073,22 +1072,10 @@ #define RKISP1_CIF_ISP_LSC_GR_TABLE_DATA (RKISP1_CIF_ISP_LSC_BASE + 0x00000018) #define RKISP1_CIF_ISP_LSC_B_TABLE_DATA (RKISP1_CIF_ISP_LSC_BASE + 0x0000001C) #define RKISP1_CIF_ISP_LSC_GB_TABLE_DATA (RKISP1_CIF_ISP_LSC_BASE + 0x00000020) -#define RKISP1_CIF_ISP_LSC_XGRAD_01 (RKISP1_CIF_ISP_LSC_BASE + 0x00000024) -#define RKISP1_CIF_ISP_LSC_XGRAD_23 (RKISP1_CIF_ISP_LSC_BASE + 0x00000028) -#define RKISP1_CIF_ISP_LSC_XGRAD_45 (RKISP1_CIF_ISP_LSC_BASE + 0x0000002C) -#define RKISP1_CIF_ISP_LSC_XGRAD_67 (RKISP1_CIF_ISP_LSC_BASE + 0x00000030) -#define RKISP1_CIF_ISP_LSC_YGRAD_01 (RKISP1_CIF_ISP_LSC_BASE + 0x00000034) -#define RKISP1_CIF_ISP_LSC_YGRAD_23 
(RKISP1_CIF_ISP_LSC_BASE + 0x00000038) -#define RKISP1_CIF_ISP_LSC_YGRAD_45 (RKISP1_CIF_ISP_LSC_BASE + 0x0000003C) -#define RKISP1_CIF_ISP_LSC_YGRAD_67 (RKISP1_CIF_ISP_LSC_BASE + 0x00000040) -#define RKISP1_CIF_ISP_LSC_XSIZE_01 (RKISP1_CIF_ISP_LSC_BASE + 0x00000044) -#define RKISP1_CIF_ISP_LSC_XSIZE_23 (RKISP1_CIF_ISP_LSC_BASE + 0x00000048) -#define RKISP1_CIF_ISP_LSC_XSIZE_45 (RKISP1_CIF_ISP_LSC_BASE + 0x0000004C) -#define RKISP1_CIF_ISP_LSC_XSIZE_67 (RKISP1_CIF_ISP_LSC_BASE + 0x00000050) -#define RKISP1_CIF_ISP_LSC_YSIZE_01 (RKISP1_CIF_ISP_LSC_BASE + 0x00000054) -#define RKISP1_CIF_ISP_LSC_YSIZE_23 (RKISP1_CIF_ISP_LSC_BASE + 0x00000058) -#define RKISP1_CIF_ISP_LSC_YSIZE_45 (RKISP1_CIF_ISP_LSC_BASE + 0x0000005C) -#define RKISP1_CIF_ISP_LSC_YSIZE_67 (RKISP1_CIF_ISP_LSC_BASE + 0x00000060) +#define RKISP1_CIF_ISP_LSC_XGRAD(n) (RKISP1_CIF_ISP_LSC_BASE + 0x00000024 + (n) * 4) +#define RKISP1_CIF_ISP_LSC_YGRAD(n) (RKISP1_CIF_ISP_LSC_BASE + 0x00000034 + (n) * 4) +#define RKISP1_CIF_ISP_LSC_XSIZE(n) (RKISP1_CIF_ISP_LSC_BASE + 0x00000044 + (n) * 4) +#define RKISP1_CIF_ISP_LSC_YSIZE(n) (RKISP1_CIF_ISP_LSC_BASE + 0x00000054 + (n) * 4) #define RKISP1_CIF_ISP_LSC_TABLE_SEL (RKISP1_CIF_ISP_LSC_BASE + 0x00000064) #define RKISP1_CIF_ISP_LSC_STATUS (RKISP1_CIF_ISP_LSC_BASE + 0x00000068) diff --git a/drivers/media/platform/rockchip/rkisp1/rkisp1-resizer.c b/drivers/media/platform/rockchip/rkisp1/rkisp1-resizer.c index f4caa8f684aa..f76afd8112b2 100644 --- a/drivers/media/platform/rockchip/rkisp1/rkisp1-resizer.c +++ b/drivers/media/platform/rockchip/rkisp1/rkisp1-resizer.c @@ -411,6 +411,10 @@ static int rkisp1_rsz_init_config(struct v4l2_subdev *sd, sink_fmt->height = RKISP1_DEFAULT_HEIGHT; sink_fmt->field = V4L2_FIELD_NONE; sink_fmt->code = RKISP1_DEF_FMT; + sink_fmt->colorspace = V4L2_COLORSPACE_SRGB; + sink_fmt->xfer_func = V4L2_XFER_FUNC_SRGB; + sink_fmt->ycbcr_enc = V4L2_YCBCR_ENC_601; + sink_fmt->quantization = V4L2_QUANTIZATION_LIM_RANGE; sink_crop = v4l2_subdev_get_try_crop(sd, sd_state, RKISP1_RSZ_PAD_SINK); @@ -503,6 +507,7 @@ static void rkisp1_rsz_set_sink_fmt(struct rkisp1_resizer *rsz, const struct rkisp1_mbus_info *mbus_info; struct v4l2_mbus_framefmt *sink_fmt, *src_fmt; struct v4l2_rect *sink_crop; + bool is_yuv; sink_fmt = rkisp1_rsz_get_pad_fmt(rsz, sd_state, RKISP1_RSZ_PAD_SINK, which); @@ -524,9 +529,6 @@ static void rkisp1_rsz_set_sink_fmt(struct rkisp1_resizer *rsz, if (which == V4L2_SUBDEV_FORMAT_ACTIVE) rsz->pixel_enc = mbus_info->pixel_enc; - /* Propagete to source pad */ - src_fmt->code = sink_fmt->code; - sink_fmt->width = clamp_t(u32, format->width, RKISP1_ISP_MIN_WIDTH, RKISP1_ISP_MAX_WIDTH); @@ -534,8 +536,45 @@ static void rkisp1_rsz_set_sink_fmt(struct rkisp1_resizer *rsz, RKISP1_ISP_MIN_HEIGHT, RKISP1_ISP_MAX_HEIGHT); + /* + * Adjust the color space fields. Accept any color primaries and + * transfer function for both YUV and Bayer. For YUV any YCbCr encoding + * and quantization range is also accepted. For Bayer formats, the YCbCr + * encoding isn't applicable, and the quantization range can only be + * full. + */ + is_yuv = mbus_info->pixel_enc == V4L2_PIXEL_ENC_YUV; + + sink_fmt->colorspace = format->colorspace ? : + (is_yuv ? V4L2_COLORSPACE_SRGB : + V4L2_COLORSPACE_RAW); + sink_fmt->xfer_func = format->xfer_func ? : + V4L2_MAP_XFER_FUNC_DEFAULT(sink_fmt->colorspace); + if (is_yuv) { + sink_fmt->ycbcr_enc = format->ycbcr_enc ? : + V4L2_MAP_YCBCR_ENC_DEFAULT(sink_fmt->colorspace); + sink_fmt->quantization = format->quantization ? 
: + V4L2_MAP_QUANTIZATION_DEFAULT(false, sink_fmt->colorspace, + sink_fmt->ycbcr_enc); + } else { + /* + * The YCbCr encoding isn't applicable for non-YUV formats, but + * V4L2 has no "no encoding" value. Hardcode it to Rec. 601, it + * should be ignored by userspace. + */ + sink_fmt->ycbcr_enc = V4L2_YCBCR_ENC_601; + sink_fmt->quantization = V4L2_QUANTIZATION_FULL_RANGE; + } + *format = *sink_fmt; + /* Propagate the media bus code and color space to the source pad. */ + src_fmt->code = sink_fmt->code; + src_fmt->colorspace = sink_fmt->colorspace; + src_fmt->xfer_func = sink_fmt->xfer_func; + src_fmt->ycbcr_enc = sink_fmt->ycbcr_enc; + src_fmt->quantization = sink_fmt->quantization; + /* Update sink crop */ rkisp1_rsz_set_sink_crop(rsz, sd_state, sink_crop, which); } diff --git a/drivers/media/platform/samsung/exynos4-is/fimc-capture.c b/drivers/media/platform/samsung/exynos4-is/fimc-capture.c index 03638c8f772d..e3b95a2b7e04 100644 --- a/drivers/media/platform/samsung/exynos4-is/fimc-capture.c +++ b/drivers/media/platform/samsung/exynos4-is/fimc-capture.c @@ -524,7 +524,7 @@ static int fimc_capture_release(struct file *file) mutex_lock(&fimc->lock); if (close && vc->streaming) { - media_pipeline_stop(&vc->ve.vdev.entity); + video_device_pipeline_stop(&vc->ve.vdev); vc->streaming = false; } @@ -1176,7 +1176,6 @@ static int fimc_cap_streamon(struct file *file, void *priv, { struct fimc_dev *fimc = video_drvdata(file); struct fimc_vid_cap *vc = &fimc->vid_cap; - struct media_entity *entity = &vc->ve.vdev.entity; struct fimc_source_info *si = NULL; struct v4l2_subdev *sd; int ret; @@ -1184,7 +1183,7 @@ static int fimc_cap_streamon(struct file *file, void *priv, if (fimc_capture_active(fimc)) return -EBUSY; - ret = media_pipeline_start(entity, &vc->ve.pipe->mp); + ret = video_device_pipeline_start(&vc->ve.vdev, &vc->ve.pipe->mp); if (ret < 0) return ret; @@ -1218,7 +1217,7 @@ static int fimc_cap_streamon(struct file *file, void *priv, } err_p_stop: - media_pipeline_stop(entity); + video_device_pipeline_stop(&vc->ve.vdev); return ret; } @@ -1234,7 +1233,7 @@ static int fimc_cap_streamoff(struct file *file, void *priv, return ret; if (vc->streaming) { - media_pipeline_stop(&vc->ve.vdev.entity); + video_device_pipeline_stop(&vc->ve.vdev); vc->streaming = false; } diff --git a/drivers/media/platform/samsung/exynos4-is/fimc-isp-video.c b/drivers/media/platform/samsung/exynos4-is/fimc-isp-video.c index 8f12240b0eb7..f6a302fa8d37 100644 --- a/drivers/media/platform/samsung/exynos4-is/fimc-isp-video.c +++ b/drivers/media/platform/samsung/exynos4-is/fimc-isp-video.c @@ -312,7 +312,7 @@ static int isp_video_release(struct file *file) is_singular_file = v4l2_fh_is_singular_file(file); if (is_singular_file && ivc->streaming) { - media_pipeline_stop(entity); + video_device_pipeline_stop(&ivc->ve.vdev); ivc->streaming = 0; } @@ -490,10 +490,9 @@ static int isp_video_streamon(struct file *file, void *priv, { struct fimc_isp *isp = video_drvdata(file); struct exynos_video_entity *ve = &isp->video_capture.ve; - struct media_entity *me = &ve->vdev.entity; int ret; - ret = media_pipeline_start(me, &ve->pipe->mp); + ret = video_device_pipeline_start(&ve->vdev, &ve->pipe->mp); if (ret < 0) return ret; @@ -508,7 +507,7 @@ static int isp_video_streamon(struct file *file, void *priv, isp->video_capture.streaming = 1; return 0; p_stop: - media_pipeline_stop(me); + video_device_pipeline_stop(&ve->vdev); return ret; } @@ -523,7 +522,7 @@ static int isp_video_streamoff(struct file *file, void *priv, if (ret < 0) return 
ret; - media_pipeline_stop(&video->ve.vdev.entity); + video_device_pipeline_stop(&video->ve.vdev); video->streaming = 0; return 0; } diff --git a/drivers/media/platform/samsung/exynos4-is/fimc-lite.c b/drivers/media/platform/samsung/exynos4-is/fimc-lite.c index 41b0a4a5929a..e185a40305a8 100644 --- a/drivers/media/platform/samsung/exynos4-is/fimc-lite.c +++ b/drivers/media/platform/samsung/exynos4-is/fimc-lite.c @@ -516,7 +516,7 @@ static int fimc_lite_release(struct file *file) if (v4l2_fh_is_singular_file(file) && atomic_read(&fimc->out_path) == FIMC_IO_DMA) { if (fimc->streaming) { - media_pipeline_stop(entity); + video_device_pipeline_stop(&fimc->ve.vdev); fimc->streaming = false; } fimc_lite_stop_capture(fimc, false); @@ -812,13 +812,12 @@ static int fimc_lite_streamon(struct file *file, void *priv, enum v4l2_buf_type type) { struct fimc_lite *fimc = video_drvdata(file); - struct media_entity *entity = &fimc->ve.vdev.entity; int ret; if (fimc_lite_active(fimc)) return -EBUSY; - ret = media_pipeline_start(entity, &fimc->ve.pipe->mp); + ret = video_device_pipeline_start(&fimc->ve.vdev, &fimc->ve.pipe->mp); if (ret < 0) return ret; @@ -835,7 +834,7 @@ static int fimc_lite_streamon(struct file *file, void *priv, } err_p_stop: - media_pipeline_stop(entity); + video_device_pipeline_stop(&fimc->ve.vdev); return 0; } @@ -849,7 +848,7 @@ static int fimc_lite_streamoff(struct file *file, void *priv, if (ret < 0) return ret; - media_pipeline_stop(&fimc->ve.vdev.entity); + video_device_pipeline_stop(&fimc->ve.vdev); fimc->streaming = false; return 0; } diff --git a/drivers/media/platform/samsung/s3c-camif/camif-capture.c b/drivers/media/platform/samsung/s3c-camif/camif-capture.c index c2d8f1e425d8..db106ebdf870 100644 --- a/drivers/media/platform/samsung/s3c-camif/camif-capture.c +++ b/drivers/media/platform/samsung/s3c-camif/camif-capture.c @@ -848,13 +848,13 @@ static int s3c_camif_streamon(struct file *file, void *priv, if (s3c_vp_active(vp)) return 0; - ret = media_pipeline_start(sensor, camif->m_pipeline); + ret = media_pipeline_start(sensor->pads, camif->m_pipeline); if (ret < 0) return ret; ret = camif_pipeline_validate(camif); if (ret < 0) { - media_pipeline_stop(sensor); + media_pipeline_stop(sensor->pads); return ret; } @@ -878,7 +878,7 @@ static int s3c_camif_streamoff(struct file *file, void *priv, ret = vb2_streamoff(&vp->vb_queue, type); if (ret == 0) - media_pipeline_stop(&camif->sensor.sd->entity); + media_pipeline_stop(camif->sensor.sd->entity.pads); return ret; } diff --git a/drivers/media/platform/st/stm32/stm32-dcmi.c b/drivers/media/platform/st/stm32/stm32-dcmi.c index 2ca95ab2b0fe..37458d4d9564 100644 --- a/drivers/media/platform/st/stm32/stm32-dcmi.c +++ b/drivers/media/platform/st/stm32/stm32-dcmi.c @@ -751,7 +751,7 @@ static int dcmi_start_streaming(struct vb2_queue *vq, unsigned int count) goto err_unlocked; } - ret = media_pipeline_start(&dcmi->vdev->entity, &dcmi->pipeline); + ret = video_device_pipeline_start(dcmi->vdev, &dcmi->pipeline); if (ret < 0) { dev_err(dcmi->dev, "%s: Failed to start streaming, media pipeline start error (%d)\n", __func__, ret); @@ -865,7 +865,7 @@ err_pipeline_stop: dcmi_pipeline_stop(dcmi); err_media_pipeline_stop: - media_pipeline_stop(&dcmi->vdev->entity); + video_device_pipeline_stop(dcmi->vdev); err_pm_put: pm_runtime_put(dcmi->dev); @@ -892,7 +892,7 @@ static void dcmi_stop_streaming(struct vb2_queue *vq) dcmi_pipeline_stop(dcmi); - media_pipeline_stop(&dcmi->vdev->entity); + video_device_pipeline_stop(dcmi->vdev); 
spin_lock_irq(&dcmi->irqlock); diff --git a/drivers/media/platform/sunxi/sun4i-csi/Kconfig b/drivers/media/platform/sunxi/sun4i-csi/Kconfig index 7960e6836f41..60610c04d6a7 100644 --- a/drivers/media/platform/sunxi/sun4i-csi/Kconfig +++ b/drivers/media/platform/sunxi/sun4i-csi/Kconfig @@ -3,7 +3,7 @@ config VIDEO_SUN4I_CSI tristate "Allwinner A10 CMOS Sensor Interface Support" depends on V4L_PLATFORM_DRIVERS - depends on VIDEO_DEV && COMMON_CLK && HAS_DMA + depends on VIDEO_DEV && COMMON_CLK && RESET_CONTROLLER && HAS_DMA depends on ARCH_SUNXI || COMPILE_TEST select MEDIA_CONTROLLER select VIDEO_V4L2_SUBDEV_API diff --git a/drivers/media/platform/sunxi/sun4i-csi/sun4i_dma.c b/drivers/media/platform/sunxi/sun4i-csi/sun4i_dma.c index 0912a1b6d525..a3e826a755fc 100644 --- a/drivers/media/platform/sunxi/sun4i-csi/sun4i_dma.c +++ b/drivers/media/platform/sunxi/sun4i-csi/sun4i_dma.c @@ -266,7 +266,7 @@ static int sun4i_csi_start_streaming(struct vb2_queue *vq, unsigned int count) goto err_clear_dma_queue; } - ret = media_pipeline_start(&csi->vdev.entity, &csi->vdev.pipe); + ret = video_device_pipeline_alloc_start(&csi->vdev); if (ret < 0) goto err_free_scratch_buffer; @@ -330,7 +330,7 @@ err_disable_device: sun4i_csi_capture_stop(csi); err_disable_pipeline: - media_pipeline_stop(&csi->vdev.entity); + video_device_pipeline_stop(&csi->vdev); err_free_scratch_buffer: dma_free_coherent(csi->dev, csi->scratch.size, csi->scratch.vaddr, @@ -359,7 +359,7 @@ static void sun4i_csi_stop_streaming(struct vb2_queue *vq) return_all_buffers(csi, VB2_BUF_STATE_ERROR); spin_unlock_irqrestore(&csi->qlock, flags); - media_pipeline_stop(&csi->vdev.entity); + video_device_pipeline_stop(&csi->vdev); dma_free_coherent(csi->dev, csi->scratch.size, csi->scratch.vaddr, csi->scratch.paddr); diff --git a/drivers/media/platform/sunxi/sun6i-csi/Kconfig b/drivers/media/platform/sunxi/sun6i-csi/Kconfig index 0345901617d4..886006f6a48a 100644 --- a/drivers/media/platform/sunxi/sun6i-csi/Kconfig +++ b/drivers/media/platform/sunxi/sun6i-csi/Kconfig @@ -1,13 +1,15 @@ # SPDX-License-Identifier: GPL-2.0-only config VIDEO_SUN6I_CSI - tristate "Allwinner V3s Camera Sensor Interface driver" - depends on V4L_PLATFORM_DRIVERS - depends on VIDEO_DEV && COMMON_CLK && HAS_DMA + tristate "Allwinner A31 Camera Sensor Interface (CSI) Driver" + depends on V4L_PLATFORM_DRIVERS && VIDEO_DEV depends on ARCH_SUNXI || COMPILE_TEST + depends on PM && COMMON_CLK && RESET_CONTROLLER && HAS_DMA select MEDIA_CONTROLLER select VIDEO_V4L2_SUBDEV_API select VIDEOBUF2_DMA_CONTIG - select REGMAP_MMIO select V4L2_FWNODE + select REGMAP_MMIO help - Support for the Allwinner Camera Sensor Interface Controller on V3s. + Support for the Allwinner A31 Camera Sensor Interface (CSI) + controller, also found on other platforms such as the A83T, H3, + V3/V3s or A64. 
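[Editor's aside, not part of the patch] The hunks in the rcar-vin, vsp1, rkisp1, exynos4-is, s3c-camif, stm32-dcmi and sunxi CSI drivers above all perform the same conversion: the raw media_pipeline_start()/media_pipeline_stop() calls on a video device's entity are replaced by the video_device_pipeline_start(), video_device_pipeline_stop() and video_device_pipeline_alloc_start() wrappers that operate directly on the struct video_device. A rough sketch of the resulting vb2 start/stop pattern is shown below; the demo_priv structure and the demo_* function names are invented for the illustration, while the pipeline helper calls are the ones used verbatim in the hunks.

    #include <media/media-entity.h>
    #include <media/v4l2-dev.h>
    #include <media/videobuf2-core.h>

    /* Hypothetical driver state: one video device plus its own pipeline. */
    struct demo_priv {
            struct video_device vdev;
            struct media_pipeline pipe;
    };

    static int demo_start_streaming(struct vb2_queue *vq, unsigned int count)
    {
            struct demo_priv *priv = vb2_get_drv_priv(vq);
            int ret;

            /* Claim the pipeline through the video device, not its entity. */
            ret = video_device_pipeline_start(&priv->vdev, &priv->pipe);
            if (ret < 0)
                    return ret;

            /* ... validate the pipeline, program the hardware, start subdevs ... */

            return 0;
    }

    static void demo_stop_streaming(struct vb2_queue *vq)
    {
            struct demo_priv *priv = vb2_get_drv_priv(vq);

            /* ... stop subdevs and the DMA engine ... */

            video_device_pipeline_stop(&priv->vdev);
    }

Drivers that previously picked or allocated a shared pipe under the graph mutex, such as rcar-vin and sun4i-csi in the hunks above, instead call video_device_pipeline_alloc_start(&priv->vdev) on the start path; the stop path is the same video_device_pipeline_stop() in both cases.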
diff --git a/drivers/media/platform/sunxi/sun6i-csi/sun6i_csi.c b/drivers/media/platform/sunxi/sun6i-csi/sun6i_csi.c index a971587dbbd1..8b99c17e8403 100644 --- a/drivers/media/platform/sunxi/sun6i-csi/sun6i_csi.c +++ b/drivers/media/platform/sunxi/sun6i-csi/sun6i_csi.c @@ -23,43 +23,27 @@ #include <linux/sched.h> #include <linux/sizes.h> #include <linux/slab.h> +#include <media/v4l2-mc.h> #include "sun6i_csi.h" #include "sun6i_csi_reg.h" -#define MODULE_NAME "sun6i-csi" - -struct sun6i_csi_dev { - struct sun6i_csi csi; - struct device *dev; - - struct regmap *regmap; - struct clk *clk_mod; - struct clk *clk_ram; - struct reset_control *rstc_bus; - - int planar_offset[3]; -}; - -static inline struct sun6i_csi_dev *sun6i_csi_to_dev(struct sun6i_csi *csi) -{ - return container_of(csi, struct sun6i_csi_dev, csi); -} +/* Helpers */ /* TODO add 10&12 bit YUV, RGB support */ -bool sun6i_csi_is_format_supported(struct sun6i_csi *csi, +bool sun6i_csi_is_format_supported(struct sun6i_csi_device *csi_dev, u32 pixformat, u32 mbus_code) { - struct sun6i_csi_dev *sdev = sun6i_csi_to_dev(csi); + struct sun6i_csi_v4l2 *v4l2 = &csi_dev->v4l2; /* * Some video receivers have the ability to be compatible with * 8bit and 16bit bus width. * Identify the media bus format from device tree. */ - if ((sdev->csi.v4l2_ep.bus_type == V4L2_MBUS_PARALLEL - || sdev->csi.v4l2_ep.bus_type == V4L2_MBUS_BT656) - && sdev->csi.v4l2_ep.bus.parallel.bus_width == 16) { + if ((v4l2->v4l2_ep.bus_type == V4L2_MBUS_PARALLEL + || v4l2->v4l2_ep.bus_type == V4L2_MBUS_BT656) + && v4l2->v4l2_ep.bus.parallel.bus_width == 16) { switch (pixformat) { case V4L2_PIX_FMT_NV12_16L16: case V4L2_PIX_FMT_NV12: @@ -76,13 +60,14 @@ bool sun6i_csi_is_format_supported(struct sun6i_csi *csi, case MEDIA_BUS_FMT_YVYU8_1X16: return true; default: - dev_dbg(sdev->dev, "Unsupported mbus code: 0x%x\n", + dev_dbg(csi_dev->dev, + "Unsupported mbus code: 0x%x\n", mbus_code); break; } break; default: - dev_dbg(sdev->dev, "Unsupported pixformat: 0x%x\n", + dev_dbg(csi_dev->dev, "Unsupported pixformat: 0x%x\n", pixformat); break; } @@ -139,7 +124,7 @@ bool sun6i_csi_is_format_supported(struct sun6i_csi *csi, case MEDIA_BUS_FMT_YVYU8_2X8: return true; default: - dev_dbg(sdev->dev, "Unsupported mbus code: 0x%x\n", + dev_dbg(csi_dev->dev, "Unsupported mbus code: 0x%x\n", mbus_code); break; } @@ -154,67 +139,37 @@ bool sun6i_csi_is_format_supported(struct sun6i_csi *csi, return (mbus_code == MEDIA_BUS_FMT_JPEG_1X8); default: - dev_dbg(sdev->dev, "Unsupported pixformat: 0x%x\n", pixformat); + dev_dbg(csi_dev->dev, "Unsupported pixformat: 0x%x\n", + pixformat); break; } return false; } -int sun6i_csi_set_power(struct sun6i_csi *csi, bool enable) +int sun6i_csi_set_power(struct sun6i_csi_device *csi_dev, bool enable) { - struct sun6i_csi_dev *sdev = sun6i_csi_to_dev(csi); - struct device *dev = sdev->dev; - struct regmap *regmap = sdev->regmap; + struct device *dev = csi_dev->dev; + struct regmap *regmap = csi_dev->regmap; int ret; if (!enable) { regmap_update_bits(regmap, CSI_EN_REG, CSI_EN_CSI_EN, 0); + pm_runtime_put(dev); - clk_disable_unprepare(sdev->clk_ram); - if (of_device_is_compatible(dev->of_node, - "allwinner,sun50i-a64-csi")) - clk_rate_exclusive_put(sdev->clk_mod); - clk_disable_unprepare(sdev->clk_mod); - reset_control_assert(sdev->rstc_bus); return 0; } - ret = clk_prepare_enable(sdev->clk_mod); - if (ret) { - dev_err(sdev->dev, "Enable csi clk err %d\n", ret); + ret = pm_runtime_resume_and_get(dev); + if (ret < 0) return ret; - } - - if 
(of_device_is_compatible(dev->of_node, "allwinner,sun50i-a64-csi")) - clk_set_rate_exclusive(sdev->clk_mod, 300000000); - - ret = clk_prepare_enable(sdev->clk_ram); - if (ret) { - dev_err(sdev->dev, "Enable clk_dram_csi clk err %d\n", ret); - goto clk_mod_disable; - } - - ret = reset_control_deassert(sdev->rstc_bus); - if (ret) { - dev_err(sdev->dev, "reset err %d\n", ret); - goto clk_ram_disable; - } regmap_update_bits(regmap, CSI_EN_REG, CSI_EN_CSI_EN, CSI_EN_CSI_EN); return 0; - -clk_ram_disable: - clk_disable_unprepare(sdev->clk_ram); -clk_mod_disable: - if (of_device_is_compatible(dev->of_node, "allwinner,sun50i-a64-csi")) - clk_rate_exclusive_put(sdev->clk_mod); - clk_disable_unprepare(sdev->clk_mod); - return ret; } -static enum csi_input_fmt get_csi_input_format(struct sun6i_csi_dev *sdev, +static enum csi_input_fmt get_csi_input_format(struct sun6i_csi_device *csi_dev, u32 mbus_code, u32 pixformat) { /* non-YUV */ @@ -232,12 +187,13 @@ static enum csi_input_fmt get_csi_input_format(struct sun6i_csi_dev *sdev, } /* not support YUV420 input format yet */ - dev_dbg(sdev->dev, "Select YUV422 as default input format of CSI.\n"); + dev_dbg(csi_dev->dev, "Select YUV422 as default input format of CSI.\n"); return CSI_INPUT_FORMAT_YUV422; } -static enum csi_output_fmt get_csi_output_format(struct sun6i_csi_dev *sdev, - u32 pixformat, u32 field) +static enum csi_output_fmt +get_csi_output_format(struct sun6i_csi_device *csi_dev, u32 pixformat, + u32 field) { bool buf_interlaced = false; @@ -296,14 +252,14 @@ static enum csi_output_fmt get_csi_output_format(struct sun6i_csi_dev *sdev, return buf_interlaced ? CSI_FRAME_RAW_8 : CSI_FIELD_RAW_8; default: - dev_warn(sdev->dev, "Unsupported pixformat: 0x%x\n", pixformat); + dev_warn(csi_dev->dev, "Unsupported pixformat: 0x%x\n", pixformat); break; } return CSI_FIELD_RAW_8; } -static enum csi_input_seq get_csi_input_seq(struct sun6i_csi_dev *sdev, +static enum csi_input_seq get_csi_input_seq(struct sun6i_csi_device *csi_dev, u32 mbus_code, u32 pixformat) { /* Input sequence does not apply to non-YUV formats */ @@ -330,7 +286,7 @@ static enum csi_input_seq get_csi_input_seq(struct sun6i_csi_dev *sdev, case MEDIA_BUS_FMT_YVYU8_2X8: return CSI_INPUT_SEQ_YVYU; default: - dev_warn(sdev->dev, "Unsupported mbus code: 0x%x\n", + dev_warn(csi_dev->dev, "Unsupported mbus code: 0x%x\n", mbus_code); break; } @@ -352,7 +308,7 @@ static enum csi_input_seq get_csi_input_seq(struct sun6i_csi_dev *sdev, case MEDIA_BUS_FMT_YVYU8_2X8: return CSI_INPUT_SEQ_YUYV; default: - dev_warn(sdev->dev, "Unsupported mbus code: 0x%x\n", + dev_warn(csi_dev->dev, "Unsupported mbus code: 0x%x\n", mbus_code); break; } @@ -362,7 +318,7 @@ static enum csi_input_seq get_csi_input_seq(struct sun6i_csi_dev *sdev, return CSI_INPUT_SEQ_YUYV; default: - dev_warn(sdev->dev, "Unsupported pixformat: 0x%x, defaulting to YUYV\n", + dev_warn(csi_dev->dev, "Unsupported pixformat: 0x%x, defaulting to YUYV\n", pixformat); break; } @@ -370,23 +326,23 @@ static enum csi_input_seq get_csi_input_seq(struct sun6i_csi_dev *sdev, return CSI_INPUT_SEQ_YUYV; } -static void sun6i_csi_setup_bus(struct sun6i_csi_dev *sdev) +static void sun6i_csi_setup_bus(struct sun6i_csi_device *csi_dev) { - struct v4l2_fwnode_endpoint *endpoint = &sdev->csi.v4l2_ep; - struct sun6i_csi *csi = &sdev->csi; + struct v4l2_fwnode_endpoint *endpoint = &csi_dev->v4l2.v4l2_ep; + struct sun6i_csi_config *config = &csi_dev->config; unsigned char bus_width; u32 flags; u32 cfg; bool input_interlaced = false; - if (csi->config.field == 
V4L2_FIELD_INTERLACED - || csi->config.field == V4L2_FIELD_INTERLACED_TB - || csi->config.field == V4L2_FIELD_INTERLACED_BT) + if (config->field == V4L2_FIELD_INTERLACED + || config->field == V4L2_FIELD_INTERLACED_TB + || config->field == V4L2_FIELD_INTERLACED_BT) input_interlaced = true; bus_width = endpoint->bus.parallel.bus_width; - regmap_read(sdev->regmap, CSI_IF_CFG_REG, &cfg); + regmap_read(csi_dev->regmap, CSI_IF_CFG_REG, &cfg); cfg &= ~(CSI_IF_CFG_CSI_IF_MASK | CSI_IF_CFG_MIPI_IF_MASK | CSI_IF_CFG_IF_DATA_WIDTH_MASK | @@ -434,7 +390,7 @@ static void sun6i_csi_setup_bus(struct sun6i_csi_dev *sdev) cfg |= CSI_IF_CFG_CLK_POL_FALLING_EDGE; break; default: - dev_warn(sdev->dev, "Unsupported bus type: %d\n", + dev_warn(csi_dev->dev, "Unsupported bus type: %d\n", endpoint->bus_type); break; } @@ -452,54 +408,54 @@ static void sun6i_csi_setup_bus(struct sun6i_csi_dev *sdev) case 16: /* No need to configure DATA_WIDTH for 16bit */ break; default: - dev_warn(sdev->dev, "Unsupported bus width: %u\n", bus_width); + dev_warn(csi_dev->dev, "Unsupported bus width: %u\n", bus_width); break; } - regmap_write(sdev->regmap, CSI_IF_CFG_REG, cfg); + regmap_write(csi_dev->regmap, CSI_IF_CFG_REG, cfg); } -static void sun6i_csi_set_format(struct sun6i_csi_dev *sdev) +static void sun6i_csi_set_format(struct sun6i_csi_device *csi_dev) { - struct sun6i_csi *csi = &sdev->csi; + struct sun6i_csi_config *config = &csi_dev->config; u32 cfg; u32 val; - regmap_read(sdev->regmap, CSI_CH_CFG_REG, &cfg); + regmap_read(csi_dev->regmap, CSI_CH_CFG_REG, &cfg); cfg &= ~(CSI_CH_CFG_INPUT_FMT_MASK | CSI_CH_CFG_OUTPUT_FMT_MASK | CSI_CH_CFG_VFLIP_EN | CSI_CH_CFG_HFLIP_EN | CSI_CH_CFG_FIELD_SEL_MASK | CSI_CH_CFG_INPUT_SEQ_MASK); - val = get_csi_input_format(sdev, csi->config.code, - csi->config.pixelformat); + val = get_csi_input_format(csi_dev, config->code, + config->pixelformat); cfg |= CSI_CH_CFG_INPUT_FMT(val); - val = get_csi_output_format(sdev, csi->config.pixelformat, - csi->config.field); + val = get_csi_output_format(csi_dev, config->pixelformat, + config->field); cfg |= CSI_CH_CFG_OUTPUT_FMT(val); - val = get_csi_input_seq(sdev, csi->config.code, - csi->config.pixelformat); + val = get_csi_input_seq(csi_dev, config->code, + config->pixelformat); cfg |= CSI_CH_CFG_INPUT_SEQ(val); - if (csi->config.field == V4L2_FIELD_TOP) + if (config->field == V4L2_FIELD_TOP) cfg |= CSI_CH_CFG_FIELD_SEL_FIELD0; - else if (csi->config.field == V4L2_FIELD_BOTTOM) + else if (config->field == V4L2_FIELD_BOTTOM) cfg |= CSI_CH_CFG_FIELD_SEL_FIELD1; else cfg |= CSI_CH_CFG_FIELD_SEL_BOTH; - regmap_write(sdev->regmap, CSI_CH_CFG_REG, cfg); + regmap_write(csi_dev->regmap, CSI_CH_CFG_REG, cfg); } -static void sun6i_csi_set_window(struct sun6i_csi_dev *sdev) +static void sun6i_csi_set_window(struct sun6i_csi_device *csi_dev) { - struct sun6i_csi_config *config = &sdev->csi.config; + struct sun6i_csi_config *config = &csi_dev->config; u32 bytesperline_y; u32 bytesperline_c; - int *planar_offset = sdev->planar_offset; + int *planar_offset = csi_dev->planar_offset; u32 width = config->width; u32 height = config->height; u32 hor_len = width; @@ -509,7 +465,7 @@ static void sun6i_csi_set_window(struct sun6i_csi_dev *sdev) case V4L2_PIX_FMT_YVYU: case V4L2_PIX_FMT_UYVY: case V4L2_PIX_FMT_VYUY: - dev_dbg(sdev->dev, + dev_dbg(csi_dev->dev, "Horizontal length should be 2 times of width for packed YUV formats!\n"); hor_len = width * 2; break; @@ -517,10 +473,10 @@ static void sun6i_csi_set_window(struct sun6i_csi_dev *sdev) break; } - 
regmap_write(sdev->regmap, CSI_CH_HSIZE_REG, + regmap_write(csi_dev->regmap, CSI_CH_HSIZE_REG, CSI_CH_HSIZE_HOR_LEN(hor_len) | CSI_CH_HSIZE_HOR_START(0)); - regmap_write(sdev->regmap, CSI_CH_VSIZE_REG, + regmap_write(csi_dev->regmap, CSI_CH_VSIZE_REG, CSI_CH_VSIZE_VER_LEN(height) | CSI_CH_VSIZE_VER_START(0)); @@ -552,7 +508,7 @@ static void sun6i_csi_set_window(struct sun6i_csi_dev *sdev) bytesperline_c * height; break; default: /* raw */ - dev_dbg(sdev->dev, + dev_dbg(csi_dev->dev, "Calculating pixelformat(0x%x)'s bytesperline as a packed format\n", config->pixelformat); bytesperline_y = (sun6i_csi_get_bpp(config->pixelformat) * @@ -563,46 +519,42 @@ static void sun6i_csi_set_window(struct sun6i_csi_dev *sdev) break; } - regmap_write(sdev->regmap, CSI_CH_BUF_LEN_REG, + regmap_write(csi_dev->regmap, CSI_CH_BUF_LEN_REG, CSI_CH_BUF_LEN_BUF_LEN_C(bytesperline_c) | CSI_CH_BUF_LEN_BUF_LEN_Y(bytesperline_y)); } -int sun6i_csi_update_config(struct sun6i_csi *csi, +int sun6i_csi_update_config(struct sun6i_csi_device *csi_dev, struct sun6i_csi_config *config) { - struct sun6i_csi_dev *sdev = sun6i_csi_to_dev(csi); - if (!config) return -EINVAL; - memcpy(&csi->config, config, sizeof(csi->config)); + memcpy(&csi_dev->config, config, sizeof(csi_dev->config)); - sun6i_csi_setup_bus(sdev); - sun6i_csi_set_format(sdev); - sun6i_csi_set_window(sdev); + sun6i_csi_setup_bus(csi_dev); + sun6i_csi_set_format(csi_dev); + sun6i_csi_set_window(csi_dev); return 0; } -void sun6i_csi_update_buf_addr(struct sun6i_csi *csi, dma_addr_t addr) +void sun6i_csi_update_buf_addr(struct sun6i_csi_device *csi_dev, + dma_addr_t addr) { - struct sun6i_csi_dev *sdev = sun6i_csi_to_dev(csi); - - regmap_write(sdev->regmap, CSI_CH_F0_BUFA_REG, - (addr + sdev->planar_offset[0]) >> 2); - if (sdev->planar_offset[1] != -1) - regmap_write(sdev->regmap, CSI_CH_F1_BUFA_REG, - (addr + sdev->planar_offset[1]) >> 2); - if (sdev->planar_offset[2] != -1) - regmap_write(sdev->regmap, CSI_CH_F2_BUFA_REG, - (addr + sdev->planar_offset[2]) >> 2); + regmap_write(csi_dev->regmap, CSI_CH_F0_BUFA_REG, + (addr + csi_dev->planar_offset[0]) >> 2); + if (csi_dev->planar_offset[1] != -1) + regmap_write(csi_dev->regmap, CSI_CH_F1_BUFA_REG, + (addr + csi_dev->planar_offset[1]) >> 2); + if (csi_dev->planar_offset[2] != -1) + regmap_write(csi_dev->regmap, CSI_CH_F2_BUFA_REG, + (addr + csi_dev->planar_offset[2]) >> 2); } -void sun6i_csi_set_stream(struct sun6i_csi *csi, bool enable) +void sun6i_csi_set_stream(struct sun6i_csi_device *csi_dev, bool enable) { - struct sun6i_csi_dev *sdev = sun6i_csi_to_dev(csi); - struct regmap *regmap = sdev->regmap; + struct regmap *regmap = csi_dev->regmap; if (!enable) { regmap_update_bits(regmap, CSI_CAP_REG, CSI_CAP_CH0_VCAP_ON, 0); @@ -623,10 +575,15 @@ void sun6i_csi_set_stream(struct sun6i_csi *csi, bool enable) CSI_CAP_CH0_VCAP_ON); } -/* ----------------------------------------------------------------------------- - * Media Controller and V4L2 - */ -static int sun6i_csi_link_entity(struct sun6i_csi *csi, +/* Media */ + +static const struct media_device_ops sun6i_csi_media_ops = { + .link_notify = v4l2_pipeline_link_notify, +}; + +/* V4L2 */ + +static int sun6i_csi_link_entity(struct sun6i_csi_device *csi_dev, struct media_entity *entity, struct fwnode_handle *fwnode) { @@ -637,24 +594,25 @@ static int sun6i_csi_link_entity(struct sun6i_csi *csi, ret = media_entity_get_fwnode_pad(entity, fwnode, MEDIA_PAD_FL_SOURCE); if (ret < 0) { - dev_err(csi->dev, "%s: no source pad in external entity %s\n", - __func__, 
entity->name); + dev_err(csi_dev->dev, + "%s: no source pad in external entity %s\n", __func__, + entity->name); return -EINVAL; } src_pad_index = ret; - sink = &csi->video.vdev.entity; - sink_pad = &csi->video.pad; + sink = &csi_dev->video.video_dev.entity; + sink_pad = &csi_dev->video.pad; - dev_dbg(csi->dev, "creating %s:%u -> %s:%u link\n", + dev_dbg(csi_dev->dev, "creating %s:%u -> %s:%u link\n", entity->name, src_pad_index, sink->name, sink_pad->index); ret = media_create_pad_link(entity, src_pad_index, sink, sink_pad->index, MEDIA_LNK_FL_ENABLED | MEDIA_LNK_FL_IMMUTABLE); if (ret < 0) { - dev_err(csi->dev, "failed to create %s:%u -> %s:%u link\n", + dev_err(csi_dev->dev, "failed to create %s:%u -> %s:%u link\n", entity->name, src_pad_index, sink->name, sink_pad->index); return ret; @@ -665,27 +623,29 @@ static int sun6i_csi_link_entity(struct sun6i_csi *csi, static int sun6i_subdev_notify_complete(struct v4l2_async_notifier *notifier) { - struct sun6i_csi *csi = container_of(notifier, struct sun6i_csi, - notifier); - struct v4l2_device *v4l2_dev = &csi->v4l2_dev; + struct sun6i_csi_device *csi_dev = + container_of(notifier, struct sun6i_csi_device, + v4l2.notifier); + struct sun6i_csi_v4l2 *v4l2 = &csi_dev->v4l2; + struct v4l2_device *v4l2_dev = &v4l2->v4l2_dev; struct v4l2_subdev *sd; int ret; - dev_dbg(csi->dev, "notify complete, all subdevs registered\n"); + dev_dbg(csi_dev->dev, "notify complete, all subdevs registered\n"); sd = list_first_entry(&v4l2_dev->subdevs, struct v4l2_subdev, list); if (!sd) return -EINVAL; - ret = sun6i_csi_link_entity(csi, &sd->entity, sd->fwnode); + ret = sun6i_csi_link_entity(csi_dev, &sd->entity, sd->fwnode); if (ret < 0) return ret; - ret = v4l2_device_register_subdev_nodes(&csi->v4l2_dev); + ret = v4l2_device_register_subdev_nodes(v4l2_dev); if (ret < 0) return ret; - return media_device_register(&csi->media_dev); + return 0; } static const struct v4l2_async_notifier_operations sun6i_csi_async_ops = { @@ -696,7 +656,7 @@ static int sun6i_csi_fwnode_parse(struct device *dev, struct v4l2_fwnode_endpoint *vep, struct v4l2_async_subdev *asd) { - struct sun6i_csi *csi = dev_get_drvdata(dev); + struct sun6i_csi_device *csi_dev = dev_get_drvdata(dev); if (vep->base.port || vep->base.id) { dev_warn(dev, "Only support a single port with one endpoint\n"); @@ -706,7 +666,7 @@ static int sun6i_csi_fwnode_parse(struct device *dev, switch (vep->bus_type) { case V4L2_MBUS_PARALLEL: case V4L2_MBUS_BT656: - csi->v4l2_ep = *vep; + csi_dev->v4l2.v4l2_ep = *vep; return 0; default: dev_err(dev, "Unsupported media bus type\n"); @@ -714,87 +674,102 @@ static int sun6i_csi_fwnode_parse(struct device *dev, } } -static void sun6i_csi_v4l2_cleanup(struct sun6i_csi *csi) -{ - media_device_unregister(&csi->media_dev); - v4l2_async_nf_unregister(&csi->notifier); - v4l2_async_nf_cleanup(&csi->notifier); - sun6i_video_cleanup(&csi->video); - v4l2_device_unregister(&csi->v4l2_dev); - v4l2_ctrl_handler_free(&csi->ctrl_handler); - media_device_cleanup(&csi->media_dev); -} - -static int sun6i_csi_v4l2_init(struct sun6i_csi *csi) +static int sun6i_csi_v4l2_setup(struct sun6i_csi_device *csi_dev) { + struct sun6i_csi_v4l2 *v4l2 = &csi_dev->v4l2; + struct media_device *media_dev = &v4l2->media_dev; + struct v4l2_device *v4l2_dev = &v4l2->v4l2_dev; + struct v4l2_async_notifier *notifier = &v4l2->notifier; + struct device *dev = csi_dev->dev; int ret; - csi->media_dev.dev = csi->dev; - strscpy(csi->media_dev.model, "Allwinner Video Capture Device", - sizeof(csi->media_dev.model)); - 
csi->media_dev.hw_revision = 0; + /* Media Device */ + + strscpy(media_dev->model, SUN6I_CSI_DESCRIPTION, + sizeof(media_dev->model)); + media_dev->hw_revision = 0; + media_dev->ops = &sun6i_csi_media_ops; + media_dev->dev = dev; - media_device_init(&csi->media_dev); - v4l2_async_nf_init(&csi->notifier); + media_device_init(media_dev); - ret = v4l2_ctrl_handler_init(&csi->ctrl_handler, 0); + ret = media_device_register(media_dev); if (ret) { - dev_err(csi->dev, "V4L2 controls handler init failed (%d)\n", - ret); - goto clean_media; + dev_err(dev, "failed to register media device: %d\n", ret); + goto error_media; } - csi->v4l2_dev.mdev = &csi->media_dev; - csi->v4l2_dev.ctrl_handler = &csi->ctrl_handler; - ret = v4l2_device_register(csi->dev, &csi->v4l2_dev); + /* V4L2 Device */ + + v4l2_dev->mdev = media_dev; + + ret = v4l2_device_register(dev, v4l2_dev); if (ret) { - dev_err(csi->dev, "V4L2 device registration failed (%d)\n", - ret); - goto free_ctrl; + dev_err(dev, "failed to register v4l2 device: %d\n", ret); + goto error_media; } - ret = sun6i_video_init(&csi->video, csi, "sun6i-csi"); + /* Video */ + + ret = sun6i_video_setup(csi_dev); if (ret) - goto unreg_v4l2; + goto error_v4l2_device; - ret = v4l2_async_nf_parse_fwnode_endpoints(csi->dev, - &csi->notifier, + /* V4L2 Async */ + + v4l2_async_nf_init(notifier); + notifier->ops = &sun6i_csi_async_ops; + + ret = v4l2_async_nf_parse_fwnode_endpoints(dev, notifier, sizeof(struct v4l2_async_subdev), sun6i_csi_fwnode_parse); if (ret) - goto clean_video; + goto error_video; - csi->notifier.ops = &sun6i_csi_async_ops; - - ret = v4l2_async_nf_register(&csi->v4l2_dev, &csi->notifier); + ret = v4l2_async_nf_register(v4l2_dev, notifier); if (ret) { - dev_err(csi->dev, "notifier registration failed\n"); - goto clean_video; + dev_err(dev, "failed to register v4l2 async notifier: %d\n", + ret); + goto error_v4l2_async_notifier; } return 0; -clean_video: - sun6i_video_cleanup(&csi->video); -unreg_v4l2: - v4l2_device_unregister(&csi->v4l2_dev); -free_ctrl: - v4l2_ctrl_handler_free(&csi->ctrl_handler); -clean_media: - v4l2_async_nf_cleanup(&csi->notifier); - media_device_cleanup(&csi->media_dev); +error_v4l2_async_notifier: + v4l2_async_nf_cleanup(notifier); + +error_video: + sun6i_video_cleanup(csi_dev); + +error_v4l2_device: + v4l2_device_unregister(&v4l2->v4l2_dev); + +error_media: + media_device_unregister(media_dev); + media_device_cleanup(media_dev); return ret; } -/* ----------------------------------------------------------------------------- - * Resources and IRQ - */ -static irqreturn_t sun6i_csi_isr(int irq, void *dev_id) +static void sun6i_csi_v4l2_cleanup(struct sun6i_csi_device *csi_dev) { - struct sun6i_csi_dev *sdev = (struct sun6i_csi_dev *)dev_id; - struct regmap *regmap = sdev->regmap; + struct sun6i_csi_v4l2 *v4l2 = &csi_dev->v4l2; + + media_device_unregister(&v4l2->media_dev); + v4l2_async_nf_unregister(&v4l2->notifier); + v4l2_async_nf_cleanup(&v4l2->notifier); + sun6i_video_cleanup(csi_dev); + v4l2_device_unregister(&v4l2->v4l2_dev); + media_device_cleanup(&v4l2->media_dev); +} + +/* Platform */ + +static irqreturn_t sun6i_csi_interrupt(int irq, void *private) +{ + struct sun6i_csi_device *csi_dev = private; + struct regmap *regmap = csi_dev->regmap; u32 status; regmap_read(regmap, CSI_CH_INT_STA_REG, &status); @@ -814,13 +789,63 @@ static irqreturn_t sun6i_csi_isr(int irq, void *dev_id) } if (status & CSI_CH_INT_STA_FD_PD) - sun6i_video_frame_done(&sdev->csi.video); + sun6i_video_frame_done(csi_dev); regmap_write(regmap, 
CSI_CH_INT_STA_REG, status); return IRQ_HANDLED; } +static int sun6i_csi_suspend(struct device *dev) +{ + struct sun6i_csi_device *csi_dev = dev_get_drvdata(dev); + + reset_control_assert(csi_dev->reset); + clk_disable_unprepare(csi_dev->clock_ram); + clk_disable_unprepare(csi_dev->clock_mod); + + return 0; +} + +static int sun6i_csi_resume(struct device *dev) +{ + struct sun6i_csi_device *csi_dev = dev_get_drvdata(dev); + int ret; + + ret = reset_control_deassert(csi_dev->reset); + if (ret) { + dev_err(dev, "failed to deassert reset\n"); + return ret; + } + + ret = clk_prepare_enable(csi_dev->clock_mod); + if (ret) { + dev_err(dev, "failed to enable module clock\n"); + goto error_reset; + } + + ret = clk_prepare_enable(csi_dev->clock_ram); + if (ret) { + dev_err(dev, "failed to enable ram clock\n"); + goto error_clock_mod; + } + + return 0; + +error_clock_mod: + clk_disable_unprepare(csi_dev->clock_mod); + +error_reset: + reset_control_assert(csi_dev->reset); + + return ret; +} + +static const struct dev_pm_ops sun6i_csi_pm_ops = { + .runtime_suspend = sun6i_csi_suspend, + .runtime_resume = sun6i_csi_resume, +}; + static const struct regmap_config sun6i_csi_regmap_config = { .reg_bits = 32, .reg_stride = 4, @@ -828,106 +853,181 @@ static const struct regmap_config sun6i_csi_regmap_config = { .max_register = 0x9c, }; -static int sun6i_csi_resource_request(struct sun6i_csi_dev *sdev, - struct platform_device *pdev) +static int sun6i_csi_resources_setup(struct sun6i_csi_device *csi_dev, + struct platform_device *platform_dev) { + struct device *dev = csi_dev->dev; + const struct sun6i_csi_variant *variant; void __iomem *io_base; int ret; int irq; - io_base = devm_platform_ioremap_resource(pdev, 0); + variant = of_device_get_match_data(dev); + if (!variant) + return -EINVAL; + + /* Registers */ + + io_base = devm_platform_ioremap_resource(platform_dev, 0); if (IS_ERR(io_base)) return PTR_ERR(io_base); - sdev->regmap = devm_regmap_init_mmio_clk(&pdev->dev, "bus", io_base, - &sun6i_csi_regmap_config); - if (IS_ERR(sdev->regmap)) { - dev_err(&pdev->dev, "Failed to init register map\n"); - return PTR_ERR(sdev->regmap); + csi_dev->regmap = devm_regmap_init_mmio_clk(dev, "bus", io_base, + &sun6i_csi_regmap_config); + if (IS_ERR(csi_dev->regmap)) { + dev_err(dev, "failed to init register map\n"); + return PTR_ERR(csi_dev->regmap); } - sdev->clk_mod = devm_clk_get(&pdev->dev, "mod"); - if (IS_ERR(sdev->clk_mod)) { - dev_err(&pdev->dev, "Unable to acquire csi clock\n"); - return PTR_ERR(sdev->clk_mod); + /* Clocks */ + + csi_dev->clock_mod = devm_clk_get(dev, "mod"); + if (IS_ERR(csi_dev->clock_mod)) { + dev_err(dev, "failed to acquire module clock\n"); + return PTR_ERR(csi_dev->clock_mod); } - sdev->clk_ram = devm_clk_get(&pdev->dev, "ram"); - if (IS_ERR(sdev->clk_ram)) { - dev_err(&pdev->dev, "Unable to acquire dram-csi clock\n"); - return PTR_ERR(sdev->clk_ram); + csi_dev->clock_ram = devm_clk_get(dev, "ram"); + if (IS_ERR(csi_dev->clock_ram)) { + dev_err(dev, "failed to acquire ram clock\n"); + return PTR_ERR(csi_dev->clock_ram); } - sdev->rstc_bus = devm_reset_control_get_shared(&pdev->dev, NULL); - if (IS_ERR(sdev->rstc_bus)) { - dev_err(&pdev->dev, "Cannot get reset controller\n"); - return PTR_ERR(sdev->rstc_bus); + ret = clk_set_rate_exclusive(csi_dev->clock_mod, + variant->clock_mod_rate); + if (ret) { + dev_err(dev, "failed to set mod clock rate\n"); + return ret; + } + + /* Reset */ + + csi_dev->reset = devm_reset_control_get_shared(dev, NULL); + if (IS_ERR(csi_dev->reset)) { + 
dev_err(dev, "failed to acquire reset\n"); + ret = PTR_ERR(csi_dev->reset); + goto error_clock_rate_exclusive; } - irq = platform_get_irq(pdev, 0); - if (irq < 0) - return -ENXIO; + /* Interrupt */ - ret = devm_request_irq(&pdev->dev, irq, sun6i_csi_isr, 0, MODULE_NAME, - sdev); + irq = platform_get_irq(platform_dev, 0); + if (irq < 0) { + dev_err(dev, "failed to get interrupt\n"); + ret = -ENXIO; + goto error_clock_rate_exclusive; + } + + ret = devm_request_irq(dev, irq, sun6i_csi_interrupt, 0, SUN6I_CSI_NAME, + csi_dev); if (ret) { - dev_err(&pdev->dev, "Cannot request csi IRQ\n"); - return ret; + dev_err(dev, "failed to request interrupt\n"); + goto error_clock_rate_exclusive; } + /* Runtime PM */ + + pm_runtime_enable(dev); + return 0; + +error_clock_rate_exclusive: + clk_rate_exclusive_put(csi_dev->clock_mod); + + return ret; +} + +static void sun6i_csi_resources_cleanup(struct sun6i_csi_device *csi_dev) +{ + pm_runtime_disable(csi_dev->dev); + clk_rate_exclusive_put(csi_dev->clock_mod); } -static int sun6i_csi_probe(struct platform_device *pdev) +static int sun6i_csi_probe(struct platform_device *platform_dev) { - struct sun6i_csi_dev *sdev; + struct sun6i_csi_device *csi_dev; + struct device *dev = &platform_dev->dev; int ret; - sdev = devm_kzalloc(&pdev->dev, sizeof(*sdev), GFP_KERNEL); - if (!sdev) + csi_dev = devm_kzalloc(dev, sizeof(*csi_dev), GFP_KERNEL); + if (!csi_dev) return -ENOMEM; - sdev->dev = &pdev->dev; + csi_dev->dev = &platform_dev->dev; + platform_set_drvdata(platform_dev, csi_dev); - ret = sun6i_csi_resource_request(sdev, pdev); + ret = sun6i_csi_resources_setup(csi_dev, platform_dev); if (ret) return ret; - platform_set_drvdata(pdev, sdev); + ret = sun6i_csi_v4l2_setup(csi_dev); + if (ret) + goto error_resources; + + return 0; - sdev->csi.dev = &pdev->dev; - return sun6i_csi_v4l2_init(&sdev->csi); +error_resources: + sun6i_csi_resources_cleanup(csi_dev); + + return ret; } static int sun6i_csi_remove(struct platform_device *pdev) { - struct sun6i_csi_dev *sdev = platform_get_drvdata(pdev); + struct sun6i_csi_device *csi_dev = platform_get_drvdata(pdev); - sun6i_csi_v4l2_cleanup(&sdev->csi); + sun6i_csi_v4l2_cleanup(csi_dev); + sun6i_csi_resources_cleanup(csi_dev); return 0; } +static const struct sun6i_csi_variant sun6i_a31_csi_variant = { + .clock_mod_rate = 297000000, +}; + +static const struct sun6i_csi_variant sun50i_a64_csi_variant = { + .clock_mod_rate = 300000000, +}; + static const struct of_device_id sun6i_csi_of_match[] = { - { .compatible = "allwinner,sun6i-a31-csi", }, - { .compatible = "allwinner,sun8i-a83t-csi", }, - { .compatible = "allwinner,sun8i-h3-csi", }, - { .compatible = "allwinner,sun8i-v3s-csi", }, - { .compatible = "allwinner,sun50i-a64-csi", }, + { + .compatible = "allwinner,sun6i-a31-csi", + .data = &sun6i_a31_csi_variant, + }, + { + .compatible = "allwinner,sun8i-a83t-csi", + .data = &sun6i_a31_csi_variant, + }, + { + .compatible = "allwinner,sun8i-h3-csi", + .data = &sun6i_a31_csi_variant, + }, + { + .compatible = "allwinner,sun8i-v3s-csi", + .data = &sun6i_a31_csi_variant, + }, + { + .compatible = "allwinner,sun50i-a64-csi", + .data = &sun50i_a64_csi_variant, + }, {}, }; + MODULE_DEVICE_TABLE(of, sun6i_csi_of_match); static struct platform_driver sun6i_csi_platform_driver = { - .probe = sun6i_csi_probe, - .remove = sun6i_csi_remove, - .driver = { - .name = MODULE_NAME, - .of_match_table = of_match_ptr(sun6i_csi_of_match), + .probe = sun6i_csi_probe, + .remove = sun6i_csi_remove, + .driver = { + .name = SUN6I_CSI_NAME, + .of_match_table 
= of_match_ptr(sun6i_csi_of_match), + .pm = &sun6i_csi_pm_ops, }, }; + module_platform_driver(sun6i_csi_platform_driver); -MODULE_DESCRIPTION("Allwinner V3s Camera Sensor Interface driver"); +MODULE_DESCRIPTION("Allwinner A31 Camera Sensor Interface driver"); MODULE_AUTHOR("Yong Deng <yong.deng@magewell.com>"); MODULE_LICENSE("GPL"); diff --git a/drivers/media/platform/sunxi/sun6i-csi/sun6i_csi.h b/drivers/media/platform/sunxi/sun6i-csi/sun6i_csi.h index 3a38d107ae3f..bab705678280 100644 --- a/drivers/media/platform/sunxi/sun6i-csi/sun6i_csi.h +++ b/drivers/media/platform/sunxi/sun6i-csi/sun6i_csi.h @@ -8,13 +8,22 @@ #ifndef __SUN6I_CSI_H__ #define __SUN6I_CSI_H__ -#include <media/v4l2-ctrls.h> #include <media/v4l2-device.h> #include <media/v4l2-fwnode.h> +#include <media/videobuf2-v4l2.h> #include "sun6i_video.h" -struct sun6i_csi; +#define SUN6I_CSI_NAME "sun6i-csi" +#define SUN6I_CSI_DESCRIPTION "Allwinner A31 CSI Device" + +struct sun6i_csi_buffer { + struct vb2_v4l2_buffer v4l2_buffer; + struct list_head list; + + dma_addr_t dma_addr; + bool queued_to_csi; +}; /** * struct sun6i_csi_config - configs for sun6i csi @@ -32,59 +41,78 @@ struct sun6i_csi_config { u32 height; }; -struct sun6i_csi { - struct device *dev; - struct v4l2_ctrl_handler ctrl_handler; +struct sun6i_csi_v4l2 { struct v4l2_device v4l2_dev; struct media_device media_dev; struct v4l2_async_notifier notifier; - /* video port settings */ struct v4l2_fwnode_endpoint v4l2_ep; +}; - struct sun6i_csi_config config; +struct sun6i_csi_device { + struct device *dev; + struct sun6i_csi_config config; + struct sun6i_csi_v4l2 v4l2; struct sun6i_video video; + + struct regmap *regmap; + struct clk *clock_mod; + struct clk *clock_ram; + struct reset_control *reset; + + int planar_offset[3]; +}; + +struct sun6i_csi_variant { + unsigned long clock_mod_rate; }; /** * sun6i_csi_is_format_supported() - check if the format supported by csi - * @csi: pointer to the csi + * @csi_dev: pointer to the csi device * @pixformat: v4l2 pixel format (V4L2_PIX_FMT_*) * @mbus_code: media bus format code (MEDIA_BUS_FMT_*) + * + * Return: true if format is supported, false otherwise. */ -bool sun6i_csi_is_format_supported(struct sun6i_csi *csi, u32 pixformat, - u32 mbus_code); +bool sun6i_csi_is_format_supported(struct sun6i_csi_device *csi_dev, + u32 pixformat, u32 mbus_code); /** * sun6i_csi_set_power() - power on/off the csi - * @csi: pointer to the csi + * @csi_dev: pointer to the csi device * @enable: on/off + * + * Return: 0 if successful, error code otherwise. */ -int sun6i_csi_set_power(struct sun6i_csi *csi, bool enable); +int sun6i_csi_set_power(struct sun6i_csi_device *csi_dev, bool enable); /** * sun6i_csi_update_config() - update the csi register settings - * @csi: pointer to the csi + * @csi_dev: pointer to the csi device * @config: see struct sun6i_csi_config + * + * Return: 0 if successful, error code otherwise. 
*/ -int sun6i_csi_update_config(struct sun6i_csi *csi, +int sun6i_csi_update_config(struct sun6i_csi_device *csi_dev, struct sun6i_csi_config *config); /** * sun6i_csi_update_buf_addr() - update the csi frame buffer address - * @csi: pointer to the csi + * @csi_dev: pointer to the csi device * @addr: frame buffer's physical address */ -void sun6i_csi_update_buf_addr(struct sun6i_csi *csi, dma_addr_t addr); +void sun6i_csi_update_buf_addr(struct sun6i_csi_device *csi_dev, + dma_addr_t addr); /** * sun6i_csi_set_stream() - start/stop csi streaming - * @csi: pointer to the csi + * @csi_dev: pointer to the csi device * @enable: start/stop */ -void sun6i_csi_set_stream(struct sun6i_csi *csi, bool enable); +void sun6i_csi_set_stream(struct sun6i_csi_device *csi_dev, bool enable); /* get bpp form v4l2 pixformat */ static inline int sun6i_csi_get_bpp(unsigned int pixformat) diff --git a/drivers/media/platform/sunxi/sun6i-csi/sun6i_video.c b/drivers/media/platform/sunxi/sun6i-csi/sun6i_video.c index 74d64a20ba5b..791583d23a65 100644 --- a/drivers/media/platform/sunxi/sun6i-csi/sun6i_video.c +++ b/drivers/media/platform/sunxi/sun6i-csi/sun6i_video.c @@ -23,15 +23,27 @@ #define MAX_WIDTH (4800) #define MAX_HEIGHT (4800) -struct sun6i_csi_buffer { - struct vb2_v4l2_buffer vb; - struct list_head list; +/* Helpers */ - dma_addr_t dma_addr; - bool queued_to_csi; -}; +static struct v4l2_subdev * +sun6i_video_remote_subdev(struct sun6i_video *video, u32 *pad) +{ + struct media_pad *remote; + + remote = media_pad_remote_pad_first(&video->pad); + + if (!remote || !is_media_entity_v4l2_subdev(remote->entity)) + return NULL; + + if (pad) + *pad = remote->index; -static const u32 supported_pixformats[] = { + return media_entity_to_v4l2_subdev(remote->entity); +} + +/* Format */ + +static const u32 sun6i_video_formats[] = { V4L2_PIX_FMT_SBGGR8, V4L2_PIX_FMT_SGBRG8, V4L2_PIX_FMT_SGRBG8, @@ -61,119 +73,138 @@ static const u32 supported_pixformats[] = { V4L2_PIX_FMT_JPEG, }; -static bool is_pixformat_valid(unsigned int pixformat) +static bool sun6i_video_format_check(u32 format) { unsigned int i; - for (i = 0; i < ARRAY_SIZE(supported_pixformats); i++) - if (supported_pixformats[i] == pixformat) + for (i = 0; i < ARRAY_SIZE(sun6i_video_formats); i++) + if (sun6i_video_formats[i] == format) return true; return false; } -static struct v4l2_subdev * -sun6i_video_remote_subdev(struct sun6i_video *video, u32 *pad) -{ - struct media_pad *remote; +/* Video */ - remote = media_pad_remote_pad_first(&video->pad); +static void sun6i_video_buffer_configure(struct sun6i_csi_device *csi_dev, + struct sun6i_csi_buffer *csi_buffer) +{ + csi_buffer->queued_to_csi = true; + sun6i_csi_update_buf_addr(csi_dev, csi_buffer->dma_addr); +} - if (!remote || !is_media_entity_v4l2_subdev(remote->entity)) - return NULL; +static void sun6i_video_configure(struct sun6i_csi_device *csi_dev) +{ + struct sun6i_video *video = &csi_dev->video; + struct sun6i_csi_config config = { 0 }; - if (pad) - *pad = remote->index; + config.pixelformat = video->format.fmt.pix.pixelformat; + config.code = video->mbus_code; + config.field = video->format.fmt.pix.field; + config.width = video->format.fmt.pix.width; + config.height = video->format.fmt.pix.height; - return media_entity_to_v4l2_subdev(remote->entity); + sun6i_csi_update_config(csi_dev, &config); } -static int sun6i_video_queue_setup(struct vb2_queue *vq, - unsigned int *nbuffers, - unsigned int *nplanes, +/* Queue */ + +static int sun6i_video_queue_setup(struct vb2_queue *queue, + unsigned int 
*buffers_count, + unsigned int *planes_count, unsigned int sizes[], struct device *alloc_devs[]) { - struct sun6i_video *video = vb2_get_drv_priv(vq); - unsigned int size = video->fmt.fmt.pix.sizeimage; + struct sun6i_csi_device *csi_dev = vb2_get_drv_priv(queue); + struct sun6i_video *video = &csi_dev->video; + unsigned int size = video->format.fmt.pix.sizeimage; - if (*nplanes) + if (*planes_count) return sizes[0] < size ? -EINVAL : 0; - *nplanes = 1; + *planes_count = 1; sizes[0] = size; return 0; } -static int sun6i_video_buffer_prepare(struct vb2_buffer *vb) +static int sun6i_video_buffer_prepare(struct vb2_buffer *buffer) { - struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); - struct sun6i_csi_buffer *buf = - container_of(vbuf, struct sun6i_csi_buffer, vb); - struct sun6i_video *video = vb2_get_drv_priv(vb->vb2_queue); - unsigned long size = video->fmt.fmt.pix.sizeimage; - - if (vb2_plane_size(vb, 0) < size) { - v4l2_err(video->vdev.v4l2_dev, "buffer too small (%lu < %lu)\n", - vb2_plane_size(vb, 0), size); + struct sun6i_csi_device *csi_dev = vb2_get_drv_priv(buffer->vb2_queue); + struct sun6i_video *video = &csi_dev->video; + struct v4l2_device *v4l2_dev = &csi_dev->v4l2.v4l2_dev; + struct vb2_v4l2_buffer *v4l2_buffer = to_vb2_v4l2_buffer(buffer); + struct sun6i_csi_buffer *csi_buffer = + container_of(v4l2_buffer, struct sun6i_csi_buffer, v4l2_buffer); + unsigned long size = video->format.fmt.pix.sizeimage; + + if (vb2_plane_size(buffer, 0) < size) { + v4l2_err(v4l2_dev, "buffer too small (%lu < %lu)\n", + vb2_plane_size(buffer, 0), size); return -EINVAL; } - vb2_set_plane_payload(vb, 0, size); - - buf->dma_addr = vb2_dma_contig_plane_dma_addr(vb, 0); + vb2_set_plane_payload(buffer, 0, size); - vbuf->field = video->fmt.fmt.pix.field; + csi_buffer->dma_addr = vb2_dma_contig_plane_dma_addr(buffer, 0); + v4l2_buffer->field = video->format.fmt.pix.field; return 0; } -static int sun6i_video_start_streaming(struct vb2_queue *vq, unsigned int count) +static void sun6i_video_buffer_queue(struct vb2_buffer *buffer) +{ + struct sun6i_csi_device *csi_dev = vb2_get_drv_priv(buffer->vb2_queue); + struct sun6i_video *video = &csi_dev->video; + struct vb2_v4l2_buffer *v4l2_buffer = to_vb2_v4l2_buffer(buffer); + struct sun6i_csi_buffer *csi_buffer = + container_of(v4l2_buffer, struct sun6i_csi_buffer, v4l2_buffer); + unsigned long flags; + + spin_lock_irqsave(&video->dma_queue_lock, flags); + csi_buffer->queued_to_csi = false; + list_add_tail(&csi_buffer->list, &video->dma_queue); + spin_unlock_irqrestore(&video->dma_queue_lock, flags); +} + +static int sun6i_video_start_streaming(struct vb2_queue *queue, + unsigned int count) { - struct sun6i_video *video = vb2_get_drv_priv(vq); + struct sun6i_csi_device *csi_dev = vb2_get_drv_priv(queue); + struct sun6i_video *video = &csi_dev->video; + struct video_device *video_dev = &video->video_dev; struct sun6i_csi_buffer *buf; struct sun6i_csi_buffer *next_buf; - struct sun6i_csi_config config; struct v4l2_subdev *subdev; unsigned long flags; int ret; video->sequence = 0; - ret = media_pipeline_start(&video->vdev.entity, &video->vdev.pipe); + ret = video_device_pipeline_alloc_start(video_dev); if (ret < 0) - goto clear_dma_queue; + goto error_dma_queue_flush; if (video->mbus_code == 0) { ret = -EINVAL; - goto stop_media_pipeline; + goto error_media_pipeline; } subdev = sun6i_video_remote_subdev(video, NULL); if (!subdev) { ret = -EINVAL; - goto stop_media_pipeline; + goto error_media_pipeline; } - config.pixelformat = video->fmt.fmt.pix.pixelformat; 
- config.code = video->mbus_code; - config.field = video->fmt.fmt.pix.field; - config.width = video->fmt.fmt.pix.width; - config.height = video->fmt.fmt.pix.height; - - ret = sun6i_csi_update_config(video->csi, &config); - if (ret < 0) - goto stop_media_pipeline; + sun6i_video_configure(csi_dev); spin_lock_irqsave(&video->dma_queue_lock, flags); buf = list_first_entry(&video->dma_queue, struct sun6i_csi_buffer, list); - buf->queued_to_csi = true; - sun6i_csi_update_buf_addr(video->csi, buf->dma_addr); + sun6i_video_buffer_configure(csi_dev, buf); - sun6i_csi_set_stream(video->csi, true); + sun6i_csi_set_stream(csi_dev, true); /* * CSI will lookup the next dma buffer for next frame before the @@ -193,34 +224,37 @@ static int sun6i_video_start_streaming(struct vb2_queue *vq, unsigned int count) * would also drop frame when lacking of queued buffer. */ next_buf = list_next_entry(buf, list); - next_buf->queued_to_csi = true; - sun6i_csi_update_buf_addr(video->csi, next_buf->dma_addr); + sun6i_video_buffer_configure(csi_dev, next_buf); spin_unlock_irqrestore(&video->dma_queue_lock, flags); ret = v4l2_subdev_call(subdev, video, s_stream, 1); if (ret && ret != -ENOIOCTLCMD) - goto stop_csi_stream; + goto error_stream; return 0; -stop_csi_stream: - sun6i_csi_set_stream(video->csi, false); -stop_media_pipeline: - media_pipeline_stop(&video->vdev.entity); -clear_dma_queue: +error_stream: + sun6i_csi_set_stream(csi_dev, false); + +error_media_pipeline: + video_device_pipeline_stop(video_dev); + +error_dma_queue_flush: spin_lock_irqsave(&video->dma_queue_lock, flags); list_for_each_entry(buf, &video->dma_queue, list) - vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_QUEUED); + vb2_buffer_done(&buf->v4l2_buffer.vb2_buf, + VB2_BUF_STATE_QUEUED); INIT_LIST_HEAD(&video->dma_queue); spin_unlock_irqrestore(&video->dma_queue_lock, flags); return ret; } -static void sun6i_video_stop_streaming(struct vb2_queue *vq) +static void sun6i_video_stop_streaming(struct vb2_queue *queue) { - struct sun6i_video *video = vb2_get_drv_priv(vq); + struct sun6i_csi_device *csi_dev = vb2_get_drv_priv(queue); + struct sun6i_video *video = &csi_dev->video; struct v4l2_subdev *subdev; unsigned long flags; struct sun6i_csi_buffer *buf; @@ -229,45 +263,32 @@ static void sun6i_video_stop_streaming(struct vb2_queue *vq) if (subdev) v4l2_subdev_call(subdev, video, s_stream, 0); - sun6i_csi_set_stream(video->csi, false); + sun6i_csi_set_stream(csi_dev, false); - media_pipeline_stop(&video->vdev.entity); + video_device_pipeline_stop(&video->video_dev); /* Release all active buffers */ spin_lock_irqsave(&video->dma_queue_lock, flags); list_for_each_entry(buf, &video->dma_queue, list) - vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR); + vb2_buffer_done(&buf->v4l2_buffer.vb2_buf, VB2_BUF_STATE_ERROR); INIT_LIST_HEAD(&video->dma_queue); spin_unlock_irqrestore(&video->dma_queue_lock, flags); } -static void sun6i_video_buffer_queue(struct vb2_buffer *vb) -{ - struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); - struct sun6i_csi_buffer *buf = - container_of(vbuf, struct sun6i_csi_buffer, vb); - struct sun6i_video *video = vb2_get_drv_priv(vb->vb2_queue); - unsigned long flags; - - spin_lock_irqsave(&video->dma_queue_lock, flags); - buf->queued_to_csi = false; - list_add_tail(&buf->list, &video->dma_queue); - spin_unlock_irqrestore(&video->dma_queue_lock, flags); -} - -void sun6i_video_frame_done(struct sun6i_video *video) +void sun6i_video_frame_done(struct sun6i_csi_device *csi_dev) { + struct sun6i_video *video = &csi_dev->video; 
struct sun6i_csi_buffer *buf; struct sun6i_csi_buffer *next_buf; - struct vb2_v4l2_buffer *vbuf; + struct vb2_v4l2_buffer *v4l2_buffer; spin_lock(&video->dma_queue_lock); buf = list_first_entry(&video->dma_queue, struct sun6i_csi_buffer, list); if (list_is_last(&buf->list, &video->dma_queue)) { - dev_dbg(video->csi->dev, "Frame dropped!\n"); - goto unlock; + dev_dbg(csi_dev->dev, "Frame dropped!\n"); + goto complete; } next_buf = list_next_entry(buf, list); @@ -277,200 +298,204 @@ void sun6i_video_frame_done(struct sun6i_video *video) * for next ISR call. */ if (!next_buf->queued_to_csi) { - next_buf->queued_to_csi = true; - sun6i_csi_update_buf_addr(video->csi, next_buf->dma_addr); - dev_dbg(video->csi->dev, "Frame dropped!\n"); - goto unlock; + sun6i_video_buffer_configure(csi_dev, next_buf); + dev_dbg(csi_dev->dev, "Frame dropped!\n"); + goto complete; } list_del(&buf->list); - vbuf = &buf->vb; - vbuf->vb2_buf.timestamp = ktime_get_ns(); - vbuf->sequence = video->sequence; - vb2_buffer_done(&vbuf->vb2_buf, VB2_BUF_STATE_DONE); + v4l2_buffer = &buf->v4l2_buffer; + v4l2_buffer->vb2_buf.timestamp = ktime_get_ns(); + v4l2_buffer->sequence = video->sequence; + vb2_buffer_done(&v4l2_buffer->vb2_buf, VB2_BUF_STATE_DONE); /* Prepare buffer for next frame but one. */ if (!list_is_last(&next_buf->list, &video->dma_queue)) { next_buf = list_next_entry(next_buf, list); - next_buf->queued_to_csi = true; - sun6i_csi_update_buf_addr(video->csi, next_buf->dma_addr); + sun6i_video_buffer_configure(csi_dev, next_buf); } else { - dev_dbg(video->csi->dev, "Next frame will be dropped!\n"); + dev_dbg(csi_dev->dev, "Next frame will be dropped!\n"); } -unlock: +complete: video->sequence++; spin_unlock(&video->dma_queue_lock); } -static const struct vb2_ops sun6i_csi_vb2_ops = { +static const struct vb2_ops sun6i_video_queue_ops = { .queue_setup = sun6i_video_queue_setup, - .wait_prepare = vb2_ops_wait_prepare, - .wait_finish = vb2_ops_wait_finish, .buf_prepare = sun6i_video_buffer_prepare, + .buf_queue = sun6i_video_buffer_queue, .start_streaming = sun6i_video_start_streaming, .stop_streaming = sun6i_video_stop_streaming, - .buf_queue = sun6i_video_buffer_queue, + .wait_prepare = vb2_ops_wait_prepare, + .wait_finish = vb2_ops_wait_finish, }; -static int vidioc_querycap(struct file *file, void *priv, - struct v4l2_capability *cap) +/* V4L2 Device */ + +static int sun6i_video_querycap(struct file *file, void *private, + struct v4l2_capability *capability) { - struct sun6i_video *video = video_drvdata(file); + struct sun6i_csi_device *csi_dev = video_drvdata(file); + struct video_device *video_dev = &csi_dev->video.video_dev; - strscpy(cap->driver, "sun6i-video", sizeof(cap->driver)); - strscpy(cap->card, video->vdev.name, sizeof(cap->card)); - snprintf(cap->bus_info, sizeof(cap->bus_info), "platform:%s", - video->csi->dev->of_node->name); + strscpy(capability->driver, SUN6I_CSI_NAME, sizeof(capability->driver)); + strscpy(capability->card, video_dev->name, sizeof(capability->card)); + snprintf(capability->bus_info, sizeof(capability->bus_info), + "platform:%s", dev_name(csi_dev->dev)); return 0; } -static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv, - struct v4l2_fmtdesc *f) +static int sun6i_video_enum_fmt(struct file *file, void *private, + struct v4l2_fmtdesc *fmtdesc) { - u32 index = f->index; + u32 index = fmtdesc->index; - if (index >= ARRAY_SIZE(supported_pixformats)) + if (index >= ARRAY_SIZE(sun6i_video_formats)) return -EINVAL; - f->pixelformat = supported_pixformats[index]; + 
fmtdesc->pixelformat = sun6i_video_formats[index]; return 0; } -static int vidioc_g_fmt_vid_cap(struct file *file, void *priv, - struct v4l2_format *fmt) +static int sun6i_video_g_fmt(struct file *file, void *private, + struct v4l2_format *format) { - struct sun6i_video *video = video_drvdata(file); + struct sun6i_csi_device *csi_dev = video_drvdata(file); + struct sun6i_video *video = &csi_dev->video; - *fmt = video->fmt; + *format = video->format; return 0; } -static int sun6i_video_try_fmt(struct sun6i_video *video, - struct v4l2_format *f) +static int sun6i_video_format_try(struct sun6i_video *video, + struct v4l2_format *format) { - struct v4l2_pix_format *pixfmt = &f->fmt.pix; + struct v4l2_pix_format *pix_format = &format->fmt.pix; int bpp; - if (!is_pixformat_valid(pixfmt->pixelformat)) - pixfmt->pixelformat = supported_pixformats[0]; + if (!sun6i_video_format_check(pix_format->pixelformat)) + pix_format->pixelformat = sun6i_video_formats[0]; - v4l_bound_align_image(&pixfmt->width, MIN_WIDTH, MAX_WIDTH, 1, - &pixfmt->height, MIN_HEIGHT, MAX_WIDTH, 1, 1); + v4l_bound_align_image(&pix_format->width, MIN_WIDTH, MAX_WIDTH, 1, + &pix_format->height, MIN_HEIGHT, MAX_WIDTH, 1, 1); - bpp = sun6i_csi_get_bpp(pixfmt->pixelformat); - pixfmt->bytesperline = (pixfmt->width * bpp) >> 3; - pixfmt->sizeimage = pixfmt->bytesperline * pixfmt->height; + bpp = sun6i_csi_get_bpp(pix_format->pixelformat); + pix_format->bytesperline = (pix_format->width * bpp) >> 3; + pix_format->sizeimage = pix_format->bytesperline * pix_format->height; - if (pixfmt->field == V4L2_FIELD_ANY) - pixfmt->field = V4L2_FIELD_NONE; + if (pix_format->field == V4L2_FIELD_ANY) + pix_format->field = V4L2_FIELD_NONE; - if (pixfmt->pixelformat == V4L2_PIX_FMT_JPEG) - pixfmt->colorspace = V4L2_COLORSPACE_JPEG; + if (pix_format->pixelformat == V4L2_PIX_FMT_JPEG) + pix_format->colorspace = V4L2_COLORSPACE_JPEG; else - pixfmt->colorspace = V4L2_COLORSPACE_SRGB; + pix_format->colorspace = V4L2_COLORSPACE_SRGB; - pixfmt->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT; - pixfmt->quantization = V4L2_QUANTIZATION_DEFAULT; - pixfmt->xfer_func = V4L2_XFER_FUNC_DEFAULT; + pix_format->ycbcr_enc = V4L2_YCBCR_ENC_DEFAULT; + pix_format->quantization = V4L2_QUANTIZATION_DEFAULT; + pix_format->xfer_func = V4L2_XFER_FUNC_DEFAULT; return 0; } -static int sun6i_video_set_fmt(struct sun6i_video *video, struct v4l2_format *f) +static int sun6i_video_format_set(struct sun6i_video *video, + struct v4l2_format *format) { int ret; - ret = sun6i_video_try_fmt(video, f); + ret = sun6i_video_format_try(video, format); if (ret) return ret; - video->fmt = *f; + video->format = *format; return 0; } -static int vidioc_s_fmt_vid_cap(struct file *file, void *priv, - struct v4l2_format *f) +static int sun6i_video_s_fmt(struct file *file, void *private, + struct v4l2_format *format) { - struct sun6i_video *video = video_drvdata(file); + struct sun6i_csi_device *csi_dev = video_drvdata(file); + struct sun6i_video *video = &csi_dev->video; - if (vb2_is_busy(&video->vb2_vidq)) + if (vb2_is_busy(&video->queue)) return -EBUSY; - return sun6i_video_set_fmt(video, f); + return sun6i_video_format_set(video, format); } -static int vidioc_try_fmt_vid_cap(struct file *file, void *priv, - struct v4l2_format *f) +static int sun6i_video_try_fmt(struct file *file, void *private, + struct v4l2_format *format) { - struct sun6i_video *video = video_drvdata(file); + struct sun6i_csi_device *csi_dev = video_drvdata(file); + struct sun6i_video *video = &csi_dev->video; - return 
sun6i_video_try_fmt(video, f); + return sun6i_video_format_try(video, format); } -static int vidioc_enum_input(struct file *file, void *fh, - struct v4l2_input *inp) +static int sun6i_video_enum_input(struct file *file, void *private, + struct v4l2_input *input) { - if (inp->index != 0) + if (input->index != 0) return -EINVAL; - strscpy(inp->name, "camera", sizeof(inp->name)); - inp->type = V4L2_INPUT_TYPE_CAMERA; + input->type = V4L2_INPUT_TYPE_CAMERA; + strscpy(input->name, "Camera", sizeof(input->name)); return 0; } -static int vidioc_g_input(struct file *file, void *fh, unsigned int *i) +static int sun6i_video_g_input(struct file *file, void *private, + unsigned int *index) { - *i = 0; + *index = 0; return 0; } -static int vidioc_s_input(struct file *file, void *fh, unsigned int i) +static int sun6i_video_s_input(struct file *file, void *private, + unsigned int index) { - if (i != 0) + if (index != 0) return -EINVAL; return 0; } static const struct v4l2_ioctl_ops sun6i_video_ioctl_ops = { - .vidioc_querycap = vidioc_querycap, - .vidioc_enum_fmt_vid_cap = vidioc_enum_fmt_vid_cap, - .vidioc_g_fmt_vid_cap = vidioc_g_fmt_vid_cap, - .vidioc_s_fmt_vid_cap = vidioc_s_fmt_vid_cap, - .vidioc_try_fmt_vid_cap = vidioc_try_fmt_vid_cap, + .vidioc_querycap = sun6i_video_querycap, + + .vidioc_enum_fmt_vid_cap = sun6i_video_enum_fmt, + .vidioc_g_fmt_vid_cap = sun6i_video_g_fmt, + .vidioc_s_fmt_vid_cap = sun6i_video_s_fmt, + .vidioc_try_fmt_vid_cap = sun6i_video_try_fmt, - .vidioc_enum_input = vidioc_enum_input, - .vidioc_s_input = vidioc_s_input, - .vidioc_g_input = vidioc_g_input, + .vidioc_enum_input = sun6i_video_enum_input, + .vidioc_g_input = sun6i_video_g_input, + .vidioc_s_input = sun6i_video_s_input, + .vidioc_create_bufs = vb2_ioctl_create_bufs, + .vidioc_prepare_buf = vb2_ioctl_prepare_buf, .vidioc_reqbufs = vb2_ioctl_reqbufs, .vidioc_querybuf = vb2_ioctl_querybuf, - .vidioc_qbuf = vb2_ioctl_qbuf, .vidioc_expbuf = vb2_ioctl_expbuf, + .vidioc_qbuf = vb2_ioctl_qbuf, .vidioc_dqbuf = vb2_ioctl_dqbuf, - .vidioc_create_bufs = vb2_ioctl_create_bufs, - .vidioc_prepare_buf = vb2_ioctl_prepare_buf, .vidioc_streamon = vb2_ioctl_streamon, .vidioc_streamoff = vb2_ioctl_streamoff, - - .vidioc_log_status = v4l2_ctrl_log_status, - .vidioc_subscribe_event = v4l2_ctrl_subscribe_event, - .vidioc_unsubscribe_event = v4l2_event_unsubscribe, }; -/* ----------------------------------------------------------------------------- - * V4L2 file operations - */ +/* V4L2 File */ + static int sun6i_video_open(struct file *file) { - struct sun6i_video *video = video_drvdata(file); + struct sun6i_csi_device *csi_dev = video_drvdata(file); + struct sun6i_video *video = &csi_dev->video; int ret = 0; if (mutex_lock_interruptible(&video->lock)) @@ -478,45 +503,48 @@ static int sun6i_video_open(struct file *file) ret = v4l2_fh_open(file); if (ret < 0) - goto unlock; + goto error_lock; - ret = v4l2_pipeline_pm_get(&video->vdev.entity); + ret = v4l2_pipeline_pm_get(&video->video_dev.entity); if (ret < 0) - goto fh_release; - - /* check if already powered */ - if (!v4l2_fh_is_singular_file(file)) - goto unlock; + goto error_v4l2_fh; - ret = sun6i_csi_set_power(video->csi, true); - if (ret < 0) - goto fh_release; + /* Power on at first open. 
*/ + if (v4l2_fh_is_singular_file(file)) { + ret = sun6i_csi_set_power(csi_dev, true); + if (ret < 0) + goto error_v4l2_fh; + } mutex_unlock(&video->lock); + return 0; -fh_release: +error_v4l2_fh: v4l2_fh_release(file); -unlock: + +error_lock: mutex_unlock(&video->lock); + return ret; } static int sun6i_video_close(struct file *file) { - struct sun6i_video *video = video_drvdata(file); - bool last_fh; + struct sun6i_csi_device *csi_dev = video_drvdata(file); + struct sun6i_video *video = &csi_dev->video; + bool last_close; mutex_lock(&video->lock); - last_fh = v4l2_fh_is_singular_file(file); + last_close = v4l2_fh_is_singular_file(file); _vb2_fop_release(file, NULL); + v4l2_pipeline_pm_put(&video->video_dev.entity); - v4l2_pipeline_pm_put(&video->vdev.entity); - - if (last_fh) - sun6i_csi_set_power(video->csi, false); + /* Power off at last close. */ + if (last_close) + sun6i_csi_set_power(csi_dev, false); mutex_unlock(&video->lock); @@ -532,9 +560,8 @@ static const struct v4l2_file_operations sun6i_video_fops = { .poll = vb2_fop_poll }; -/* ----------------------------------------------------------------------------- - * Media Operations - */ +/* Media Entity */ + static int sun6i_video_link_validate_get_format(struct media_pad *pad, struct v4l2_subdev_format *fmt) { @@ -554,15 +581,16 @@ static int sun6i_video_link_validate(struct media_link *link) { struct video_device *vdev = container_of(link->sink->entity, struct video_device, entity); - struct sun6i_video *video = video_get_drvdata(vdev); + struct sun6i_csi_device *csi_dev = video_get_drvdata(vdev); + struct sun6i_video *video = &csi_dev->video; struct v4l2_subdev_format source_fmt; int ret; video->mbus_code = 0; if (!media_pad_remote_pad_first(link->sink->entity->pads)) { - dev_info(video->csi->dev, - "video node %s pad not connected\n", vdev->name); + dev_info(csi_dev->dev, "video node %s pad not connected\n", + vdev->name); return -ENOLINK; } @@ -570,21 +598,21 @@ static int sun6i_video_link_validate(struct media_link *link) if (ret < 0) return ret; - if (!sun6i_csi_is_format_supported(video->csi, - video->fmt.fmt.pix.pixelformat, + if (!sun6i_csi_is_format_supported(csi_dev, + video->format.fmt.pix.pixelformat, source_fmt.format.code)) { - dev_err(video->csi->dev, + dev_err(csi_dev->dev, "Unsupported pixformat: 0x%x with mbus code: 0x%x!\n", - video->fmt.fmt.pix.pixelformat, + video->format.fmt.pix.pixelformat, source_fmt.format.code); return -EPIPE; } - if (source_fmt.format.width != video->fmt.fmt.pix.width || - source_fmt.format.height != video->fmt.fmt.pix.height) { - dev_err(video->csi->dev, + if (source_fmt.format.width != video->format.fmt.pix.width || + source_fmt.format.height != video->format.fmt.pix.height) { + dev_err(csi_dev->dev, "Wrong width or height %ux%u (%ux%u expected)\n", - video->fmt.fmt.pix.width, video->fmt.fmt.pix.height, + video->format.fmt.pix.width, video->format.fmt.pix.height, source_fmt.format.width, source_fmt.format.height); return -EPIPE; } @@ -598,88 +626,108 @@ static const struct media_entity_operations sun6i_video_media_ops = { .link_validate = sun6i_video_link_validate }; -int sun6i_video_init(struct sun6i_video *video, struct sun6i_csi *csi, - const char *name) +/* Video */ + +int sun6i_video_setup(struct sun6i_csi_device *csi_dev) { - struct video_device *vdev = &video->vdev; - struct vb2_queue *vidq = &video->vb2_vidq; - struct v4l2_format fmt = { 0 }; + struct sun6i_video *video = &csi_dev->video; + struct v4l2_device *v4l2_dev = &csi_dev->v4l2.v4l2_dev; + struct video_device *video_dev 
= &video->video_dev; + struct vb2_queue *queue = &video->queue; + struct media_pad *pad = &video->pad; + struct v4l2_format format = { 0 }; + struct v4l2_pix_format *pix_format = &format.fmt.pix; int ret; - video->csi = csi; + /* Media Entity */ - /* Initialize the media entity... */ - video->pad.flags = MEDIA_PAD_FL_SINK | MEDIA_PAD_FL_MUST_CONNECT; - vdev->entity.ops = &sun6i_video_media_ops; - ret = media_entity_pads_init(&vdev->entity, 1, &video->pad); + video_dev->entity.ops = &sun6i_video_media_ops; + + /* Media Pad */ + + pad->flags = MEDIA_PAD_FL_SINK | MEDIA_PAD_FL_MUST_CONNECT; + + ret = media_entity_pads_init(&video_dev->entity, 1, pad); if (ret < 0) return ret; - mutex_init(&video->lock); + /* DMA queue */ INIT_LIST_HEAD(&video->dma_queue); spin_lock_init(&video->dma_queue_lock); video->sequence = 0; - /* Setup default format */ - fmt.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; - fmt.fmt.pix.pixelformat = supported_pixformats[0]; - fmt.fmt.pix.width = 1280; - fmt.fmt.pix.height = 720; - fmt.fmt.pix.field = V4L2_FIELD_NONE; - sun6i_video_set_fmt(video, &fmt); - - /* Initialize videobuf2 queue */ - vidq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; - vidq->io_modes = VB2_MMAP | VB2_DMABUF; - vidq->drv_priv = video; - vidq->buf_struct_size = sizeof(struct sun6i_csi_buffer); - vidq->ops = &sun6i_csi_vb2_ops; - vidq->mem_ops = &vb2_dma_contig_memops; - vidq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC; - vidq->lock = &video->lock; - /* Make sure non-dropped frame */ - vidq->min_buffers_needed = 3; - vidq->dev = csi->dev; - - ret = vb2_queue_init(vidq); + /* Queue */ + + mutex_init(&video->lock); + + queue->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; + queue->io_modes = VB2_MMAP | VB2_DMABUF; + queue->buf_struct_size = sizeof(struct sun6i_csi_buffer); + queue->ops = &sun6i_video_queue_ops; + queue->mem_ops = &vb2_dma_contig_memops; + queue->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC; + queue->lock = &video->lock; + queue->dev = csi_dev->dev; + queue->drv_priv = csi_dev; + + /* Make sure non-dropped frame. 
*/ + queue->min_buffers_needed = 3; + + ret = vb2_queue_init(queue); if (ret) { - v4l2_err(&csi->v4l2_dev, "vb2_queue_init failed: %d\n", ret); - goto clean_entity; + v4l2_err(v4l2_dev, "failed to initialize vb2 queue: %d\n", ret); + goto error_media_entity; } - /* Register video device */ - strscpy(vdev->name, name, sizeof(vdev->name)); - vdev->release = video_device_release_empty; - vdev->fops = &sun6i_video_fops; - vdev->ioctl_ops = &sun6i_video_ioctl_ops; - vdev->vfl_type = VFL_TYPE_VIDEO; - vdev->vfl_dir = VFL_DIR_RX; - vdev->v4l2_dev = &csi->v4l2_dev; - vdev->queue = vidq; - vdev->lock = &video->lock; - vdev->device_caps = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_CAPTURE; - video_set_drvdata(vdev, video); - - ret = video_register_device(vdev, VFL_TYPE_VIDEO, -1); + /* V4L2 Format */ + + format.type = queue->type; + pix_format->pixelformat = sun6i_video_formats[0]; + pix_format->width = 1280; + pix_format->height = 720; + pix_format->field = V4L2_FIELD_NONE; + + sun6i_video_format_set(video, &format); + + /* Video Device */ + + strscpy(video_dev->name, SUN6I_CSI_NAME, sizeof(video_dev->name)); + video_dev->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING; + video_dev->vfl_dir = VFL_DIR_RX; + video_dev->release = video_device_release_empty; + video_dev->fops = &sun6i_video_fops; + video_dev->ioctl_ops = &sun6i_video_ioctl_ops; + video_dev->v4l2_dev = v4l2_dev; + video_dev->queue = queue; + video_dev->lock = &video->lock; + + video_set_drvdata(video_dev, csi_dev); + + ret = video_register_device(video_dev, VFL_TYPE_VIDEO, -1); if (ret < 0) { - v4l2_err(&csi->v4l2_dev, - "video_register_device failed: %d\n", ret); - goto clean_entity; + v4l2_err(v4l2_dev, "failed to register video device: %d\n", + ret); + goto error_media_entity; } return 0; -clean_entity: - media_entity_cleanup(&video->vdev.entity); +error_media_entity: + media_entity_cleanup(&video_dev->entity); + mutex_destroy(&video->lock); + return ret; } -void sun6i_video_cleanup(struct sun6i_video *video) +void sun6i_video_cleanup(struct sun6i_csi_device *csi_dev) { - vb2_video_unregister_device(&video->vdev); - media_entity_cleanup(&video->vdev.entity); + struct sun6i_video *video = &csi_dev->video; + struct video_device *video_dev = &video->video_dev; + + vb2_video_unregister_device(video_dev); + media_entity_cleanup(&video_dev->entity); mutex_destroy(&video->lock); } diff --git a/drivers/media/platform/sunxi/sun6i-csi/sun6i_video.h b/drivers/media/platform/sunxi/sun6i-csi/sun6i_video.h index b9cd919c24ac..a917d2da6deb 100644 --- a/drivers/media/platform/sunxi/sun6i-csi/sun6i_video.h +++ b/drivers/media/platform/sunxi/sun6i-csi/sun6i_video.h @@ -11,28 +11,25 @@ #include <media/v4l2-dev.h> #include <media/videobuf2-core.h> -struct sun6i_csi; +struct sun6i_csi_device; struct sun6i_video { - struct video_device vdev; + struct video_device video_dev; + struct vb2_queue queue; + struct mutex lock; /* Queue lock. */ struct media_pad pad; - struct sun6i_csi *csi; - struct mutex lock; - - struct vb2_queue vb2_vidq; - spinlock_t dma_queue_lock; struct list_head dma_queue; + spinlock_t dma_queue_lock; /* DMA queue lock. 
*/ - unsigned int sequence; - struct v4l2_format fmt; + struct v4l2_format format; u32 mbus_code; + unsigned int sequence; }; -int sun6i_video_init(struct sun6i_video *video, struct sun6i_csi *csi, - const char *name); -void sun6i_video_cleanup(struct sun6i_video *video); +int sun6i_video_setup(struct sun6i_csi_device *csi_dev); +void sun6i_video_cleanup(struct sun6i_csi_device *csi_dev); -void sun6i_video_frame_done(struct sun6i_video *video); +void sun6i_video_frame_done(struct sun6i_csi_device *csi_dev); #endif /* __SUN6I_VIDEO_H__ */ diff --git a/drivers/media/platform/sunxi/sun6i-mipi-csi2/Kconfig b/drivers/media/platform/sunxi/sun6i-mipi-csi2/Kconfig index eb982466abd3..08852f63692b 100644 --- a/drivers/media/platform/sunxi/sun6i-mipi-csi2/Kconfig +++ b/drivers/media/platform/sunxi/sun6i-mipi-csi2/Kconfig @@ -3,11 +3,11 @@ config VIDEO_SUN6I_MIPI_CSI2 tristate "Allwinner A31 MIPI CSI-2 Controller Driver" depends on V4L_PLATFORM_DRIVERS && VIDEO_DEV depends on ARCH_SUNXI || COMPILE_TEST - depends on PM && COMMON_CLK + depends on PM && COMMON_CLK && RESET_CONTROLLER + depends on PHY_SUN6I_MIPI_DPHY select MEDIA_CONTROLLER select VIDEO_V4L2_SUBDEV_API select V4L2_FWNODE - select PHY_SUN6I_MIPI_DPHY select GENERIC_PHY_MIPI_DPHY select REGMAP_MMIO help diff --git a/drivers/media/platform/sunxi/sun6i-mipi-csi2/sun6i_mipi_csi2.c b/drivers/media/platform/sunxi/sun6i-mipi-csi2/sun6i_mipi_csi2.c index a4e3f9a6b2ff..30d6c0c5161f 100644 --- a/drivers/media/platform/sunxi/sun6i-mipi-csi2/sun6i_mipi_csi2.c +++ b/drivers/media/platform/sunxi/sun6i-mipi-csi2/sun6i_mipi_csi2.c @@ -661,7 +661,8 @@ sun6i_mipi_csi2_resources_setup(struct sun6i_mipi_csi2_device *csi2_dev, csi2_dev->reset = devm_reset_control_get_shared(dev, NULL); if (IS_ERR(csi2_dev->reset)) { dev_err(dev, "failed to get reset controller\n"); - return PTR_ERR(csi2_dev->reset); + ret = PTR_ERR(csi2_dev->reset); + goto error_clock_rate_exclusive; } /* D-PHY */ @@ -669,13 +670,14 @@ sun6i_mipi_csi2_resources_setup(struct sun6i_mipi_csi2_device *csi2_dev, csi2_dev->dphy = devm_phy_get(dev, "dphy"); if (IS_ERR(csi2_dev->dphy)) { dev_err(dev, "failed to get MIPI D-PHY\n"); - return PTR_ERR(csi2_dev->dphy); + ret = PTR_ERR(csi2_dev->dphy); + goto error_clock_rate_exclusive; } ret = phy_init(csi2_dev->dphy); if (ret) { dev_err(dev, "failed to initialize MIPI D-PHY\n"); - return ret; + goto error_clock_rate_exclusive; } /* Runtime PM */ @@ -683,6 +685,11 @@ sun6i_mipi_csi2_resources_setup(struct sun6i_mipi_csi2_device *csi2_dev, pm_runtime_enable(dev); return 0; + +error_clock_rate_exclusive: + clk_rate_exclusive_put(csi2_dev->clock_mod); + + return ret; } static void @@ -712,9 +719,14 @@ static int sun6i_mipi_csi2_probe(struct platform_device *platform_dev) ret = sun6i_mipi_csi2_bridge_setup(csi2_dev); if (ret) - return ret; + goto error_resources; return 0; + +error_resources: + sun6i_mipi_csi2_resources_cleanup(csi2_dev); + + return ret; } static int sun6i_mipi_csi2_remove(struct platform_device *platform_dev) diff --git a/drivers/media/platform/sunxi/sun8i-a83t-mipi-csi2/Kconfig b/drivers/media/platform/sunxi/sun8i-a83t-mipi-csi2/Kconfig index 789d58ee12ea..47a8c0fb7eb9 100644 --- a/drivers/media/platform/sunxi/sun8i-a83t-mipi-csi2/Kconfig +++ b/drivers/media/platform/sunxi/sun8i-a83t-mipi-csi2/Kconfig @@ -3,7 +3,7 @@ config VIDEO_SUN8I_A83T_MIPI_CSI2 tristate "Allwinner A83T MIPI CSI-2 Controller and D-PHY Driver" depends on V4L_PLATFORM_DRIVERS && VIDEO_DEV depends on ARCH_SUNXI || COMPILE_TEST - depends on PM && COMMON_CLK + depends on PM 
&& COMMON_CLK && RESET_CONTROLLER select MEDIA_CONTROLLER select VIDEO_V4L2_SUBDEV_API select V4L2_FWNODE diff --git a/drivers/media/platform/sunxi/sun8i-a83t-mipi-csi2/sun8i_a83t_mipi_csi2.c b/drivers/media/platform/sunxi/sun8i-a83t-mipi-csi2/sun8i_a83t_mipi_csi2.c index d052ee77ef0a..b032ec13a683 100644 --- a/drivers/media/platform/sunxi/sun8i-a83t-mipi-csi2/sun8i_a83t_mipi_csi2.c +++ b/drivers/media/platform/sunxi/sun8i-a83t-mipi-csi2/sun8i_a83t_mipi_csi2.c @@ -719,13 +719,15 @@ sun8i_a83t_mipi_csi2_resources_setup(struct sun8i_a83t_mipi_csi2_device *csi2_de csi2_dev->clock_mipi = devm_clk_get(dev, "mipi"); if (IS_ERR(csi2_dev->clock_mipi)) { dev_err(dev, "failed to acquire mipi clock\n"); - return PTR_ERR(csi2_dev->clock_mipi); + ret = PTR_ERR(csi2_dev->clock_mipi); + goto error_clock_rate_exclusive; } csi2_dev->clock_misc = devm_clk_get(dev, "misc"); if (IS_ERR(csi2_dev->clock_misc)) { dev_err(dev, "failed to acquire misc clock\n"); - return PTR_ERR(csi2_dev->clock_misc); + ret = PTR_ERR(csi2_dev->clock_misc); + goto error_clock_rate_exclusive; } /* Reset */ @@ -733,7 +735,8 @@ sun8i_a83t_mipi_csi2_resources_setup(struct sun8i_a83t_mipi_csi2_device *csi2_de csi2_dev->reset = devm_reset_control_get_shared(dev, NULL); if (IS_ERR(csi2_dev->reset)) { dev_err(dev, "failed to get reset controller\n"); - return PTR_ERR(csi2_dev->reset); + ret = PTR_ERR(csi2_dev->reset); + goto error_clock_rate_exclusive; } /* D-PHY */ @@ -741,7 +744,7 @@ sun8i_a83t_mipi_csi2_resources_setup(struct sun8i_a83t_mipi_csi2_device *csi2_de ret = sun8i_a83t_dphy_register(csi2_dev); if (ret) { dev_err(dev, "failed to initialize MIPI D-PHY\n"); - return ret; + goto error_clock_rate_exclusive; } /* Runtime PM */ @@ -749,6 +752,11 @@ sun8i_a83t_mipi_csi2_resources_setup(struct sun8i_a83t_mipi_csi2_device *csi2_de pm_runtime_enable(dev); return 0; + +error_clock_rate_exclusive: + clk_rate_exclusive_put(csi2_dev->clock_mod); + + return ret; } static void @@ -778,9 +786,14 @@ static int sun8i_a83t_mipi_csi2_probe(struct platform_device *platform_dev) ret = sun8i_a83t_mipi_csi2_bridge_setup(csi2_dev); if (ret) - return ret; + goto error_resources; return 0; + +error_resources: + sun8i_a83t_mipi_csi2_resources_cleanup(csi2_dev); + + return ret; } static int sun8i_a83t_mipi_csi2_remove(struct platform_device *platform_dev) diff --git a/drivers/media/platform/sunxi/sun8i-di/Kconfig b/drivers/media/platform/sunxi/sun8i-di/Kconfig index ff71e06ee2df..f688396913b7 100644 --- a/drivers/media/platform/sunxi/sun8i-di/Kconfig +++ b/drivers/media/platform/sunxi/sun8i-di/Kconfig @@ -4,7 +4,7 @@ config VIDEO_SUN8I_DEINTERLACE depends on V4L_MEM2MEM_DRIVERS depends on VIDEO_DEV depends on ARCH_SUNXI || COMPILE_TEST - depends on COMMON_CLK && OF + depends on COMMON_CLK && RESET_CONTROLLER && OF depends on PM select VIDEOBUF2_DMA_CONTIG select V4L2_MEM2MEM_DEV diff --git a/drivers/media/platform/sunxi/sun8i-rotate/Kconfig b/drivers/media/platform/sunxi/sun8i-rotate/Kconfig index cfba29072d75..ee2c1f248c64 100644 --- a/drivers/media/platform/sunxi/sun8i-rotate/Kconfig +++ b/drivers/media/platform/sunxi/sun8i-rotate/Kconfig @@ -5,7 +5,7 @@ config VIDEO_SUN8I_ROTATE depends on V4L_MEM2MEM_DRIVERS depends on VIDEO_DEV depends on ARCH_SUNXI || COMPILE_TEST - depends on COMMON_CLK && OF + depends on COMMON_CLK && RESET_CONTROLLER && OF depends on PM select VIDEOBUF2_DMA_CONTIG select V4L2_MEM2MEM_DEV diff --git a/drivers/media/platform/ti/cal/cal-video.c b/drivers/media/platform/ti/cal/cal-video.c index 21e3d0aabf70..4eade409d5d3 100644 --- 
a/drivers/media/platform/ti/cal/cal-video.c +++ b/drivers/media/platform/ti/cal/cal-video.c @@ -708,7 +708,7 @@ static int cal_start_streaming(struct vb2_queue *vq, unsigned int count) dma_addr_t addr; int ret; - ret = media_pipeline_start(&ctx->vdev.entity, &ctx->phy->pipe); + ret = video_device_pipeline_alloc_start(&ctx->vdev); if (ret < 0) { ctx_err(ctx, "Failed to start media pipeline: %d\n", ret); goto error_release_buffers; @@ -761,7 +761,7 @@ error_stop: cal_ctx_unprepare(ctx); error_pipeline: - media_pipeline_stop(&ctx->vdev.entity); + video_device_pipeline_stop(&ctx->vdev); error_release_buffers: cal_release_buffers(ctx, VB2_BUF_STATE_QUEUED); @@ -782,7 +782,7 @@ static void cal_stop_streaming(struct vb2_queue *vq) cal_release_buffers(ctx, VB2_BUF_STATE_ERROR); - media_pipeline_stop(&ctx->vdev.entity); + video_device_pipeline_stop(&ctx->vdev); } static const struct vb2_ops cal_video_qops = { diff --git a/drivers/media/platform/ti/cal/cal.h b/drivers/media/platform/ti/cal/cal.h index 80f2c9c73c71..de73d6d21b6f 100644 --- a/drivers/media/platform/ti/cal/cal.h +++ b/drivers/media/platform/ti/cal/cal.h @@ -174,7 +174,6 @@ struct cal_camerarx { struct device_node *source_ep_node; struct device_node *source_node; struct v4l2_subdev *source; - struct media_pipeline pipe; struct v4l2_subdev subdev; struct media_pad pads[CAL_CAMERARX_NUM_PADS]; diff --git a/drivers/media/platform/ti/omap3isp/isp.c b/drivers/media/platform/ti/omap3isp/isp.c index a6052df9bb19..24d2383400b0 100644 --- a/drivers/media/platform/ti/omap3isp/isp.c +++ b/drivers/media/platform/ti/omap3isp/isp.c @@ -937,10 +937,8 @@ static int isp_pipeline_is_last(struct media_entity *me) struct isp_pipeline *pipe; struct media_pad *pad; - if (!me->pipe) - return 0; pipe = to_isp_pipeline(me); - if (pipe->stream_state == ISP_PIPELINE_STREAM_STOPPED) + if (!pipe || pipe->stream_state == ISP_PIPELINE_STREAM_STOPPED) return 0; pad = media_pad_remote_pad_first(&pipe->output->pad); return pad->entity == me; diff --git a/drivers/media/platform/ti/omap3isp/ispvideo.c b/drivers/media/platform/ti/omap3isp/ispvideo.c index cc9a97d5d505..3e5348c63773 100644 --- a/drivers/media/platform/ti/omap3isp/ispvideo.c +++ b/drivers/media/platform/ti/omap3isp/ispvideo.c @@ -1093,8 +1093,7 @@ isp_video_streamon(struct file *file, void *fh, enum v4l2_buf_type type) /* Start streaming on the pipeline. No link touching an entity in the * pipeline can be activated or deactivated once streaming is started. */ - pipe = video->video.entity.pipe - ? to_isp_pipeline(&video->video.entity) : &video->pipe; + pipe = to_isp_pipeline(&video->video.entity) ? 
: &video->pipe; ret = media_entity_enum_init(&pipe->ent_enum, &video->isp->media_dev); if (ret) @@ -1104,7 +1103,7 @@ isp_video_streamon(struct file *file, void *fh, enum v4l2_buf_type type) pipe->l3_ick = clk_get_rate(video->isp->clock[ISP_CLK_L3_ICK]); pipe->max_rate = pipe->l3_ick; - ret = media_pipeline_start(&video->video.entity, &pipe->pipe); + ret = video_device_pipeline_start(&video->video, &pipe->pipe); if (ret < 0) goto err_pipeline_start; @@ -1161,7 +1160,7 @@ isp_video_streamon(struct file *file, void *fh, enum v4l2_buf_type type) return 0; err_check_format: - media_pipeline_stop(&video->video.entity); + video_device_pipeline_stop(&video->video); err_pipeline_start: /* TODO: Implement PM QoS */ /* The DMA queue must be emptied here, otherwise CCDC interrupts that @@ -1228,7 +1227,7 @@ isp_video_streamoff(struct file *file, void *fh, enum v4l2_buf_type type) video->error = false; /* TODO: Implement PM QoS */ - media_pipeline_stop(&video->video.entity); + video_device_pipeline_stop(&video->video); media_entity_enum_cleanup(&pipe->ent_enum); diff --git a/drivers/media/platform/ti/omap3isp/ispvideo.h b/drivers/media/platform/ti/omap3isp/ispvideo.h index a0908670c0cf..1d23df576e6b 100644 --- a/drivers/media/platform/ti/omap3isp/ispvideo.h +++ b/drivers/media/platform/ti/omap3isp/ispvideo.h @@ -99,8 +99,15 @@ struct isp_pipeline { unsigned int external_width; }; -#define to_isp_pipeline(__e) \ - container_of((__e)->pipe, struct isp_pipeline, pipe) +static inline struct isp_pipeline *to_isp_pipeline(struct media_entity *entity) +{ + struct media_pipeline *pipe = media_entity_pipeline(entity); + + if (!pipe) + return NULL; + + return container_of(pipe, struct isp_pipeline, pipe); +} static inline int isp_pipeline_ready(struct isp_pipeline *pipe) { diff --git a/drivers/media/platform/verisilicon/hantro_drv.c b/drivers/media/platform/verisilicon/hantro_drv.c index 2036f72eeb4a..8cb4a68c9119 100644 --- a/drivers/media/platform/verisilicon/hantro_drv.c +++ b/drivers/media/platform/verisilicon/hantro_drv.c @@ -251,6 +251,11 @@ queue_init(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq) static int hantro_try_ctrl(struct v4l2_ctrl *ctrl) { + struct hantro_ctx *ctx; + + ctx = container_of(ctrl->handler, + struct hantro_ctx, ctrl_handler); + if (ctrl->id == V4L2_CID_STATELESS_H264_SPS) { const struct v4l2_ctrl_h264_sps *sps = ctrl->p_new.p_h264_sps; @@ -266,12 +271,11 @@ static int hantro_try_ctrl(struct v4l2_ctrl *ctrl) } else if (ctrl->id == V4L2_CID_STATELESS_HEVC_SPS) { const struct v4l2_ctrl_hevc_sps *sps = ctrl->p_new.p_hevc_sps; - if (sps->bit_depth_luma_minus8 != sps->bit_depth_chroma_minus8) - /* Luma and chroma bit depth mismatch */ - return -EINVAL; - if (sps->bit_depth_luma_minus8 != 0) - /* Only 8-bit is supported */ + if (sps->bit_depth_luma_minus8 != 0 && sps->bit_depth_luma_minus8 != 2) + /* Only 8-bit and 10-bit are supported */ return -EINVAL; + + ctx->bit_depth = sps->bit_depth_luma_minus8 + 8; } else if (ctrl->id == V4L2_CID_STATELESS_VP9_FRAME) { const struct v4l2_ctrl_vp9_frame *dec_params = ctrl->p_new.p_vp9_frame; diff --git a/drivers/media/platform/verisilicon/hantro_g2_hevc_dec.c b/drivers/media/platform/verisilicon/hantro_g2_hevc_dec.c index 233ecd863d5f..a9d4ac84a8d8 100644 --- a/drivers/media/platform/verisilicon/hantro_g2_hevc_dec.c +++ b/drivers/media/platform/verisilicon/hantro_g2_hevc_dec.c @@ -12,7 +12,7 @@ static size_t hantro_hevc_chroma_offset(struct hantro_ctx *ctx) { - return ctx->dst_fmt.width * ctx->dst_fmt.height; + return 
ctx->dst_fmt.width * ctx->dst_fmt.height * ctx->bit_depth / 8; } static size_t hantro_hevc_motion_vectors_offset(struct hantro_ctx *ctx) @@ -167,8 +167,6 @@ static void set_params(struct hantro_ctx *ctx) hantro_reg_write(vpu, &g2_bit_depth_y_minus8, sps->bit_depth_luma_minus8); hantro_reg_write(vpu, &g2_bit_depth_c_minus8, sps->bit_depth_chroma_minus8); - hantro_reg_write(vpu, &g2_output_8_bits, 0); - hantro_reg_write(vpu, &g2_hdr_skip_length, compute_header_skip_length(ctx)); min_log2_cb_size = sps->log2_min_luma_coding_block_size_minus3 + 3; diff --git a/drivers/media/platform/verisilicon/hantro_hevc.c b/drivers/media/platform/verisilicon/hantro_hevc.c index b990bc98164c..9383fb7081f6 100644 --- a/drivers/media/platform/verisilicon/hantro_hevc.c +++ b/drivers/media/platform/verisilicon/hantro_hevc.c @@ -104,7 +104,7 @@ static int tile_buffer_reallocate(struct hantro_ctx *ctx) hevc_dec->tile_bsd.cpu = NULL; } - size = VERT_FILTER_RAM_SIZE * height64 * (num_tile_cols - 1); + size = (VERT_FILTER_RAM_SIZE * height64 * (num_tile_cols - 1) * ctx->bit_depth) / 8; hevc_dec->tile_filter.cpu = dma_alloc_coherent(vpu->dev, size, &hevc_dec->tile_filter.dma, GFP_KERNEL); @@ -112,7 +112,7 @@ static int tile_buffer_reallocate(struct hantro_ctx *ctx) goto err_free_tile_buffers; hevc_dec->tile_filter.size = size; - size = VERT_SAO_RAM_SIZE * height64 * (num_tile_cols - 1); + size = (VERT_SAO_RAM_SIZE * height64 * (num_tile_cols - 1) * ctx->bit_depth) / 8; hevc_dec->tile_sao.cpu = dma_alloc_coherent(vpu->dev, size, &hevc_dec->tile_sao.dma, GFP_KERNEL); diff --git a/drivers/media/platform/verisilicon/hantro_postproc.c b/drivers/media/platform/verisilicon/hantro_postproc.c index a0928c508434..09d8cf942689 100644 --- a/drivers/media/platform/verisilicon/hantro_postproc.c +++ b/drivers/media/platform/verisilicon/hantro_postproc.c @@ -114,6 +114,7 @@ static void hantro_postproc_g2_enable(struct hantro_ctx *ctx) struct hantro_dev *vpu = ctx->dev; struct vb2_v4l2_buffer *dst_buf; int down_scale = down_scale_factor(ctx); + int out_depth; size_t chroma_offset; dma_addr_t dst_dma; @@ -132,8 +133,9 @@ static void hantro_postproc_g2_enable(struct hantro_ctx *ctx) hantro_write_addr(vpu, G2_RS_OUT_LUMA_ADDR, dst_dma); hantro_write_addr(vpu, G2_RS_OUT_CHROMA_ADDR, dst_dma + chroma_offset); } + + out_depth = hantro_get_format_depth(ctx->dst_fmt.pixelformat); if (ctx->dev->variant->legacy_regs) { - int out_depth = hantro_get_format_depth(ctx->dst_fmt.pixelformat); u8 pp_shift = 0; if (out_depth > 8) @@ -141,6 +143,9 @@ static void hantro_postproc_g2_enable(struct hantro_ctx *ctx) hantro_reg_write(ctx->dev, &g2_rs_out_bit_depth, out_depth); hantro_reg_write(ctx->dev, &g2_pp_pix_shift, pp_shift); + } else { + hantro_reg_write(vpu, &g2_output_8_bits, out_depth > 8 ? 0 : 1); + hantro_reg_write(vpu, &g2_output_format, out_depth > 8 ? 
1 : 0); } hantro_reg_write(vpu, &g2_out_rs_e, 1); } diff --git a/drivers/media/platform/verisilicon/imx8m_vpu_hw.c b/drivers/media/platform/verisilicon/imx8m_vpu_hw.c index 77f574fdfa77..b390228fd3b4 100644 --- a/drivers/media/platform/verisilicon/imx8m_vpu_hw.c +++ b/drivers/media/platform/verisilicon/imx8m_vpu_hw.c @@ -162,12 +162,39 @@ static const struct hantro_fmt imx8m_vpu_g2_postproc_fmts[] = { .step_height = MB_DIM, }, }, + { + .fourcc = V4L2_PIX_FMT_P010, + .codec_mode = HANTRO_MODE_NONE, + .postprocessed = true, + .frmsize = { + .min_width = FMT_MIN_WIDTH, + .max_width = FMT_UHD_WIDTH, + .step_width = MB_DIM, + .min_height = FMT_MIN_HEIGHT, + .max_height = FMT_UHD_HEIGHT, + .step_height = MB_DIM, + }, + }, }; static const struct hantro_fmt imx8m_vpu_g2_dec_fmts[] = { { .fourcc = V4L2_PIX_FMT_NV12_4L4, .codec_mode = HANTRO_MODE_NONE, + .match_depth = true, + .frmsize = { + .min_width = FMT_MIN_WIDTH, + .max_width = FMT_UHD_WIDTH, + .step_width = TILE_MB_DIM, + .min_height = FMT_MIN_HEIGHT, + .max_height = FMT_UHD_HEIGHT, + .step_height = TILE_MB_DIM, + }, + }, + { + .fourcc = V4L2_PIX_FMT_P010_4L4, + .codec_mode = HANTRO_MODE_NONE, + .match_depth = true, .frmsize = { .min_width = FMT_MIN_WIDTH, .max_width = FMT_UHD_WIDTH, diff --git a/drivers/media/platform/xilinx/xilinx-dma.c b/drivers/media/platform/xilinx/xilinx-dma.c index 2d1ef7a25c33..0a7fd8642a65 100644 --- a/drivers/media/platform/xilinx/xilinx-dma.c +++ b/drivers/media/platform/xilinx/xilinx-dma.c @@ -402,10 +402,9 @@ static int xvip_dma_start_streaming(struct vb2_queue *vq, unsigned int count) * Use the pipeline object embedded in the first DMA object that starts * streaming. */ - pipe = dma->video.entity.pipe - ? to_xvip_pipeline(&dma->video.entity) : &dma->pipe; + pipe = to_xvip_pipeline(&dma->video) ? : &dma->pipe; - ret = media_pipeline_start(&dma->video.entity, &pipe->pipe); + ret = video_device_pipeline_start(&dma->video, &pipe->pipe); if (ret < 0) goto error; @@ -431,7 +430,7 @@ static int xvip_dma_start_streaming(struct vb2_queue *vq, unsigned int count) return 0; error_stop: - media_pipeline_stop(&dma->video.entity); + video_device_pipeline_stop(&dma->video); error: /* Give back all queued buffers to videobuf2. */ @@ -448,7 +447,7 @@ error: static void xvip_dma_stop_streaming(struct vb2_queue *vq) { struct xvip_dma *dma = vb2_get_drv_priv(vq); - struct xvip_pipeline *pipe = to_xvip_pipeline(&dma->video.entity); + struct xvip_pipeline *pipe = to_xvip_pipeline(&dma->video); struct xvip_dma_buffer *buf, *nbuf; /* Stop the pipeline. */ @@ -459,7 +458,7 @@ static void xvip_dma_stop_streaming(struct vb2_queue *vq) /* Cleanup the pipeline and mark it as being stopped. */ xvip_pipeline_cleanup(pipe); - media_pipeline_stop(&dma->video.entity); + video_device_pipeline_stop(&dma->video); /* Give back all queued buffers to videobuf2. 
*/ spin_lock_irq(&dma->queued_lock); diff --git a/drivers/media/platform/xilinx/xilinx-dma.h b/drivers/media/platform/xilinx/xilinx-dma.h index 2378bdae57ae..9c6d4c18d1a9 100644 --- a/drivers/media/platform/xilinx/xilinx-dma.h +++ b/drivers/media/platform/xilinx/xilinx-dma.h @@ -45,9 +45,14 @@ struct xvip_pipeline { struct xvip_dma *output; }; -static inline struct xvip_pipeline *to_xvip_pipeline(struct media_entity *e) +static inline struct xvip_pipeline *to_xvip_pipeline(struct video_device *vdev) { - return container_of(e->pipe, struct xvip_pipeline, pipe); + struct media_pipeline *pipe = video_device_pipeline(vdev); + + if (!pipe) + return NULL; + + return container_of(pipe, struct xvip_pipeline, pipe); } /** diff --git a/drivers/media/radio/radio-si476x.c b/drivers/media/radio/radio-si476x.c index 0bf99e1cd1d8..171f9cc9ee5e 100644 --- a/drivers/media/radio/radio-si476x.c +++ b/drivers/media/radio/radio-si476x.c @@ -1072,7 +1072,6 @@ done: static int si476x_radio_fops_release(struct file *file) { - int err; struct si476x_radio *radio = video_drvdata(file); if (v4l2_fh_is_singular_file(file) && @@ -1080,9 +1079,7 @@ static int si476x_radio_fops_release(struct file *file) si476x_core_set_power_state(radio->core, SI476X_POWER_DOWN); - err = v4l2_fh_release(file); - - return err; + return v4l2_fh_release(file); } static ssize_t si476x_radio_fops_read(struct file *file, char __user *buf, diff --git a/drivers/media/radio/si4713/si4713.c b/drivers/media/radio/si4713/si4713.c index 2aec642133a1..93d847c294e8 100644 --- a/drivers/media/radio/si4713/si4713.c +++ b/drivers/media/radio/si4713/si4713.c @@ -14,7 +14,7 @@ #include <linux/interrupt.h> #include <linux/i2c.h> #include <linux/slab.h> -#include <linux/gpio.h> +#include <linux/gpio/consumer.h> #include <linux/module.h> #include <media/v4l2-device.h> #include <media/v4l2-ioctl.h> diff --git a/drivers/media/rc/imon.c b/drivers/media/rc/imon.c index 735b925da998..5edfd8a9e849 100644 --- a/drivers/media/rc/imon.c +++ b/drivers/media/rc/imon.c @@ -684,7 +684,6 @@ static int send_packet(struct imon_context *ictx) */ static int send_associate_24g(struct imon_context *ictx) { - int retval; const unsigned char packet[8] = { 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x20 }; @@ -699,9 +698,8 @@ static int send_associate_24g(struct imon_context *ictx) } memcpy(ictx->usb_tx_buf, packet, sizeof(packet)); - retval = send_packet(ictx); - return retval; + return send_packet(ictx); } /* diff --git a/drivers/media/rc/mceusb.c b/drivers/media/rc/mceusb.c index 39d2b03e2631..c76ba24c1f55 100644 --- a/drivers/media/rc/mceusb.c +++ b/drivers/media/rc/mceusb.c @@ -1077,7 +1077,7 @@ static int mceusb_set_timeout(struct rc_dev *dev, unsigned int timeout) struct mceusb_dev *ir = dev->priv; unsigned int units; - units = DIV_ROUND_CLOSEST(timeout, MCE_TIME_UNIT); + units = DIV_ROUND_UP(timeout, MCE_TIME_UNIT); cmdbuf[2] = units >> 8; cmdbuf[3] = units; diff --git a/drivers/media/test-drivers/vimc/vimc-capture.c b/drivers/media/test-drivers/vimc/vimc-capture.c index 6c437802f91f..aa944270e716 100644 --- a/drivers/media/test-drivers/vimc/vimc-capture.c +++ b/drivers/media/test-drivers/vimc/vimc-capture.c @@ -241,13 +241,12 @@ static void vimc_capture_return_all_buffers(struct vimc_capture_device *vcapture static int vimc_capture_start_streaming(struct vb2_queue *vq, unsigned int count) { struct vimc_capture_device *vcapture = vb2_get_drv_priv(vq); - struct media_entity *entity = &vcapture->vdev.entity; int ret; vcapture->sequence = 0; /* Start the media pipeline */ - 
ret = media_pipeline_start(entity, &vcapture->stream.pipe); + ret = video_device_pipeline_start(&vcapture->vdev, &vcapture->stream.pipe); if (ret) { vimc_capture_return_all_buffers(vcapture, VB2_BUF_STATE_QUEUED); return ret; @@ -255,7 +254,7 @@ static int vimc_capture_start_streaming(struct vb2_queue *vq, unsigned int count ret = vimc_streamer_s_stream(&vcapture->stream, &vcapture->ved, 1); if (ret) { - media_pipeline_stop(entity); + video_device_pipeline_stop(&vcapture->vdev); vimc_capture_return_all_buffers(vcapture, VB2_BUF_STATE_QUEUED); return ret; } @@ -274,7 +273,7 @@ static void vimc_capture_stop_streaming(struct vb2_queue *vq) vimc_streamer_s_stream(&vcapture->stream, &vcapture->ved, 0); /* Stop the media pipeline */ - media_pipeline_stop(&vcapture->vdev.entity); + video_device_pipeline_stop(&vcapture->vdev); /* Release all active buffers */ vimc_capture_return_all_buffers(vcapture, VB2_BUF_STATE_ERROR); diff --git a/drivers/media/test-drivers/vivid/vivid-core.c b/drivers/media/test-drivers/vivid/vivid-core.c index 04b75666bad4..f28440e6c9f8 100644 --- a/drivers/media/test-drivers/vivid/vivid-core.c +++ b/drivers/media/test-drivers/vivid/vivid-core.c @@ -339,6 +339,28 @@ static int vidioc_g_fbuf(struct file *file, void *fh, struct v4l2_framebuffer *a return vivid_vid_out_g_fbuf(file, fh, a); } +/* + * Only support the framebuffer of one of the vivid instances. + * Anything else is rejected. + */ +bool vivid_validate_fb(const struct v4l2_framebuffer *a) +{ + struct vivid_dev *dev; + int i; + + for (i = 0; i < n_devs; i++) { + dev = vivid_devs[i]; + if (!dev || !dev->video_pbase) + continue; + if ((unsigned long)a->base == dev->video_pbase && + a->fmt.width <= dev->display_width && + a->fmt.height <= dev->display_height && + a->fmt.bytesperline <= dev->display_byte_stride) + return true; + } + return false; +} + static int vidioc_s_fbuf(struct file *file, void *fh, const struct v4l2_framebuffer *a) { struct video_device *vdev = video_devdata(file); @@ -920,8 +942,12 @@ static int vivid_detect_feature_set(struct vivid_dev *dev, int inst, /* how many inputs do we have and of what type? */ dev->num_inputs = num_inputs[inst]; - if (dev->num_inputs < 1) - dev->num_inputs = 1; + if (node_type & 0x20007) { + if (dev->num_inputs < 1) + dev->num_inputs = 1; + } else { + dev->num_inputs = 0; + } if (dev->num_inputs >= MAX_INPUTS) dev->num_inputs = MAX_INPUTS; for (i = 0; i < dev->num_inputs; i++) { @@ -938,8 +964,12 @@ static int vivid_detect_feature_set(struct vivid_dev *dev, int inst, /* how many outputs do we have and of what type? 
*/ dev->num_outputs = num_outputs[inst]; - if (dev->num_outputs < 1) - dev->num_outputs = 1; + if (node_type & 0x40300) { + if (dev->num_outputs < 1) + dev->num_outputs = 1; + } else { + dev->num_outputs = 0; + } if (dev->num_outputs >= MAX_OUTPUTS) dev->num_outputs = MAX_OUTPUTS; for (i = 0; i < dev->num_outputs; i++) { diff --git a/drivers/media/test-drivers/vivid/vivid-core.h b/drivers/media/test-drivers/vivid/vivid-core.h index bfcfb3515901..473f3598db5a 100644 --- a/drivers/media/test-drivers/vivid/vivid-core.h +++ b/drivers/media/test-drivers/vivid/vivid-core.h @@ -613,4 +613,6 @@ static inline bool vivid_is_hdmi_out(const struct vivid_dev *dev) return dev->output_type[dev->output] == HDMI; } +bool vivid_validate_fb(const struct v4l2_framebuffer *a); + #endif diff --git a/drivers/media/test-drivers/vivid/vivid-osd.c b/drivers/media/test-drivers/vivid/vivid-osd.c index fbaec8acc161..ec25edc679b3 100644 --- a/drivers/media/test-drivers/vivid/vivid-osd.c +++ b/drivers/media/test-drivers/vivid/vivid-osd.c @@ -357,7 +357,7 @@ int vivid_fb_init(struct vivid_dev *dev) int ret; dev->video_buffer_size = MAX_OSD_HEIGHT * MAX_OSD_WIDTH * 2; - dev->video_vbase = kzalloc(dev->video_buffer_size, GFP_KERNEL | GFP_DMA32); + dev->video_vbase = kzalloc(dev->video_buffer_size, GFP_KERNEL); if (dev->video_vbase == NULL) return -ENOMEM; dev->video_pbase = virt_to_phys(dev->video_vbase); diff --git a/drivers/media/test-drivers/vivid/vivid-vid-cap.c b/drivers/media/test-drivers/vivid/vivid-vid-cap.c index 86b158eeb2d8..11620eaf941e 100644 --- a/drivers/media/test-drivers/vivid/vivid-vid-cap.c +++ b/drivers/media/test-drivers/vivid/vivid-vid-cap.c @@ -453,6 +453,12 @@ void vivid_update_format_cap(struct vivid_dev *dev, bool keep_controls) tpg_reset_source(&dev->tpg, dev->src_rect.width, dev->src_rect.height, dev->field_cap); dev->crop_cap = dev->src_rect; dev->crop_bounds_cap = dev->src_rect; + if (dev->bitmap_cap && + (dev->compose_cap.width != dev->crop_cap.width || + dev->compose_cap.height != dev->crop_cap.height)) { + vfree(dev->bitmap_cap); + dev->bitmap_cap = NULL; + } dev->compose_cap = dev->crop_cap; if (V4L2_FIELD_HAS_T_OR_B(dev->field_cap)) dev->compose_cap.height /= 2; @@ -460,6 +466,14 @@ void vivid_update_format_cap(struct vivid_dev *dev, bool keep_controls) tpg_s_video_aspect(&dev->tpg, vivid_get_video_aspect(dev)); tpg_s_pixel_aspect(&dev->tpg, vivid_get_pixel_aspect(dev)); tpg_update_mv_step(&dev->tpg); + + /* + * We can be called from within s_ctrl, in that case we can't + * modify controls. Luckily we don't need to in that case. + */ + if (keep_controls) + return; + dims[0] = roundup(dev->src_rect.width, PIXEL_ARRAY_DIV); dims[1] = roundup(dev->src_rect.height, PIXEL_ARRAY_DIV); v4l2_ctrl_modify_dimensions(dev->pixel_array, dims); @@ -913,6 +927,8 @@ int vivid_vid_cap_s_selection(struct file *file, void *fh, struct v4l2_selection struct vivid_dev *dev = video_drvdata(file); struct v4l2_rect *crop = &dev->crop_cap; struct v4l2_rect *compose = &dev->compose_cap; + unsigned orig_compose_w = compose->width; + unsigned orig_compose_h = compose->height; unsigned factor = V4L2_FIELD_HAS_T_OR_B(dev->field_cap) ? 
2 : 1; int ret; @@ -1029,17 +1045,17 @@ int vivid_vid_cap_s_selection(struct file *file, void *fh, struct v4l2_selection s->r.height /= factor; } v4l2_rect_map_inside(&s->r, &dev->fmt_cap_rect); - if (dev->bitmap_cap && (compose->width != s->r.width || - compose->height != s->r.height)) { - vfree(dev->bitmap_cap); - dev->bitmap_cap = NULL; - } *compose = s->r; break; default: return -EINVAL; } + if (dev->bitmap_cap && (compose->width != orig_compose_w || + compose->height != orig_compose_h)) { + vfree(dev->bitmap_cap); + dev->bitmap_cap = NULL; + } tpg_s_crop_compose(&dev->tpg, crop, compose); return 0; } @@ -1276,7 +1292,14 @@ int vivid_vid_cap_s_fbuf(struct file *file, void *fh, return -EINVAL; if (a->fmt.bytesperline < (a->fmt.width * fmt->bit_depth[0]) / 8) return -EINVAL; - if (a->fmt.height * a->fmt.bytesperline < a->fmt.sizeimage) + if (a->fmt.bytesperline > a->fmt.sizeimage / a->fmt.height) + return -EINVAL; + + /* + * Only support the framebuffer of one of the vivid instances. + * Anything else is rejected. + */ + if (!vivid_validate_fb(a)) return -EINVAL; dev->fb_vbase_cap = phys_to_virt((unsigned long)a->base); diff --git a/drivers/media/tuners/xc4000.c b/drivers/media/tuners/xc4000.c index a04dfd5799f7..d59b4ab77430 100644 --- a/drivers/media/tuners/xc4000.c +++ b/drivers/media/tuners/xc4000.c @@ -282,15 +282,13 @@ static int xc4000_tuner_reset(struct dvb_frontend *fe) static int xc_write_reg(struct xc4000_priv *priv, u16 regAddr, u16 i2cData) { u8 buf[4]; - int result; buf[0] = (regAddr >> 8) & 0xFF; buf[1] = regAddr & 0xFF; buf[2] = (i2cData >> 8) & 0xFF; buf[3] = i2cData & 0xFF; - result = xc_send_i2c_data(priv, buf, 4); - return result; + return xc_send_i2c_data(priv, buf, 4); } static int xc_load_i2c_sequence(struct dvb_frontend *fe, const u8 *i2c_sequence) diff --git a/drivers/media/usb/au0828/au0828-core.c b/drivers/media/usb/au0828/au0828-core.c index caefac07af92..877e85a451cb 100644 --- a/drivers/media/usb/au0828/au0828-core.c +++ b/drivers/media/usb/au0828/au0828-core.c @@ -410,7 +410,7 @@ static int au0828_enable_source(struct media_entity *entity, goto end; } - ret = __media_pipeline_start(entity, pipe); + ret = __media_pipeline_start(entity->pads, pipe); if (ret) { pr_err("Start Pipeline: %s->%s Error %d\n", source->name, entity->name, ret); @@ -501,12 +501,12 @@ static void au0828_disable_source(struct media_entity *entity) return; /* stop pipeline */ - __media_pipeline_stop(dev->active_link_owner); + __media_pipeline_stop(dev->active_link_owner->pads); pr_debug("Pipeline stop for %s\n", dev->active_link_owner->name); ret = __media_pipeline_start( - dev->active_link_user, + dev->active_link_user->pads, dev->active_link_user_pipe); if (ret) { pr_err("Start Pipeline: %s->%s %d\n", @@ -532,7 +532,7 @@ static void au0828_disable_source(struct media_entity *entity) return; /* stop pipeline */ - __media_pipeline_stop(dev->active_link_owner); + __media_pipeline_stop(dev->active_link_owner->pads); pr_debug("Pipeline stop for %s\n", dev->active_link_owner->name); diff --git a/drivers/media/usb/dvb-usb-v2/af9035.c b/drivers/media/usb/dvb-usb-v2/af9035.c index 5eef37b00a52..1e9c8d01523b 100644 --- a/drivers/media/usb/dvb-usb-v2/af9035.c +++ b/drivers/media/usb/dvb-usb-v2/af9035.c @@ -1497,7 +1497,7 @@ static int af9035_tuner_attach(struct dvb_usb_adapter *adap) /* * AF9035 gpiot2 = FC0012 enable * XXX: there seems to be something on gpioh8 too, but on my - * my test I didn't find any difference. + * test I didn't find any difference. 
*/ if (adap->id == 0) { diff --git a/drivers/media/usb/msi2500/msi2500.c b/drivers/media/usb/msi2500/msi2500.c index 5a1f2698efb7..9759996ee6a4 100644 --- a/drivers/media/usb/msi2500/msi2500.c +++ b/drivers/media/usb/msi2500/msi2500.c @@ -209,7 +209,7 @@ leave: * * Control bits for previous samples is 32-bit field, containing 16 x 2-bit * numbers. This results one 2-bit number for 8 samples. It is likely used for - * for bit shifting sample by given bits, increasing actual sampling resolution. + * bit shifting sample by given bits, increasing actual sampling resolution. * Number 2 (0b10) was never seen. * * 6 * 16 * 2 * 4 = 768 samples. 768 * 4 = 3072 bytes diff --git a/drivers/media/v4l2-core/v4l2-ctrls-api.c b/drivers/media/v4l2-core/v4l2-ctrls-api.c index a8c354ad3d23..d0a3aa3806fb 100644 --- a/drivers/media/v4l2-core/v4l2-ctrls-api.c +++ b/drivers/media/v4l2-core/v4l2-ctrls-api.c @@ -89,7 +89,7 @@ static int req_to_user(struct v4l2_ext_control *c, /* Helper function: copy the initial control value back to the caller */ static int def_to_user(struct v4l2_ext_control *c, struct v4l2_ctrl *ctrl) { - ctrl->type_ops->init(ctrl, 0, ctrl->elems, ctrl->p_new); + ctrl->type_ops->init(ctrl, 0, ctrl->p_new); return ptr_to_user(c, ctrl, ctrl->p_new); } @@ -126,7 +126,7 @@ static int user_to_new(struct v4l2_ext_control *c, struct v4l2_ctrl *ctrl) if (ctrl->is_dyn_array) ctrl->new_elems = elems; else if (ctrl->is_array) - ctrl->type_ops->init(ctrl, elems, ctrl->elems, ctrl->p_new); + ctrl->type_ops->init(ctrl, elems, ctrl->p_new); return 0; } @@ -494,7 +494,7 @@ EXPORT_SYMBOL(v4l2_g_ext_ctrls); /* Validate a new control */ static int validate_new(const struct v4l2_ctrl *ctrl, union v4l2_ctrl_ptr p_new) { - return ctrl->type_ops->validate(ctrl, ctrl->new_elems, p_new); + return ctrl->type_ops->validate(ctrl, p_new); } /* Validate controls. 
*/ @@ -1007,7 +1007,7 @@ int __v4l2_ctrl_modify_dimensions(struct v4l2_ctrl *ctrl, ctrl->p_cur.p = p_array + elems * ctrl->elem_size; for (i = 0; i < ctrl->nr_of_dims; i++) ctrl->dims[i] = dims[i]; - ctrl->type_ops->init(ctrl, 0, elems, ctrl->p_cur); + ctrl->type_ops->init(ctrl, 0, ctrl->p_cur); cur_to_new(ctrl); send_event(NULL, ctrl, V4L2_EVENT_CTRL_CH_VALUE | V4L2_EVENT_CTRL_CH_DIMENSIONS); diff --git a/drivers/media/v4l2-core/v4l2-ctrls-core.c b/drivers/media/v4l2-core/v4l2-ctrls-core.c index 01f00093f259..0dab1d7b90f0 100644 --- a/drivers/media/v4l2-core/v4l2-ctrls-core.c +++ b/drivers/media/v4l2-core/v4l2-ctrls-core.c @@ -65,7 +65,7 @@ void send_event(struct v4l2_fh *fh, struct v4l2_ctrl *ctrl, u32 changes) v4l2_event_queue_fh(sev->fh, &ev); } -bool v4l2_ctrl_type_op_equal(const struct v4l2_ctrl *ctrl, u32 elems, +bool v4l2_ctrl_type_op_equal(const struct v4l2_ctrl *ctrl, union v4l2_ctrl_ptr ptr1, union v4l2_ctrl_ptr ptr2) { unsigned int i; @@ -74,7 +74,7 @@ bool v4l2_ctrl_type_op_equal(const struct v4l2_ctrl *ctrl, u32 elems, case V4L2_CTRL_TYPE_BUTTON: return false; case V4L2_CTRL_TYPE_STRING: - for (i = 0; i < elems; i++) { + for (i = 0; i < ctrl->elems; i++) { unsigned int idx = i * ctrl->elem_size; /* strings are always 0-terminated */ @@ -84,7 +84,7 @@ bool v4l2_ctrl_type_op_equal(const struct v4l2_ctrl *ctrl, u32 elems, return true; default: return !memcmp(ptr1.p_const, ptr2.p_const, - elems * ctrl->elem_size); + ctrl->elems * ctrl->elem_size); } } EXPORT_SYMBOL(v4l2_ctrl_type_op_equal); @@ -178,9 +178,10 @@ static void std_init_compound(const struct v4l2_ctrl *ctrl, u32 idx, } void v4l2_ctrl_type_op_init(const struct v4l2_ctrl *ctrl, u32 from_idx, - u32 tot_elems, union v4l2_ctrl_ptr ptr) + union v4l2_ctrl_ptr ptr) { unsigned int i; + u32 tot_elems = ctrl->elems; u32 elems = tot_elems - from_idx; if (from_idx >= tot_elems) @@ -995,7 +996,7 @@ static int std_validate_elem(const struct v4l2_ctrl *ctrl, u32 idx, } } -int v4l2_ctrl_type_op_validate(const struct v4l2_ctrl *ctrl, u32 elems, +int v4l2_ctrl_type_op_validate(const struct v4l2_ctrl *ctrl, union v4l2_ctrl_ptr ptr) { unsigned int i; @@ -1017,11 +1018,11 @@ int v4l2_ctrl_type_op_validate(const struct v4l2_ctrl *ctrl, u32 elems, case V4L2_CTRL_TYPE_BUTTON: case V4L2_CTRL_TYPE_CTRL_CLASS: - memset(ptr.p_s32, 0, elems * sizeof(s32)); + memset(ptr.p_s32, 0, ctrl->new_elems * sizeof(s32)); return 0; } - for (i = 0; !ret && i < elems; i++) + for (i = 0; !ret && i < ctrl->new_elems; i++) ret = std_validate_elem(ctrl, i, ptr); return ret; } @@ -1724,7 +1725,7 @@ static struct v4l2_ctrl *v4l2_ctrl_new(struct v4l2_ctrl_handler *hdl, memcpy(ctrl->p_def.p, p_def.p_const, elem_size); } - ctrl->type_ops->init(ctrl, 0, elems, ctrl->p_cur); + ctrl->type_ops->init(ctrl, 0, ctrl->p_cur); cur_to_new(ctrl); if (handler_new_ref(hdl, ctrl, NULL, false, false)) { @@ -2069,7 +2070,7 @@ static int cluster_changed(struct v4l2_ctrl *master) ctrl_changed = true; if (!ctrl_changed) ctrl_changed = !ctrl->type_ops->equal(ctrl, - ctrl->elems, ctrl->p_cur, ctrl->p_new); + ctrl->p_cur, ctrl->p_new); ctrl->has_changed = ctrl_changed; changed |= ctrl->has_changed; } diff --git a/drivers/media/v4l2-core/v4l2-dev.c b/drivers/media/v4l2-core/v4l2-dev.c index d00237ee4cae..397d553177fa 100644 --- a/drivers/media/v4l2-core/v4l2-dev.c +++ b/drivers/media/v4l2-core/v4l2-dev.c @@ -1095,6 +1095,78 @@ void video_unregister_device(struct video_device *vdev) } EXPORT_SYMBOL(video_unregister_device); +#if defined(CONFIG_MEDIA_CONTROLLER) + +__must_check int 
video_device_pipeline_start(struct video_device *vdev, + struct media_pipeline *pipe) +{ + struct media_entity *entity = &vdev->entity; + + if (entity->num_pads != 1) + return -ENODEV; + + return media_pipeline_start(&entity->pads[0], pipe); +} +EXPORT_SYMBOL_GPL(video_device_pipeline_start); + +__must_check int __video_device_pipeline_start(struct video_device *vdev, + struct media_pipeline *pipe) +{ + struct media_entity *entity = &vdev->entity; + + if (entity->num_pads != 1) + return -ENODEV; + + return __media_pipeline_start(&entity->pads[0], pipe); +} +EXPORT_SYMBOL_GPL(__video_device_pipeline_start); + +void video_device_pipeline_stop(struct video_device *vdev) +{ + struct media_entity *entity = &vdev->entity; + + if (WARN_ON(entity->num_pads != 1)) + return; + + return media_pipeline_stop(&entity->pads[0]); +} +EXPORT_SYMBOL_GPL(video_device_pipeline_stop); + +void __video_device_pipeline_stop(struct video_device *vdev) +{ + struct media_entity *entity = &vdev->entity; + + if (WARN_ON(entity->num_pads != 1)) + return; + + return __media_pipeline_stop(&entity->pads[0]); +} +EXPORT_SYMBOL_GPL(__video_device_pipeline_stop); + +__must_check int video_device_pipeline_alloc_start(struct video_device *vdev) +{ + struct media_entity *entity = &vdev->entity; + + if (entity->num_pads != 1) + return -ENODEV; + + return media_pipeline_alloc_start(&entity->pads[0]); +} +EXPORT_SYMBOL_GPL(video_device_pipeline_alloc_start); + +struct media_pipeline *video_device_pipeline(struct video_device *vdev) +{ + struct media_entity *entity = &vdev->entity; + + if (WARN_ON(entity->num_pads != 1)) + return NULL; + + return media_pad_pipeline(&entity->pads[0]); +} +EXPORT_SYMBOL_GPL(video_device_pipeline); + +#endif /* CONFIG_MEDIA_CONTROLLER */ + /* * Initialise video for linux */ diff --git a/drivers/media/v4l2-core/v4l2-dv-timings.c b/drivers/media/v4l2-core/v4l2-dv-timings.c index af48705c704f..003c32fed3f7 100644 --- a/drivers/media/v4l2-core/v4l2-dv-timings.c +++ b/drivers/media/v4l2-core/v4l2-dv-timings.c @@ -161,6 +161,20 @@ bool v4l2_valid_dv_timings(const struct v4l2_dv_timings *t, (bt->interlaced && !(caps & V4L2_DV_BT_CAP_INTERLACED)) || (!bt->interlaced && !(caps & V4L2_DV_BT_CAP_PROGRESSIVE))) return false; + + /* sanity checks for the blanking timings */ + if (!bt->interlaced && + (bt->il_vbackporch || bt->il_vsync || bt->il_vfrontporch)) + return false; + if (bt->hfrontporch > 2 * bt->width || + bt->hsync > 1024 || bt->hbackporch > 1024) + return false; + if (bt->vfrontporch > 4096 || + bt->vsync > 128 || bt->vbackporch > 4096) + return false; + if (bt->interlaced && (bt->il_vfrontporch > 4096 || + bt->il_vsync > 128 || bt->il_vbackporch > 4096)) + return false; return fnc == NULL || fnc(t, fnc_handle); } EXPORT_SYMBOL_GPL(v4l2_valid_dv_timings); diff --git a/drivers/mfd/syscon.c b/drivers/mfd/syscon.c index 9489e80e905a..bdb2ce7ff03b 100644 --- a/drivers/mfd/syscon.c +++ b/drivers/mfd/syscon.c @@ -66,6 +66,14 @@ static struct syscon *of_syscon_register(struct device_node *np, bool check_clk) goto err_map; } + /* Parse the device's DT node for an endianness specification */ + if (of_property_read_bool(np, "big-endian")) + syscon_config.val_format_endian = REGMAP_ENDIAN_BIG; + else if (of_property_read_bool(np, "little-endian")) + syscon_config.val_format_endian = REGMAP_ENDIAN_LITTLE; + else if (of_property_read_bool(np, "native-endian")) + syscon_config.val_format_endian = REGMAP_ENDIAN_NATIVE; + /* * search for reg-io-width property in DT. If it is not provided, * default to 4 bytes. 
regmap_init_mmio will return an error if values diff --git a/drivers/misc/sgi-gru/grumain.c b/drivers/misc/sgi-gru/grumain.c index 9afda47efbf2..6706ef3c5977 100644 --- a/drivers/misc/sgi-gru/grumain.c +++ b/drivers/misc/sgi-gru/grumain.c @@ -152,7 +152,7 @@ static int gru_assign_asid(struct gru_state *gru) * Optionally, build an array of chars that contain the bit numbers allocated. */ static unsigned long reserve_resources(unsigned long *p, int n, int mmax, - char *idx) + signed char *idx) { unsigned long bits = 0; int i; @@ -170,14 +170,14 @@ static unsigned long reserve_resources(unsigned long *p, int n, int mmax, } unsigned long gru_reserve_cb_resources(struct gru_state *gru, int cbr_au_count, - char *cbmap) + signed char *cbmap) { return reserve_resources(&gru->gs_cbr_map, cbr_au_count, GRU_CBR_AU, cbmap); } unsigned long gru_reserve_ds_resources(struct gru_state *gru, int dsr_au_count, - char *dsmap) + signed char *dsmap) { return reserve_resources(&gru->gs_dsr_map, dsr_au_count, GRU_DSR_AU, dsmap); diff --git a/drivers/misc/sgi-gru/grutables.h b/drivers/misc/sgi-gru/grutables.h index 5efc869fe59a..8c52776db234 100644 --- a/drivers/misc/sgi-gru/grutables.h +++ b/drivers/misc/sgi-gru/grutables.h @@ -351,7 +351,7 @@ struct gru_thread_state { pid_t ts_tgid_owner; /* task that is using the context - for migration */ short ts_user_blade_id;/* user selected blade */ - char ts_user_chiplet_id;/* user selected chiplet */ + signed char ts_user_chiplet_id;/* user selected chiplet */ unsigned short ts_sizeavail; /* Pagesizes in use */ int ts_tsid; /* thread that owns the structure */ @@ -364,11 +364,11 @@ struct gru_thread_state { required for contest */ unsigned char ts_cbr_au_count;/* Number of CBR resources required for contest */ - char ts_cch_req_slice;/* CCH packet slice */ - char ts_blade; /* If >= 0, migrate context if + signed char ts_cch_req_slice;/* CCH packet slice */ + signed char ts_blade; /* If >= 0, migrate context if ref from different blade */ - char ts_force_cch_reload; - char ts_cbr_idx[GRU_CBR_AU];/* CBR numbers of each + signed char ts_force_cch_reload; + signed char ts_cbr_idx[GRU_CBR_AU];/* CBR numbers of each allocated CB */ int ts_data_valid; /* Indicates if ts_gdata has valid data */ @@ -643,9 +643,9 @@ extern struct gru_thread_state *gru_alloc_gts(struct vm_area_struct *vma, int cbr_au_count, int dsr_au_count, unsigned char tlb_preload_count, int options, int tsid); extern unsigned long gru_reserve_cb_resources(struct gru_state *gru, - int cbr_au_count, char *cbmap); + int cbr_au_count, signed char *cbmap); extern unsigned long gru_reserve_ds_resources(struct gru_state *gru, - int dsr_au_count, char *dsmap); + int dsr_au_count, signed char *dsmap); extern vm_fault_t gru_fault(struct vm_fault *vmf); extern struct gru_mm_struct *gru_register_mmu_notifier(void); extern void gru_drop_mmu_notifier(struct gru_mm_struct *gms); diff --git a/drivers/misc/vmw_vmci/vmci_queue_pair.c b/drivers/misc/vmw_vmci/vmci_queue_pair.c index e71068f7759b..844264e1b88c 100644 --- a/drivers/misc/vmw_vmci/vmci_queue_pair.c +++ b/drivers/misc/vmw_vmci/vmci_queue_pair.c @@ -854,6 +854,7 @@ static int qp_notify_peer_local(bool attach, struct vmci_handle handle) u32 context_id = vmci_get_context_id(); struct vmci_event_qp ev; + memset(&ev, 0, sizeof(ev)); ev.msg.hdr.dst = vmci_make_handle(context_id, VMCI_EVENT_HANDLER); ev.msg.hdr.src = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID, VMCI_CONTEXT_RESOURCE_ID); @@ -1467,6 +1468,7 @@ static int qp_notify_peer(bool attach, * kernel. 
*/ + memset(&ev, 0, sizeof(ev)); ev.msg.hdr.dst = vmci_make_handle(peer_id, VMCI_EVENT_HANDLER); ev.msg.hdr.src = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID, VMCI_CONTEXT_RESOURCE_ID); diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c index 54cd009aee50..db6d8a099910 100644 --- a/drivers/mmc/core/block.c +++ b/drivers/mmc/core/block.c @@ -134,6 +134,7 @@ struct mmc_blk_data { * track of the current selected device partition. */ unsigned int part_curr; +#define MMC_BLK_PART_INVALID UINT_MAX /* Unknown partition active */ int area_type; /* debugfs files (only in main mmc_blk_data) */ @@ -987,33 +988,39 @@ static unsigned int mmc_blk_data_timeout_ms(struct mmc_host *host, return ms; } +/* + * Attempts to reset the card and get back to the requested partition. + * Therefore any error here must result in cancelling the block layer + * request, it must not be reattempted without going through the mmc_blk + * partition sanity checks. + */ static int mmc_blk_reset(struct mmc_blk_data *md, struct mmc_host *host, int type) { int err; + struct mmc_blk_data *main_md = dev_get_drvdata(&host->card->dev); if (md->reset_done & type) return -EEXIST; md->reset_done |= type; err = mmc_hw_reset(host->card); + /* + * A successful reset will leave the card in the main partition, but + * upon failure it might not be, so set it to MMC_BLK_PART_INVALID + * in that case. + */ + main_md->part_curr = err ? MMC_BLK_PART_INVALID : main_md->part_type; + if (err) + return err; /* Ensure we switch back to the correct partition */ - if (err) { - struct mmc_blk_data *main_md = - dev_get_drvdata(&host->card->dev); - int part_err; - - main_md->part_curr = main_md->part_type; - part_err = mmc_blk_part_switch(host->card, md->part_type); - if (part_err) { - /* - * We have failed to get back into the correct - * partition, so we need to abort the whole request. - */ - return -ENODEV; - } - } - return err; + if (mmc_blk_part_switch(host->card, md->part_type)) + /* + * We have failed to get back into the correct + * partition, so we need to abort the whole request. + */ + return -ENODEV; + return 0; } static inline void mmc_blk_reset_success(struct mmc_blk_data *md, int type) @@ -1871,8 +1878,9 @@ static void mmc_blk_mq_rw_recovery(struct mmc_queue *mq, struct request *req) return; /* Reset before last retry */ - if (mqrq->retries + 1 == MMC_MAX_RETRIES) - mmc_blk_reset(md, card->host, type); + if (mqrq->retries + 1 == MMC_MAX_RETRIES && + mmc_blk_reset(md, card->host, type)) + return; /* Command errors fail fast, so use all MMC_MAX_RETRIES */ if (brq->sbc.error || brq->cmd.error) diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c index 95fa8fb1d45f..c5de202f530a 100644 --- a/drivers/mmc/core/core.c +++ b/drivers/mmc/core/core.c @@ -1134,7 +1134,13 @@ u32 mmc_select_voltage(struct mmc_host *host, u32 ocr) mmc_power_cycle(host, ocr); } else { bit = fls(ocr) - 1; - ocr &= 3 << bit; + /* + * The bit variable represents the highest voltage bit set in + * the OCR register. + * To keep a range of 2 values (e.g. 3.2V/3.3V and 3.3V/3.4V), + * we must shift the mask '3' with (bit - 1). 
+ */ + ocr &= 3 << (bit - 1); if (bit != host->ios.vdd) dev_warn(mmc_dev(host), "exceeding card's volts\n"); } diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c index fefaa901b50f..b396e3900717 100644 --- a/drivers/mmc/core/queue.c +++ b/drivers/mmc/core/queue.c @@ -48,6 +48,7 @@ static enum mmc_issue_type mmc_cqe_issue_type(struct mmc_host *host, case REQ_OP_DRV_OUT: case REQ_OP_DISCARD: case REQ_OP_SECURE_ERASE: + case REQ_OP_WRITE_ZEROES: return MMC_ISSUE_SYNC; case REQ_OP_FLUSH: return mmc_cqe_can_dcmd(host) ? MMC_ISSUE_DCMD : MMC_ISSUE_SYNC; @@ -493,6 +494,13 @@ void mmc_cleanup_queue(struct mmc_queue *mq) if (blk_queue_quiesced(q)) blk_mq_unquiesce_queue(q); + /* + * If the recovery completes the last (and only remaining) request in + * the queue, and the card has been removed, we could end up here with + * the recovery not quite finished yet, so cancel it. + */ + cancel_work_sync(&mq->recovery_work); + blk_mq_free_tag_set(&mq->tag_set); /* diff --git a/drivers/mmc/core/sdio_bus.c b/drivers/mmc/core/sdio_bus.c index c6268c38c69e..babf21a0adeb 100644 --- a/drivers/mmc/core/sdio_bus.c +++ b/drivers/mmc/core/sdio_bus.c @@ -291,7 +291,8 @@ static void sdio_release_func(struct device *dev) { struct sdio_func *func = dev_to_sdio_func(dev); - sdio_free_func_cis(func); + if (!(func->card->quirks & MMC_QUIRK_NONSTD_SDIO)) + sdio_free_func_cis(func); kfree(func->info); kfree(func->tmpbuf); diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig index f324daadaf70..fb1062a6394c 100644 --- a/drivers/mmc/host/Kconfig +++ b/drivers/mmc/host/Kconfig @@ -1075,9 +1075,10 @@ config MMC_SDHCI_OMAP config MMC_SDHCI_AM654 tristate "Support for the SDHCI Controller in TI's AM654 SOCs" - depends on MMC_SDHCI_PLTFM && OF && REGMAP_MMIO + depends on MMC_SDHCI_PLTFM && OF select MMC_SDHCI_IO_ACCESSORS select MMC_CQHCI + select REGMAP_MMIO help This selects the Secure Digital Host Controller Interface (SDHCI) support present in TI's AM654 SOCs. The controller supports diff --git a/drivers/mmc/host/sdhci-brcmstb.c b/drivers/mmc/host/sdhci-brcmstb.c index aff36a933ebe..55d8bd232695 100644 --- a/drivers/mmc/host/sdhci-brcmstb.c +++ b/drivers/mmc/host/sdhci-brcmstb.c @@ -12,6 +12,7 @@ #include <linux/bitops.h> #include <linux/delay.h> +#include "sdhci-cqhci.h" #include "sdhci-pltfm.h" #include "cqhci.h" @@ -55,7 +56,7 @@ static void brcmstb_reset(struct sdhci_host *host, u8 mask) struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); struct sdhci_brcmstb_priv *priv = sdhci_pltfm_priv(pltfm_host); - sdhci_reset(host, mask); + sdhci_and_cqhci_reset(host, mask); /* Reset will clear this, so re-enable it */ if (priv->flags & BRCMSTB_PRIV_FLAGS_GATE_CLOCK) diff --git a/drivers/mmc/host/sdhci-cqhci.h b/drivers/mmc/host/sdhci-cqhci.h new file mode 100644 index 000000000000..cf8e7ba71bbd --- /dev/null +++ b/drivers/mmc/host/sdhci-cqhci.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright 2022 The Chromium OS Authors + * + * Support that applies to the combination of SDHCI and CQHCI, while not + * expressing a dependency between the two modules. 
+ */ + +#ifndef __MMC_HOST_SDHCI_CQHCI_H__ +#define __MMC_HOST_SDHCI_CQHCI_H__ + +#include "cqhci.h" +#include "sdhci.h" + +static inline void sdhci_and_cqhci_reset(struct sdhci_host *host, u8 mask) +{ + if ((host->mmc->caps2 & MMC_CAP2_CQE) && (mask & SDHCI_RESET_ALL) && + host->mmc->cqe_private) + cqhci_deactivate(host->mmc); + + sdhci_reset(host, mask); +} + +#endif /* __MMC_HOST_SDHCI_CQHCI_H__ */ diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c index 55981b0f0b10..31ea0a2fce35 100644 --- a/drivers/mmc/host/sdhci-esdhc-imx.c +++ b/drivers/mmc/host/sdhci-esdhc-imx.c @@ -25,6 +25,7 @@ #include <linux/of_device.h> #include <linux/pinctrl/consumer.h> #include <linux/pm_runtime.h> +#include "sdhci-cqhci.h" #include "sdhci-pltfm.h" #include "sdhci-esdhc.h" #include "cqhci.h" @@ -1288,7 +1289,7 @@ static void esdhc_set_uhs_signaling(struct sdhci_host *host, unsigned timing) static void esdhc_reset(struct sdhci_host *host, u8 mask) { - sdhci_reset(host, mask); + sdhci_and_cqhci_reset(host, mask); sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); @@ -1660,6 +1661,10 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev) host->mmc_host_ops.execute_tuning = usdhc_execute_tuning; } + err = sdhci_esdhc_imx_probe_dt(pdev, host, imx_data); + if (err) + goto disable_ahb_clk; + if (imx_data->socdata->flags & ESDHC_FLAG_MAN_TUNING) sdhci_esdhc_ops.platform_execute_tuning = esdhc_executing_tuning; @@ -1667,13 +1672,15 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev) if (imx_data->socdata->flags & ESDHC_FLAG_ERR004536) host->quirks |= SDHCI_QUIRK_BROKEN_ADMA; - if (imx_data->socdata->flags & ESDHC_FLAG_HS400) + if (host->mmc->caps & MMC_CAP_8_BIT_DATA && + imx_data->socdata->flags & ESDHC_FLAG_HS400) host->mmc->caps2 |= MMC_CAP2_HS400; if (imx_data->socdata->flags & ESDHC_FLAG_BROKEN_AUTO_CMD23) host->quirks2 |= SDHCI_QUIRK2_ACMD23_BROKEN; - if (imx_data->socdata->flags & ESDHC_FLAG_HS400_ES) { + if (host->mmc->caps & MMC_CAP_8_BIT_DATA && + imx_data->socdata->flags & ESDHC_FLAG_HS400_ES) { host->mmc->caps2 |= MMC_CAP2_HS400_ES; host->mmc_host_ops.hs400_enhanced_strobe = esdhc_hs400_enhanced_strobe; @@ -1695,10 +1702,6 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev) goto disable_ahb_clk; } - err = sdhci_esdhc_imx_probe_dt(pdev, host, imx_data); - if (err) - goto disable_ahb_clk; - sdhci_esdhc_imx_hwinit(host); err = sdhci_add_host(host); diff --git a/drivers/mmc/host/sdhci-of-arasan.c b/drivers/mmc/host/sdhci-of-arasan.c index 3997cad1f793..cfb891430174 100644 --- a/drivers/mmc/host/sdhci-of-arasan.c +++ b/drivers/mmc/host/sdhci-of-arasan.c @@ -25,6 +25,7 @@ #include <linux/firmware/xlnx-zynqmp.h> #include "cqhci.h" +#include "sdhci-cqhci.h" #include "sdhci-pltfm.h" #define SDHCI_ARASAN_VENDOR_REGISTER 0x78 @@ -366,7 +367,7 @@ static void sdhci_arasan_reset(struct sdhci_host *host, u8 mask) struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); struct sdhci_arasan_data *sdhci_arasan = sdhci_pltfm_priv(pltfm_host); - sdhci_reset(host, mask); + sdhci_and_cqhci_reset(host, mask); if (sdhci_arasan->quirks & SDHCI_ARASAN_QUIRK_FORCE_CDTEST) { ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL); diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c index 169b84761041..28dc65023fa9 100644 --- a/drivers/mmc/host/sdhci-pci-core.c +++ b/drivers/mmc/host/sdhci-pci-core.c @@ -914,6 +914,12 @@ static bool glk_broken_cqhci(struct sdhci_pci_slot *slot) 
dmi_match(DMI_SYS_VENDOR, "IRBIS")); } +static bool jsl_broken_hs400es(struct sdhci_pci_slot *slot) +{ + return slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_JSL_EMMC && + dmi_match(DMI_BIOS_VENDOR, "ASUSTeK COMPUTER INC."); +} + static int glk_emmc_probe_slot(struct sdhci_pci_slot *slot) { int ret = byt_emmc_probe_slot(slot); @@ -922,9 +928,11 @@ static int glk_emmc_probe_slot(struct sdhci_pci_slot *slot) slot->host->mmc->caps2 |= MMC_CAP2_CQE; if (slot->chip->pdev->device != PCI_DEVICE_ID_INTEL_GLK_EMMC) { - slot->host->mmc->caps2 |= MMC_CAP2_HS400_ES; - slot->host->mmc_host_ops.hs400_enhanced_strobe = - intel_hs400_enhanced_strobe; + if (!jsl_broken_hs400es(slot)) { + slot->host->mmc->caps2 |= MMC_CAP2_HS400_ES; + slot->host->mmc_host_ops.hs400_enhanced_strobe = + intel_hs400_enhanced_strobe; + } slot->host->mmc->caps2 |= MMC_CAP2_CQE_DCMD; } @@ -1741,6 +1749,8 @@ static int amd_probe(struct sdhci_pci_chip *chip) } } + pci_dev_put(smbus_dev); + if (gen == AMD_CHIPSET_BEFORE_ML || gen == AMD_CHIPSET_CZ) chip->quirks2 |= SDHCI_QUIRK2_CLEAR_TRANSFERMODE_REG_BEFORE_CMD; diff --git a/drivers/mmc/host/sdhci-pci-o2micro.c b/drivers/mmc/host/sdhci-pci-o2micro.c index ad457cd9cbaa..bca1d095b759 100644 --- a/drivers/mmc/host/sdhci-pci-o2micro.c +++ b/drivers/mmc/host/sdhci-pci-o2micro.c @@ -32,6 +32,7 @@ #define O2_SD_CAPS 0xE0 #define O2_SD_ADMA1 0xE2 #define O2_SD_ADMA2 0xE7 +#define O2_SD_MISC_CTRL2 0xF0 #define O2_SD_INF_MOD 0xF1 #define O2_SD_MISC_CTRL4 0xFC #define O2_SD_MISC_CTRL 0x1C0 @@ -877,6 +878,12 @@ static int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip) /* Set Tuning Windows to 5 */ pci_write_config_byte(chip->pdev, O2_SD_TUNING_CTRL, 0x55); + //Adjust 1st and 2nd CD debounce time + pci_read_config_dword(chip->pdev, O2_SD_MISC_CTRL2, &scratch_32); + scratch_32 &= 0xFFE7FFFF; + scratch_32 |= 0x00180000; + pci_write_config_dword(chip->pdev, O2_SD_MISC_CTRL2, scratch_32); + pci_write_config_dword(chip->pdev, O2_SD_DETECT_SETTING, 1); /* Lock WP */ ret = pci_read_config_byte(chip->pdev, O2_SD_LOCK_WP, &scratch); diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c index 413925bce0ca..c71000a07656 100644 --- a/drivers/mmc/host/sdhci-tegra.c +++ b/drivers/mmc/host/sdhci-tegra.c @@ -28,6 +28,7 @@ #include <soc/tegra/common.h> +#include "sdhci-cqhci.h" #include "sdhci-pltfm.h" #include "cqhci.h" @@ -367,7 +368,7 @@ static void tegra_sdhci_reset(struct sdhci_host *host, u8 mask) const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data; u32 misc_ctrl, clk_ctrl, pad_ctrl; - sdhci_reset(host, mask); + sdhci_and_cqhci_reset(host, mask); if (!(mask & SDHCI_RESET_ALL)) return; diff --git a/drivers/mmc/host/sdhci_am654.c b/drivers/mmc/host/sdhci_am654.c index 8f1023480e12..c2333c7acac9 100644 --- a/drivers/mmc/host/sdhci_am654.c +++ b/drivers/mmc/host/sdhci_am654.c @@ -15,6 +15,7 @@ #include <linux/sys_soc.h> #include "cqhci.h" +#include "sdhci-cqhci.h" #include "sdhci-pltfm.h" /* CTL_CFG Registers */ @@ -378,7 +379,7 @@ static void sdhci_am654_reset(struct sdhci_host *host, u8 mask) struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); struct sdhci_am654_data *sdhci_am654 = sdhci_pltfm_priv(pltfm_host); - sdhci_reset(host, mask); + sdhci_and_cqhci_reset(host, mask); if (sdhci_am654->quirks & SDHCI_AM654_QUIRK_FORCE_CDTEST) { ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL); @@ -464,7 +465,7 @@ static struct sdhci_ops sdhci_am654_ops = { .set_clock = sdhci_am654_set_clock, .write_b = sdhci_am654_write_b, .irq = sdhci_am654_cqhci_irq, - .reset = sdhci_reset, + 
.reset = sdhci_and_cqhci_reset, }; static const struct sdhci_pltfm_data sdhci_am654_pdata = { @@ -494,7 +495,7 @@ static struct sdhci_ops sdhci_j721e_8bit_ops = { .set_clock = sdhci_am654_set_clock, .write_b = sdhci_am654_write_b, .irq = sdhci_am654_cqhci_irq, - .reset = sdhci_reset, + .reset = sdhci_and_cqhci_reset, }; static const struct sdhci_pltfm_data sdhci_j721e_8bit_pdata = { diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c index 18aa54460d36..0b4ca0aa4132 100644 --- a/drivers/mtd/mtdcore.c +++ b/drivers/mtd/mtdcore.c @@ -562,7 +562,7 @@ static void mtd_check_of_node(struct mtd_info *mtd) if (!mtd_is_partition(mtd)) return; parent = mtd->parent; - parent_dn = dev_of_node(&parent->dev); + parent_dn = of_node_get(dev_of_node(&parent->dev)); if (!parent_dn) return; diff --git a/drivers/mtd/nand/onenand/Kconfig b/drivers/mtd/nand/onenand/Kconfig index 34d9a7a82ad4..c94bf483541e 100644 --- a/drivers/mtd/nand/onenand/Kconfig +++ b/drivers/mtd/nand/onenand/Kconfig @@ -26,6 +26,7 @@ config MTD_ONENAND_OMAP2 tristate "OneNAND on OMAP2/OMAP3 support" depends on ARCH_OMAP2 || ARCH_OMAP3 || (COMPILE_TEST && ARM) depends on OF || COMPILE_TEST + depends on OMAP_GPMC help Support for a OneNAND flash device connected to an OMAP2/OMAP3 SoC via the GPMC memory controller. diff --git a/drivers/mtd/nand/raw/intel-nand-controller.c b/drivers/mtd/nand/raw/intel-nand-controller.c index d4a0987e93ac..6f4cea81f97c 100644 --- a/drivers/mtd/nand/raw/intel-nand-controller.c +++ b/drivers/mtd/nand/raw/intel-nand-controller.c @@ -608,11 +608,12 @@ static int ebu_nand_probe(struct platform_device *pdev) ret = of_property_read_u32(chip_np, "reg", &cs); if (ret) { dev_err(dev, "failed to get chip select: %d\n", ret); - return ret; + goto err_of_node_put; } if (cs >= MAX_CS) { dev_err(dev, "got invalid chip select: %d\n", cs); - return -EINVAL; + ret = -EINVAL; + goto err_of_node_put; } ebu_host->cs_num = cs; @@ -620,18 +621,22 @@ static int ebu_nand_probe(struct platform_device *pdev) resname = devm_kasprintf(dev, GFP_KERNEL, "nand_cs%d", cs); ebu_host->cs[cs].chipaddr = devm_platform_ioremap_resource_byname(pdev, resname); - if (IS_ERR(ebu_host->cs[cs].chipaddr)) - return PTR_ERR(ebu_host->cs[cs].chipaddr); + if (IS_ERR(ebu_host->cs[cs].chipaddr)) { + ret = PTR_ERR(ebu_host->cs[cs].chipaddr); + goto err_of_node_put; + } ebu_host->clk = devm_clk_get(dev, NULL); - if (IS_ERR(ebu_host->clk)) - return dev_err_probe(dev, PTR_ERR(ebu_host->clk), - "failed to get clock\n"); + if (IS_ERR(ebu_host->clk)) { + ret = dev_err_probe(dev, PTR_ERR(ebu_host->clk), + "failed to get clock\n"); + goto err_of_node_put; + } ret = clk_prepare_enable(ebu_host->clk); if (ret) { dev_err(dev, "failed to enable clock: %d\n", ret); - return ret; + goto err_of_node_put; } ebu_host->dma_tx = dma_request_chan(dev, "tx"); @@ -695,6 +700,8 @@ err_cleanup_dma: ebu_dma_cleanup(ebu_host); err_disable_unprepare_clk: clk_disable_unprepare(ebu_host->clk); +err_of_node_put: + of_node_put(chip_np); return ret; } diff --git a/drivers/mtd/nand/raw/marvell_nand.c b/drivers/mtd/nand/raw/marvell_nand.c index d9f2f1d0b5ef..b9d1e96e3334 100644 --- a/drivers/mtd/nand/raw/marvell_nand.c +++ b/drivers/mtd/nand/raw/marvell_nand.c @@ -2678,7 +2678,7 @@ static int marvell_nand_chip_init(struct device *dev, struct marvell_nfc *nfc, chip->controller = &nfc->controller; nand_set_flash_node(chip, np); - if (!of_property_read_bool(np, "marvell,nand-keep-config")) + if (of_property_read_bool(np, "marvell,nand-keep-config")) chip->options |= NAND_KEEP_TIMINGS; mtd 
= nand_to_mtd(chip); diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c index 33f2c98a030e..c3cc66039925 100644 --- a/drivers/mtd/nand/raw/nand_base.c +++ b/drivers/mtd/nand/raw/nand_base.c @@ -5834,7 +5834,7 @@ nand_match_ecc_req(struct nand_chip *chip, int req_step = requirements->step_size; int req_strength = requirements->strength; int req_corr, step_size, strength, nsteps, ecc_bytes, ecc_bytes_total; - int best_step, best_strength, best_ecc_bytes; + int best_step = 0, best_strength = 0, best_ecc_bytes = 0; int best_ecc_bytes_total = INT_MAX; int i, j; @@ -5915,7 +5915,7 @@ nand_maximize_ecc(struct nand_chip *chip, int step_size, strength, nsteps, ecc_bytes, corr; int best_corr = 0; int best_step = 0; - int best_strength, best_ecc_bytes; + int best_strength = 0, best_ecc_bytes = 0; int i, j; for (i = 0; i < caps->nstepinfos; i++) { diff --git a/drivers/mtd/nand/raw/qcom_nandc.c b/drivers/mtd/nand/raw/qcom_nandc.c index 8f80019a9f01..198a44794d2d 100644 --- a/drivers/mtd/nand/raw/qcom_nandc.c +++ b/drivers/mtd/nand/raw/qcom_nandc.c @@ -3167,16 +3167,18 @@ static int qcom_nand_host_init_and_register(struct qcom_nand_controller *nandc, ret = mtd_device_parse_register(mtd, probes, NULL, NULL, 0); if (ret) - nand_cleanup(chip); + goto err; if (nandc->props->use_codeword_fixup) { ret = qcom_nand_host_parse_boot_partitions(nandc, host, dn); - if (ret) { - nand_cleanup(chip); - return ret; - } + if (ret) + goto err; } + return 0; + +err: + nand_cleanup(chip); return ret; } diff --git a/drivers/mtd/nand/raw/tegra_nand.c b/drivers/mtd/nand/raw/tegra_nand.c index e12f9f580a15..a9b9031ce616 100644 --- a/drivers/mtd/nand/raw/tegra_nand.c +++ b/drivers/mtd/nand/raw/tegra_nand.c @@ -1181,7 +1181,7 @@ static int tegra_nand_probe(struct platform_device *pdev) pm_runtime_enable(&pdev->dev); err = pm_runtime_resume_and_get(&pdev->dev); if (err) - return err; + goto err_dis_pm; err = reset_control_reset(rst); if (err) { @@ -1215,6 +1215,8 @@ static int tegra_nand_probe(struct platform_device *pdev) err_put_pm: pm_runtime_put_sync_suspend(ctrl->dev); pm_runtime_force_suspend(ctrl->dev); +err_dis_pm: + pm_runtime_disable(&pdev->dev); return err; } diff --git a/drivers/mtd/parsers/bcm47xxpart.c b/drivers/mtd/parsers/bcm47xxpart.c index 50fcf4c2174b..13daf9bffd08 100644 --- a/drivers/mtd/parsers/bcm47xxpart.c +++ b/drivers/mtd/parsers/bcm47xxpart.c @@ -233,11 +233,11 @@ static int bcm47xxpart_parse(struct mtd_info *master, } /* Read middle of the block */ - err = mtd_read(master, offset + 0x8000, 0x4, &bytes_read, + err = mtd_read(master, offset + (blocksize / 2), 0x4, &bytes_read, (uint8_t *)buf); if (err && !mtd_is_bitflip(err)) { pr_err("mtd_read error while parsing (offset: 0x%X): %d\n", - offset + 0x8000, err); + offset + (blocksize / 2), err); continue; } diff --git a/drivers/mtd/spi-nor/core.c b/drivers/mtd/spi-nor/core.c index f2c64006f8d7..bee8fc4c9f07 100644 --- a/drivers/mtd/spi-nor/core.c +++ b/drivers/mtd/spi-nor/core.c @@ -2724,7 +2724,9 @@ static int spi_nor_init(struct spi_nor *nor) */ WARN_ONCE(nor->flags & SNOR_F_BROKEN_RESET, "enabling reset hack; may not recover from unexpected reboots\n"); - return nor->params->set_4byte_addr_mode(nor, true); + err = nor->params->set_4byte_addr_mode(nor, true); + if (err && err != -ENOTSUPP) + return err; } return 0; diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c index 3a2d109a3792..199cb200f2bd 100644 --- a/drivers/net/can/at91_can.c +++ b/drivers/net/can/at91_can.c @@ -452,7 +452,7 @@ static 
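Illustrative aside (not part of the diff): the intel-nand-controller, qcom_nandc and tegra_nand hunks above are all the same class of fix: once a probe function has acquired something that is not device-managed, every later failure must jump to a label that releases what has been acquired so far, in reverse order. A generic sketch of that shape under hypothetical foo_* names:

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
	struct device_node *np = of_get_child_by_name(pdev->dev.of_node, "child");
	struct clk *clk;
	int irq, ret;

	if (!np)
		return -ENODEV;

	clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(clk)) {
		ret = PTR_ERR(clk);
		goto err_put_node;
	}

	ret = clk_prepare_enable(clk);
	if (ret)
		goto err_put_node;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		ret = irq;
		goto err_disable_clk;
	}

	of_node_put(np);
	return 0;

err_disable_clk:
	clk_disable_unprepare(clk);	/* undo clk_prepare_enable() */
err_put_node:
	of_node_put(np);		/* undo of_get_child_by_name() */
	return ret;
}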
netdev_tx_t at91_start_xmit(struct sk_buff *skb, struct net_device *dev) unsigned int mb, prio; u32 reg_mid, reg_mcr; - if (can_dropped_invalid_skb(dev, skb)) + if (can_dev_dropped_skb(dev, skb)) return NETDEV_TX_OK; mb = get_tx_next_mb(priv); diff --git a/drivers/net/can/c_can/c_can_main.c b/drivers/net/can/c_can/c_can_main.c index d6605dbb7737..c63f7fc1e691 100644 --- a/drivers/net/can/c_can/c_can_main.c +++ b/drivers/net/can/c_can/c_can_main.c @@ -457,7 +457,7 @@ static netdev_tx_t c_can_start_xmit(struct sk_buff *skb, struct c_can_tx_ring *tx_ring = &priv->tx; u32 idx, obj, cmd = IF_COMM_TX; - if (can_dropped_invalid_skb(dev, skb)) + if (can_dev_dropped_skb(dev, skb)) return NETDEV_TX_OK; if (c_can_tx_busy(priv, tx_ring)) diff --git a/drivers/net/can/can327.c b/drivers/net/can/can327.c index 0aa1af31d0fe..094197780776 100644 --- a/drivers/net/can/can327.c +++ b/drivers/net/can/can327.c @@ -813,7 +813,7 @@ static netdev_tx_t can327_netdev_start_xmit(struct sk_buff *skb, struct can327 *elm = netdev_priv(dev); struct can_frame *frame = (struct can_frame *)skb->data; - if (can_dropped_invalid_skb(dev, skb)) + if (can_dev_dropped_skb(dev, skb)) return NETDEV_TX_OK; /* We shouldn't get here after a hardware fault: diff --git a/drivers/net/can/cc770/cc770.c b/drivers/net/can/cc770/cc770.c index 0b9dfc76e769..30909f3aab57 100644 --- a/drivers/net/can/cc770/cc770.c +++ b/drivers/net/can/cc770/cc770.c @@ -429,7 +429,7 @@ static netdev_tx_t cc770_start_xmit(struct sk_buff *skb, struct net_device *dev) struct cc770_priv *priv = netdev_priv(dev); unsigned int mo = obj2msgobj(CC770_OBJ_TX); - if (can_dropped_invalid_skb(dev, skb)) + if (can_dev_dropped_skb(dev, skb)) return NETDEV_TX_OK; netif_stop_queue(dev); diff --git a/drivers/net/can/ctucanfd/ctucanfd_base.c b/drivers/net/can/ctucanfd/ctucanfd_base.c index b8da15ea6ad9..64c349fd4600 100644 --- a/drivers/net/can/ctucanfd/ctucanfd_base.c +++ b/drivers/net/can/ctucanfd/ctucanfd_base.c @@ -600,7 +600,7 @@ static netdev_tx_t ctucan_start_xmit(struct sk_buff *skb, struct net_device *nde bool ok; unsigned long flags; - if (can_dropped_invalid_skb(ndev, skb)) + if (can_dev_dropped_skb(ndev, skb)) return NETDEV_TX_OK; if (unlikely(!CTU_CAN_FD_TXTNF(priv))) { diff --git a/drivers/net/can/dev/skb.c b/drivers/net/can/dev/skb.c index 791a51e2f5d6..241ec636e91f 100644 --- a/drivers/net/can/dev/skb.c +++ b/drivers/net/can/dev/skb.c @@ -5,7 +5,6 @@ */ #include <linux/can/dev.h> -#include <linux/can/netlink.h> #include <linux/module.h> #define MOD_DESC "CAN device driver interface" @@ -337,8 +336,6 @@ static bool can_skb_headroom_valid(struct net_device *dev, struct sk_buff *skb) /* Drop a given socketbuffer if it does not contain a valid CAN frame. 
*/ bool can_dropped_invalid_skb(struct net_device *dev, struct sk_buff *skb) { - struct can_priv *priv = netdev_priv(dev); - switch (ntohs(skb->protocol)) { case ETH_P_CAN: if (!can_is_can_skb(skb)) @@ -359,13 +356,8 @@ bool can_dropped_invalid_skb(struct net_device *dev, struct sk_buff *skb) goto inval_skb; } - if (!can_skb_headroom_valid(dev, skb)) { + if (!can_skb_headroom_valid(dev, skb)) goto inval_skb; - } else if (priv->ctrlmode & CAN_CTRLMODE_LISTENONLY) { - netdev_info_once(dev, - "interface in listen only mode, dropping skb\n"); - goto inval_skb; - } return false; diff --git a/drivers/net/can/flexcan/flexcan-core.c b/drivers/net/can/flexcan/flexcan-core.c index 5ee38e586fd8..9bdadd716f4e 100644 --- a/drivers/net/can/flexcan/flexcan-core.c +++ b/drivers/net/can/flexcan/flexcan-core.c @@ -742,7 +742,7 @@ static netdev_tx_t flexcan_start_xmit(struct sk_buff *skb, struct net_device *de u32 ctrl = FLEXCAN_MB_CODE_TX_DATA | ((can_fd_len2dlc(cfd->len)) << 16); int i; - if (can_dropped_invalid_skb(dev, skb)) + if (can_dev_dropped_skb(dev, skb)) return NETDEV_TX_OK; netif_stop_queue(dev); diff --git a/drivers/net/can/grcan.c b/drivers/net/can/grcan.c index 6c37aab93eb3..4bedcc3eea0d 100644 --- a/drivers/net/can/grcan.c +++ b/drivers/net/can/grcan.c @@ -1345,7 +1345,7 @@ static netdev_tx_t grcan_start_xmit(struct sk_buff *skb, unsigned long flags; u32 oneshotmode = priv->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT; - if (can_dropped_invalid_skb(dev, skb)) + if (can_dev_dropped_skb(dev, skb)) return NETDEV_TX_OK; /* Trying to transmit in silent mode will generate error interrupts, but diff --git a/drivers/net/can/ifi_canfd/ifi_canfd.c b/drivers/net/can/ifi_canfd/ifi_canfd.c index 8d42b7e6661f..07eaf724a572 100644 --- a/drivers/net/can/ifi_canfd/ifi_canfd.c +++ b/drivers/net/can/ifi_canfd/ifi_canfd.c @@ -860,7 +860,7 @@ static netdev_tx_t ifi_canfd_start_xmit(struct sk_buff *skb, u32 txst, txid, txdlc; int i; - if (can_dropped_invalid_skb(ndev, skb)) + if (can_dev_dropped_skb(ndev, skb)) return NETDEV_TX_OK; /* Check if the TX buffer is full */ diff --git a/drivers/net/can/janz-ican3.c b/drivers/net/can/janz-ican3.c index 71a2caae0757..0732a5092141 100644 --- a/drivers/net/can/janz-ican3.c +++ b/drivers/net/can/janz-ican3.c @@ -1693,7 +1693,7 @@ static netdev_tx_t ican3_xmit(struct sk_buff *skb, struct net_device *ndev) void __iomem *desc_addr; unsigned long flags; - if (can_dropped_invalid_skb(ndev, skb)) + if (can_dev_dropped_skb(ndev, skb)) return NETDEV_TX_OK; spin_lock_irqsave(&mod->lock, flags); diff --git a/drivers/net/can/kvaser_pciefd.c b/drivers/net/can/kvaser_pciefd.c index 4e9680c8eb34..bcad11709bc9 100644 --- a/drivers/net/can/kvaser_pciefd.c +++ b/drivers/net/can/kvaser_pciefd.c @@ -772,7 +772,7 @@ static netdev_tx_t kvaser_pciefd_start_xmit(struct sk_buff *skb, int nwords; u8 count; - if (can_dropped_invalid_skb(netdev, skb)) + if (can_dev_dropped_skb(netdev, skb)) return NETDEV_TX_OK; nwords = kvaser_pciefd_prepare_tx_packet(&packet, can, skb); diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c index dcb582563d5e..00d11e95fd98 100644 --- a/drivers/net/can/m_can/m_can.c +++ b/drivers/net/can/m_can/m_can.c @@ -1721,7 +1721,7 @@ static netdev_tx_t m_can_start_xmit(struct sk_buff *skb, { struct m_can_classdev *cdev = netdev_priv(dev); - if (can_dropped_invalid_skb(dev, skb)) + if (can_dev_dropped_skb(dev, skb)) return NETDEV_TX_OK; if (cdev->is_peripheral) { diff --git a/drivers/net/can/mscan/mpc5xxx_can.c b/drivers/net/can/mscan/mpc5xxx_can.c index 
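Illustrative aside (not part of the diff): the dev/skb.c hunk above removes the listen-only drop from can_dropped_invalid_skb(), and every CAN driver in this series switches its xmit path to can_dev_dropped_skb(), the device-aware wrapper that keeps the validity check and performs the listen-only drop. A minimal sketch of the resulting start_xmit shape; the foo_* name and the queue handling are hypothetical:

#include <linux/can/dev.h>
#include <linux/netdevice.h>

static netdev_tx_t foo_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	/* Frees and drops the skb if it is not a valid CAN frame or if the
	 * interface is in listen-only mode; report NETDEV_TX_OK in that case. */
	if (can_dev_dropped_skb(dev, skb))
		return NETDEV_TX_OK;

	netif_stop_queue(dev);

	/* ... write the frame to the controller's TX mailbox here ... */

	return NETDEV_TX_OK;
}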
c469b2f3e57d..b0ed798ae70f 100644 --- a/drivers/net/can/mscan/mpc5xxx_can.c +++ b/drivers/net/can/mscan/mpc5xxx_can.c @@ -322,14 +322,14 @@ static int mpc5xxx_can_probe(struct platform_device *ofdev) &mscan_clksrc); if (!priv->can.clock.freq) { dev_err(&ofdev->dev, "couldn't get MSCAN clock properties\n"); - goto exit_free_mscan; + goto exit_put_clock; } err = register_mscandev(dev, mscan_clksrc); if (err) { dev_err(&ofdev->dev, "registering %s failed (err=%d)\n", DRV_NAME, err); - goto exit_free_mscan; + goto exit_put_clock; } dev_info(&ofdev->dev, "MSCAN at 0x%p, irq %d, clock %d Hz\n", @@ -337,7 +337,9 @@ static int mpc5xxx_can_probe(struct platform_device *ofdev) return 0; -exit_free_mscan: +exit_put_clock: + if (data->put_clock) + data->put_clock(ofdev); free_candev(dev); exit_dispose_irq: irq_dispose_mapping(irq); diff --git a/drivers/net/can/mscan/mscan.c b/drivers/net/can/mscan/mscan.c index 2119fbb287ef..a6829cdc0e81 100644 --- a/drivers/net/can/mscan/mscan.c +++ b/drivers/net/can/mscan/mscan.c @@ -191,7 +191,7 @@ static netdev_tx_t mscan_start_xmit(struct sk_buff *skb, struct net_device *dev) int i, rtr, buf_id; u32 can_id; - if (can_dropped_invalid_skb(dev, skb)) + if (can_dev_dropped_skb(dev, skb)) return NETDEV_TX_OK; out_8(®s->cantier, 0); diff --git a/drivers/net/can/pch_can.c b/drivers/net/can/pch_can.c index 0558ff67ec6a..2a44b2803e55 100644 --- a/drivers/net/can/pch_can.c +++ b/drivers/net/can/pch_can.c @@ -882,7 +882,7 @@ static netdev_tx_t pch_xmit(struct sk_buff *skb, struct net_device *ndev) int i; u32 id2; - if (can_dropped_invalid_skb(ndev, skb)) + if (can_dev_dropped_skb(ndev, skb)) return NETDEV_TX_OK; tx_obj_no = priv->tx_obj; diff --git a/drivers/net/can/peak_canfd/peak_canfd.c b/drivers/net/can/peak_canfd/peak_canfd.c index f8420cc1d907..31c9c127e24b 100644 --- a/drivers/net/can/peak_canfd/peak_canfd.c +++ b/drivers/net/can/peak_canfd/peak_canfd.c @@ -651,7 +651,7 @@ static netdev_tx_t peak_canfd_start_xmit(struct sk_buff *skb, int room_left; u8 len; - if (can_dropped_invalid_skb(ndev, skb)) + if (can_dev_dropped_skb(ndev, skb)) return NETDEV_TX_OK; msg_size = ALIGN(sizeof(*msg) + cf->len, 4); diff --git a/drivers/net/can/rcar/rcar_can.c b/drivers/net/can/rcar/rcar_can.c index 6ee968c59ac9..cc43c9c5e38c 100644 --- a/drivers/net/can/rcar/rcar_can.c +++ b/drivers/net/can/rcar/rcar_can.c @@ -590,7 +590,7 @@ static netdev_tx_t rcar_can_start_xmit(struct sk_buff *skb, struct can_frame *cf = (struct can_frame *)skb->data; u32 data, i; - if (can_dropped_invalid_skb(ndev, skb)) + if (can_dev_dropped_skb(ndev, skb)) return NETDEV_TX_OK; if (cf->can_id & CAN_EFF_FLAG) /* Extended frame format */ diff --git a/drivers/net/can/rcar/rcar_canfd.c b/drivers/net/can/rcar/rcar_canfd.c index 567620d215f8..b306cf554634 100644 --- a/drivers/net/can/rcar/rcar_canfd.c +++ b/drivers/net/can/rcar/rcar_canfd.c @@ -81,8 +81,7 @@ enum rcanfd_chip_id { /* RSCFDnCFDGERFL / RSCFDnGERFL */ #define RCANFD_GERFL_EEF0_7 GENMASK(23, 16) -#define RCANFD_GERFL_EEF1 BIT(17) -#define RCANFD_GERFL_EEF0 BIT(16) +#define RCANFD_GERFL_EEF(ch) BIT(16 + (ch)) #define RCANFD_GERFL_CMPOF BIT(3) /* CAN FD only */ #define RCANFD_GERFL_THLES BIT(2) #define RCANFD_GERFL_MES BIT(1) @@ -90,7 +89,7 @@ enum rcanfd_chip_id { #define RCANFD_GERFL_ERR(gpriv, x) \ ((x) & (reg_v3u(gpriv, RCANFD_GERFL_EEF0_7, \ - RCANFD_GERFL_EEF0 | RCANFD_GERFL_EEF1) | \ + RCANFD_GERFL_EEF(0) | RCANFD_GERFL_EEF(1)) | \ RCANFD_GERFL_MES | \ ((gpriv)->fdmode ? 
RCANFD_GERFL_CMPOF : 0))) @@ -936,12 +935,8 @@ static void rcar_canfd_global_error(struct net_device *ndev) u32 ridx = ch + RCANFD_RFFIFO_IDX; gerfl = rcar_canfd_read(priv->base, RCANFD_GERFL); - if ((gerfl & RCANFD_GERFL_EEF0) && (ch == 0)) { - netdev_dbg(ndev, "Ch0: ECC Error flag\n"); - stats->tx_dropped++; - } - if ((gerfl & RCANFD_GERFL_EEF1) && (ch == 1)) { - netdev_dbg(ndev, "Ch1: ECC Error flag\n"); + if (gerfl & RCANFD_GERFL_EEF(ch)) { + netdev_dbg(ndev, "Ch%u: ECC Error flag\n", ch); stats->tx_dropped++; } if (gerfl & RCANFD_GERFL_MES) { @@ -1157,11 +1152,13 @@ static void rcar_canfd_handle_global_receive(struct rcar_canfd_global *gpriv, u3 { struct rcar_canfd_channel *priv = gpriv->ch[ch]; u32 ridx = ch + RCANFD_RFFIFO_IDX; - u32 sts; + u32 sts, cc; /* Handle Rx interrupts */ sts = rcar_canfd_read(priv->base, RCANFD_RFSTS(gpriv, ridx)); - if (likely(sts & RCANFD_RFSTS_RFIF)) { + cc = rcar_canfd_read(priv->base, RCANFD_RFCC(gpriv, ridx)); + if (likely(sts & RCANFD_RFSTS_RFIF && + cc & RCANFD_RFCC_RFIE)) { if (napi_schedule_prep(&priv->napi)) { /* Disable Rx FIFO interrupts */ rcar_canfd_clear_bit(priv->base, @@ -1244,11 +1241,9 @@ static void rcar_canfd_handle_channel_tx(struct rcar_canfd_global *gpriv, u32 ch static irqreturn_t rcar_canfd_channel_tx_interrupt(int irq, void *dev_id) { - struct rcar_canfd_global *gpriv = dev_id; - u32 ch; + struct rcar_canfd_channel *priv = dev_id; - for_each_set_bit(ch, &gpriv->channels_mask, gpriv->max_channels) - rcar_canfd_handle_channel_tx(gpriv, ch); + rcar_canfd_handle_channel_tx(priv->gpriv, priv->channel); return IRQ_HANDLED; } @@ -1276,11 +1271,9 @@ static void rcar_canfd_handle_channel_err(struct rcar_canfd_global *gpriv, u32 c static irqreturn_t rcar_canfd_channel_err_interrupt(int irq, void *dev_id) { - struct rcar_canfd_global *gpriv = dev_id; - u32 ch; + struct rcar_canfd_channel *priv = dev_id; - for_each_set_bit(ch, &gpriv->channels_mask, gpriv->max_channels) - rcar_canfd_handle_channel_err(gpriv, ch); + rcar_canfd_handle_channel_err(priv->gpriv, priv->channel); return IRQ_HANDLED; } @@ -1483,7 +1476,7 @@ static netdev_tx_t rcar_canfd_start_xmit(struct sk_buff *skb, unsigned long flags; u32 ch = priv->channel; - if (can_dropped_invalid_skb(ndev, skb)) + if (can_dev_dropped_skb(ndev, skb)) return NETDEV_TX_OK; if (cf->can_id & CAN_EFF_FLAG) { @@ -1721,6 +1714,7 @@ static int rcar_canfd_channel_probe(struct rcar_canfd_global *gpriv, u32 ch, priv->ndev = ndev; priv->base = gpriv->base; priv->channel = ch; + priv->gpriv = gpriv; priv->can.clock.freq = fcan_freq; dev_info(&pdev->dev, "can_clk rate is %u\n", priv->can.clock.freq); @@ -1749,7 +1743,7 @@ static int rcar_canfd_channel_probe(struct rcar_canfd_global *gpriv, u32 ch, } err = devm_request_irq(&pdev->dev, err_irq, rcar_canfd_channel_err_interrupt, 0, - irq_name, gpriv); + irq_name, priv); if (err) { dev_err(&pdev->dev, "devm_request_irq CH Err(%d) failed, error %d\n", err_irq, err); @@ -1763,7 +1757,7 @@ static int rcar_canfd_channel_probe(struct rcar_canfd_global *gpriv, u32 ch, } err = devm_request_irq(&pdev->dev, tx_irq, rcar_canfd_channel_tx_interrupt, 0, - irq_name, gpriv); + irq_name, priv); if (err) { dev_err(&pdev->dev, "devm_request_irq Tx (%d) failed, error %d\n", tx_irq, err); @@ -1789,7 +1783,6 @@ static int rcar_canfd_channel_probe(struct rcar_canfd_global *gpriv, u32 ch, priv->can.do_set_mode = rcar_canfd_do_set_mode; priv->can.do_get_berr_counter = rcar_canfd_get_berr_counter; - priv->gpriv = gpriv; SET_NETDEV_DEV(ndev, &pdev->dev); netif_napi_add_weight(ndev, 
&priv->napi, rcar_canfd_rx_poll, diff --git a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c index 1bb1129b0450..aac5956e4a53 100644 --- a/drivers/net/can/sja1000/sja1000.c +++ b/drivers/net/can/sja1000/sja1000.c @@ -291,7 +291,7 @@ static netdev_tx_t sja1000_start_xmit(struct sk_buff *skb, u8 cmd_reg_val = 0x00; int i; - if (can_dropped_invalid_skb(dev, skb)) + if (can_dev_dropped_skb(dev, skb)) return NETDEV_TX_OK; netif_stop_queue(dev); diff --git a/drivers/net/can/slcan/slcan-core.c b/drivers/net/can/slcan/slcan-core.c index 8d13fdf8c28a..fbb34139daa1 100644 --- a/drivers/net/can/slcan/slcan-core.c +++ b/drivers/net/can/slcan/slcan-core.c @@ -594,7 +594,7 @@ static netdev_tx_t slcan_netdev_xmit(struct sk_buff *skb, { struct slcan *sl = netdev_priv(dev); - if (can_dropped_invalid_skb(dev, skb)) + if (can_dev_dropped_skb(dev, skb)) return NETDEV_TX_OK; spin_lock(&sl->lock); diff --git a/drivers/net/can/softing/softing_main.c b/drivers/net/can/softing/softing_main.c index a5ef57f415f7..c72f505d29fe 100644 --- a/drivers/net/can/softing/softing_main.c +++ b/drivers/net/can/softing/softing_main.c @@ -60,7 +60,7 @@ static netdev_tx_t softing_netdev_start_xmit(struct sk_buff *skb, struct can_frame *cf = (struct can_frame *)skb->data; uint8_t buf[DPRAM_TX_SIZE]; - if (can_dropped_invalid_skb(dev, skb)) + if (can_dev_dropped_skb(dev, skb)) return NETDEV_TX_OK; spin_lock(&card->spin); diff --git a/drivers/net/can/spi/hi311x.c b/drivers/net/can/spi/hi311x.c index b87dc420428d..e1b8533a602e 100644 --- a/drivers/net/can/spi/hi311x.c +++ b/drivers/net/can/spi/hi311x.c @@ -373,7 +373,7 @@ static netdev_tx_t hi3110_hard_start_xmit(struct sk_buff *skb, return NETDEV_TX_BUSY; } - if (can_dropped_invalid_skb(net, skb)) + if (can_dev_dropped_skb(net, skb)) return NETDEV_TX_OK; netif_stop_queue(net); diff --git a/drivers/net/can/spi/mcp251x.c b/drivers/net/can/spi/mcp251x.c index c320de474f40..79c4bab5f724 100644 --- a/drivers/net/can/spi/mcp251x.c +++ b/drivers/net/can/spi/mcp251x.c @@ -789,7 +789,7 @@ static netdev_tx_t mcp251x_hard_start_xmit(struct sk_buff *skb, return NETDEV_TX_BUSY; } - if (can_dropped_invalid_skb(net, skb)) + if (can_dev_dropped_skb(net, skb)) return NETDEV_TX_OK; netif_stop_queue(net); @@ -1415,11 +1415,14 @@ static int mcp251x_can_probe(struct spi_device *spi) ret = mcp251x_gpio_setup(priv); if (ret) - goto error_probe; + goto out_unregister_candev; netdev_info(net, "MCP%x successfully initialized.\n", priv->model); return 0; +out_unregister_candev: + unregister_candev(net); + error_probe: destroy_workqueue(priv->wq); priv->wq = NULL; diff --git a/drivers/net/can/spi/mcp251xfd/mcp251xfd-tx.c b/drivers/net/can/spi/mcp251xfd/mcp251xfd-tx.c index ffb6c36b7d9b..160528d3cc26 100644 --- a/drivers/net/can/spi/mcp251xfd/mcp251xfd-tx.c +++ b/drivers/net/can/spi/mcp251xfd/mcp251xfd-tx.c @@ -172,7 +172,7 @@ netdev_tx_t mcp251xfd_start_xmit(struct sk_buff *skb, u8 tx_head; int err; - if (can_dropped_invalid_skb(ndev, skb)) + if (can_dev_dropped_skb(ndev, skb)) return NETDEV_TX_OK; if (mcp251xfd_tx_busy(priv, tx_ring)) diff --git a/drivers/net/can/sun4i_can.c b/drivers/net/can/sun4i_can.c index 525309da1320..2b78f9197681 100644 --- a/drivers/net/can/sun4i_can.c +++ b/drivers/net/can/sun4i_can.c @@ -429,7 +429,7 @@ static netdev_tx_t sun4ican_start_xmit(struct sk_buff *skb, struct net_device *d canid_t id; int i; - if (can_dropped_invalid_skb(dev, skb)) + if (can_dev_dropped_skb(dev, skb)) return NETDEV_TX_OK; netif_stop_queue(dev); diff --git a/drivers/net/can/ti_hecc.c 
b/drivers/net/can/ti_hecc.c index b218fb3c6b76..27700f72eac2 100644 --- a/drivers/net/can/ti_hecc.c +++ b/drivers/net/can/ti_hecc.c @@ -470,7 +470,7 @@ static netdev_tx_t ti_hecc_xmit(struct sk_buff *skb, struct net_device *ndev) u32 mbxno, mbx_mask, data; unsigned long flags; - if (can_dropped_invalid_skb(ndev, skb)) + if (can_dev_dropped_skb(ndev, skb)) return NETDEV_TX_OK; mbxno = get_tx_head_mb(priv); diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c index d31191686a54..050c0b49938a 100644 --- a/drivers/net/can/usb/ems_usb.c +++ b/drivers/net/can/usb/ems_usb.c @@ -747,7 +747,7 @@ static netdev_tx_t ems_usb_start_xmit(struct sk_buff *skb, struct net_device *ne size_t size = CPC_HEADER_SIZE + CPC_MSG_HEADER_LEN + sizeof(struct cpc_can_msg); - if (can_dropped_invalid_skb(netdev, skb)) + if (can_dev_dropped_skb(netdev, skb)) return NETDEV_TX_OK; /* create a URB, and a buffer for it, and copy the data to the URB */ diff --git a/drivers/net/can/usb/esd_usb.c b/drivers/net/can/usb/esd_usb.c index 1bcfad11b1e4..81b88e9e5bdc 100644 --- a/drivers/net/can/usb/esd_usb.c +++ b/drivers/net/can/usb/esd_usb.c @@ -725,7 +725,7 @@ static netdev_tx_t esd_usb_start_xmit(struct sk_buff *skb, int ret = NETDEV_TX_OK; size_t size = sizeof(struct esd_usb_msg); - if (can_dropped_invalid_skb(netdev, skb)) + if (can_dev_dropped_skb(netdev, skb)) return NETDEV_TX_OK; /* create a URB, and a buffer for it, and copy the data to the URB */ diff --git a/drivers/net/can/usb/etas_es58x/es58x_core.c b/drivers/net/can/usb/etas_es58x/es58x_core.c index 51294b717040..25f863b4f5f0 100644 --- a/drivers/net/can/usb/etas_es58x/es58x_core.c +++ b/drivers/net/can/usb/etas_es58x/es58x_core.c @@ -1913,7 +1913,7 @@ static netdev_tx_t es58x_start_xmit(struct sk_buff *skb, unsigned int frame_len; int ret; - if (can_dropped_invalid_skb(netdev, skb)) { + if (can_dev_dropped_skb(netdev, skb)) { if (priv->tx_urb) goto xmit_commit; return NETDEV_TX_OK; diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c index f0065d40eb24..9c2c25fde3d1 100644 --- a/drivers/net/can/usb/gs_usb.c +++ b/drivers/net/can/usb/gs_usb.c @@ -723,7 +723,7 @@ static netdev_tx_t gs_can_start_xmit(struct sk_buff *skb, unsigned int idx; struct gs_tx_context *txc; - if (can_dropped_invalid_skb(netdev, skb)) + if (can_dev_dropped_skb(netdev, skb)) return NETDEV_TX_OK; /* find an empty context to keep track of transmission */ diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c index e91648ed7386..802e27c0eced 100644 --- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c +++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c @@ -570,7 +570,7 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb, unsigned int i; unsigned long flags; - if (can_dropped_invalid_skb(netdev, skb)) + if (can_dev_dropped_skb(netdev, skb)) return NETDEV_TX_OK; urb = usb_alloc_urb(0, GFP_ATOMIC); diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c index 7b52fda73d82..66f672ea631b 100644 --- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c +++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c @@ -1875,7 +1875,7 @@ static int kvaser_usb_hydra_start_chip(struct kvaser_usb_net_priv *priv) { int err; - init_completion(&priv->start_comp); + reinit_completion(&priv->start_comp); err = kvaser_usb_hydra_send_simple_cmd(priv->dev, CMD_START_CHIP_REQ, priv->channel); @@ -1893,7 +1893,7 @@ static int 
kvaser_usb_hydra_stop_chip(struct kvaser_usb_net_priv *priv) { int err; - init_completion(&priv->stop_comp); + reinit_completion(&priv->stop_comp); /* Make sure we do not report invalid BUS_OFF from CMD_CHIP_STATE_EVENT * see comment in kvaser_usb_hydra_update_state() diff --git a/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c b/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c index 50f2ac8319ff..19958037720f 100644 --- a/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c +++ b/drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c @@ -1320,7 +1320,7 @@ static int kvaser_usb_leaf_start_chip(struct kvaser_usb_net_priv *priv) { int err; - init_completion(&priv->start_comp); + reinit_completion(&priv->start_comp); err = kvaser_usb_leaf_send_simple_cmd(priv->dev, CMD_START_CHIP, priv->channel); @@ -1338,7 +1338,7 @@ static int kvaser_usb_leaf_stop_chip(struct kvaser_usb_net_priv *priv) { int err; - init_completion(&priv->stop_comp); + reinit_completion(&priv->stop_comp); err = kvaser_usb_leaf_send_simple_cmd(priv->dev, CMD_STOP_CHIP, priv->channel); diff --git a/drivers/net/can/usb/mcba_usb.c b/drivers/net/can/usb/mcba_usb.c index 69346c63021f..218b098b261d 100644 --- a/drivers/net/can/usb/mcba_usb.c +++ b/drivers/net/can/usb/mcba_usb.c @@ -311,7 +311,7 @@ static netdev_tx_t mcba_usb_start_xmit(struct sk_buff *skb, .cmd_id = MBCA_CMD_TRANSMIT_MESSAGE_EV }; - if (can_dropped_invalid_skb(netdev, skb)) + if (can_dev_dropped_skb(netdev, skb)) return NETDEV_TX_OK; ctx = mcba_usb_get_free_ctx(priv, cf); diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_core.c b/drivers/net/can/usb/peak_usb/pcan_usb_core.c index 225697d70a9a..1d996d3320fe 100644 --- a/drivers/net/can/usb/peak_usb/pcan_usb_core.c +++ b/drivers/net/can/usb/peak_usb/pcan_usb_core.c @@ -351,7 +351,7 @@ static netdev_tx_t peak_usb_ndo_start_xmit(struct sk_buff *skb, int i, err; size_t size = dev->adapter->tx_buffer_size; - if (can_dropped_invalid_skb(netdev, skb)) + if (can_dev_dropped_skb(netdev, skb)) return NETDEV_TX_OK; for (i = 0; i < PCAN_USB_MAX_TX_URBS; i++) diff --git a/drivers/net/can/usb/ucan.c b/drivers/net/can/usb/ucan.c index 7c35f50fda4e..67c2ff407d06 100644 --- a/drivers/net/can/usb/ucan.c +++ b/drivers/net/can/usb/ucan.c @@ -1120,7 +1120,7 @@ static netdev_tx_t ucan_start_xmit(struct sk_buff *skb, struct can_frame *cf = (struct can_frame *)skb->data; /* check skb */ - if (can_dropped_invalid_skb(netdev, skb)) + if (can_dev_dropped_skb(netdev, skb)) return NETDEV_TX_OK; /* allocate a context and slow down tx path, if fifo state is low */ diff --git a/drivers/net/can/usb/usb_8dev.c b/drivers/net/can/usb/usb_8dev.c index 64c00abe91cf..8a5596ce4e46 100644 --- a/drivers/net/can/usb/usb_8dev.c +++ b/drivers/net/can/usb/usb_8dev.c @@ -602,7 +602,7 @@ static netdev_tx_t usb_8dev_start_xmit(struct sk_buff *skb, int i, err; size_t size = sizeof(struct usb_8dev_tx_msg); - if (can_dropped_invalid_skb(netdev, skb)) + if (can_dev_dropped_skb(netdev, skb)) return NETDEV_TX_OK; /* create a URB, and a buffer for it, and copy the data to the URB */ diff --git a/drivers/net/can/xilinx_can.c b/drivers/net/can/xilinx_can.c index 5d3172795ad0..43c812ea1de0 100644 --- a/drivers/net/can/xilinx_can.c +++ b/drivers/net/can/xilinx_can.c @@ -743,7 +743,7 @@ static netdev_tx_t xcan_start_xmit(struct sk_buff *skb, struct net_device *ndev) struct xcan_priv *priv = netdev_priv(ndev); int ret; - if (can_dropped_invalid_skb(ndev, skb)) + if (can_dev_dropped_skb(ndev, skb)) return NETDEV_TX_OK; if (priv->devtype.flags & XCAN_FLAG_TX_MAILBOXES) diff --git 
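Illustrative aside (not part of the diff): the kvaser_usb hydra/leaf hunks above replace init_completion() with reinit_completion() on the start/stop paths. init_completion() sets up the completion's internal wait queue and should run once per object lifetime; re-arming a completion that may still be referenced elsewhere is done with reinit_completion(), which only resets the done count. A minimal sketch with hypothetical foo_* names:

#include <linux/completion.h>
#include <linux/errno.h>
#include <linux/jiffies.h>

struct foo_priv {
	struct completion start_comp;
};

static void foo_setup(struct foo_priv *priv)
{
	init_completion(&priv->start_comp);	/* once, at creation time */
}

static int foo_start(struct foo_priv *priv)
{
	/* Re-arm before each reuse; do not call init_completion() again here. */
	reinit_completion(&priv->start_comp);

	/* ... send the start command; the response handler calls complete() ... */

	if (!wait_for_completion_timeout(&priv->start_comp, msecs_to_jiffies(100)))
		return -ETIMEDOUT;

	return 0;
}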
a/drivers/net/dsa/dsa_loop.c b/drivers/net/dsa/dsa_loop.c index b9107fe40023..5b139f2206b6 100644 --- a/drivers/net/dsa/dsa_loop.c +++ b/drivers/net/dsa/dsa_loop.c @@ -376,6 +376,17 @@ static struct mdio_driver dsa_loop_drv = { #define NUM_FIXED_PHYS (DSA_LOOP_NUM_PORTS - 2) +static void dsa_loop_phydevs_unregister(void) +{ + unsigned int i; + + for (i = 0; i < NUM_FIXED_PHYS; i++) + if (!IS_ERR(phydevs[i])) { + fixed_phy_unregister(phydevs[i]); + phy_device_free(phydevs[i]); + } +} + static int __init dsa_loop_init(void) { struct fixed_phy_status status = { @@ -383,23 +394,23 @@ static int __init dsa_loop_init(void) .speed = SPEED_100, .duplex = DUPLEX_FULL, }; - unsigned int i; + unsigned int i, ret; for (i = 0; i < NUM_FIXED_PHYS; i++) phydevs[i] = fixed_phy_register(PHY_POLL, &status, NULL); - return mdio_driver_register(&dsa_loop_drv); + ret = mdio_driver_register(&dsa_loop_drv); + if (ret) + dsa_loop_phydevs_unregister(); + + return ret; } module_init(dsa_loop_init); static void __exit dsa_loop_exit(void) { - unsigned int i; - mdio_driver_unregister(&dsa_loop_drv); - for (i = 0; i < NUM_FIXED_PHYS; i++) - if (!IS_ERR(phydevs[i])) - fixed_phy_unregister(phydevs[i]); + dsa_loop_phydevs_unregister(); } module_exit(dsa_loop_exit); diff --git a/drivers/net/dsa/qca/qca8k-8xxx.c b/drivers/net/dsa/qca/qca8k-8xxx.c index 5669c92c93f7..c5c3b4e92f28 100644 --- a/drivers/net/dsa/qca/qca8k-8xxx.c +++ b/drivers/net/dsa/qca/qca8k-8xxx.c @@ -137,27 +137,42 @@ static void qca8k_rw_reg_ack_handler(struct dsa_switch *ds, struct sk_buff *skb) struct qca8k_mgmt_eth_data *mgmt_eth_data; struct qca8k_priv *priv = ds->priv; struct qca_mgmt_ethhdr *mgmt_ethhdr; + u32 command; u8 len, cmd; + int i; mgmt_ethhdr = (struct qca_mgmt_ethhdr *)skb_mac_header(skb); mgmt_eth_data = &priv->mgmt_eth_data; - cmd = FIELD_GET(QCA_HDR_MGMT_CMD, mgmt_ethhdr->command); - len = FIELD_GET(QCA_HDR_MGMT_LENGTH, mgmt_ethhdr->command); + command = get_unaligned_le32(&mgmt_ethhdr->command); + cmd = FIELD_GET(QCA_HDR_MGMT_CMD, command); + len = FIELD_GET(QCA_HDR_MGMT_LENGTH, command); /* Make sure the seq match the requested packet */ - if (mgmt_ethhdr->seq == mgmt_eth_data->seq) + if (get_unaligned_le32(&mgmt_ethhdr->seq) == mgmt_eth_data->seq) mgmt_eth_data->ack = true; if (cmd == MDIO_READ) { - mgmt_eth_data->data[0] = mgmt_ethhdr->mdio_data; + u32 *val = mgmt_eth_data->data; + + *val = get_unaligned_le32(&mgmt_ethhdr->mdio_data); /* Get the rest of the 12 byte of data. * The read/write function will extract the requested data. 
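Illustrative aside (not part of the diff): the qca8k hunks here stop dereferencing the in-band management header's 32-bit fields directly and use get_unaligned_le32()/put_unaligned_le32() instead, since the header sits in packet data where neither alignment nor host byte order can be assumed. A minimal sketch of the pattern, with a made-up header layout:

#include <linux/skbuff.h>
#include <asm/unaligned.h>

struct foo_hdr {		/* hypothetical on-wire header, little-endian */
	__le32 command;
	__le32 seq;
};

static u32 foo_read_seq(const struct sk_buff *skb)
{
	const struct foo_hdr *hdr = (const struct foo_hdr *)skb->data;

	/* Safe for any alignment and any host byte order. */
	return get_unaligned_le32(&hdr->seq);
}

static void foo_write_seq(struct sk_buff *skb, u32 seq)
{
	struct foo_hdr *hdr = (struct foo_hdr *)skb->data;

	put_unaligned_le32(seq, &hdr->seq);
}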
*/ - if (len > QCA_HDR_MGMT_DATA1_LEN) - memcpy(mgmt_eth_data->data + 1, skb->data, - QCA_HDR_MGMT_DATA2_LEN); + if (len > QCA_HDR_MGMT_DATA1_LEN) { + __le32 *data2 = (__le32 *)skb->data; + int data_len = min_t(int, QCA_HDR_MGMT_DATA2_LEN, + len - QCA_HDR_MGMT_DATA1_LEN); + + val++; + + for (i = sizeof(u32); i <= data_len; i += sizeof(u32)) { + *val = get_unaligned_le32(data2); + val++; + data2++; + } + } } complete(&mgmt_eth_data->rw_done); @@ -169,8 +184,10 @@ static struct sk_buff *qca8k_alloc_mdio_header(enum mdio_cmd cmd, u32 reg, u32 * struct qca_mgmt_ethhdr *mgmt_ethhdr; unsigned int real_len; struct sk_buff *skb; - u32 *data2; + __le32 *data2; + u32 command; u16 hdr; + int i; skb = dev_alloc_skb(QCA_HDR_MGMT_PKT_LEN); if (!skb) @@ -199,20 +216,32 @@ static struct sk_buff *qca8k_alloc_mdio_header(enum mdio_cmd cmd, u32 reg, u32 * hdr |= FIELD_PREP(QCA_HDR_XMIT_DP_BIT, BIT(0)); hdr |= FIELD_PREP(QCA_HDR_XMIT_CONTROL, QCA_HDR_XMIT_TYPE_RW_REG); - mgmt_ethhdr->command = FIELD_PREP(QCA_HDR_MGMT_ADDR, reg); - mgmt_ethhdr->command |= FIELD_PREP(QCA_HDR_MGMT_LENGTH, real_len); - mgmt_ethhdr->command |= FIELD_PREP(QCA_HDR_MGMT_CMD, cmd); - mgmt_ethhdr->command |= FIELD_PREP(QCA_HDR_MGMT_CHECK_CODE, + command = FIELD_PREP(QCA_HDR_MGMT_ADDR, reg); + command |= FIELD_PREP(QCA_HDR_MGMT_LENGTH, real_len); + command |= FIELD_PREP(QCA_HDR_MGMT_CMD, cmd); + command |= FIELD_PREP(QCA_HDR_MGMT_CHECK_CODE, QCA_HDR_MGMT_CHECK_CODE_VAL); + put_unaligned_le32(command, &mgmt_ethhdr->command); + if (cmd == MDIO_WRITE) - mgmt_ethhdr->mdio_data = *val; + put_unaligned_le32(*val, &mgmt_ethhdr->mdio_data); mgmt_ethhdr->hdr = htons(hdr); data2 = skb_put_zero(skb, QCA_HDR_MGMT_DATA2_LEN + QCA_HDR_MGMT_PADDING_LEN); - if (cmd == MDIO_WRITE && len > QCA_HDR_MGMT_DATA1_LEN) - memcpy(data2, val + 1, len - QCA_HDR_MGMT_DATA1_LEN); + if (cmd == MDIO_WRITE && len > QCA_HDR_MGMT_DATA1_LEN) { + int data_len = min_t(int, QCA_HDR_MGMT_DATA2_LEN, + len - QCA_HDR_MGMT_DATA1_LEN); + + val++; + + for (i = sizeof(u32); i <= data_len; i += sizeof(u32)) { + put_unaligned_le32(*val, data2); + data2++; + val++; + } + } return skb; } @@ -220,9 +249,11 @@ static struct sk_buff *qca8k_alloc_mdio_header(enum mdio_cmd cmd, u32 reg, u32 * static void qca8k_mdio_header_fill_seq_num(struct sk_buff *skb, u32 seq_num) { struct qca_mgmt_ethhdr *mgmt_ethhdr; + u32 seq; + seq = FIELD_PREP(QCA_HDR_MGMT_SEQ_NUM, seq_num); mgmt_ethhdr = (struct qca_mgmt_ethhdr *)skb->data; - mgmt_ethhdr->seq = FIELD_PREP(QCA_HDR_MGMT_SEQ_NUM, seq_num); + put_unaligned_le32(seq, &mgmt_ethhdr->seq); } static int qca8k_read_eth(struct qca8k_priv *priv, u32 reg, u32 *val, int len) @@ -1487,9 +1518,9 @@ static void qca8k_mib_autocast_handler(struct dsa_switch *ds, struct sk_buff *sk struct qca8k_priv *priv = ds->priv; const struct qca8k_mib_desc *mib; struct mib_ethhdr *mib_ethhdr; - int i, mib_len, offset = 0; - u64 *data; + __le32 *data2; u8 port; + int i; mib_ethhdr = (struct mib_ethhdr *)skb_mac_header(skb); mib_eth_data = &priv->mib_eth_data; @@ -1501,28 +1532,24 @@ static void qca8k_mib_autocast_handler(struct dsa_switch *ds, struct sk_buff *sk if (port != mib_eth_data->req_port) goto exit; - data = mib_eth_data->data; + data2 = (__le32 *)skb->data; for (i = 0; i < priv->info->mib_count; i++) { mib = &ar8327_mib[i]; /* First 3 mib are present in the skb head */ if (i < 3) { - data[i] = mib_ethhdr->data[i]; + mib_eth_data->data[i] = get_unaligned_le32(mib_ethhdr->data + i); continue; } - mib_len = sizeof(uint32_t); - /* Some mib are 64 bit wide */ if (mib->size == 
2) - mib_len = sizeof(uint64_t); - - /* Copy the mib value from packet to the */ - memcpy(data + i, skb->data + offset, mib_len); + mib_eth_data->data[i] = get_unaligned_le64((__le64 *)data2); + else + mib_eth_data->data[i] = get_unaligned_le32(data2); - /* Set the offset for the next mib */ - offset += mib_len; + data2 += mib->size; } exit: diff --git a/drivers/net/ethernet/adi/adin1110.c b/drivers/net/ethernet/adi/adin1110.c index 1744d623999d..606c97610808 100644 --- a/drivers/net/ethernet/adi/adin1110.c +++ b/drivers/net/ethernet/adi/adin1110.c @@ -1512,16 +1512,15 @@ static struct notifier_block adin1110_switchdev_notifier = { .notifier_call = adin1110_switchdev_event, }; -static void adin1110_unregister_notifiers(void *data) +static void adin1110_unregister_notifiers(void) { unregister_switchdev_blocking_notifier(&adin1110_switchdev_blocking_notifier); unregister_switchdev_notifier(&adin1110_switchdev_notifier); unregister_netdevice_notifier(&adin1110_netdevice_nb); } -static int adin1110_setup_notifiers(struct adin1110_priv *priv) +static int adin1110_setup_notifiers(void) { - struct device *dev = &priv->spidev->dev; int ret; ret = register_netdevice_notifier(&adin1110_netdevice_nb); @@ -1536,13 +1535,14 @@ static int adin1110_setup_notifiers(struct adin1110_priv *priv) if (ret < 0) goto err_sdev; - return devm_add_action_or_reset(dev, adin1110_unregister_notifiers, NULL); + return 0; err_sdev: unregister_switchdev_notifier(&adin1110_switchdev_notifier); err_netdev: unregister_netdevice_notifier(&adin1110_netdevice_nb); + return ret; } @@ -1613,10 +1613,6 @@ static int adin1110_probe_netdevs(struct adin1110_priv *priv) if (ret < 0) return ret; - ret = adin1110_setup_notifiers(priv); - if (ret < 0) - return ret; - for (i = 0; i < priv->cfg->ports_nr; i++) { ret = devm_register_netdev(dev, priv->ports[i]->netdev); if (ret < 0) { @@ -1693,7 +1689,31 @@ static struct spi_driver adin1110_driver = { .probe = adin1110_probe, .id_table = adin1110_spi_id, }; -module_spi_driver(adin1110_driver); + +static int __init adin1110_driver_init(void) +{ + int ret; + + ret = adin1110_setup_notifiers(); + if (ret < 0) + return ret; + + ret = spi_register_driver(&adin1110_driver); + if (ret < 0) { + adin1110_unregister_notifiers(); + return ret; + } + + return 0; +} + +static void __exit adin1110_exit(void) +{ + adin1110_unregister_notifiers(); + spi_unregister_driver(&adin1110_driver); +} +module_init(adin1110_driver_init); +module_exit(adin1110_exit); MODULE_DESCRIPTION("ADIN1110 Network driver"); MODULE_AUTHOR("Alexandru Tachici <alexandru.tachici@analog.com>"); diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c index d350eeec8bad..5a454b58498f 100644 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.c +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c @@ -4543,13 +4543,19 @@ static struct pci_driver ena_pci_driver = { static int __init ena_init(void) { + int ret; + ena_wq = create_singlethread_workqueue(DRV_MODULE_NAME); if (!ena_wq) { pr_err("Failed to create workqueue\n"); return -ENOMEM; } - return pci_register_driver(&ena_pci_driver); + ret = pci_register_driver(&ena_pci_driver); + if (ret) + destroy_workqueue(ena_wq); + + return ret; } static void __exit ena_cleanup(void) diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c index 2af3da4b2d05..f409d7bd1f1e 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c @@ -285,6 +285,9 @@ static int 
xgbe_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) /* Yellow Carp devices do not need cdr workaround */ pdata->vdata->an_cdr_workaround = 0; + + /* Yellow Carp devices do not need rrc */ + pdata->vdata->enable_rrc = 0; } else { pdata->xpcs_window_def_reg = PCS_V2_WINDOW_DEF; pdata->xpcs_window_sel_reg = PCS_V2_WINDOW_SELECT; @@ -483,6 +486,7 @@ static struct xgbe_version_data xgbe_v2a = { .tx_desc_prefetch = 5, .rx_desc_prefetch = 5, .an_cdr_workaround = 1, + .enable_rrc = 1, }; static struct xgbe_version_data xgbe_v2b = { @@ -498,6 +502,7 @@ static struct xgbe_version_data xgbe_v2b = { .tx_desc_prefetch = 5, .rx_desc_prefetch = 5, .an_cdr_workaround = 1, + .enable_rrc = 1, }; static const struct pci_device_id xgbe_pci_table[] = { diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c index 2156600641b6..4064c3e3dd49 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-phy-v2.c @@ -239,6 +239,7 @@ enum xgbe_sfp_speed { #define XGBE_SFP_BASE_BR_1GBE_MAX 0x0d #define XGBE_SFP_BASE_BR_10GBE_MIN 0x64 #define XGBE_SFP_BASE_BR_10GBE_MAX 0x68 +#define XGBE_MOLEX_SFP_BASE_BR_10GBE_MAX 0x78 #define XGBE_SFP_BASE_CU_CABLE_LEN 18 @@ -284,6 +285,8 @@ struct xgbe_sfp_eeprom { #define XGBE_BEL_FUSE_VENDOR "BEL-FUSE " #define XGBE_BEL_FUSE_PARTNO "1GBT-SFP06 " +#define XGBE_MOLEX_VENDOR "Molex Inc. " + struct xgbe_sfp_ascii { union { char vendor[XGBE_SFP_BASE_VENDOR_NAME_LEN + 1]; @@ -834,7 +837,11 @@ static bool xgbe_phy_sfp_bit_rate(struct xgbe_sfp_eeprom *sfp_eeprom, break; case XGBE_SFP_SPEED_10000: min = XGBE_SFP_BASE_BR_10GBE_MIN; - max = XGBE_SFP_BASE_BR_10GBE_MAX; + if (memcmp(&sfp_eeprom->base[XGBE_SFP_BASE_VENDOR_NAME], + XGBE_MOLEX_VENDOR, XGBE_SFP_BASE_VENDOR_NAME_LEN) == 0) + max = XGBE_MOLEX_SFP_BASE_BR_10GBE_MAX; + else + max = XGBE_SFP_BASE_BR_10GBE_MAX; break; default: return false; @@ -1151,7 +1158,10 @@ static void xgbe_phy_sfp_parse_eeprom(struct xgbe_prv_data *pdata) } /* Determine the type of SFP */ - if (sfp_base[XGBE_SFP_BASE_10GBE_CC] & XGBE_SFP_BASE_10GBE_CC_SR) + if (phy_data->sfp_cable == XGBE_SFP_CABLE_PASSIVE && + xgbe_phy_sfp_bit_rate(sfp_eeprom, XGBE_SFP_SPEED_10000)) + phy_data->sfp_base = XGBE_SFP_BASE_10000_CR; + else if (sfp_base[XGBE_SFP_BASE_10GBE_CC] & XGBE_SFP_BASE_10GBE_CC_SR) phy_data->sfp_base = XGBE_SFP_BASE_10000_SR; else if (sfp_base[XGBE_SFP_BASE_10GBE_CC] & XGBE_SFP_BASE_10GBE_CC_LR) phy_data->sfp_base = XGBE_SFP_BASE_10000_LR; @@ -1167,9 +1177,6 @@ static void xgbe_phy_sfp_parse_eeprom(struct xgbe_prv_data *pdata) phy_data->sfp_base = XGBE_SFP_BASE_1000_CX; else if (sfp_base[XGBE_SFP_BASE_1GBE_CC] & XGBE_SFP_BASE_1GBE_CC_T) phy_data->sfp_base = XGBE_SFP_BASE_1000_T; - else if ((phy_data->sfp_cable == XGBE_SFP_CABLE_PASSIVE) && - xgbe_phy_sfp_bit_rate(sfp_eeprom, XGBE_SFP_SPEED_10000)) - phy_data->sfp_base = XGBE_SFP_BASE_10000_CR; switch (phy_data->sfp_base) { case XGBE_SFP_BASE_1000_T: @@ -1979,6 +1986,10 @@ static void xgbe_phy_rx_reset(struct xgbe_prv_data *pdata) static void xgbe_phy_pll_ctrl(struct xgbe_prv_data *pdata, bool enable) { + /* PLL_CTRL feature needs to be enabled for fixed PHY modes (Non-Autoneg) only */ + if (pdata->phy.autoneg != AUTONEG_DISABLE) + return; + XMDIO_WRITE_BITS(pdata, MDIO_MMD_PMAPMD, MDIO_VEND2_PMA_MISC_CTRL0, XGBE_PMA_PLL_CTRL_MASK, enable ? 
XGBE_PMA_PLL_CTRL_ENABLE @@ -1989,7 +2000,7 @@ static void xgbe_phy_pll_ctrl(struct xgbe_prv_data *pdata, bool enable) } static void xgbe_phy_perform_ratechange(struct xgbe_prv_data *pdata, - unsigned int cmd, unsigned int sub_cmd) + enum xgbe_mb_cmd cmd, enum xgbe_mb_subcmd sub_cmd) { unsigned int s0 = 0; unsigned int wait; @@ -2029,14 +2040,16 @@ static void xgbe_phy_perform_ratechange(struct xgbe_prv_data *pdata, xgbe_phy_rx_reset(pdata); reenable_pll: - /* Enable PLL re-initialization */ - xgbe_phy_pll_ctrl(pdata, true); + /* Enable PLL re-initialization, not needed for PHY Power Off and RRC cmds */ + if (cmd != XGBE_MB_CMD_POWER_OFF && + cmd != XGBE_MB_CMD_RRC) + xgbe_phy_pll_ctrl(pdata, true); } static void xgbe_phy_rrc(struct xgbe_prv_data *pdata) { /* Receiver Reset Cycle */ - xgbe_phy_perform_ratechange(pdata, 5, 0); + xgbe_phy_perform_ratechange(pdata, XGBE_MB_CMD_RRC, XGBE_MB_SUBCMD_NONE); netif_dbg(pdata, link, pdata->netdev, "receiver reset complete\n"); } @@ -2046,7 +2059,7 @@ static void xgbe_phy_power_off(struct xgbe_prv_data *pdata) struct xgbe_phy_data *phy_data = pdata->phy_data; /* Power off */ - xgbe_phy_perform_ratechange(pdata, 0, 0); + xgbe_phy_perform_ratechange(pdata, XGBE_MB_CMD_POWER_OFF, XGBE_MB_SUBCMD_NONE); phy_data->cur_mode = XGBE_MODE_UNKNOWN; @@ -2061,14 +2074,17 @@ static void xgbe_phy_sfi_mode(struct xgbe_prv_data *pdata) /* 10G/SFI */ if (phy_data->sfp_cable != XGBE_SFP_CABLE_PASSIVE) { - xgbe_phy_perform_ratechange(pdata, 3, 0); + xgbe_phy_perform_ratechange(pdata, XGBE_MB_CMD_SET_10G_SFI, XGBE_MB_SUBCMD_ACTIVE); } else { if (phy_data->sfp_cable_len <= 1) - xgbe_phy_perform_ratechange(pdata, 3, 1); + xgbe_phy_perform_ratechange(pdata, XGBE_MB_CMD_SET_10G_SFI, + XGBE_MB_SUBCMD_PASSIVE_1M); else if (phy_data->sfp_cable_len <= 3) - xgbe_phy_perform_ratechange(pdata, 3, 2); + xgbe_phy_perform_ratechange(pdata, XGBE_MB_CMD_SET_10G_SFI, + XGBE_MB_SUBCMD_PASSIVE_3M); else - xgbe_phy_perform_ratechange(pdata, 3, 3); + xgbe_phy_perform_ratechange(pdata, XGBE_MB_CMD_SET_10G_SFI, + XGBE_MB_SUBCMD_PASSIVE_OTHER); } phy_data->cur_mode = XGBE_MODE_SFI; @@ -2083,7 +2099,7 @@ static void xgbe_phy_x_mode(struct xgbe_prv_data *pdata) xgbe_phy_set_redrv_mode(pdata); /* 1G/X */ - xgbe_phy_perform_ratechange(pdata, 1, 3); + xgbe_phy_perform_ratechange(pdata, XGBE_MB_CMD_SET_1G, XGBE_MB_SUBCMD_1G_KX); phy_data->cur_mode = XGBE_MODE_X; @@ -2097,7 +2113,7 @@ static void xgbe_phy_sgmii_1000_mode(struct xgbe_prv_data *pdata) xgbe_phy_set_redrv_mode(pdata); /* 1G/SGMII */ - xgbe_phy_perform_ratechange(pdata, 1, 2); + xgbe_phy_perform_ratechange(pdata, XGBE_MB_CMD_SET_1G, XGBE_MB_SUBCMD_1G_SGMII); phy_data->cur_mode = XGBE_MODE_SGMII_1000; @@ -2111,7 +2127,7 @@ static void xgbe_phy_sgmii_100_mode(struct xgbe_prv_data *pdata) xgbe_phy_set_redrv_mode(pdata); /* 100M/SGMII */ - xgbe_phy_perform_ratechange(pdata, 1, 1); + xgbe_phy_perform_ratechange(pdata, XGBE_MB_CMD_SET_1G, XGBE_MB_SUBCMD_100MBITS); phy_data->cur_mode = XGBE_MODE_SGMII_100; @@ -2125,7 +2141,7 @@ static void xgbe_phy_kr_mode(struct xgbe_prv_data *pdata) xgbe_phy_set_redrv_mode(pdata); /* 10G/KR */ - xgbe_phy_perform_ratechange(pdata, 4, 0); + xgbe_phy_perform_ratechange(pdata, XGBE_MB_CMD_SET_10G_KR, XGBE_MB_SUBCMD_NONE); phy_data->cur_mode = XGBE_MODE_KR; @@ -2139,7 +2155,7 @@ static void xgbe_phy_kx_2500_mode(struct xgbe_prv_data *pdata) xgbe_phy_set_redrv_mode(pdata); /* 2.5G/KX */ - xgbe_phy_perform_ratechange(pdata, 2, 0); + xgbe_phy_perform_ratechange(pdata, XGBE_MB_CMD_SET_2_5G, XGBE_MB_SUBCMD_NONE); 
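Illustrative aside (not part of the diff): the xgbe-phy-v2.c hunks around here replace bare mailbox command/sub-command numbers such as (3, 2) with the named enum values introduced in the xgbe.h hunk below, so each rate-change call site documents itself and the compiler can see the full command set. The same idea in miniature, with hypothetical names:

#include <linux/printk.h>

enum foo_mb_cmd {		/* hypothetical mailbox commands */
	FOO_MB_CMD_POWER_OFF = 0,
	FOO_MB_CMD_SET_1G,
	FOO_MB_CMD_SET_10G,
	FOO_MB_CMD_RRC,
};

enum foo_mb_subcmd {
	FOO_MB_SUBCMD_NONE = 0,
	FOO_MB_SUBCMD_PASSIVE_1M,
	FOO_MB_SUBCMD_PASSIVE_3M,
};

static void foo_perform_ratechange(enum foo_mb_cmd cmd, enum foo_mb_subcmd sub_cmd)
{
	pr_debug("mailbox cmd %d sub %d\n", cmd, sub_cmd);
	/* ... write cmd/sub_cmd into the firmware mailbox registers ... */
}

/* foo_perform_ratechange(FOO_MB_CMD_RRC, FOO_MB_SUBCMD_NONE) reads at the call
 * site, where foo_perform_ratechange(5, 0) needed the datasheet. */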
phy_data->cur_mode = XGBE_MODE_KX_2500; @@ -2153,7 +2169,7 @@ static void xgbe_phy_kx_1000_mode(struct xgbe_prv_data *pdata) xgbe_phy_set_redrv_mode(pdata); /* 1G/KX */ - xgbe_phy_perform_ratechange(pdata, 1, 3); + xgbe_phy_perform_ratechange(pdata, XGBE_MB_CMD_SET_1G, XGBE_MB_SUBCMD_1G_KX); phy_data->cur_mode = XGBE_MODE_KX_1000; @@ -2640,7 +2656,7 @@ static int xgbe_phy_link_status(struct xgbe_prv_data *pdata, int *an_restart) } /* No link, attempt a receiver reset cycle */ - if (phy_data->rrc_count++ > XGBE_RRC_FREQUENCY) { + if (pdata->vdata->enable_rrc && phy_data->rrc_count++ > XGBE_RRC_FREQUENCY) { phy_data->rrc_count = 0; xgbe_phy_rrc(pdata); } diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h index b875c430222e..71f24cb47935 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe.h +++ b/drivers/net/ethernet/amd/xgbe/xgbe.h @@ -611,6 +611,31 @@ enum xgbe_mdio_mode { XGBE_MDIO_MODE_CL45, }; +enum xgbe_mb_cmd { + XGBE_MB_CMD_POWER_OFF = 0, + XGBE_MB_CMD_SET_1G, + XGBE_MB_CMD_SET_2_5G, + XGBE_MB_CMD_SET_10G_SFI, + XGBE_MB_CMD_SET_10G_KR, + XGBE_MB_CMD_RRC +}; + +enum xgbe_mb_subcmd { + XGBE_MB_SUBCMD_NONE = 0, + + /* 10GbE SFP subcommands */ + XGBE_MB_SUBCMD_ACTIVE = 0, + XGBE_MB_SUBCMD_PASSIVE_1M, + XGBE_MB_SUBCMD_PASSIVE_3M, + XGBE_MB_SUBCMD_PASSIVE_OTHER, + + /* 1GbE Mode subcommands */ + XGBE_MB_SUBCMD_10MBITS = 0, + XGBE_MB_SUBCMD_100MBITS, + XGBE_MB_SUBCMD_1G_SGMII, + XGBE_MB_SUBCMD_1G_KX +}; + struct xgbe_phy { struct ethtool_link_ksettings lks; @@ -1013,6 +1038,7 @@ struct xgbe_version_data { unsigned int tx_desc_prefetch; unsigned int rx_desc_prefetch; unsigned int an_cdr_workaround; + unsigned int enable_rrc; }; struct xgbe_prv_data { diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c index d6cfea65a714..390671640388 100644 --- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c +++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c @@ -1004,8 +1004,10 @@ static int xgene_enet_open(struct net_device *ndev) xgene_enet_napi_enable(pdata); ret = xgene_enet_register_irq(ndev); - if (ret) + if (ret) { + xgene_enet_napi_disable(pdata); return ret; + } if (ndev->phydev) { phy_start(ndev->phydev); diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_macsec.c b/drivers/net/ethernet/aquantia/atlantic/aq_macsec.c index 3d0e16791e1c..7eb5851eb95d 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_macsec.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_macsec.c @@ -570,6 +570,7 @@ static int aq_update_txsa(struct aq_nic_s *nic, const unsigned int sc_idx, ret = aq_mss_set_egress_sakey_record(hw, &key_rec, sa_idx); + memzero_explicit(&key_rec, sizeof(key_rec)); return ret; } @@ -899,6 +900,7 @@ static int aq_update_rxsa(struct aq_nic_s *nic, const unsigned int sc_idx, ret = aq_mss_set_ingress_sakey_record(hw, &sa_key_record, sa_idx); + memzero_explicit(&sa_key_record, sizeof(sa_key_record)); return ret; } @@ -1394,26 +1396,57 @@ static void aq_check_txsa_expiration(struct aq_nic_s *nic) egress_sa_threshold_expired); } +#define AQ_LOCKED_MDO_DEF(mdo) \ +static int aq_locked_mdo_##mdo(struct macsec_context *ctx) \ +{ \ + struct aq_nic_s *nic = netdev_priv(ctx->netdev); \ + int ret; \ + mutex_lock(&nic->macsec_mutex); \ + ret = aq_mdo_##mdo(ctx); \ + mutex_unlock(&nic->macsec_mutex); \ + return ret; \ +} + +AQ_LOCKED_MDO_DEF(dev_open) +AQ_LOCKED_MDO_DEF(dev_stop) +AQ_LOCKED_MDO_DEF(add_secy) +AQ_LOCKED_MDO_DEF(upd_secy) +AQ_LOCKED_MDO_DEF(del_secy) +AQ_LOCKED_MDO_DEF(add_rxsc) 
+AQ_LOCKED_MDO_DEF(upd_rxsc) +AQ_LOCKED_MDO_DEF(del_rxsc) +AQ_LOCKED_MDO_DEF(add_rxsa) +AQ_LOCKED_MDO_DEF(upd_rxsa) +AQ_LOCKED_MDO_DEF(del_rxsa) +AQ_LOCKED_MDO_DEF(add_txsa) +AQ_LOCKED_MDO_DEF(upd_txsa) +AQ_LOCKED_MDO_DEF(del_txsa) +AQ_LOCKED_MDO_DEF(get_dev_stats) +AQ_LOCKED_MDO_DEF(get_tx_sc_stats) +AQ_LOCKED_MDO_DEF(get_tx_sa_stats) +AQ_LOCKED_MDO_DEF(get_rx_sc_stats) +AQ_LOCKED_MDO_DEF(get_rx_sa_stats) + const struct macsec_ops aq_macsec_ops = { - .mdo_dev_open = aq_mdo_dev_open, - .mdo_dev_stop = aq_mdo_dev_stop, - .mdo_add_secy = aq_mdo_add_secy, - .mdo_upd_secy = aq_mdo_upd_secy, - .mdo_del_secy = aq_mdo_del_secy, - .mdo_add_rxsc = aq_mdo_add_rxsc, - .mdo_upd_rxsc = aq_mdo_upd_rxsc, - .mdo_del_rxsc = aq_mdo_del_rxsc, - .mdo_add_rxsa = aq_mdo_add_rxsa, - .mdo_upd_rxsa = aq_mdo_upd_rxsa, - .mdo_del_rxsa = aq_mdo_del_rxsa, - .mdo_add_txsa = aq_mdo_add_txsa, - .mdo_upd_txsa = aq_mdo_upd_txsa, - .mdo_del_txsa = aq_mdo_del_txsa, - .mdo_get_dev_stats = aq_mdo_get_dev_stats, - .mdo_get_tx_sc_stats = aq_mdo_get_tx_sc_stats, - .mdo_get_tx_sa_stats = aq_mdo_get_tx_sa_stats, - .mdo_get_rx_sc_stats = aq_mdo_get_rx_sc_stats, - .mdo_get_rx_sa_stats = aq_mdo_get_rx_sa_stats, + .mdo_dev_open = aq_locked_mdo_dev_open, + .mdo_dev_stop = aq_locked_mdo_dev_stop, + .mdo_add_secy = aq_locked_mdo_add_secy, + .mdo_upd_secy = aq_locked_mdo_upd_secy, + .mdo_del_secy = aq_locked_mdo_del_secy, + .mdo_add_rxsc = aq_locked_mdo_add_rxsc, + .mdo_upd_rxsc = aq_locked_mdo_upd_rxsc, + .mdo_del_rxsc = aq_locked_mdo_del_rxsc, + .mdo_add_rxsa = aq_locked_mdo_add_rxsa, + .mdo_upd_rxsa = aq_locked_mdo_upd_rxsa, + .mdo_del_rxsa = aq_locked_mdo_del_rxsa, + .mdo_add_txsa = aq_locked_mdo_add_txsa, + .mdo_upd_txsa = aq_locked_mdo_upd_txsa, + .mdo_del_txsa = aq_locked_mdo_del_txsa, + .mdo_get_dev_stats = aq_locked_mdo_get_dev_stats, + .mdo_get_tx_sc_stats = aq_locked_mdo_get_tx_sc_stats, + .mdo_get_tx_sa_stats = aq_locked_mdo_get_tx_sa_stats, + .mdo_get_rx_sc_stats = aq_locked_mdo_get_rx_sc_stats, + .mdo_get_rx_sa_stats = aq_locked_mdo_get_rx_sa_stats, }; int aq_macsec_init(struct aq_nic_s *nic) @@ -1435,6 +1468,7 @@ int aq_macsec_init(struct aq_nic_s *nic) nic->ndev->features |= NETIF_F_HW_MACSEC; nic->ndev->macsec_ops = &aq_macsec_ops; + mutex_init(&nic->macsec_mutex); return 0; } @@ -1458,7 +1492,7 @@ int aq_macsec_enable(struct aq_nic_s *nic) if (!nic->macsec_cfg) return 0; - rtnl_lock(); + mutex_lock(&nic->macsec_mutex); if (nic->aq_fw_ops->send_macsec_req) { struct macsec_cfg_request cfg = { 0 }; @@ -1507,7 +1541,7 @@ int aq_macsec_enable(struct aq_nic_s *nic) ret = aq_apply_macsec_cfg(nic); unlock: - rtnl_unlock(); + mutex_unlock(&nic->macsec_mutex); return ret; } @@ -1519,9 +1553,9 @@ void aq_macsec_work(struct aq_nic_s *nic) if (!netif_carrier_ok(nic->ndev)) return; - rtnl_lock(); + mutex_lock(&nic->macsec_mutex); aq_check_txsa_expiration(nic); - rtnl_unlock(); + mutex_unlock(&nic->macsec_mutex); } int aq_macsec_rx_sa_cnt(struct aq_nic_s *nic) @@ -1532,21 +1566,30 @@ int aq_macsec_rx_sa_cnt(struct aq_nic_s *nic) if (!cfg) return 0; + mutex_lock(&nic->macsec_mutex); + for (i = 0; i < AQ_MACSEC_MAX_SC; i++) { if (!test_bit(i, &cfg->rxsc_idx_busy)) continue; cnt += hweight_long(cfg->aq_rxsc[i].rx_sa_idx_busy); } + mutex_unlock(&nic->macsec_mutex); return cnt; } int aq_macsec_tx_sc_cnt(struct aq_nic_s *nic) { + int cnt; + if (!nic->macsec_cfg) return 0; - return hweight_long(nic->macsec_cfg->txsc_idx_busy); + mutex_lock(&nic->macsec_mutex); + cnt = hweight_long(nic->macsec_cfg->txsc_idx_busy); + 
mutex_unlock(&nic->macsec_mutex); + + return cnt; } int aq_macsec_tx_sa_cnt(struct aq_nic_s *nic) @@ -1557,12 +1600,15 @@ int aq_macsec_tx_sa_cnt(struct aq_nic_s *nic) if (!cfg) return 0; + mutex_lock(&nic->macsec_mutex); + for (i = 0; i < AQ_MACSEC_MAX_SC; i++) { if (!test_bit(i, &cfg->txsc_idx_busy)) continue; cnt += hweight_long(cfg->aq_txsc[i].tx_sa_idx_busy); } + mutex_unlock(&nic->macsec_mutex); return cnt; } @@ -1634,6 +1680,8 @@ u64 *aq_macsec_get_stats(struct aq_nic_s *nic, u64 *data) if (!cfg) return data; + mutex_lock(&nic->macsec_mutex); + aq_macsec_update_stats(nic); common_stats = &cfg->stats; @@ -1716,5 +1764,7 @@ u64 *aq_macsec_get_stats(struct aq_nic_s *nic, u64 *data) data += i; + mutex_unlock(&nic->macsec_mutex); + return data; } diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h index 935ba889bd9a..ad33f8586532 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h +++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h @@ -157,6 +157,8 @@ struct aq_nic_s { struct mutex fwreq_mutex; #if IS_ENABLED(CONFIG_MACSEC) struct aq_macsec_cfg *macsec_cfg; + /* mutex to protect data in macsec_cfg */ + struct mutex macsec_mutex; #endif /* PTP support */ struct aq_ptp_s *aq_ptp; diff --git a/drivers/net/ethernet/aquantia/atlantic/macsec/macsec_api.c b/drivers/net/ethernet/aquantia/atlantic/macsec/macsec_api.c index 36c7cf05630a..431924959520 100644 --- a/drivers/net/ethernet/aquantia/atlantic/macsec/macsec_api.c +++ b/drivers/net/ethernet/aquantia/atlantic/macsec/macsec_api.c @@ -757,6 +757,7 @@ set_ingress_sakey_record(struct aq_hw_s *hw, u16 table_index) { u16 packed_record[18]; + int ret; if (table_index >= NUMROWS_INGRESSSAKEYRECORD) return -EINVAL; @@ -789,9 +790,12 @@ set_ingress_sakey_record(struct aq_hw_s *hw, packed_record[16] = rec->key_len & 0x3; - return set_raw_ingress_record(hw, packed_record, 18, 2, - ROWOFFSET_INGRESSSAKEYRECORD + - table_index); + ret = set_raw_ingress_record(hw, packed_record, 18, 2, + ROWOFFSET_INGRESSSAKEYRECORD + + table_index); + + memzero_explicit(packed_record, sizeof(packed_record)); + return ret; } int aq_mss_set_ingress_sakey_record(struct aq_hw_s *hw, @@ -1739,14 +1743,14 @@ static int set_egress_sakey_record(struct aq_hw_s *hw, ret = set_raw_egress_record(hw, packed_record, 8, 2, ROWOFFSET_EGRESSSAKEYRECORD + table_index); if (unlikely(ret)) - return ret; + goto clear_key; ret = set_raw_egress_record(hw, packed_record + 8, 8, 2, ROWOFFSET_EGRESSSAKEYRECORD + table_index - 32); - if (unlikely(ret)) - return ret; - return 0; +clear_key: + memzero_explicit(packed_record, sizeof(packed_record)); + return ret; } int aq_mss_set_egress_sakey_record(struct aq_hw_s *hw, diff --git a/drivers/net/ethernet/atheros/ag71xx.c b/drivers/net/ethernet/atheros/ag71xx.c index cc932b3cf873..4a1efe9b37d0 100644 --- a/drivers/net/ethernet/atheros/ag71xx.c +++ b/drivers/net/ethernet/atheros/ag71xx.c @@ -1427,7 +1427,7 @@ static int ag71xx_open(struct net_device *ndev) if (ret) { netif_err(ag, link, ndev, "phylink_of_phy_connect filed with err: %i\n", ret); - goto err; + return ret; } max_frame_len = ag71xx_max_frame_len(ndev->mtu); @@ -1448,6 +1448,7 @@ static int ag71xx_open(struct net_device *ndev) err: ag71xx_rings_cleanup(ag); + phylink_disconnect_phy(ag->phylink); return ret; } diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig index f4e1ca68d831..55dfdb34e37b 100644 --- a/drivers/net/ethernet/broadcom/Kconfig +++ b/drivers/net/ethernet/broadcom/Kconfig 
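/*
 * Sketch of the key-hygiene pattern in the macsec hunks above: the stack
 * copy of SA key material is wiped (memzero_explicit() in the kernel) on
 * every exit path, success or failure, before the function returns.  Below
 * is a generic userspace illustration; secure_wipe() and program_key() are
 * stand-ins, not kernel or libc APIs.
 */
#include <stddef.h>
#include <string.h>

static void secure_wipe(void *p, size_t n)
{
	volatile unsigned char *vp = p;

	/* volatile stores keep the compiler from eliding the "dead" writes */
	while (n--)
		*vp++ = 0;
}

static int program_key(const unsigned char *key, size_t len)
{
	unsigned char packed[32];
	int ret;

	memcpy(packed, key, len < sizeof(packed) ? len : sizeof(packed));
	ret = 0;                              /* pretend the hardware write happened */

	secure_wipe(packed, sizeof(packed));  /* wipe on every exit path */
	return ret;
}

int main(void)
{
	static const unsigned char key[16] = { 1, 2, 3, 4 };

	return program_key(key, sizeof(key));
}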
@@ -77,7 +77,7 @@ config BCMGENET select BCM7XXX_PHY select MDIO_BCM_UNIMAC select DIMLIB - select BROADCOM_PHY if ARCH_BCM2835 + select BROADCOM_PHY if (ARCH_BCM2835 && PTP_1588_CLOCK_OPTIONAL) help This driver supports the built-in Ethernet MACs found in the Broadcom BCM7xxx Set Top Box family chipset. diff --git a/drivers/net/ethernet/broadcom/bcm4908_enet.c b/drivers/net/ethernet/broadcom/bcm4908_enet.c index 93ccf549e2ed..a737b1913cf9 100644 --- a/drivers/net/ethernet/broadcom/bcm4908_enet.c +++ b/drivers/net/ethernet/broadcom/bcm4908_enet.c @@ -561,8 +561,6 @@ static netdev_tx_t bcm4908_enet_start_xmit(struct sk_buff *skb, struct net_devic if (++ring->write_idx == ring->length - 1) ring->write_idx = 0; - enet->netdev->stats.tx_bytes += skb->len; - enet->netdev->stats.tx_packets++; return NETDEV_TX_OK; } @@ -635,6 +633,7 @@ static int bcm4908_enet_poll_tx(struct napi_struct *napi, int weight) struct bcm4908_enet_dma_ring_bd *buf_desc; struct bcm4908_enet_dma_ring_slot *slot; struct device *dev = enet->dev; + unsigned int bytes = 0; int handled = 0; while (handled < weight && tx_ring->read_idx != tx_ring->write_idx) { @@ -645,12 +644,17 @@ static int bcm4908_enet_poll_tx(struct napi_struct *napi, int weight) dma_unmap_single(dev, slot->dma_addr, slot->len, DMA_TO_DEVICE); dev_kfree_skb(slot->skb); - if (++tx_ring->read_idx == tx_ring->length) - tx_ring->read_idx = 0; handled++; + bytes += slot->len; + + if (++tx_ring->read_idx == tx_ring->length) + tx_ring->read_idx = 0; } + enet->netdev->stats.tx_packets += handled; + enet->netdev->stats.tx_bytes += bytes; + if (handled < weight) { napi_complete_done(napi, handled); bcm4908_enet_dma_ring_intrs_on(enet, tx_ring); diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c index 867f14c30e09..425d6ccd5413 100644 --- a/drivers/net/ethernet/broadcom/bcmsysport.c +++ b/drivers/net/ethernet/broadcom/bcmsysport.c @@ -1991,6 +1991,9 @@ static int bcm_sysport_open(struct net_device *dev) goto out_clk_disable; } + /* Indicate that the MAC is responsible for PHY PM */ + phydev->mac_managed_pm = true; + /* Reset house keeping link status */ priv->old_duplex = -1; priv->old_link = -1; diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c index 5fb3af5670ec..3038386a5afd 100644 --- a/drivers/net/ethernet/broadcom/bgmac.c +++ b/drivers/net/ethernet/broadcom/bgmac.c @@ -1568,7 +1568,6 @@ void bgmac_enet_remove(struct bgmac *bgmac) phy_disconnect(bgmac->net_dev->phydev); netif_napi_del(&bgmac->napi); bgmac_dma_free(bgmac); - free_netdev(bgmac->net_dev); } EXPORT_SYMBOL_GPL(bgmac_enet_remove); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 04cf7684f1b0..9f8a6ce4b356 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -9983,17 +9983,12 @@ static int bnxt_try_recover_fw(struct bnxt *bp) return -ENODEV; } -int bnxt_cancel_reservations(struct bnxt *bp, bool fw_reset) +static void bnxt_clear_reservations(struct bnxt *bp, bool fw_reset) { struct bnxt_hw_resc *hw_resc = &bp->hw_resc; - int rc; if (!BNXT_NEW_RM(bp)) - return 0; /* no resource reservations required */ - - rc = bnxt_hwrm_func_resc_qcaps(bp, true); - if (rc) - netdev_err(bp->dev, "resc_qcaps failed\n"); + return; /* no resource reservations required */ hw_resc->resv_cp_rings = 0; hw_resc->resv_stat_ctxs = 0; @@ -10006,6 +10001,20 @@ int bnxt_cancel_reservations(struct bnxt *bp, bool fw_reset) bp->tx_nr_rings = 
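/*
 * Sketch of the accounting change in the bcm4908_enet hunk above: byte and
 * packet counters are no longer bumped at submit time but accumulated while
 * reaping completed descriptors, then added to the stats once per poll.
 * Generic ring-consumer illustration; the slot/ring/stats types are invented.
 */
#include <stdio.h>

struct slot  { unsigned int len; };
struct ring  { struct slot slots[8]; unsigned int read, write, length; };
struct stats { unsigned long tx_packets, tx_bytes; };

static void reap_completions(struct ring *r, struct stats *s, int budget)
{
	unsigned long bytes = 0;
	int handled = 0;

	while (handled < budget && r->read != r->write) {
		bytes += r->slots[r->read].len;
		handled++;
		if (++r->read == r->length)
			r->read = 0;
	}

	/* one stats update per poll instead of one per transmitted frame */
	s->tx_packets += handled;
	s->tx_bytes   += bytes;
}

int main(void)
{
	struct ring r = { .slots = { {60}, {1514} }, .write = 2, .length = 8 };
	struct stats s = { 0 };

	reap_completions(&r, &s, 64);
	printf("%lu pkts / %lu bytes\n", s.tx_packets, s.tx_bytes);
	return 0;
}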
0; bp->rx_nr_rings = 0; } +} + +int bnxt_cancel_reservations(struct bnxt *bp, bool fw_reset) +{ + int rc; + + if (!BNXT_NEW_RM(bp)) + return 0; /* no resource reservations required */ + + rc = bnxt_hwrm_func_resc_qcaps(bp, true); + if (rc) + netdev_err(bp->dev, "resc_qcaps failed\n"); + + bnxt_clear_reservations(bp, fw_reset); return rc; } @@ -12894,8 +12903,8 @@ static int bnxt_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb, rcu_read_lock(); hlist_for_each_entry_rcu(fltr, head, hash) { if (bnxt_fltr_match(fltr, new_fltr)) { + rc = fltr->sw_id; rcu_read_unlock(); - rc = 0; goto err_free; } } @@ -13913,7 +13922,9 @@ static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev) pci_ers_result_t result = PCI_ERS_RESULT_DISCONNECT; struct net_device *netdev = pci_get_drvdata(pdev); struct bnxt *bp = netdev_priv(netdev); - int err = 0, off; + int retry = 0; + int err = 0; + int off; netdev_info(bp->dev, "PCI Slot Reset\n"); @@ -13941,11 +13952,36 @@ static pci_ers_result_t bnxt_io_slot_reset(struct pci_dev *pdev) pci_restore_state(pdev); pci_save_state(pdev); + bnxt_inv_fw_health_reg(bp); + bnxt_try_map_fw_health_reg(bp); + + /* In some PCIe AER scenarios, firmware may take up to + * 10 seconds to become ready in the worst case. + */ + do { + err = bnxt_try_recover_fw(bp); + if (!err) + break; + retry++; + } while (retry < BNXT_FW_SLOT_RESET_RETRY); + + if (err) { + dev_err(&pdev->dev, "Firmware not ready\n"); + goto reset_exit; + } + err = bnxt_hwrm_func_reset(bp); if (!err) result = PCI_ERS_RESULT_RECOVERED; + + bnxt_ulp_irq_stop(bp); + bnxt_clear_int_mode(bp); + err = bnxt_init_int_mode(bp); + bnxt_ulp_irq_restart(bp, err); } +reset_exit: + bnxt_clear_reservations(bp, true); rtnl_unlock(); return result; @@ -14001,8 +14037,16 @@ static struct pci_driver bnxt_pci_driver = { static int __init bnxt_init(void) { + int err; + bnxt_debug_init(); - return pci_register_driver(&bnxt_pci_driver); + err = pci_register_driver(&bnxt_pci_driver); + if (err) { + bnxt_debug_exit(); + return err; + } + + return 0; } static void __exit bnxt_exit(void) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index b1b17f911300..d5fa43cfe524 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -1621,6 +1621,7 @@ struct bnxt_fw_health { #define BNXT_FW_RETRY 5 #define BNXT_FW_IF_RETRY 10 +#define BNXT_FW_SLOT_RESET_RETRY 4 enum board_idx { BCM57301, diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c index a36803e79e92..8a6f788f6294 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c @@ -613,6 +613,7 @@ static int bnxt_dl_reload_up(struct devlink *dl, enum devlink_reload_action acti static bool bnxt_nvm_test(struct bnxt *bp, struct netlink_ext_ack *extack) { + bool rc = false; u32 datalen; u16 index; u8 *buf; @@ -632,20 +633,20 @@ static bool bnxt_nvm_test(struct bnxt *bp, struct netlink_ext_ack *extack) if (bnxt_get_nvram_item(bp->dev, index, 0, datalen, buf)) { NL_SET_ERR_MSG_MOD(extack, "nvm test vpd read error"); - goto err; + goto done; } if (bnxt_flash_nvram(bp->dev, BNX_DIR_TYPE_VPD, BNX_DIR_ORDINAL_FIRST, BNX_DIR_EXT_NONE, 0, 0, buf, datalen)) { NL_SET_ERR_MSG_MOD(extack, "nvm test vpd write error"); - goto err; + goto done; } - return true; + rc = true; -err: +done: kfree(buf); - return false; + return rc; } static bool bnxt_dl_selftest_check(struct devlink *dl, 
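/*
 * Sketch of the bounded-retry idea in the bnxt_io_slot_reset() hunk above:
 * after an AER slot reset the firmware may need several seconds to come
 * back, so the recovery probe is retried a fixed number of times before
 * giving up.  try_recover_fw(), SLOT_RESET_RETRY and the sleep() are
 * placeholders for illustration only.
 */
#include <stdio.h>
#include <unistd.h>

#define SLOT_RESET_RETRY 4

static int try_recover_fw(int attempt)
{
	return attempt < 2 ? -1 : 0;    /* pretend FW becomes ready on try #3 */
}

int main(void)
{
	int retry = 0, err;

	do {
		err = try_recover_fw(retry);
		if (!err)
			break;
		sleep(1);               /* real code waits/polls between tries */
		retry++;
	} while (retry < SLOT_RESET_RETRY);

	if (err) {
		fprintf(stderr, "firmware not ready\n");
		return 1;
	}
	printf("firmware ready after %d retries\n", retry);
	return 0;
}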
unsigned int id, diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index f57e524c7e30..8cad15c458b3 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@ -162,7 +162,7 @@ static int bnxt_set_coalesce(struct net_device *dev, } reset_coalesce: - if (netif_running(dev)) { + if (test_bit(BNXT_STATE_OPEN, &bp->state)) { if (update_stats) { rc = bnxt_close_nic(bp, true, false); if (!rc) diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c index b01d42928a53..132442f16fe6 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hwrm.c @@ -476,7 +476,8 @@ static int __hwrm_send(struct bnxt *bp, struct bnxt_hwrm_ctx *ctx) memset(ctx->resp, 0, PAGE_SIZE); req_type = le16_to_cpu(ctx->req->req_type); - if (BNXT_NO_FW_ACCESS(bp) && req_type != HWRM_FUNC_RESET) { + if (BNXT_NO_FW_ACCESS(bp) && + (req_type != HWRM_FUNC_RESET && req_type != HWRM_VER_GET)) { netdev_dbg(bp->dev, "hwrm req_type 0x%x skipped, FW channel down\n", req_type); goto exit; diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c index 51c9fd6f68a4..4f63f1ba3161 100644 --- a/drivers/net/ethernet/cadence/macb_main.c +++ b/drivers/net/ethernet/cadence/macb_main.c @@ -806,6 +806,7 @@ static int macb_mii_probe(struct net_device *dev) bp->phylink_config.dev = &dev->dev; bp->phylink_config.type = PHYLINK_NETDEV; + bp->phylink_config.mac_managed_pm = true; if (bp->phy_interface == PHY_INTERFACE_MODE_SGMII) { bp->phylink_config.poll_fixed_state = true; diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c index d312bd594935..75771825c3f9 100644 --- a/drivers/net/ethernet/cavium/liquidio/lio_main.c +++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c @@ -1794,13 +1794,10 @@ static int liquidio_open(struct net_device *netdev) ifstate_set(lio, LIO_IFSTATE_RUNNING); - if (OCTEON_CN23XX_PF(oct)) { - if (!oct->msix_on) - if (setup_tx_poll_fn(netdev)) - return -1; - } else { - if (setup_tx_poll_fn(netdev)) - return -1; + if (!OCTEON_CN23XX_PF(oct) || (OCTEON_CN23XX_PF(oct) && !oct->msix_on)) { + ret = setup_tx_poll_fn(netdev); + if (ret) + goto err_poll; } netif_tx_start_all_queues(netdev); @@ -1813,7 +1810,7 @@ static int liquidio_open(struct net_device *netdev) /* tell Octeon to start forwarding packets to host */ ret = send_rx_ctrl_cmd(lio, 1); if (ret) - return ret; + goto err_rx_ctrl; /* start periodical statistics fetch */ INIT_DELAYED_WORK(&lio->stats_wk.work, lio_fetch_stats); @@ -1824,6 +1821,27 @@ static int liquidio_open(struct net_device *netdev) dev_info(&oct->pci_dev->dev, "%s interface is opened\n", netdev->name); + return 0; + +err_rx_ctrl: + if (!OCTEON_CN23XX_PF(oct) || (OCTEON_CN23XX_PF(oct) && !oct->msix_on)) + cleanup_tx_poll_fn(netdev); +err_poll: + if (lio->ptp_clock) { + ptp_clock_unregister(lio->ptp_clock); + lio->ptp_clock = NULL; + } + + if (oct->props[lio->ifidx].napi_enabled == 1) { + list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list) + napi_disable(napi); + + oct->props[lio->ifidx].napi_enabled = 0; + + if (OCTEON_CN23XX_PF(oct)) + oct->droq[0]->ops.poll_mode = 0; + } + return ret; } diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c index a52e6b6e2876..9b84c8d8d309 100644 --- 
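/*
 * Sketch of the error-unwind style added to liquidio_open() above: each
 * setup step that can fail jumps to a label that tears down only what was
 * already set up, in reverse order, instead of returning with resources
 * half-initialized.  Step names below are invented.
 */
#include <stdio.h>

static int setup_irq(void)   { return 0; }
static int setup_poll(void)  { return 0; }
static int start_rx(void)    { return -1; }   /* simulate a late failure */
static void teardown_poll(void) { puts("undo poll"); }
static void teardown_irq(void)  { puts("undo irq"); }

static int dev_open(void)
{
	int ret;

	ret = setup_irq();
	if (ret)
		return ret;

	ret = setup_poll();
	if (ret)
		goto err_irq;

	ret = start_rx();
	if (ret)
		goto err_poll;

	return 0;

err_poll:
	teardown_poll();
err_irq:
	teardown_irq();
	return ret;
}

int main(void) { return dev_open() ? 1 : 0; }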
a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c +++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c @@ -1301,6 +1301,7 @@ static int cxgb_up(struct adapter *adap) if (ret < 0) { CH_ERR(adap, "failed to bind qsets, err %d\n", ret); t3_intr_disable(adap); + quiesce_rx(adap); free_irq_resources(adap); err = ret; goto out; diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c index 54db79f4dcfe..63b2bd084130 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c @@ -858,7 +858,7 @@ static int cxgb4vf_open(struct net_device *dev) */ err = t4vf_update_port_info(pi); if (err < 0) - return err; + goto err_unwind; /* * Note that this interface is up and start everything up ... diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c index 31cfa121333d..fc68a32ce2f7 100644 --- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c +++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c @@ -221,8 +221,8 @@ static int dpaa_netdev_init(struct net_device *net_dev, net_dev->netdev_ops = dpaa_ops; mac_addr = mac_dev->addr; - net_dev->mem_start = (unsigned long)mac_dev->vaddr; - net_dev->mem_end = (unsigned long)mac_dev->vaddr_end; + net_dev->mem_start = (unsigned long)priv->mac_dev->res->start; + net_dev->mem_end = (unsigned long)priv->mac_dev->res->end; net_dev->min_mtu = ETH_MIN_MTU; net_dev->max_mtu = dpaa_get_max_mtu(); diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth_sysfs.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth_sysfs.c index 258eb6c8f4c0..4fee74c024bd 100644 --- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth_sysfs.c +++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth_sysfs.c @@ -18,7 +18,7 @@ static ssize_t dpaa_eth_show_addr(struct device *dev, if (mac_dev) return sprintf(buf, "%llx", - (unsigned long long)mac_dev->vaddr); + (unsigned long long)mac_dev->res->start); else return sprintf(buf, "none"); } diff --git a/drivers/net/ethernet/freescale/enetc/enetc.c b/drivers/net/ethernet/freescale/enetc/enetc.c index 54bc92fc6bf0..f8c06c3f9464 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc.c +++ b/drivers/net/ethernet/freescale/enetc/enetc.c @@ -2090,7 +2090,12 @@ static void enetc_setup_rxbdr(struct enetc_hw *hw, struct enetc_bdr *rx_ring) else enetc_rxbdr_wr(hw, idx, ENETC_RBBSR, ENETC_RXB_DMA_SIZE); + /* Also prepare the consumer index in case page allocation never + * succeeds. In that case, hardware will never advance producer index + * to match consumer index, and will drop all frames. 
+ */ enetc_rxbdr_wr(hw, idx, ENETC_RBPIR, 0); + enetc_rxbdr_wr(hw, idx, ENETC_RBCIR, 1); /* enable Rx ints by setting pkt thr to 1 */ enetc_rxbdr_wr(hw, idx, ENETC_RBICR0, ENETC_RBICR0_ICEN | 0x1); diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 98d5cd313fdd..f623c12eaf95 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -713,7 +713,7 @@ fec_enet_txq_put_data_tso(struct fec_enet_priv_tx_q *txq, struct sk_buff *skb, dev_kfree_skb_any(skb); if (net_ratelimit()) netdev_err(ndev, "Tx DMA memory map failed\n"); - return NETDEV_TX_BUSY; + return NETDEV_TX_OK; } bdp->cbd_datlen = cpu_to_fec16(size); @@ -775,7 +775,7 @@ fec_enet_txq_put_hdr_tso(struct fec_enet_priv_tx_q *txq, dev_kfree_skb_any(skb); if (net_ratelimit()) netdev_err(ndev, "Tx DMA memory map failed\n"); - return NETDEV_TX_BUSY; + return NETDEV_TX_OK; } } @@ -2432,6 +2432,31 @@ static u32 fec_enet_register_offset[] = { IEEE_R_DROP, IEEE_R_FRAME_OK, IEEE_R_CRC, IEEE_R_ALIGN, IEEE_R_MACERR, IEEE_R_FDXFC, IEEE_R_OCTETS_OK }; +/* for i.MX6ul */ +static u32 fec_enet_register_offset_6ul[] = { + FEC_IEVENT, FEC_IMASK, FEC_R_DES_ACTIVE_0, FEC_X_DES_ACTIVE_0, + FEC_ECNTRL, FEC_MII_DATA, FEC_MII_SPEED, FEC_MIB_CTRLSTAT, FEC_R_CNTRL, + FEC_X_CNTRL, FEC_ADDR_LOW, FEC_ADDR_HIGH, FEC_OPD, FEC_TXIC0, FEC_RXIC0, + FEC_HASH_TABLE_HIGH, FEC_HASH_TABLE_LOW, FEC_GRP_HASH_TABLE_HIGH, + FEC_GRP_HASH_TABLE_LOW, FEC_X_WMRK, FEC_R_DES_START_0, + FEC_X_DES_START_0, FEC_R_BUFF_SIZE_0, FEC_R_FIFO_RSFL, FEC_R_FIFO_RSEM, + FEC_R_FIFO_RAEM, FEC_R_FIFO_RAFL, FEC_RACC, + RMON_T_DROP, RMON_T_PACKETS, RMON_T_BC_PKT, RMON_T_MC_PKT, + RMON_T_CRC_ALIGN, RMON_T_UNDERSIZE, RMON_T_OVERSIZE, RMON_T_FRAG, + RMON_T_JAB, RMON_T_COL, RMON_T_P64, RMON_T_P65TO127, RMON_T_P128TO255, + RMON_T_P256TO511, RMON_T_P512TO1023, RMON_T_P1024TO2047, + RMON_T_P_GTE2048, RMON_T_OCTETS, + IEEE_T_DROP, IEEE_T_FRAME_OK, IEEE_T_1COL, IEEE_T_MCOL, IEEE_T_DEF, + IEEE_T_LCOL, IEEE_T_EXCOL, IEEE_T_MACERR, IEEE_T_CSERR, IEEE_T_SQE, + IEEE_T_FDXFC, IEEE_T_OCTETS_OK, + RMON_R_PACKETS, RMON_R_BC_PKT, RMON_R_MC_PKT, RMON_R_CRC_ALIGN, + RMON_R_UNDERSIZE, RMON_R_OVERSIZE, RMON_R_FRAG, RMON_R_JAB, + RMON_R_RESVD_O, RMON_R_P64, RMON_R_P65TO127, RMON_R_P128TO255, + RMON_R_P256TO511, RMON_R_P512TO1023, RMON_R_P1024TO2047, + RMON_R_P_GTE2048, RMON_R_OCTETS, + IEEE_R_DROP, IEEE_R_FRAME_OK, IEEE_R_CRC, IEEE_R_ALIGN, IEEE_R_MACERR, + IEEE_R_FDXFC, IEEE_R_OCTETS_OK +}; #else static __u32 fec_enet_register_version = 1; static u32 fec_enet_register_offset[] = { @@ -2456,7 +2481,24 @@ static void fec_enet_get_regs(struct net_device *ndev, u32 *buf = (u32 *)regbuf; u32 i, off; int ret; +#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \ + defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) || \ + defined(CONFIG_ARM64) || defined(CONFIG_COMPILE_TEST) + u32 *reg_list; + u32 reg_cnt; + if (!of_machine_is_compatible("fsl,imx6ul")) { + reg_list = fec_enet_register_offset; + reg_cnt = ARRAY_SIZE(fec_enet_register_offset); + } else { + reg_list = fec_enet_register_offset_6ul; + reg_cnt = ARRAY_SIZE(fec_enet_register_offset_6ul); + } +#else + /* coldfire */ + static u32 *reg_list = fec_enet_register_offset; + static const u32 reg_cnt = ARRAY_SIZE(fec_enet_register_offset); +#endif ret = pm_runtime_resume_and_get(dev); if (ret < 0) return; @@ -2465,8 +2507,8 @@ static void fec_enet_get_regs(struct net_device *ndev, memset(buf, 0, regs->len); - for (i = 0; i < 
ARRAY_SIZE(fec_enet_register_offset); i++) { - off = fec_enet_register_offset[i]; + for (i = 0; i < reg_cnt; i++) { + off = reg_list[i]; if ((off == FEC_R_BOUND || off == FEC_R_FSTART) && !(fep->quirks & FEC_QUIRK_HAS_FRREG)) diff --git a/drivers/net/ethernet/freescale/fman/mac.c b/drivers/net/ethernet/freescale/fman/mac.c index 7b7526fd7da3..13e67f2864be 100644 --- a/drivers/net/ethernet/freescale/fman/mac.c +++ b/drivers/net/ethernet/freescale/fman/mac.c @@ -279,7 +279,6 @@ static int mac_probe(struct platform_device *_of_dev) struct device_node *mac_node, *dev_node; struct mac_device *mac_dev; struct platform_device *of_dev; - struct resource *res; struct mac_priv_s *priv; struct fman_mac_params params; u32 val; @@ -338,24 +337,25 @@ static int mac_probe(struct platform_device *_of_dev) of_node_put(dev_node); /* Get the address of the memory mapped registers */ - res = platform_get_mem_or_io(_of_dev, 0); - if (!res) { + mac_dev->res = platform_get_mem_or_io(_of_dev, 0); + if (!mac_dev->res) { dev_err(dev, "could not get registers\n"); return -EINVAL; } - err = devm_request_resource(dev, fman_get_mem_region(priv->fman), res); + err = devm_request_resource(dev, fman_get_mem_region(priv->fman), + mac_dev->res); if (err) { dev_err_probe(dev, err, "could not request resource\n"); return err; } - mac_dev->vaddr = devm_ioremap(dev, res->start, resource_size(res)); + mac_dev->vaddr = devm_ioremap(dev, mac_dev->res->start, + resource_size(mac_dev->res)); if (!mac_dev->vaddr) { dev_err(dev, "devm_ioremap() failed\n"); return -EIO; } - mac_dev->vaddr_end = mac_dev->vaddr + resource_size(res); if (!of_device_is_available(mac_node)) return -ENODEV; @@ -487,12 +487,21 @@ _return_of_node_put: return err; } +static int mac_remove(struct platform_device *pdev) +{ + struct mac_device *mac_dev = platform_get_drvdata(pdev); + + platform_device_unregister(mac_dev->priv->eth_dev); + return 0; +} + static struct platform_driver mac_driver = { .driver = { .name = KBUILD_MODNAME, .of_match_table = mac_match, }, .probe = mac_probe, + .remove = mac_remove, }; builtin_platform_driver(mac_driver); diff --git a/drivers/net/ethernet/freescale/fman/mac.h b/drivers/net/ethernet/freescale/fman/mac.h index b95d384271bd..13b69ca5f00c 100644 --- a/drivers/net/ethernet/freescale/fman/mac.h +++ b/drivers/net/ethernet/freescale/fman/mac.h @@ -20,8 +20,8 @@ struct mac_priv_s; struct mac_device { void __iomem *vaddr; - void __iomem *vaddr_end; struct device *dev; + struct resource *res; u8 addr[ETH_ALEN]; struct fman_port *port[2]; u32 if_support; diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.c b/drivers/net/ethernet/hisilicon/hns/hnae.c index 00fafc0f8512..430eccea8e5e 100644 --- a/drivers/net/ethernet/hisilicon/hns/hnae.c +++ b/drivers/net/ethernet/hisilicon/hns/hnae.c @@ -419,8 +419,10 @@ int hnae_ae_register(struct hnae_ae_dev *hdev, struct module *owner) hdev->cls_dev.release = hnae_release; (void)dev_set_name(&hdev->cls_dev, "hnae%d", hdev->id); ret = device_register(&hdev->cls_dev); - if (ret) + if (ret) { + put_device(&hdev->cls_dev); return ret; + } __module_get(THIS_MODULE); diff --git a/drivers/net/ethernet/hisilicon/hns3/hnae3.h b/drivers/net/ethernet/hisilicon/hns3/hnae3.h index 0179fc288f5f..17137de9338c 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hnae3.h +++ b/drivers/net/ethernet/hisilicon/hns3/hnae3.h @@ -819,7 +819,6 @@ struct hnae3_knic_private_info { const struct hnae3_dcb_ops *dcb_ops; u16 int_rl_setting; - enum pkt_hash_types rss_type; void __iomem *io_base; }; diff --git 
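/*
 * Sketch of the fec_enet_get_regs() change above: instead of always walking
 * one fixed register list, the code picks a list and a matching count at
 * runtime (i.MX6UL exposes a smaller register file), then iterates that
 * pair.  The platform check and register offsets below are placeholders,
 * not the FEC driver's.
 */
#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static const unsigned int regs_full[]  = { 0x004, 0x008, 0x00c, 0x010, 0x014 };
static const unsigned int regs_small[] = { 0x004, 0x008, 0x010 };

static void dump_regs(int is_small_variant)
{
	const unsigned int *list;
	unsigned int count, i;

	if (is_small_variant) {
		list  = regs_small;
		count = ARRAY_SIZE(regs_small);
	} else {
		list  = regs_full;
		count = ARRAY_SIZE(regs_full);
	}

	for (i = 0; i < count; i++)
		printf("reg 0x%03x\n", list[i]);  /* real code reads the MMIO offset */
}

int main(void)
{
	dump_regs(1);
	return 0;
}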
a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.c b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.c index e23729ac3bb8..ae2736549526 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.c @@ -191,23 +191,6 @@ u32 hclge_comm_get_rss_key_size(struct hnae3_handle *handle) return HCLGE_COMM_RSS_KEY_SIZE; } -void hclge_comm_get_rss_type(struct hnae3_handle *nic, - struct hclge_comm_rss_tuple_cfg *rss_tuple_sets) -{ - if (rss_tuple_sets->ipv4_tcp_en || - rss_tuple_sets->ipv4_udp_en || - rss_tuple_sets->ipv4_sctp_en || - rss_tuple_sets->ipv6_tcp_en || - rss_tuple_sets->ipv6_udp_en || - rss_tuple_sets->ipv6_sctp_en) - nic->kinfo.rss_type = PKT_HASH_TYPE_L4; - else if (rss_tuple_sets->ipv4_fragment_en || - rss_tuple_sets->ipv6_fragment_en) - nic->kinfo.rss_type = PKT_HASH_TYPE_L3; - else - nic->kinfo.rss_type = PKT_HASH_TYPE_NONE; -} - int hclge_comm_parse_rss_hfunc(struct hclge_comm_rss_cfg *rss_cfg, const u8 hfunc, u8 *hash_algo) { @@ -344,9 +327,6 @@ int hclge_comm_set_rss_input_tuple(struct hnae3_handle *nic, req->ipv6_sctp_en = rss_cfg->rss_tuple_sets.ipv6_sctp_en; req->ipv6_fragment_en = rss_cfg->rss_tuple_sets.ipv6_fragment_en; - if (is_pf) - hclge_comm_get_rss_type(nic, &rss_cfg->rss_tuple_sets); - ret = hclge_comm_cmd_send(hw, &desc, 1); if (ret) dev_err(&hw->cmq.csq.pdev->dev, diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.h b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.h index 946d166a452d..92af3d2980d3 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_common/hclge_comm_rss.h @@ -95,8 +95,6 @@ struct hclge_comm_rss_tc_mode_cmd { }; u32 hclge_comm_get_rss_key_size(struct hnae3_handle *handle); -void hclge_comm_get_rss_type(struct hnae3_handle *nic, - struct hclge_comm_rss_tuple_cfg *rss_tuple_sets); void hclge_comm_rss_indir_init_cfg(struct hnae3_ae_dev *ae_dev, struct hclge_comm_rss_cfg *rss_cfg); int hclge_comm_get_rss_tuple(struct hclge_comm_rss_cfg *rss_cfg, int flow_type, diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index 4cb2421e71a7..028577943ec5 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c @@ -105,26 +105,28 @@ static const struct pci_device_id hns3_pci_tbl[] = { }; MODULE_DEVICE_TABLE(pci, hns3_pci_tbl); -#define HNS3_RX_PTYPE_ENTRY(ptype, l, s, t) \ +#define HNS3_RX_PTYPE_ENTRY(ptype, l, s, t, h) \ { ptype, \ l, \ CHECKSUM_##s, \ HNS3_L3_TYPE_##t, \ - 1 } + 1, \ + h} #define HNS3_RX_PTYPE_UNUSED_ENTRY(ptype) \ - { ptype, 0, CHECKSUM_NONE, HNS3_L3_TYPE_PARSE_FAIL, 0 } + { ptype, 0, CHECKSUM_NONE, HNS3_L3_TYPE_PARSE_FAIL, 0, \ + PKT_HASH_TYPE_NONE } static const struct hns3_rx_ptype hns3_rx_ptype_tbl[] = { HNS3_RX_PTYPE_UNUSED_ENTRY(0), - HNS3_RX_PTYPE_ENTRY(1, 0, COMPLETE, ARP), - HNS3_RX_PTYPE_ENTRY(2, 0, COMPLETE, RARP), - HNS3_RX_PTYPE_ENTRY(3, 0, COMPLETE, LLDP), - HNS3_RX_PTYPE_ENTRY(4, 0, COMPLETE, PARSE_FAIL), - HNS3_RX_PTYPE_ENTRY(5, 0, COMPLETE, PARSE_FAIL), - HNS3_RX_PTYPE_ENTRY(6, 0, COMPLETE, PARSE_FAIL), - HNS3_RX_PTYPE_ENTRY(7, 0, COMPLETE, CNM), - HNS3_RX_PTYPE_ENTRY(8, 0, NONE, PARSE_FAIL), + HNS3_RX_PTYPE_ENTRY(1, 0, COMPLETE, ARP, PKT_HASH_TYPE_NONE), + HNS3_RX_PTYPE_ENTRY(2, 0, COMPLETE, RARP, PKT_HASH_TYPE_NONE), + HNS3_RX_PTYPE_ENTRY(3, 0, COMPLETE, LLDP, PKT_HASH_TYPE_NONE), + 
HNS3_RX_PTYPE_ENTRY(4, 0, COMPLETE, PARSE_FAIL, PKT_HASH_TYPE_NONE), + HNS3_RX_PTYPE_ENTRY(5, 0, COMPLETE, PARSE_FAIL, PKT_HASH_TYPE_NONE), + HNS3_RX_PTYPE_ENTRY(6, 0, COMPLETE, PARSE_FAIL, PKT_HASH_TYPE_NONE), + HNS3_RX_PTYPE_ENTRY(7, 0, COMPLETE, CNM, PKT_HASH_TYPE_NONE), + HNS3_RX_PTYPE_ENTRY(8, 0, NONE, PARSE_FAIL, PKT_HASH_TYPE_NONE), HNS3_RX_PTYPE_UNUSED_ENTRY(9), HNS3_RX_PTYPE_UNUSED_ENTRY(10), HNS3_RX_PTYPE_UNUSED_ENTRY(11), @@ -132,36 +134,36 @@ static const struct hns3_rx_ptype hns3_rx_ptype_tbl[] = { HNS3_RX_PTYPE_UNUSED_ENTRY(13), HNS3_RX_PTYPE_UNUSED_ENTRY(14), HNS3_RX_PTYPE_UNUSED_ENTRY(15), - HNS3_RX_PTYPE_ENTRY(16, 0, COMPLETE, PARSE_FAIL), - HNS3_RX_PTYPE_ENTRY(17, 0, COMPLETE, IPV4), - HNS3_RX_PTYPE_ENTRY(18, 0, COMPLETE, IPV4), - HNS3_RX_PTYPE_ENTRY(19, 0, UNNECESSARY, IPV4), - HNS3_RX_PTYPE_ENTRY(20, 0, UNNECESSARY, IPV4), - HNS3_RX_PTYPE_ENTRY(21, 0, NONE, IPV4), - HNS3_RX_PTYPE_ENTRY(22, 0, UNNECESSARY, IPV4), - HNS3_RX_PTYPE_ENTRY(23, 0, NONE, IPV4), - HNS3_RX_PTYPE_ENTRY(24, 0, NONE, IPV4), - HNS3_RX_PTYPE_ENTRY(25, 0, UNNECESSARY, IPV4), + HNS3_RX_PTYPE_ENTRY(16, 0, COMPLETE, PARSE_FAIL, PKT_HASH_TYPE_NONE), + HNS3_RX_PTYPE_ENTRY(17, 0, COMPLETE, IPV4, PKT_HASH_TYPE_NONE), + HNS3_RX_PTYPE_ENTRY(18, 0, COMPLETE, IPV4, PKT_HASH_TYPE_NONE), + HNS3_RX_PTYPE_ENTRY(19, 0, UNNECESSARY, IPV4, PKT_HASH_TYPE_L4), + HNS3_RX_PTYPE_ENTRY(20, 0, UNNECESSARY, IPV4, PKT_HASH_TYPE_L4), + HNS3_RX_PTYPE_ENTRY(21, 0, NONE, IPV4, PKT_HASH_TYPE_NONE), + HNS3_RX_PTYPE_ENTRY(22, 0, UNNECESSARY, IPV4, PKT_HASH_TYPE_L4), + HNS3_RX_PTYPE_ENTRY(23, 0, NONE, IPV4, PKT_HASH_TYPE_L3), + HNS3_RX_PTYPE_ENTRY(24, 0, NONE, IPV4, PKT_HASH_TYPE_L3), + HNS3_RX_PTYPE_ENTRY(25, 0, UNNECESSARY, IPV4, PKT_HASH_TYPE_L4), HNS3_RX_PTYPE_UNUSED_ENTRY(26), HNS3_RX_PTYPE_UNUSED_ENTRY(27), HNS3_RX_PTYPE_UNUSED_ENTRY(28), - HNS3_RX_PTYPE_ENTRY(29, 0, COMPLETE, PARSE_FAIL), - HNS3_RX_PTYPE_ENTRY(30, 0, COMPLETE, PARSE_FAIL), - HNS3_RX_PTYPE_ENTRY(31, 0, COMPLETE, IPV4), - HNS3_RX_PTYPE_ENTRY(32, 0, COMPLETE, IPV4), - HNS3_RX_PTYPE_ENTRY(33, 1, UNNECESSARY, IPV4), - HNS3_RX_PTYPE_ENTRY(34, 1, UNNECESSARY, IPV4), - HNS3_RX_PTYPE_ENTRY(35, 1, UNNECESSARY, IPV4), - HNS3_RX_PTYPE_ENTRY(36, 0, COMPLETE, IPV4), - HNS3_RX_PTYPE_ENTRY(37, 0, COMPLETE, IPV4), + HNS3_RX_PTYPE_ENTRY(29, 0, COMPLETE, PARSE_FAIL, PKT_HASH_TYPE_NONE), + HNS3_RX_PTYPE_ENTRY(30, 0, COMPLETE, PARSE_FAIL, PKT_HASH_TYPE_NONE), + HNS3_RX_PTYPE_ENTRY(31, 0, COMPLETE, IPV4, PKT_HASH_TYPE_L3), + HNS3_RX_PTYPE_ENTRY(32, 0, COMPLETE, IPV4, PKT_HASH_TYPE_L3), + HNS3_RX_PTYPE_ENTRY(33, 1, UNNECESSARY, IPV4, PKT_HASH_TYPE_L4), + HNS3_RX_PTYPE_ENTRY(34, 1, UNNECESSARY, IPV4, PKT_HASH_TYPE_L4), + HNS3_RX_PTYPE_ENTRY(35, 1, UNNECESSARY, IPV4, PKT_HASH_TYPE_L4), + HNS3_RX_PTYPE_ENTRY(36, 0, COMPLETE, IPV4, PKT_HASH_TYPE_L3), + HNS3_RX_PTYPE_ENTRY(37, 0, COMPLETE, IPV4, PKT_HASH_TYPE_L3), HNS3_RX_PTYPE_UNUSED_ENTRY(38), - HNS3_RX_PTYPE_ENTRY(39, 0, COMPLETE, IPV6), - HNS3_RX_PTYPE_ENTRY(40, 0, COMPLETE, IPV6), - HNS3_RX_PTYPE_ENTRY(41, 1, UNNECESSARY, IPV6), - HNS3_RX_PTYPE_ENTRY(42, 1, UNNECESSARY, IPV6), - HNS3_RX_PTYPE_ENTRY(43, 1, UNNECESSARY, IPV6), - HNS3_RX_PTYPE_ENTRY(44, 0, COMPLETE, IPV6), - HNS3_RX_PTYPE_ENTRY(45, 0, COMPLETE, IPV6), + HNS3_RX_PTYPE_ENTRY(39, 0, COMPLETE, IPV6, PKT_HASH_TYPE_L3), + HNS3_RX_PTYPE_ENTRY(40, 0, COMPLETE, IPV6, PKT_HASH_TYPE_L3), + HNS3_RX_PTYPE_ENTRY(41, 1, UNNECESSARY, IPV6, PKT_HASH_TYPE_L4), + HNS3_RX_PTYPE_ENTRY(42, 1, UNNECESSARY, IPV6, PKT_HASH_TYPE_L4), + HNS3_RX_PTYPE_ENTRY(43, 1, UNNECESSARY, IPV6, 
PKT_HASH_TYPE_L4), + HNS3_RX_PTYPE_ENTRY(44, 0, COMPLETE, IPV6, PKT_HASH_TYPE_L3), + HNS3_RX_PTYPE_ENTRY(45, 0, COMPLETE, IPV6, PKT_HASH_TYPE_L3), HNS3_RX_PTYPE_UNUSED_ENTRY(46), HNS3_RX_PTYPE_UNUSED_ENTRY(47), HNS3_RX_PTYPE_UNUSED_ENTRY(48), @@ -227,35 +229,35 @@ static const struct hns3_rx_ptype hns3_rx_ptype_tbl[] = { HNS3_RX_PTYPE_UNUSED_ENTRY(108), HNS3_RX_PTYPE_UNUSED_ENTRY(109), HNS3_RX_PTYPE_UNUSED_ENTRY(110), - HNS3_RX_PTYPE_ENTRY(111, 0, COMPLETE, IPV6), - HNS3_RX_PTYPE_ENTRY(112, 0, COMPLETE, IPV6), - HNS3_RX_PTYPE_ENTRY(113, 0, UNNECESSARY, IPV6), - HNS3_RX_PTYPE_ENTRY(114, 0, UNNECESSARY, IPV6), - HNS3_RX_PTYPE_ENTRY(115, 0, NONE, IPV6), - HNS3_RX_PTYPE_ENTRY(116, 0, UNNECESSARY, IPV6), - HNS3_RX_PTYPE_ENTRY(117, 0, NONE, IPV6), - HNS3_RX_PTYPE_ENTRY(118, 0, NONE, IPV6), - HNS3_RX_PTYPE_ENTRY(119, 0, UNNECESSARY, IPV6), + HNS3_RX_PTYPE_ENTRY(111, 0, COMPLETE, IPV6, PKT_HASH_TYPE_L3), + HNS3_RX_PTYPE_ENTRY(112, 0, COMPLETE, IPV6, PKT_HASH_TYPE_L3), + HNS3_RX_PTYPE_ENTRY(113, 0, UNNECESSARY, IPV6, PKT_HASH_TYPE_L4), + HNS3_RX_PTYPE_ENTRY(114, 0, UNNECESSARY, IPV6, PKT_HASH_TYPE_L4), + HNS3_RX_PTYPE_ENTRY(115, 0, NONE, IPV6, PKT_HASH_TYPE_L3), + HNS3_RX_PTYPE_ENTRY(116, 0, UNNECESSARY, IPV6, PKT_HASH_TYPE_L4), + HNS3_RX_PTYPE_ENTRY(117, 0, NONE, IPV6, PKT_HASH_TYPE_L3), + HNS3_RX_PTYPE_ENTRY(118, 0, NONE, IPV6, PKT_HASH_TYPE_L3), + HNS3_RX_PTYPE_ENTRY(119, 0, UNNECESSARY, IPV6, PKT_HASH_TYPE_L4), HNS3_RX_PTYPE_UNUSED_ENTRY(120), HNS3_RX_PTYPE_UNUSED_ENTRY(121), HNS3_RX_PTYPE_UNUSED_ENTRY(122), - HNS3_RX_PTYPE_ENTRY(123, 0, COMPLETE, PARSE_FAIL), - HNS3_RX_PTYPE_ENTRY(124, 0, COMPLETE, PARSE_FAIL), - HNS3_RX_PTYPE_ENTRY(125, 0, COMPLETE, IPV4), - HNS3_RX_PTYPE_ENTRY(126, 0, COMPLETE, IPV4), - HNS3_RX_PTYPE_ENTRY(127, 1, UNNECESSARY, IPV4), - HNS3_RX_PTYPE_ENTRY(128, 1, UNNECESSARY, IPV4), - HNS3_RX_PTYPE_ENTRY(129, 1, UNNECESSARY, IPV4), - HNS3_RX_PTYPE_ENTRY(130, 0, COMPLETE, IPV4), - HNS3_RX_PTYPE_ENTRY(131, 0, COMPLETE, IPV4), + HNS3_RX_PTYPE_ENTRY(123, 0, COMPLETE, PARSE_FAIL, PKT_HASH_TYPE_NONE), + HNS3_RX_PTYPE_ENTRY(124, 0, COMPLETE, PARSE_FAIL, PKT_HASH_TYPE_NONE), + HNS3_RX_PTYPE_ENTRY(125, 0, COMPLETE, IPV4, PKT_HASH_TYPE_L3), + HNS3_RX_PTYPE_ENTRY(126, 0, COMPLETE, IPV4, PKT_HASH_TYPE_L3), + HNS3_RX_PTYPE_ENTRY(127, 1, UNNECESSARY, IPV4, PKT_HASH_TYPE_L4), + HNS3_RX_PTYPE_ENTRY(128, 1, UNNECESSARY, IPV4, PKT_HASH_TYPE_L4), + HNS3_RX_PTYPE_ENTRY(129, 1, UNNECESSARY, IPV4, PKT_HASH_TYPE_L4), + HNS3_RX_PTYPE_ENTRY(130, 0, COMPLETE, IPV4, PKT_HASH_TYPE_L3), + HNS3_RX_PTYPE_ENTRY(131, 0, COMPLETE, IPV4, PKT_HASH_TYPE_L3), HNS3_RX_PTYPE_UNUSED_ENTRY(132), - HNS3_RX_PTYPE_ENTRY(133, 0, COMPLETE, IPV6), - HNS3_RX_PTYPE_ENTRY(134, 0, COMPLETE, IPV6), - HNS3_RX_PTYPE_ENTRY(135, 1, UNNECESSARY, IPV6), - HNS3_RX_PTYPE_ENTRY(136, 1, UNNECESSARY, IPV6), - HNS3_RX_PTYPE_ENTRY(137, 1, UNNECESSARY, IPV6), - HNS3_RX_PTYPE_ENTRY(138, 0, COMPLETE, IPV6), - HNS3_RX_PTYPE_ENTRY(139, 0, COMPLETE, IPV6), + HNS3_RX_PTYPE_ENTRY(133, 0, COMPLETE, IPV6, PKT_HASH_TYPE_L3), + HNS3_RX_PTYPE_ENTRY(134, 0, COMPLETE, IPV6, PKT_HASH_TYPE_L3), + HNS3_RX_PTYPE_ENTRY(135, 1, UNNECESSARY, IPV6, PKT_HASH_TYPE_L4), + HNS3_RX_PTYPE_ENTRY(136, 1, UNNECESSARY, IPV6, PKT_HASH_TYPE_L4), + HNS3_RX_PTYPE_ENTRY(137, 1, UNNECESSARY, IPV6, PKT_HASH_TYPE_L4), + HNS3_RX_PTYPE_ENTRY(138, 0, COMPLETE, IPV6, PKT_HASH_TYPE_L3), + HNS3_RX_PTYPE_ENTRY(139, 0, COMPLETE, IPV6, PKT_HASH_TYPE_L3), HNS3_RX_PTYPE_UNUSED_ENTRY(140), HNS3_RX_PTYPE_UNUSED_ENTRY(141), HNS3_RX_PTYPE_UNUSED_ENTRY(142), @@ -3776,8 +3778,8 @@ static void 
hns3_nic_reuse_page(struct sk_buff *skb, int i, desc_cb->reuse_flag = 1; } else if (frag_size <= ring->rx_copybreak) { ret = hns3_handle_rx_copybreak(skb, i, ring, pull_len, desc_cb); - if (ret) - goto out; + if (!ret) + return; } out: @@ -4171,15 +4173,35 @@ static int hns3_set_gro_and_checksum(struct hns3_enet_ring *ring, } static void hns3_set_rx_skb_rss_type(struct hns3_enet_ring *ring, - struct sk_buff *skb, u32 rss_hash) + struct sk_buff *skb, u32 rss_hash, + u32 l234info, u32 ol_info) { - struct hnae3_handle *handle = ring->tqp->handle; - enum pkt_hash_types rss_type; + enum pkt_hash_types rss_type = PKT_HASH_TYPE_NONE; + struct net_device *netdev = ring_to_netdev(ring); + struct hns3_nic_priv *priv = netdev_priv(netdev); - if (rss_hash) - rss_type = handle->kinfo.rss_type; - else - rss_type = PKT_HASH_TYPE_NONE; + if (test_bit(HNS3_NIC_STATE_RXD_ADV_LAYOUT_ENABLE, &priv->state)) { + u32 ptype = hnae3_get_field(ol_info, HNS3_RXD_PTYPE_M, + HNS3_RXD_PTYPE_S); + + rss_type = hns3_rx_ptype_tbl[ptype].hash_type; + } else { + int l3_type = hnae3_get_field(l234info, HNS3_RXD_L3ID_M, + HNS3_RXD_L3ID_S); + int l4_type = hnae3_get_field(l234info, HNS3_RXD_L4ID_M, + HNS3_RXD_L4ID_S); + + if (l3_type == HNS3_L3_TYPE_IPV4 || + l3_type == HNS3_L3_TYPE_IPV6) { + if (l4_type == HNS3_L4_TYPE_UDP || + l4_type == HNS3_L4_TYPE_TCP || + l4_type == HNS3_L4_TYPE_SCTP) + rss_type = PKT_HASH_TYPE_L4; + else if (l4_type == HNS3_L4_TYPE_IGMP || + l4_type == HNS3_L4_TYPE_ICMP) + rss_type = PKT_HASH_TYPE_L3; + } + } skb_set_hash(skb, rss_hash, rss_type); } @@ -4282,7 +4304,8 @@ static int hns3_handle_bdinfo(struct hns3_enet_ring *ring, struct sk_buff *skb) ring->tqp_vector->rx_group.total_bytes += len; - hns3_set_rx_skb_rss_type(ring, skb, le32_to_cpu(desc->rx.rss_hash)); + hns3_set_rx_skb_rss_type(ring, skb, le32_to_cpu(desc->rx.rss_hash), + l234info, ol_info); return 0; } diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h index 133a054af6b7..294a14b4fdef 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h @@ -404,6 +404,7 @@ struct hns3_rx_ptype { u32 ip_summed : 2; u32 l3_type : 4; u32 valid : 1; + u32 hash_type: 3; }; struct ring_stats { diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index 6962a9d69cf8..4e54f91f7a6c 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -3443,6 +3443,7 @@ static int hclge_update_tp_port_info(struct hclge_dev *hdev) hdev->hw.mac.autoneg = cmd.base.autoneg; hdev->hw.mac.speed = cmd.base.speed; hdev->hw.mac.duplex = cmd.base.duplex; + linkmode_copy(hdev->hw.mac.advertising, cmd.link_modes.advertising); return 0; } @@ -4859,7 +4860,6 @@ static int hclge_set_rss_tuple(struct hnae3_handle *handle, return ret; } - hclge_comm_get_rss_type(&vport->nic, &hdev->rss_cfg.rss_tuple_sets); return 0; } @@ -11587,9 +11587,12 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev) if (ret) goto err_msi_irq_uninit; - if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER && - !hnae3_dev_phy_imp_supported(hdev)) { - ret = hclge_mac_mdio_config(hdev); + if (hdev->hw.mac.media_type == HNAE3_MEDIA_TYPE_COPPER) { + if (hnae3_dev_phy_imp_supported(hdev)) + ret = hclge_update_tp_port_info(hdev); + else + ret = hclge_mac_mdio_config(hdev); + if (ret) goto err_msi_irq_uninit; } @@ -12984,14 +12987,16 @@ static void 
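/*
 * Sketch of the classification logic in hns3_set_rx_skb_rss_type() above:
 * the hash type reported to the stack is now derived per packet from the
 * parsed L3/L4 type (L4 hash for TCP/UDP/SCTP, L3 hash for ICMP/IGMP,
 * otherwise none) instead of a cached per-device rss_type.  The enum names
 * below are illustrative, not the hns3 definitions.
 */
#include <stdio.h>

enum l3 { L3_NONE, L3_IPV4, L3_IPV6 };
enum l4 { L4_NONE, L4_TCP, L4_UDP, L4_SCTP, L4_ICMP, L4_IGMP };
enum hash_type { HASH_NONE, HASH_L3, HASH_L4 };

static enum hash_type rx_hash_type(enum l3 l3t, enum l4 l4t)
{
	if (l3t != L3_IPV4 && l3t != L3_IPV6)
		return HASH_NONE;

	if (l4t == L4_TCP || l4t == L4_UDP || l4t == L4_SCTP)
		return HASH_L4;             /* hash covered the ports too */
	if (l4t == L4_ICMP || l4t == L4_IGMP)
		return HASH_L3;             /* hash covered addresses only */

	return HASH_NONE;
}

int main(void)
{
	printf("tcp/ipv4  -> %d\n", rx_hash_type(L3_IPV4, L4_TCP));
	printf("icmp/ipv6 -> %d\n", rx_hash_type(L3_IPV6, L4_ICMP));
	return 0;
}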
hclge_clean_vport_config(struct hnae3_ae_dev *ae_dev, int num_vfs) static int hclge_get_dscp_prio(struct hnae3_handle *h, u8 dscp, u8 *tc_mode, u8 *priority) { + struct hclge_vport *vport = hclge_get_vport(h); + if (dscp >= HNAE3_MAX_DSCP) return -EINVAL; if (tc_mode) - *tc_mode = h->kinfo.tc_map_mode; + *tc_mode = vport->nic.kinfo.tc_map_mode; if (priority) - *priority = h->kinfo.dscp_prio[dscp] == HNAE3_PRIO_ID_INVALID ? 0 : - h->kinfo.dscp_prio[dscp]; + *priority = vport->nic.kinfo.dscp_prio[dscp] == HNAE3_PRIO_ID_INVALID ? 0 : + vport->nic.kinfo.dscp_prio[dscp]; return 0; } diff --git a/drivers/net/ethernet/huawei/hinic/hinic_debugfs.c b/drivers/net/ethernet/huawei/hinic/hinic_debugfs.c index 19eb839177ec..061952c6c21a 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_debugfs.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_debugfs.c @@ -85,6 +85,7 @@ static int hinic_dbg_get_func_table(struct hinic_dev *nic_dev, int idx) struct tag_sml_funcfg_tbl *funcfg_table_elem; struct hinic_cmd_lt_rd *read_data; u16 out_size = sizeof(*read_data); + int ret = ~0; int err; read_data = kzalloc(sizeof(*read_data), GFP_KERNEL); @@ -111,20 +112,25 @@ static int hinic_dbg_get_func_table(struct hinic_dev *nic_dev, int idx) switch (idx) { case VALID: - return funcfg_table_elem->dw0.bs.valid; + ret = funcfg_table_elem->dw0.bs.valid; + break; case RX_MODE: - return funcfg_table_elem->dw0.bs.nic_rx_mode; + ret = funcfg_table_elem->dw0.bs.nic_rx_mode; + break; case MTU: - return funcfg_table_elem->dw1.bs.mtu; + ret = funcfg_table_elem->dw1.bs.mtu; + break; case RQ_DEPTH: - return funcfg_table_elem->dw13.bs.cfg_rq_depth; + ret = funcfg_table_elem->dw13.bs.cfg_rq_depth; + break; case QUEUE_NUM: - return funcfg_table_elem->dw13.bs.cfg_q_num; + ret = funcfg_table_elem->dw13.bs.cfg_q_num; + break; } kfree(read_data); - return ~0; + return ret; } static ssize_t hinic_dbg_cmd_read(struct file *filp, char __user *buffer, size_t count, diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c index 78190e88cd75..d39eec9c62bf 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_cmdq.c @@ -924,7 +924,7 @@ int hinic_init_cmdqs(struct hinic_cmdqs *cmdqs, struct hinic_hwif *hwif, err_set_cmdq_depth: hinic_ceq_unregister_cb(&func_to_io->ceqs, HINIC_CEQ_CMDQ); - + free_cmdq(&cmdqs->cmdq[HINIC_CMDQ_SYNC]); err_cmdq_ctxt: hinic_wqs_cmdq_free(&cmdqs->cmdq_pages, cmdqs->saved_wqs, HINIC_MAX_CMDQ_TYPES); diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c index 94f470556295..27795288c586 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c @@ -877,7 +877,7 @@ int hinic_set_interrupt_cfg(struct hinic_hwdev *hwdev, if (err) return -EINVAL; - interrupt_info->lli_credit_cnt = temp_info.lli_timer_cnt; + interrupt_info->lli_credit_cnt = temp_info.lli_credit_cnt; interrupt_info->lli_timer_cnt = temp_info.lli_timer_cnt; err = hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_COMM, diff --git a/drivers/net/ethernet/huawei/hinic/hinic_main.c b/drivers/net/ethernet/huawei/hinic/hinic_main.c index e1f54a2f28b2..2d6906aba2a2 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_main.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_main.c @@ -1474,8 +1474,15 @@ static struct pci_driver hinic_driver = { static int __init hinic_module_init(void) { + int ret; + hinic_dbg_register_debugfs(HINIC_DRV_NAME); - 
return pci_register_driver(&hinic_driver); + + ret = pci_register_driver(&hinic_driver); + if (ret) + hinic_dbg_unregister_debugfs(); + + return ret; } static void __exit hinic_module_exit(void) diff --git a/drivers/net/ethernet/huawei/hinic/hinic_sriov.c b/drivers/net/ethernet/huawei/hinic/hinic_sriov.c index a5f08b969e3f..f7e05b41385b 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_sriov.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_sriov.c @@ -1174,7 +1174,6 @@ int hinic_vf_func_init(struct hinic_hwdev *hwdev) dev_err(&hwdev->hwif->pdev->dev, "Failed to register VF, err: %d, status: 0x%x, out size: 0x%x\n", err, register_info.status, out_size); - hinic_unregister_vf_mbox_cb(hwdev, HINIC_MOD_L2NIC); return -EIO; } } else { diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c index 294bdbbeacc3..b4aff59b3eb4 100644 --- a/drivers/net/ethernet/ibm/ehea/ehea_main.c +++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c @@ -2900,6 +2900,7 @@ static struct device *ehea_register_port(struct ehea_port *port, ret = of_device_register(&port->ofdev); if (ret) { pr_err("failed to register device. ret=%d\n", ret); + put_device(&port->ofdev.dev); goto out; } diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c index 3b14dc93f59d..5b96cd94dcd2 100644 --- a/drivers/net/ethernet/ibm/ibmveth.c +++ b/drivers/net/ethernet/ibm/ibmveth.c @@ -1757,7 +1757,8 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id) kobject_uevent(kobj, KOBJ_ADD); } - rc = netif_set_real_num_tx_queues(netdev, ibmveth_real_max_tx_queues()); + rc = netif_set_real_num_tx_queues(netdev, min(num_online_cpus(), + IBMVETH_DEFAULT_QUEUES)); if (rc) { netdev_dbg(netdev, "failed to set number of tx queues rc=%d\n", rc); diff --git a/drivers/net/ethernet/ibm/ibmveth.h b/drivers/net/ethernet/ibm/ibmveth.h index daf6f615c03f..115d4c45aa77 100644 --- a/drivers/net/ethernet/ibm/ibmveth.h +++ b/drivers/net/ethernet/ibm/ibmveth.h @@ -100,6 +100,7 @@ static inline long h_illan_attributes(unsigned long unit_address, #define IBMVETH_MAX_BUF_SIZE (1024 * 128) #define IBMVETH_MAX_TX_BUF_SIZE (1024 * 64) #define IBMVETH_MAX_QUEUES 16U +#define IBMVETH_DEFAULT_QUEUES 8U static int pool_size[] = { 512, 1024 * 2, 1024 * 16, 1024 * 32, 1024 * 64 }; static int pool_count[] = { 256, 512, 256, 256, 256 }; diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index 65dbfbec487a..9282381a438f 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -3007,19 +3007,19 @@ static void __ibmvnic_reset(struct work_struct *work) rwi = get_next_rwi(adapter); /* - * If there is another reset queued, free the previous rwi - * and process the new reset even if previous reset failed - * (the previous reset could have failed because of a fail - * over for instance, so process the fail over). - * * If there are no resets queued and the previous reset failed, * the adapter would be in an undefined state. So retry the * previous reset as a hard reset. + * + * Else, free the previous rwi and, if there is another reset + * queued, process the new reset even if previous reset failed + * (the previous reset could have failed because of a fail + * over for instance, so process the fail over). 
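/*
 * Sketch of the init-rollback fix in hinic_module_init() above (the earlier
 * bnxt_init() hunk makes the same change): when the second init step fails,
 * the first one is undone before returning the error, so nothing is left
 * half-registered.  Function names are placeholders.
 */
#include <stdio.h>

static void debugfs_setup(void)    { puts("debugfs up"); }
static void debugfs_teardown(void) { puts("debugfs down"); }
static int  driver_register(void)  { return -1; }   /* simulate failure */

static int module_init_sketch(void)
{
	int ret;

	debugfs_setup();

	ret = driver_register();
	if (ret) {
		debugfs_teardown();   /* roll back step 1 on failure */
		return ret;
	}
	return 0;
}

int main(void) { return module_init_sketch() ? 1 : 0; }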
*/ - if (rwi) - kfree(tmprwi); - else if (rc) + if (!rwi && rc) rwi = tmprwi; + else + kfree(tmprwi); if (rwi && (rwi->reset_reason == VNIC_RESET_FAILOVER || rwi->reset_reason == VNIC_RESET_MOBILITY || rc)) diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c index 7e75706f76db..4a6a6e48c615 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c +++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c @@ -2183,9 +2183,6 @@ static int i40e_set_ringparam(struct net_device *netdev, err = i40e_setup_rx_descriptors(&rx_rings[i]); if (err) goto rx_unwind; - err = i40e_alloc_rx_bi(&rx_rings[i]); - if (err) - goto rx_unwind; /* now allocate the Rx buffers to make sure the OS * has enough memory, any failure here means abort @@ -3188,10 +3185,17 @@ static int i40e_get_rss_hash_opts(struct i40e_pf *pf, struct ethtool_rxnfc *cmd) if (cmd->flow_type == TCP_V4_FLOW || cmd->flow_type == UDP_V4_FLOW) { - if (i_set & I40E_L3_SRC_MASK) - cmd->data |= RXH_IP_SRC; - if (i_set & I40E_L3_DST_MASK) - cmd->data |= RXH_IP_DST; + if (hw->mac.type == I40E_MAC_X722) { + if (i_set & I40E_X722_L3_SRC_MASK) + cmd->data |= RXH_IP_SRC; + if (i_set & I40E_X722_L3_DST_MASK) + cmd->data |= RXH_IP_DST; + } else { + if (i_set & I40E_L3_SRC_MASK) + cmd->data |= RXH_IP_SRC; + if (i_set & I40E_L3_DST_MASK) + cmd->data |= RXH_IP_DST; + } } else if (cmd->flow_type == TCP_V6_FLOW || cmd->flow_type == UDP_V6_FLOW) { if (i_set & I40E_L3_V6_SRC_MASK) @@ -3549,12 +3553,15 @@ static int i40e_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd, /** * i40e_get_rss_hash_bits - Read RSS Hash bits from register + * @hw: hw structure * @nfc: pointer to user request * @i_setc: bits currently set * * Returns value of bits to be set per user request **/ -static u64 i40e_get_rss_hash_bits(struct ethtool_rxnfc *nfc, u64 i_setc) +static u64 i40e_get_rss_hash_bits(struct i40e_hw *hw, + struct ethtool_rxnfc *nfc, + u64 i_setc) { u64 i_set = i_setc; u64 src_l3 = 0, dst_l3 = 0; @@ -3573,8 +3580,13 @@ static u64 i40e_get_rss_hash_bits(struct ethtool_rxnfc *nfc, u64 i_setc) dst_l3 = I40E_L3_V6_DST_MASK; } else if (nfc->flow_type == TCP_V4_FLOW || nfc->flow_type == UDP_V4_FLOW) { - src_l3 = I40E_L3_SRC_MASK; - dst_l3 = I40E_L3_DST_MASK; + if (hw->mac.type == I40E_MAC_X722) { + src_l3 = I40E_X722_L3_SRC_MASK; + dst_l3 = I40E_X722_L3_DST_MASK; + } else { + src_l3 = I40E_L3_SRC_MASK; + dst_l3 = I40E_L3_DST_MASK; + } } else { /* Any other flow type are not supported here */ return i_set; @@ -3592,6 +3604,7 @@ static u64 i40e_get_rss_hash_bits(struct ethtool_rxnfc *nfc, u64 i_setc) return i_set; } +#define FLOW_PCTYPES_SIZE 64 /** * i40e_set_rss_hash_opt - Enable/Disable flow types for RSS hash * @pf: pointer to the physical function struct @@ -3604,9 +3617,11 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc) struct i40e_hw *hw = &pf->hw; u64 hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) | ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32); - u8 flow_pctype = 0; + DECLARE_BITMAP(flow_pctypes, FLOW_PCTYPES_SIZE); u64 i_set, i_setc; + bitmap_zero(flow_pctypes, FLOW_PCTYPES_SIZE); + if (pf->flags & I40E_FLAG_MFP_ENABLED) { dev_err(&pf->pdev->dev, "Change of RSS hash input set is not supported when MFP mode is enabled\n"); @@ -3622,36 +3637,35 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc) switch (nfc->flow_type) { case TCP_V4_FLOW: - flow_pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP; + 
set_bit(I40E_FILTER_PCTYPE_NONF_IPV4_TCP, flow_pctypes); if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE) - hena |= - BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK); + set_bit(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK, + flow_pctypes); break; case TCP_V6_FLOW: - flow_pctype = I40E_FILTER_PCTYPE_NONF_IPV6_TCP; - if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE) - hena |= - BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK); + set_bit(I40E_FILTER_PCTYPE_NONF_IPV6_TCP, flow_pctypes); if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE) - hena |= - BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK); + set_bit(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK, + flow_pctypes); break; case UDP_V4_FLOW: - flow_pctype = I40E_FILTER_PCTYPE_NONF_IPV4_UDP; - if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE) - hena |= - BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | - BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP); - + set_bit(I40E_FILTER_PCTYPE_NONF_IPV4_UDP, flow_pctypes); + if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE) { + set_bit(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP, + flow_pctypes); + set_bit(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP, + flow_pctypes); + } hena |= BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4); break; case UDP_V6_FLOW: - flow_pctype = I40E_FILTER_PCTYPE_NONF_IPV6_UDP; - if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE) - hena |= - BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | - BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP); - + set_bit(I40E_FILTER_PCTYPE_NONF_IPV6_UDP, flow_pctypes); + if (pf->hw_features & I40E_HW_MULTIPLE_TCP_UDP_RSS_PCTYPE) { + set_bit(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP, + flow_pctypes); + set_bit(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP, + flow_pctypes); + } hena |= BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6); break; case AH_ESP_V4_FLOW: @@ -3684,17 +3698,20 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc) return -EINVAL; } - if (flow_pctype) { - i_setc = (u64)i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(0, - flow_pctype)) | - ((u64)i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(1, - flow_pctype)) << 32); - i_set = i40e_get_rss_hash_bits(nfc, i_setc); - i40e_write_rx_ctl(hw, I40E_GLQF_HASH_INSET(0, flow_pctype), - (u32)i_set); - i40e_write_rx_ctl(hw, I40E_GLQF_HASH_INSET(1, flow_pctype), - (u32)(i_set >> 32)); - hena |= BIT_ULL(flow_pctype); + if (bitmap_weight(flow_pctypes, FLOW_PCTYPES_SIZE)) { + u8 flow_id; + + for_each_set_bit(flow_id, flow_pctypes, FLOW_PCTYPES_SIZE) { + i_setc = (u64)i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(0, flow_id)) | + ((u64)i40e_read_rx_ctl(hw, I40E_GLQF_HASH_INSET(1, flow_id)) << 32); + i_set = i40e_get_rss_hash_bits(&pf->hw, nfc, i_setc); + + i40e_write_rx_ctl(hw, I40E_GLQF_HASH_INSET(0, flow_id), + (u32)i_set); + i40e_write_rx_ctl(hw, I40E_GLQF_HASH_INSET(1, flow_id), + (u32)(i_set >> 32)); + hena |= BIT_ULL(flow_id); + } } i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (u32)hena); diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 2c07fa8ecfc8..b5dcd15ced36 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -3566,12 +3566,8 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring) if (ring->vsi->type == I40E_VSI_MAIN) xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq); - kfree(ring->rx_bi); ring->xsk_pool = i40e_xsk_pool(ring); if (ring->xsk_pool) { - ret = i40e_alloc_rx_bi_zc(ring); - if (ret) - return ret; 
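/*
 * Sketch of the restructuring in i40e_set_rss_hash_opt() above: instead of
 * tracking a single flow_pctype plus ad-hoc "hena |=" lines, every packet
 * type to program is collected in a bitmap and one loop walks the set bits.
 * A plain 64-bit mask stands in for the kernel's DECLARE_BITMAP() and
 * for_each_set_bit(); the pctype values are invented.
 */
#include <stdint.h>
#include <stdio.h>

enum { PCTYPE_IPV4_TCP = 1, PCTYPE_IPV4_TCP_SYN_NO_ACK = 2, PCTYPE_IPV4_UDP = 3 };

static void program_pctype(unsigned int pctype)
{
	printf("program input set for pctype %u\n", pctype);
}

int main(void)
{
	uint64_t pctypes = 0;

	/* gather: mirrors the set_bit() calls in the switch above */
	pctypes |= UINT64_C(1) << PCTYPE_IPV4_TCP;
	pctypes |= UINT64_C(1) << PCTYPE_IPV4_TCP_SYN_NO_ACK;

	/* apply: one loop instead of duplicated register writes per case */
	for (unsigned int bit = 0; bit < 64; bit++)
		if (pctypes & (UINT64_C(1) << bit))
			program_pctype(bit);

	return 0;
}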
ring->rx_buf_len = xsk_pool_get_rx_frame_size(ring->xsk_pool); /* For AF_XDP ZC, we disallow packets to span on @@ -3589,9 +3585,6 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring) ring->queue_index); } else { - ret = i40e_alloc_rx_bi(ring); - if (ret) - return ret; ring->rx_buf_len = vsi->rx_buf_len; if (ring->vsi->type == I40E_VSI_MAIN) { ret = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq, @@ -13296,6 +13289,14 @@ static int i40e_xdp_setup(struct i40e_vsi *vsi, struct bpf_prog *prog, i40e_reset_and_rebuild(pf, true, true); } + if (!i40e_enabled_xdp_vsi(vsi) && prog) { + if (i40e_realloc_rx_bi_zc(vsi, true)) + return -ENOMEM; + } else if (i40e_enabled_xdp_vsi(vsi) && !prog) { + if (i40e_realloc_rx_bi_zc(vsi, false)) + return -ENOMEM; + } + for (i = 0; i < vsi->num_queue_pairs; i++) WRITE_ONCE(vsi->rx_rings[i]->xdp_prog, vsi->xdp_prog); @@ -13528,6 +13529,7 @@ int i40e_queue_pair_disable(struct i40e_vsi *vsi, int queue_pair) i40e_queue_pair_disable_irq(vsi, queue_pair); err = i40e_queue_pair_toggle_rings(vsi, queue_pair, false /* off */); + i40e_clean_rx_ring(vsi->rx_rings[queue_pair]); i40e_queue_pair_toggle_napi(vsi, queue_pair, false /* off */); i40e_queue_pair_clean_rings(vsi, queue_pair); i40e_queue_pair_reset_stats(vsi, queue_pair); diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index 69e67eb6aea7..b97c95f89fa0 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -1457,14 +1457,6 @@ err: return -ENOMEM; } -int i40e_alloc_rx_bi(struct i40e_ring *rx_ring) -{ - unsigned long sz = sizeof(*rx_ring->rx_bi) * rx_ring->count; - - rx_ring->rx_bi = kzalloc(sz, GFP_KERNEL); - return rx_ring->rx_bi ? 0 : -ENOMEM; -} - static void i40e_clear_rx_bi(struct i40e_ring *rx_ring) { memset(rx_ring->rx_bi, 0, sizeof(*rx_ring->rx_bi) * rx_ring->count); @@ -1593,6 +1585,11 @@ int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring) rx_ring->xdp_prog = rx_ring->vsi->xdp_prog; + rx_ring->rx_bi = + kcalloc(rx_ring->count, sizeof(*rx_ring->rx_bi), GFP_KERNEL); + if (!rx_ring->rx_bi) + return -ENOMEM; + return 0; } diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h index 41f86e9535a0..768290dc6f48 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h @@ -469,7 +469,6 @@ int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size); bool __i40e_chk_linearize(struct sk_buff *skb); int i40e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames, u32 flags); -int i40e_alloc_rx_bi(struct i40e_ring *rx_ring); /** * i40e_get_head - Retrieve head from head writeback diff --git a/drivers/net/ethernet/intel/i40e/i40e_type.h b/drivers/net/ethernet/intel/i40e/i40e_type.h index 7b3f30beb757..388c3d36d96a 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_type.h +++ b/drivers/net/ethernet/intel/i40e/i40e_type.h @@ -1404,6 +1404,10 @@ struct i40e_lldp_variables { #define I40E_PFQF_CTL_0_HASHLUTSIZE_512 0x00010000 /* INPUT SET MASK for RSS, flow director, and flexible payload */ +#define I40E_X722_L3_SRC_SHIFT 49 +#define I40E_X722_L3_SRC_MASK (0x3ULL << I40E_X722_L3_SRC_SHIFT) +#define I40E_X722_L3_DST_SHIFT 41 +#define I40E_X722_L3_DST_MASK (0x3ULL << I40E_X722_L3_DST_SHIFT) #define I40E_L3_SRC_SHIFT 47 #define I40E_L3_SRC_MASK (0x3ULL << I40E_L3_SRC_SHIFT) #define I40E_L3_V6_SRC_SHIFT 43 diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c 
b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index 7e9f6a69eb10..72ddcefc45b1 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -1536,10 +1536,12 @@ bool i40e_reset_vf(struct i40e_vf *vf, bool flr) if (test_bit(__I40E_VF_RESETS_DISABLED, pf->state)) return true; - /* If the VFs have been disabled, this means something else is - * resetting the VF, so we shouldn't continue. - */ - if (test_and_set_bit(__I40E_VF_DISABLE, pf->state)) + /* Bail out if VFs are disabled. */ + if (test_bit(__I40E_VF_DISABLE, pf->state)) + return true; + + /* If VF is being reset already we don't need to continue. */ + if (test_and_set_bit(I40E_VF_STATE_RESETTING, &vf->vf_states)) return true; i40e_trigger_vf_reset(vf, flr); @@ -1576,7 +1578,7 @@ bool i40e_reset_vf(struct i40e_vf *vf, bool flr) i40e_cleanup_reset_vf(vf); i40e_flush(hw); - clear_bit(__I40E_VF_DISABLE, pf->state); + clear_bit(I40E_VF_STATE_RESETTING, &vf->vf_states); return true; } @@ -1609,8 +1611,12 @@ bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr) return false; /* Begin reset on all VFs at once */ - for (v = 0; v < pf->num_alloc_vfs; v++) - i40e_trigger_vf_reset(&pf->vf[v], flr); + for (v = 0; v < pf->num_alloc_vfs; v++) { + vf = &pf->vf[v]; + /* If VF is being reset no need to trigger reset again */ + if (!test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states)) + i40e_trigger_vf_reset(&pf->vf[v], flr); + } /* HW requires some time to make sure it can flush the FIFO for a VF * when it resets it. Poll the VPGEN_VFRSTAT register for each VF in @@ -1626,9 +1632,11 @@ bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr) */ while (v < pf->num_alloc_vfs) { vf = &pf->vf[v]; - reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id)); - if (!(reg & I40E_VPGEN_VFRSTAT_VFRD_MASK)) - break; + if (!test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states)) { + reg = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_id)); + if (!(reg & I40E_VPGEN_VFRSTAT_VFRD_MASK)) + break; + } /* If the current VF has finished resetting, move on * to the next VF in sequence. 
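/*
 * Sketch of the guard added in i40e_reset_vf() above: a per-VF "resetting"
 * bit is atomically test-and-set before doing any work, so a reset already
 * running elsewhere makes later callers bail out early instead of racing.
 * Userspace analogue using C11 atomic_flag; the struct and field names are
 * invented.
 */
#include <stdatomic.h>
#include <stdio.h>

struct vf {
	atomic_flag resetting;
	int id;
};

static int reset_vf(struct vf *vf)
{
	if (atomic_flag_test_and_set(&vf->resetting)) {
		printf("vf%d: reset already in progress, skipping\n", vf->id);
		return 0;
	}

	printf("vf%d: performing reset\n", vf->id);
	/* ... hardware reset sequence would run here ... */

	atomic_flag_clear(&vf->resetting);
	return 0;
}

int main(void)
{
	struct vf vf = { .resetting = ATOMIC_FLAG_INIT, .id = 0 };

	reset_vf(&vf);                             /* runs the reset */
	atomic_flag_test_and_set(&vf.resetting);   /* simulate another thread mid-reset */
	reset_vf(&vf);                             /* bails out early */
	return 0;
}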
@@ -1656,6 +1664,10 @@ bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr) if (pf->vf[v].lan_vsi_idx == 0) continue; + /* If VF is reset in another thread just continue */ + if (test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states)) + continue; + i40e_vsi_stop_rings_no_wait(pf->vsi[pf->vf[v].lan_vsi_idx]); } @@ -1667,6 +1679,10 @@ bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr) if (pf->vf[v].lan_vsi_idx == 0) continue; + /* If VF is reset in another thread just continue */ + if (test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states)) + continue; + i40e_vsi_wait_queues_disabled(pf->vsi[pf->vf[v].lan_vsi_idx]); } @@ -1676,8 +1692,13 @@ bool i40e_reset_all_vfs(struct i40e_pf *pf, bool flr) mdelay(50); /* Finish the reset on each VF */ - for (v = 0; v < pf->num_alloc_vfs; v++) + for (v = 0; v < pf->num_alloc_vfs; v++) { + /* If VF is reset in another thread just continue */ + if (test_bit(I40E_VF_STATE_RESETTING, &vf->vf_states)) + continue; + i40e_cleanup_reset_vf(&pf->vf[v]); + } i40e_flush(hw); clear_bit(__I40E_VF_DISABLE, pf->state); diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h index a554d0a0b09b..358bbdb58795 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.h @@ -39,6 +39,7 @@ enum i40e_vf_states { I40E_VF_STATE_MC_PROMISC, I40E_VF_STATE_UC_PROMISC, I40E_VF_STATE_PRE_ENABLE, + I40E_VF_STATE_RESETTING }; /* VF capabilities */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c index 6d4009e0cbd6..cd7b52fb6b46 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c +++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c @@ -10,14 +10,6 @@ #include "i40e_txrx_common.h" #include "i40e_xsk.h" -int i40e_alloc_rx_bi_zc(struct i40e_ring *rx_ring) -{ - unsigned long sz = sizeof(*rx_ring->rx_bi_zc) * rx_ring->count; - - rx_ring->rx_bi_zc = kzalloc(sz, GFP_KERNEL); - return rx_ring->rx_bi_zc ? 0 : -ENOMEM; -} - void i40e_clear_rx_bi_zc(struct i40e_ring *rx_ring) { memset(rx_ring->rx_bi_zc, 0, @@ -30,6 +22,58 @@ static struct xdp_buff **i40e_rx_bi(struct i40e_ring *rx_ring, u32 idx) } /** + * i40e_realloc_rx_xdp_bi - reallocate SW ring for either XSK or normal buffer + * @rx_ring: Current rx ring + * @pool_present: is pool for XSK present + * + * Try allocating memory and return ENOMEM, if failed to allocate. + * If allocation was successful, substitute buffer with allocated one. + * Returns 0 on success, negative on failure + */ +static int i40e_realloc_rx_xdp_bi(struct i40e_ring *rx_ring, bool pool_present) +{ + size_t elem_size = pool_present ? sizeof(*rx_ring->rx_bi_zc) : + sizeof(*rx_ring->rx_bi); + void *sw_ring = kcalloc(rx_ring->count, elem_size, GFP_KERNEL); + + if (!sw_ring) + return -ENOMEM; + + if (pool_present) { + kfree(rx_ring->rx_bi); + rx_ring->rx_bi = NULL; + rx_ring->rx_bi_zc = sw_ring; + } else { + kfree(rx_ring->rx_bi_zc); + rx_ring->rx_bi_zc = NULL; + rx_ring->rx_bi = sw_ring; + } + return 0; +} + +/** + * i40e_realloc_rx_bi_zc - reallocate rx SW rings + * @vsi: Current VSI + * @zc: is zero copy set + * + * Reallocate buffer for rx_rings that might be used by XSK. + * XDP requires more memory, than rx_buf provides. 
+ * Returns 0 on success, negative on failure + */ +int i40e_realloc_rx_bi_zc(struct i40e_vsi *vsi, bool zc) +{ + struct i40e_ring *rx_ring; + unsigned long q; + + for_each_set_bit(q, vsi->af_xdp_zc_qps, vsi->alloc_queue_pairs) { + rx_ring = vsi->rx_rings[q]; + if (i40e_realloc_rx_xdp_bi(rx_ring, zc)) + return -ENOMEM; + } + return 0; +} + +/** * i40e_xsk_pool_enable - Enable/associate an AF_XDP buffer pool to a * certain ring/qid * @vsi: Current VSI @@ -69,6 +113,10 @@ static int i40e_xsk_pool_enable(struct i40e_vsi *vsi, if (err) return err; + err = i40e_realloc_rx_xdp_bi(vsi->rx_rings[qid], true); + if (err) + return err; + err = i40e_queue_pair_enable(vsi, qid); if (err) return err; @@ -113,6 +161,9 @@ static int i40e_xsk_pool_disable(struct i40e_vsi *vsi, u16 qid) xsk_pool_dma_unmap(pool, I40E_RX_DMA_ATTR); if (if_running) { + err = i40e_realloc_rx_xdp_bi(vsi->rx_rings[qid], false); + if (err) + return err; err = i40e_queue_pair_enable(vsi, qid); if (err) return err; diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.h b/drivers/net/ethernet/intel/i40e/i40e_xsk.h index bb962987f300..821df248f8be 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_xsk.h +++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.h @@ -32,7 +32,7 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget); bool i40e_clean_xdp_tx_irq(struct i40e_vsi *vsi, struct i40e_ring *tx_ring); int i40e_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags); -int i40e_alloc_rx_bi_zc(struct i40e_ring *rx_ring); +int i40e_realloc_rx_bi_zc(struct i40e_vsi *vsi, bool zc); void i40e_clear_rx_bi_zc(struct i40e_ring *rx_ring); #endif /* _I40E_XSK_H_ */ diff --git a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c index 5a9e6563923e..24a701fd140e 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c +++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c @@ -2438,6 +2438,8 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter, list_for_each_entry(f, &adapter->vlan_filter_list, list) { if (f->is_new_vlan) { f->is_new_vlan = false; + if (!f->vlan.vid) + continue; if (f->vlan.tpid == ETH_P_8021Q) set_bit(f->vlan.vid, adapter->vsi.active_cvlans); diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c index 9e36f01dfa4f..e864634d66bc 100644 --- a/drivers/net/ethernet/intel/ice/ice_base.c +++ b/drivers/net/ethernet/intel/ice/ice_base.c @@ -958,7 +958,7 @@ ice_vsi_stop_tx_ring(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src, * associated to the queue to schedule NAPI handler */ q_vector = ring->q_vector; - if (q_vector) + if (q_vector && !(vsi->vf && ice_is_vf_disabled(vsi->vf))) ice_trigger_sw_intr(hw, q_vector); status = ice_dis_vsi_txq(vsi->port_info, txq_meta->vsi_idx, diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c index 938ba8c215cb..7276badfa19e 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_lib.c @@ -2240,6 +2240,31 @@ int ice_vsi_stop_xdp_tx_rings(struct ice_vsi *vsi) } /** + * ice_vsi_is_rx_queue_active + * @vsi: the VSI being configured + * + * Return true if at least one queue is active. 
+ */ +bool ice_vsi_is_rx_queue_active(struct ice_vsi *vsi) +{ + struct ice_pf *pf = vsi->back; + struct ice_hw *hw = &pf->hw; + int i; + + ice_for_each_rxq(vsi, i) { + u32 rx_reg; + int pf_q; + + pf_q = vsi->rxq_map[i]; + rx_reg = rd32(hw, QRX_CTRL(pf_q)); + if (rx_reg & QRX_CTRL_QENA_STAT_M) + return true; + } + + return false; +} + +/** * ice_vsi_is_vlan_pruning_ena - check if VLAN pruning is enabled or not * @vsi: VSI to check whether or not VLAN pruning is enabled. * diff --git a/drivers/net/ethernet/intel/ice/ice_lib.h b/drivers/net/ethernet/intel/ice/ice_lib.h index ec4bf0c89857..dcdf69a693e9 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.h +++ b/drivers/net/ethernet/intel/ice/ice_lib.h @@ -129,4 +129,5 @@ u16 ice_vsi_num_non_zero_vlans(struct ice_vsi *vsi); bool ice_is_feature_supported(struct ice_pf *pf, enum ice_feature f); void ice_clear_feature_support(struct ice_pf *pf, enum ice_feature f); void ice_init_feature_support(struct ice_pf *pf); +bool ice_vsi_is_rx_queue_active(struct ice_vsi *vsi); #endif /* !_ICE_LIB_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_vf_lib.c b/drivers/net/ethernet/intel/ice/ice_vf_lib.c index 0abeed092de1..1c51778db951 100644 --- a/drivers/net/ethernet/intel/ice/ice_vf_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_vf_lib.c @@ -576,7 +576,10 @@ int ice_reset_vf(struct ice_vf *vf, u32 flags) return -EINVAL; } ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, vf->vf_id); - ice_vsi_stop_all_rx_rings(vsi); + + if (ice_vsi_is_rx_queue_active(vsi)) + ice_vsi_stop_all_rx_rings(vsi); + dev_dbg(dev, "VF is already disabled, there is no need for resetting it, telling VM, all is fine %d\n", vf->vf_id); return 0; diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c index 59aab4086dcc..f5961bdcc480 100644 --- a/drivers/net/ethernet/lantiq_etop.c +++ b/drivers/net/ethernet/lantiq_etop.c @@ -485,7 +485,6 @@ ltq_etop_tx(struct sk_buff *skb, struct net_device *dev) len = skb->len < ETH_ZLEN ? 
ETH_ZLEN : skb->len; if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) || ch->skb[ch->dma.desc]) { - dev_kfree_skb_any(skb); netdev_err(dev, "tx ring full\n"); netif_tx_stop_queue(txq); return NETDEV_TX_BUSY; diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c index 707993b445d1..8941f69d93e9 100644 --- a/drivers/net/ethernet/marvell/mv643xx_eth.c +++ b/drivers/net/ethernet/marvell/mv643xx_eth.c @@ -2481,6 +2481,7 @@ out_free: for (i = 0; i < mp->rxq_count; i++) rxq_deinit(mp->rxq + i); out: + napi_disable(&mp->napi); free_irq(dev->irq, dev); return err; diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c index 9089adcb75f9..b45dd7f04e21 100644 --- a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c +++ b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c @@ -521,14 +521,12 @@ static int octep_open(struct net_device *netdev) octep_oq_dbell_init(oct); ret = octep_get_link_status(oct); - if (ret) + if (ret > 0) octep_link_up(netdev); return 0; set_queues_err: - octep_napi_disable(oct); - octep_napi_delete(oct); octep_clean_irqs(oct); setup_irq_err: octep_free_oqs(oct); @@ -958,7 +956,7 @@ int octep_device_setup(struct octep_device *oct) ret = octep_ctrl_mbox_init(ctrl_mbox); if (ret) { dev_err(&pdev->dev, "Failed to initialize control mbox\n"); - return -1; + goto unsupported_dev; } oct->ctrl_mbox_ifstats_offset = OCTEP_CTRL_MBOX_SZ(ctrl_mbox->h2fq.elem_sz, ctrl_mbox->h2fq.elem_cnt, @@ -968,6 +966,10 @@ int octep_device_setup(struct octep_device *oct) return 0; unsupported_dev: + for (i = 0; i < OCTEP_MMIO_REGIONS; i++) + iounmap(oct->mmio[i].hw_addr); + + kfree(oct->conf); return -1; } @@ -1070,7 +1072,11 @@ static int octep_probe(struct pci_dev *pdev, const struct pci_device_id *ent) netdev->max_mtu = OCTEP_MAX_MTU; netdev->mtu = OCTEP_DEFAULT_MTU; - octep_get_mac_addr(octep_dev, octep_dev->mac_addr); + err = octep_get_mac_addr(octep_dev, octep_dev->mac_addr); + if (err) { + dev_err(&pdev->dev, "Failed to get mac address\n"); + goto register_dev_err; + } eth_hw_addr_set(netdev, octep_dev->mac_addr); err = register_netdev(netdev); diff --git a/drivers/net/ethernet/marvell/octeontx2/Kconfig b/drivers/net/ethernet/marvell/octeontx2/Kconfig index e1036b0eb6b1..6b4f640163f7 100644 --- a/drivers/net/ethernet/marvell/octeontx2/Kconfig +++ b/drivers/net/ethernet/marvell/octeontx2/Kconfig @@ -32,10 +32,12 @@ config OCTEONTX2_PF tristate "Marvell OcteonTX2 NIC Physical Function driver" select OCTEONTX2_MBOX select NET_DEVLINK + depends on MACSEC || !MACSEC depends on (64BIT && COMPILE_TEST) || ARM64 select DIMLIB depends on PCI depends on PTP_1588_CLOCK_OPTIONAL + depends on MACSEC || !MACSEC help This driver supports Marvell's OcteonTX2 NIC physical function. 
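Aside: the octeontx2 Kconfig hunk above adds the usual "depends on MACSEC || !MACSEC" tristate idiom, which forbids building OCTEONTX2_PF into the kernel while MACSEC is a module. Several error-path fixes in this range (octep_device_setup() above, prestera_rxtx_switch_init(), mtk_probe() and mtk_wed_add_hw() further down) follow the same shape: each acquisition after the first gets a goto label that unwinds, in reverse order, only what has already been acquired. A stand-alone sketch of that unwind ladder, assuming hypothetical acquire()/release() helpers in place of the real iounmap()/kfree()/put_device() calls:

#include <stdio.h>
#include <stdlib.h>

static void *acquire(const char *what, int fail)
{
	if (fail) {
		printf("acquire %s: failed\n", what);
		return NULL;
	}
	printf("acquire %s: ok\n", what);
	return malloc(1);
}

static void release(const char *what, void *p)
{
	printf("release %s\n", what);
	free(p);
}

/* Each failure jumps to the label that frees only what was acquired before it,
 * in reverse order, so nothing is leaked and nothing is released twice.
 */
static int device_setup(int fail_at)
{
	void *mmio, *conf, *mbox;

	mmio = acquire("mmio regions", fail_at == 1);
	if (!mmio)
		return -1;

	conf = acquire("config struct", fail_at == 2);
	if (!conf)
		goto err_unmap;

	mbox = acquire("control mbox", fail_at == 3);
	if (!mbox)
		goto err_free_conf;

	/* In a driver these would stay live until remove(); tear down here only
	 * so the demo exits cleanly.
	 */
	release("control mbox", mbox);
	release("config struct", conf);
	release("mmio regions", mmio);
	return 0;

err_free_conf:
	release("config struct", conf);
err_unmap:
	release("mmio regions", mmio);
	return -1;
}

int main(void)
{
	/* Fail at the last step to exercise the full unwind. */
	if (device_setup(3) != 0)
		printf("setup failed, resources unwound\n");
	return 0;
}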
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c index 9809f551fc2e..9ec5f38d38a8 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/cn10k_macsec.c @@ -815,6 +815,7 @@ free_flowid: cn10k_mcs_free_rsrc(pfvf, MCS_TX, MCS_RSRC_TYPE_FLOWID, txsc->hw_flow_id, false); fail: + kfree(txsc); return ERR_PTR(ret); } @@ -870,6 +871,7 @@ free_flowid: cn10k_mcs_free_rsrc(pfvf, MCS_RX, MCS_RSRC_TYPE_FLOWID, rxsc->hw_flow_id, false); fail: + kfree(rxsc); return ERR_PTR(ret); } diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c index 9ac9e6615ae7..9e10e7471b88 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.c @@ -898,6 +898,7 @@ static int otx2_sq_init(struct otx2_nic *pfvf, u16 qidx, u16 sqb_aura) } sq->head = 0; + sq->cons_head = 0; sq->sqe_per_sqb = (pfvf->hw.sqb_size / sq->sqe_size) - 1; sq->num_sqbs = (qset->sqe_cnt + sq->sqe_per_sqb) / sq->sqe_per_sqb; /* Set SQE threshold to 10% of total SQEs */ diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c index 892ca88e0cf4..303930499a4c 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c @@ -15,6 +15,7 @@ #include <net/ip.h> #include <linux/bpf.h> #include <linux/bpf_trace.h> +#include <linux/bitfield.h> #include "otx2_reg.h" #include "otx2_common.h" @@ -1171,6 +1172,59 @@ int otx2_set_real_num_queues(struct net_device *netdev, } EXPORT_SYMBOL(otx2_set_real_num_queues); +static char *nix_sqoperr_e_str[NIX_SQOPERR_MAX] = { + "NIX_SQOPERR_OOR", + "NIX_SQOPERR_CTX_FAULT", + "NIX_SQOPERR_CTX_POISON", + "NIX_SQOPERR_DISABLED", + "NIX_SQOPERR_SIZE_ERR", + "NIX_SQOPERR_OFLOW", + "NIX_SQOPERR_SQB_NULL", + "NIX_SQOPERR_SQB_FAULT", + "NIX_SQOPERR_SQE_SZ_ZERO", +}; + +static char *nix_mnqerr_e_str[NIX_MNQERR_MAX] = { + "NIX_MNQERR_SQ_CTX_FAULT", + "NIX_MNQERR_SQ_CTX_POISON", + "NIX_MNQERR_SQB_FAULT", + "NIX_MNQERR_SQB_POISON", + "NIX_MNQERR_TOTAL_ERR", + "NIX_MNQERR_LSO_ERR", + "NIX_MNQERR_CQ_QUERY_ERR", + "NIX_MNQERR_MAX_SQE_SIZE_ERR", + "NIX_MNQERR_MAXLEN_ERR", + "NIX_MNQERR_SQE_SIZEM1_ZERO", +}; + +static char *nix_snd_status_e_str[NIX_SND_STATUS_MAX] = { + "NIX_SND_STATUS_GOOD", + "NIX_SND_STATUS_SQ_CTX_FAULT", + "NIX_SND_STATUS_SQ_CTX_POISON", + "NIX_SND_STATUS_SQB_FAULT", + "NIX_SND_STATUS_SQB_POISON", + "NIX_SND_STATUS_HDR_ERR", + "NIX_SND_STATUS_EXT_ERR", + "NIX_SND_STATUS_JUMP_FAULT", + "NIX_SND_STATUS_JUMP_POISON", + "NIX_SND_STATUS_CRC_ERR", + "NIX_SND_STATUS_IMM_ERR", + "NIX_SND_STATUS_SG_ERR", + "NIX_SND_STATUS_MEM_ERR", + "NIX_SND_STATUS_INVALID_SUBDC", + "NIX_SND_STATUS_SUBDC_ORDER_ERR", + "NIX_SND_STATUS_DATA_FAULT", + "NIX_SND_STATUS_DATA_POISON", + "NIX_SND_STATUS_NPC_DROP_ACTION", + "NIX_SND_STATUS_LOCK_VIOL", + "NIX_SND_STATUS_NPC_UCAST_CHAN_ERR", + "NIX_SND_STATUS_NPC_MCAST_CHAN_ERR", + "NIX_SND_STATUS_NPC_MCAST_ABORT", + "NIX_SND_STATUS_NPC_VTAG_PTR_ERR", + "NIX_SND_STATUS_NPC_VTAG_SIZE_ERR", + "NIX_SND_STATUS_SEND_STATS_ERR", +}; + static irqreturn_t otx2_q_intr_handler(int irq, void *data) { struct otx2_nic *pf = data; @@ -1204,46 +1258,67 @@ static irqreturn_t otx2_q_intr_handler(int irq, void *data) /* SQ */ for (qidx = 0; qidx < pf->hw.tot_tx_queues; qidx++) { + u64 sq_op_err_dbg, mnq_err_dbg, 
snd_err_dbg; + u8 sq_op_err_code, mnq_err_code, snd_err_code; + + /* Below debug registers captures first errors corresponding to + * those registers. We don't have to check against SQ qid as + * these are fatal errors. + */ + ptr = otx2_get_regaddr(pf, NIX_LF_SQ_OP_INT); val = otx2_atomic64_add((qidx << 44), ptr); otx2_write64(pf, NIX_LF_SQ_OP_INT, (qidx << 44) | (val & NIX_SQINT_BITS)); - if (!(val & (NIX_SQINT_BITS | BIT_ULL(42)))) - continue; - if (val & BIT_ULL(42)) { netdev_err(pf->netdev, "SQ%lld: error reading NIX_LF_SQ_OP_INT, NIX_LF_ERR_INT 0x%llx\n", qidx, otx2_read64(pf, NIX_LF_ERR_INT)); - } else { - if (val & BIT_ULL(NIX_SQINT_LMT_ERR)) { - netdev_err(pf->netdev, "SQ%lld: LMT store error NIX_LF_SQ_OP_ERR_DBG:0x%llx", - qidx, - otx2_read64(pf, - NIX_LF_SQ_OP_ERR_DBG)); - otx2_write64(pf, NIX_LF_SQ_OP_ERR_DBG, - BIT_ULL(44)); - } - if (val & BIT_ULL(NIX_SQINT_MNQ_ERR)) { - netdev_err(pf->netdev, "SQ%lld: Meta-descriptor enqueue error NIX_LF_MNQ_ERR_DGB:0x%llx\n", - qidx, - otx2_read64(pf, NIX_LF_MNQ_ERR_DBG)); - otx2_write64(pf, NIX_LF_MNQ_ERR_DBG, - BIT_ULL(44)); - } - if (val & BIT_ULL(NIX_SQINT_SEND_ERR)) { - netdev_err(pf->netdev, "SQ%lld: Send error, NIX_LF_SEND_ERR_DBG 0x%llx", - qidx, - otx2_read64(pf, - NIX_LF_SEND_ERR_DBG)); - otx2_write64(pf, NIX_LF_SEND_ERR_DBG, - BIT_ULL(44)); - } - if (val & BIT_ULL(NIX_SQINT_SQB_ALLOC_FAIL)) - netdev_err(pf->netdev, "SQ%lld: SQB allocation failed", - qidx); + goto done; } + sq_op_err_dbg = otx2_read64(pf, NIX_LF_SQ_OP_ERR_DBG); + if (!(sq_op_err_dbg & BIT(44))) + goto chk_mnq_err_dbg; + + sq_op_err_code = FIELD_GET(GENMASK(7, 0), sq_op_err_dbg); + netdev_err(pf->netdev, "SQ%lld: NIX_LF_SQ_OP_ERR_DBG(%llx) err=%s\n", + qidx, sq_op_err_dbg, nix_sqoperr_e_str[sq_op_err_code]); + + otx2_write64(pf, NIX_LF_SQ_OP_ERR_DBG, BIT_ULL(44)); + + if (sq_op_err_code == NIX_SQOPERR_SQB_NULL) + goto chk_mnq_err_dbg; + + /* Err is not NIX_SQOPERR_SQB_NULL, call aq function to read SQ structure. + * TODO: But we are in irq context. 
How to call mbox functions which does sleep + */ + +chk_mnq_err_dbg: + mnq_err_dbg = otx2_read64(pf, NIX_LF_MNQ_ERR_DBG); + if (!(mnq_err_dbg & BIT(44))) + goto chk_snd_err_dbg; + + mnq_err_code = FIELD_GET(GENMASK(7, 0), mnq_err_dbg); + netdev_err(pf->netdev, "SQ%lld: NIX_LF_MNQ_ERR_DBG(%llx) err=%s\n", + qidx, mnq_err_dbg, nix_mnqerr_e_str[mnq_err_code]); + otx2_write64(pf, NIX_LF_MNQ_ERR_DBG, BIT_ULL(44)); + +chk_snd_err_dbg: + snd_err_dbg = otx2_read64(pf, NIX_LF_SEND_ERR_DBG); + if (snd_err_dbg & BIT(44)) { + snd_err_code = FIELD_GET(GENMASK(7, 0), snd_err_dbg); + netdev_err(pf->netdev, "SQ%lld: NIX_LF_SND_ERR_DBG:0x%llx err=%s\n", + qidx, snd_err_dbg, nix_snd_status_e_str[snd_err_code]); + otx2_write64(pf, NIX_LF_SEND_ERR_DBG, BIT_ULL(44)); + } + +done: + /* Print values and reset */ + if (val & BIT_ULL(NIX_SQINT_SQB_ALLOC_FAIL)) + netdev_err(pf->netdev, "SQ%lld: SQB allocation failed", + qidx); + schedule_work(&pf->reset_task); } diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_struct.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_struct.h index aa205a0d158f..fa37b9f312ca 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_struct.h +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_struct.h @@ -281,4 +281,61 @@ enum nix_sqint_e { BIT_ULL(NIX_SQINT_SEND_ERR) | \ BIT_ULL(NIX_SQINT_SQB_ALLOC_FAIL)) +enum nix_sqoperr_e { + NIX_SQOPERR_OOR = 0, + NIX_SQOPERR_CTX_FAULT = 1, + NIX_SQOPERR_CTX_POISON = 2, + NIX_SQOPERR_DISABLED = 3, + NIX_SQOPERR_SIZE_ERR = 4, + NIX_SQOPERR_OFLOW = 5, + NIX_SQOPERR_SQB_NULL = 6, + NIX_SQOPERR_SQB_FAULT = 7, + NIX_SQOPERR_SQE_SZ_ZERO = 8, + NIX_SQOPERR_MAX, +}; + +enum nix_mnqerr_e { + NIX_MNQERR_SQ_CTX_FAULT = 0, + NIX_MNQERR_SQ_CTX_POISON = 1, + NIX_MNQERR_SQB_FAULT = 2, + NIX_MNQERR_SQB_POISON = 3, + NIX_MNQERR_TOTAL_ERR = 4, + NIX_MNQERR_LSO_ERR = 5, + NIX_MNQERR_CQ_QUERY_ERR = 6, + NIX_MNQERR_MAX_SQE_SIZE_ERR = 7, + NIX_MNQERR_MAXLEN_ERR = 8, + NIX_MNQERR_SQE_SIZEM1_ZERO = 9, + NIX_MNQERR_MAX, +}; + +enum nix_snd_status_e { + NIX_SND_STATUS_GOOD = 0x0, + NIX_SND_STATUS_SQ_CTX_FAULT = 0x1, + NIX_SND_STATUS_SQ_CTX_POISON = 0x2, + NIX_SND_STATUS_SQB_FAULT = 0x3, + NIX_SND_STATUS_SQB_POISON = 0x4, + NIX_SND_STATUS_HDR_ERR = 0x5, + NIX_SND_STATUS_EXT_ERR = 0x6, + NIX_SND_STATUS_JUMP_FAULT = 0x7, + NIX_SND_STATUS_JUMP_POISON = 0x8, + NIX_SND_STATUS_CRC_ERR = 0x9, + NIX_SND_STATUS_IMM_ERR = 0x10, + NIX_SND_STATUS_SG_ERR = 0x11, + NIX_SND_STATUS_MEM_ERR = 0x12, + NIX_SND_STATUS_INVALID_SUBDC = 0x13, + NIX_SND_STATUS_SUBDC_ORDER_ERR = 0x14, + NIX_SND_STATUS_DATA_FAULT = 0x15, + NIX_SND_STATUS_DATA_POISON = 0x16, + NIX_SND_STATUS_NPC_DROP_ACTION = 0x17, + NIX_SND_STATUS_LOCK_VIOL = 0x18, + NIX_SND_STATUS_NPC_UCAST_CHAN_ERR = 0x19, + NIX_SND_STATUS_NPC_MCAST_CHAN_ERR = 0x20, + NIX_SND_STATUS_NPC_MCAST_ABORT = 0x21, + NIX_SND_STATUS_NPC_VTAG_PTR_ERR = 0x22, + NIX_SND_STATUS_NPC_VTAG_SIZE_ERR = 0x23, + NIX_SND_STATUS_SEND_MEM_FAULT = 0x24, + NIX_SND_STATUS_SEND_STATS_ERR = 0x25, + NIX_SND_STATUS_MAX, +}; + #endif /* OTX2_STRUCT_H */ diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c index 5ec11d71bf60..ef10aef3cda0 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.c @@ -441,6 +441,7 @@ static int otx2_tx_napi_handler(struct otx2_nic *pfvf, struct otx2_cq_queue *cq, int budget) { int tx_pkts = 0, tx_bytes = 0, qidx; + struct otx2_snd_queue *sq; struct nix_cqe_tx_s *cqe; int processed_cqe = 
0; @@ -451,6 +452,9 @@ static int otx2_tx_napi_handler(struct otx2_nic *pfvf, return 0; process_cqe: + qidx = cq->cq_idx - pfvf->hw.rx_queues; + sq = &pfvf->qset.sq[qidx]; + while (likely(processed_cqe < budget) && cq->pend_cqe) { cqe = (struct nix_cqe_tx_s *)otx2_get_next_cqe(cq); if (unlikely(!cqe)) { @@ -458,18 +462,20 @@ process_cqe: return 0; break; } + if (cq->cq_type == CQ_XDP) { - qidx = cq->cq_idx - pfvf->hw.rx_queues; - otx2_xdp_snd_pkt_handler(pfvf, &pfvf->qset.sq[qidx], - cqe); + otx2_xdp_snd_pkt_handler(pfvf, sq, cqe); } else { - otx2_snd_pkt_handler(pfvf, cq, - &pfvf->qset.sq[cq->cint_idx], - cqe, budget, &tx_pkts, &tx_bytes); + otx2_snd_pkt_handler(pfvf, cq, sq, cqe, budget, + &tx_pkts, &tx_bytes); } + cqe->hdr.cqe_type = NIX_XQE_TYPE_INVALID; processed_cqe++; cq->pend_cqe--; + + sq->cons_head++; + sq->cons_head &= (sq->sqe_cnt - 1); } /* Free CQEs to HW */ @@ -1072,17 +1078,17 @@ bool otx2_sq_append_skb(struct net_device *netdev, struct otx2_snd_queue *sq, { struct netdev_queue *txq = netdev_get_tx_queue(netdev, qidx); struct otx2_nic *pfvf = netdev_priv(netdev); - int offset, num_segs, free_sqe; + int offset, num_segs, free_desc; struct nix_sqe_hdr_s *sqe_hdr; - /* Check if there is room for new SQE. - * 'Num of SQBs freed to SQ's pool - SQ's Aura count' - * will give free SQE count. + /* Check if there is enough room between producer + * and consumer index. */ - free_sqe = (sq->num_sqbs - *sq->aura_fc_addr) * sq->sqe_per_sqb; + free_desc = (sq->cons_head - sq->head - 1 + sq->sqe_cnt) & (sq->sqe_cnt - 1); + if (free_desc < sq->sqe_thresh) + return false; - if (free_sqe < sq->sqe_thresh || - free_sqe < otx2_get_sqe_count(pfvf, skb)) + if (free_desc < otx2_get_sqe_count(pfvf, skb)) return false; num_segs = skb_shinfo(skb)->nr_frags + 1; diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h index fbe62bbfb789..93cac2c2664c 100644 --- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h +++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_txrx.h @@ -79,6 +79,7 @@ struct sg_list { struct otx2_snd_queue { u8 aura_id; u16 head; + u16 cons_head; u16 sqe_size; u32 sqe_cnt; u16 num_sqbs; diff --git a/drivers/net/ethernet/marvell/prestera/prestera_rxtx.c b/drivers/net/ethernet/marvell/prestera/prestera_rxtx.c index 42ee963e9f75..9277a8fd1339 100644 --- a/drivers/net/ethernet/marvell/prestera/prestera_rxtx.c +++ b/drivers/net/ethernet/marvell/prestera/prestera_rxtx.c @@ -776,6 +776,7 @@ tx_done: int prestera_rxtx_switch_init(struct prestera_switch *sw) { struct prestera_rxtx *rxtx; + int err; rxtx = kzalloc(sizeof(*rxtx), GFP_KERNEL); if (!rxtx) @@ -783,7 +784,11 @@ int prestera_rxtx_switch_init(struct prestera_switch *sw) sw->rxtx = rxtx; - return prestera_sdma_switch_init(sw); + err = prestera_sdma_switch_init(sw); + if (err) + kfree(rxtx); + + return err; } void prestera_rxtx_switch_fini(struct prestera_switch *sw) diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c index 4fba7cb0144b..7cd381530aa4 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c @@ -4060,19 +4060,23 @@ static int mtk_probe(struct platform_device *pdev) eth->irq[i] = platform_get_irq(pdev, i); if (eth->irq[i] < 0) { dev_err(&pdev->dev, "no IRQ%d resource found\n", i); - return -ENXIO; + err = -ENXIO; + goto err_wed_exit; } } for (i = 0; i < ARRAY_SIZE(eth->clks); i++) { eth->clks[i] = devm_clk_get(eth->dev, mtk_clks_source_name[i]); 
if (IS_ERR(eth->clks[i])) { - if (PTR_ERR(eth->clks[i]) == -EPROBE_DEFER) - return -EPROBE_DEFER; + if (PTR_ERR(eth->clks[i]) == -EPROBE_DEFER) { + err = -EPROBE_DEFER; + goto err_wed_exit; + } if (eth->soc->required_clks & BIT(i)) { dev_err(&pdev->dev, "clock %s not found\n", mtk_clks_source_name[i]); - return -EINVAL; + err = -EINVAL; + goto err_wed_exit; + } eth->clks[i] = NULL; } @@ -4083,7 +4087,7 @@ static int mtk_probe(struct platform_device *pdev) err = mtk_hw_init(eth); if (err) - return err; + goto err_wed_exit; eth->hwlro = MTK_HAS_CAPS(eth->soc->caps, MTK_HWLRO); @@ -4179,6 +4183,8 @@ err_free_dev: mtk_free_dev(eth); err_deinit_hw: mtk_hw_deinit(eth); +err_wed_exit: + mtk_wed_exit(); return err; } @@ -4198,6 +4204,7 @@ static int mtk_remove(struct platform_device *pdev) phylink_disconnect_phy(mac->phylink); } + mtk_wed_exit(); mtk_hw_deinit(eth); netif_napi_del(&eth->tx_napi); diff --git a/drivers/net/ethernet/mediatek/mtk_ppe.c b/drivers/net/ethernet/mediatek/mtk_ppe.c index ae00e572390d..2d8ca99f2467 100644 --- a/drivers/net/ethernet/mediatek/mtk_ppe.c +++ b/drivers/net/ethernet/mediatek/mtk_ppe.c @@ -397,12 +397,6 @@ int mtk_foe_entry_set_wdma(struct mtk_eth *eth, struct mtk_foe_entry *entry, return 0; } -static inline bool mtk_foe_entry_usable(struct mtk_foe_entry *entry) -{ - return !(entry->ib1 & MTK_FOE_IB1_STATIC) && - FIELD_GET(MTK_FOE_IB1_STATE, entry->ib1) != MTK_FOE_STATE_BIND; -} - static bool mtk_flow_entry_match(struct mtk_eth *eth, struct mtk_flow_entry *entry, struct mtk_foe_entry *data) diff --git a/drivers/net/ethernet/mediatek/mtk_star_emac.c b/drivers/net/ethernet/mediatek/mtk_star_emac.c index 7e890f81148e..7050351250b7 100644 --- a/drivers/net/ethernet/mediatek/mtk_star_emac.c +++ b/drivers/net/ethernet/mediatek/mtk_star_emac.c @@ -1026,6 +1026,8 @@ static int mtk_star_enable(struct net_device *ndev) return 0; err_free_irq: + napi_disable(&priv->rx_napi); + napi_disable(&priv->tx_napi); free_irq(ndev->irq, ndev); err_free_skbs: mtk_star_free_rx_skbs(priv); diff --git a/drivers/net/ethernet/mediatek/mtk_wed.c b/drivers/net/ethernet/mediatek/mtk_wed.c index 099b6e0df619..65e01bf4b4d2 100644 --- a/drivers/net/ethernet/mediatek/mtk_wed.c +++ b/drivers/net/ethernet/mediatek/mtk_wed.c @@ -1072,16 +1072,16 @@ void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth, pdev = of_find_device_by_node(np); if (!pdev) - return; + goto err_of_node_put; get_device(&pdev->dev); irq = platform_get_irq(pdev, 0); if (irq < 0) - return; + goto err_put_device; regs = syscon_regmap_lookup_by_phandle(np, NULL); if (IS_ERR(regs)) - return; + goto err_put_device; rcu_assign_pointer(mtk_soc_wed_ops, &wed_ops); @@ -1124,8 +1124,16 @@ void mtk_wed_add_hw(struct device_node *np, struct mtk_eth *eth, hw_list[index] = hw; + mutex_unlock(&hw_lock); + + return; + unlock: mutex_unlock(&hw_lock); +err_put_device: + put_device(&pdev->dev); +err_of_node_put: + of_node_put(np); } void mtk_wed_exit(void) @@ -1146,6 +1154,7 @@ void mtk_wed_exit(void) hw_list[i] = NULL; debugfs_remove(hw->debugfs_dir); put_device(hw->dev); + of_node_put(hw->node); kfree(hw); } } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c index 0377392848d9..2e0d59ca62b5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c @@ -1770,12 +1770,17 @@ void mlx5_cmd_flush(struct mlx5_core_dev *dev) struct mlx5_cmd *cmd = &dev->cmd; int i; - for (i = 0; i < cmd->max_reg_cmds; i++) - while (down_trylock(&cmd->sem)) +
for (i = 0; i < cmd->max_reg_cmds; i++) { + while (down_trylock(&cmd->sem)) { mlx5_cmd_trigger_completions(dev); + cond_resched(); + } + } - while (down_trylock(&cmd->pages_sem)) + while (down_trylock(&cmd->pages_sem)) { mlx5_cmd_trigger_completions(dev); + cond_resched(); + } /* Unlock cmdif */ up(&cmd->pages_sem); @@ -2004,7 +2009,7 @@ void mlx5_cmd_init_async_ctx(struct mlx5_core_dev *dev, ctx->dev = dev; /* Starts at 1 to avoid doing wake_up if we are not cleaning up */ atomic_set(&ctx->num_inflight, 1); - init_waitqueue_head(&ctx->wait); + init_completion(&ctx->inflight_done); } EXPORT_SYMBOL(mlx5_cmd_init_async_ctx); @@ -2018,8 +2023,8 @@ EXPORT_SYMBOL(mlx5_cmd_init_async_ctx); */ void mlx5_cmd_cleanup_async_ctx(struct mlx5_async_ctx *ctx) { - atomic_dec(&ctx->num_inflight); - wait_event(ctx->wait, atomic_read(&ctx->num_inflight) == 0); + if (!atomic_dec_and_test(&ctx->num_inflight)) + wait_for_completion(&ctx->inflight_done); } EXPORT_SYMBOL(mlx5_cmd_cleanup_async_ctx); @@ -2032,7 +2037,7 @@ static void mlx5_cmd_exec_cb_handler(int status, void *_work) status = cmd_status_err(ctx->dev, status, work->opcode, work->out); work->user_callback(status, work); if (atomic_dec_and_test(&ctx->num_inflight)) - wake_up(&ctx->wait); + complete(&ctx->inflight_done); } int mlx5_cmd_exec_cb(struct mlx5_async_ctx *ctx, void *in, int in_size, @@ -2050,7 +2055,7 @@ int mlx5_cmd_exec_cb(struct mlx5_async_ctx *ctx, void *in, int in_size, ret = cmd_exec(ctx->dev, in, in_size, out, out_size, mlx5_cmd_exec_cb_handler, work, false); if (ret && atomic_dec_and_test(&ctx->num_inflight)) - wake_up(&ctx->wait); + complete(&ctx->inflight_done); return ret; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h index 5bce554e131a..cc7efde88ac3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/ptp.h @@ -6,6 +6,7 @@ #include "en.h" #include "en_stats.h" +#include "en/txrx.h" #include <linux/ptp_classify.h> #define MLX5E_PTP_CHANNEL_IX 0 @@ -68,6 +69,14 @@ static inline bool mlx5e_use_ptpsq(struct sk_buff *skb) fk.ports.dst == htons(PTP_EV_PORT)); } +static inline bool mlx5e_ptpsq_fifo_has_room(struct mlx5e_txqsq *sq) +{ + if (!sq->ptpsq) + return true; + + return mlx5e_skb_fifo_has_room(&sq->ptpsq->skb_fifo); +} + int mlx5e_ptp_open(struct mlx5e_priv *priv, struct mlx5e_params *params, u8 lag_port, struct mlx5e_ptp **cp); void mlx5e_ptp_close(struct mlx5e_ptp *c); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c index 39ef2a2561a3..8099a21e674c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c @@ -164,6 +164,36 @@ static int mlx5_esw_bridge_port_changeupper(struct notifier_block *nb, void *ptr return err; } +static int +mlx5_esw_bridge_changeupper_validate_netdev(void *ptr) +{ + struct net_device *dev = netdev_notifier_info_to_dev(ptr); + struct netdev_notifier_changeupper_info *info = ptr; + struct net_device *upper = info->upper_dev; + struct net_device *lower; + struct list_head *iter; + + if (!netif_is_bridge_master(upper) || !netif_is_lag_master(dev)) + return 0; + + netdev_for_each_lower_dev(dev, lower, iter) { + struct mlx5_core_dev *mdev; + struct mlx5e_priv *priv; + + if (!mlx5e_eswitch_rep(lower)) + continue; + + priv = netdev_priv(lower); + mdev = priv->mdev; + if (!mlx5_lag_is_active(mdev)) + return -EAGAIN; + if 
(!mlx5_lag_is_shared_fdb(mdev)) + return -EOPNOTSUPP; + } + + return 0; +} + static int mlx5_esw_bridge_switchdev_port_event(struct notifier_block *nb, unsigned long event, void *ptr) { @@ -171,6 +201,7 @@ static int mlx5_esw_bridge_switchdev_port_event(struct notifier_block *nb, switch (event) { case NETDEV_PRECHANGEUPPER: + err = mlx5_esw_bridge_changeupper_validate_netdev(ptr); break; case NETDEV_CHANGEUPPER: diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.c index 305fde62a78d..3337241cfd84 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/act.c @@ -6,70 +6,42 @@ #include "en/tc_priv.h" #include "mlx5_core.h" -/* Must be aligned with enum flow_action_id. */ static struct mlx5e_tc_act *tc_acts_fdb[NUM_FLOW_ACTIONS] = { - &mlx5e_tc_act_accept, - &mlx5e_tc_act_drop, - &mlx5e_tc_act_trap, - &mlx5e_tc_act_goto, - &mlx5e_tc_act_mirred, - &mlx5e_tc_act_mirred, - &mlx5e_tc_act_redirect_ingress, - NULL, /* FLOW_ACTION_MIRRED_INGRESS, */ - &mlx5e_tc_act_vlan, - &mlx5e_tc_act_vlan, - &mlx5e_tc_act_vlan_mangle, - &mlx5e_tc_act_tun_encap, - &mlx5e_tc_act_tun_decap, - &mlx5e_tc_act_pedit, - &mlx5e_tc_act_pedit, - &mlx5e_tc_act_csum, - NULL, /* FLOW_ACTION_MARK, */ - &mlx5e_tc_act_ptype, - NULL, /* FLOW_ACTION_PRIORITY, */ - NULL, /* FLOW_ACTION_WAKE, */ - NULL, /* FLOW_ACTION_QUEUE, */ - &mlx5e_tc_act_sample, - &mlx5e_tc_act_police, - &mlx5e_tc_act_ct, - NULL, /* FLOW_ACTION_CT_METADATA, */ - &mlx5e_tc_act_mpls_push, - &mlx5e_tc_act_mpls_pop, - NULL, /* FLOW_ACTION_MPLS_MANGLE, */ - NULL, /* FLOW_ACTION_GATE, */ - NULL, /* FLOW_ACTION_PPPOE_PUSH, */ - NULL, /* FLOW_ACTION_JUMP, */ - NULL, /* FLOW_ACTION_PIPE, */ - &mlx5e_tc_act_vlan, - &mlx5e_tc_act_vlan, + [FLOW_ACTION_ACCEPT] = &mlx5e_tc_act_accept, + [FLOW_ACTION_DROP] = &mlx5e_tc_act_drop, + [FLOW_ACTION_TRAP] = &mlx5e_tc_act_trap, + [FLOW_ACTION_GOTO] = &mlx5e_tc_act_goto, + [FLOW_ACTION_REDIRECT] = &mlx5e_tc_act_mirred, + [FLOW_ACTION_MIRRED] = &mlx5e_tc_act_mirred, + [FLOW_ACTION_REDIRECT_INGRESS] = &mlx5e_tc_act_redirect_ingress, + [FLOW_ACTION_VLAN_PUSH] = &mlx5e_tc_act_vlan, + [FLOW_ACTION_VLAN_POP] = &mlx5e_tc_act_vlan, + [FLOW_ACTION_VLAN_MANGLE] = &mlx5e_tc_act_vlan_mangle, + [FLOW_ACTION_TUNNEL_ENCAP] = &mlx5e_tc_act_tun_encap, + [FLOW_ACTION_TUNNEL_DECAP] = &mlx5e_tc_act_tun_decap, + [FLOW_ACTION_MANGLE] = &mlx5e_tc_act_pedit, + [FLOW_ACTION_ADD] = &mlx5e_tc_act_pedit, + [FLOW_ACTION_CSUM] = &mlx5e_tc_act_csum, + [FLOW_ACTION_PTYPE] = &mlx5e_tc_act_ptype, + [FLOW_ACTION_SAMPLE] = &mlx5e_tc_act_sample, + [FLOW_ACTION_POLICE] = &mlx5e_tc_act_police, + [FLOW_ACTION_CT] = &mlx5e_tc_act_ct, + [FLOW_ACTION_MPLS_PUSH] = &mlx5e_tc_act_mpls_push, + [FLOW_ACTION_MPLS_POP] = &mlx5e_tc_act_mpls_pop, + [FLOW_ACTION_VLAN_PUSH_ETH] = &mlx5e_tc_act_vlan, + [FLOW_ACTION_VLAN_POP_ETH] = &mlx5e_tc_act_vlan, }; -/* Must be aligned with enum flow_action_id. 
*/ static struct mlx5e_tc_act *tc_acts_nic[NUM_FLOW_ACTIONS] = { - &mlx5e_tc_act_accept, - &mlx5e_tc_act_drop, - NULL, /* FLOW_ACTION_TRAP, */ - &mlx5e_tc_act_goto, - &mlx5e_tc_act_mirred_nic, - NULL, /* FLOW_ACTION_MIRRED, */ - NULL, /* FLOW_ACTION_REDIRECT_INGRESS, */ - NULL, /* FLOW_ACTION_MIRRED_INGRESS, */ - NULL, /* FLOW_ACTION_VLAN_PUSH, */ - NULL, /* FLOW_ACTION_VLAN_POP, */ - NULL, /* FLOW_ACTION_VLAN_MANGLE, */ - NULL, /* FLOW_ACTION_TUNNEL_ENCAP, */ - NULL, /* FLOW_ACTION_TUNNEL_DECAP, */ - &mlx5e_tc_act_pedit, - &mlx5e_tc_act_pedit, - &mlx5e_tc_act_csum, - &mlx5e_tc_act_mark, - NULL, /* FLOW_ACTION_PTYPE, */ - NULL, /* FLOW_ACTION_PRIORITY, */ - NULL, /* FLOW_ACTION_WAKE, */ - NULL, /* FLOW_ACTION_QUEUE, */ - NULL, /* FLOW_ACTION_SAMPLE, */ - NULL, /* FLOW_ACTION_POLICE, */ - &mlx5e_tc_act_ct, + [FLOW_ACTION_ACCEPT] = &mlx5e_tc_act_accept, + [FLOW_ACTION_DROP] = &mlx5e_tc_act_drop, + [FLOW_ACTION_GOTO] = &mlx5e_tc_act_goto, + [FLOW_ACTION_REDIRECT] = &mlx5e_tc_act_mirred_nic, + [FLOW_ACTION_MANGLE] = &mlx5e_tc_act_pedit, + [FLOW_ACTION_ADD] = &mlx5e_tc_act_pedit, + [FLOW_ACTION_CSUM] = &mlx5e_tc_act_csum, + [FLOW_ACTION_MARK] = &mlx5e_tc_act_mark, + [FLOW_ACTION_CT] = &mlx5e_tc_act_ct, }; /** diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h index 10c9a8a79d00..2e42d7c5451e 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_priv.h @@ -96,6 +96,7 @@ struct mlx5e_tc_flow { struct encap_flow_item encaps[MLX5_MAX_FLOW_FWD_VPORTS]; struct mlx5e_tc_flow *peer_flow; struct mlx5e_mod_hdr_handle *mh; /* attached mod header instance */ + struct mlx5e_mod_hdr_handle *slow_mh; /* attached mod header instance for slow path */ struct mlx5e_hairpin_entry *hpe; /* attached hairpin instance */ struct list_head hairpin; /* flows sharing the same hairpin */ struct list_head peer; /* flows with peer flow */ @@ -111,6 +112,7 @@ struct mlx5e_tc_flow { struct completion del_hw_done; struct mlx5_flow_attr *attr; struct list_head attrs; + u32 chain_mapping; }; struct mlx5_flow_handle * diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h index 4456ad5cedf1..853f312cd757 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h @@ -11,6 +11,27 @@ #define INL_HDR_START_SZ (sizeof(((struct mlx5_wqe_eth_seg *)NULL)->inline_hdr.start)) +/* IPSEC inline data includes: + * 1. ESP trailer: up to 255 bytes of padding, 1 byte for pad length, 1 byte for + * next header. + * 2. ESP authentication data: 16 bytes for ICV. + */ +#define MLX5E_MAX_TX_IPSEC_DS DIV_ROUND_UP(sizeof(struct mlx5_wqe_inline_seg) + \ + 255 + 1 + 1 + 16, MLX5_SEND_WQE_DS) + +/* 366 should be big enough to cover all L2, L3 and L4 headers with possible + * encapsulations. + */ +#define MLX5E_MAX_TX_INLINE_DS DIV_ROUND_UP(366 - INL_HDR_START_SZ + VLAN_HLEN, \ + MLX5_SEND_WQE_DS) + +/* Sync the calculation with mlx5e_sq_calc_wqe_attr. 
*/ +#define MLX5E_MAX_TX_WQEBBS DIV_ROUND_UP(MLX5E_TX_WQE_EMPTY_DS_COUNT + \ + MLX5E_MAX_TX_INLINE_DS + \ + MLX5E_MAX_TX_IPSEC_DS + \ + MAX_SKB_FRAGS + 1, \ + MLX5_SEND_WQEBB_NUM_DS) + #define MLX5E_RX_ERR_CQE(cqe) (get_cqe_opcode(cqe) != MLX5_CQE_RESP_SEND) static inline @@ -58,6 +79,12 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget); void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq); static inline bool +mlx5e_skb_fifo_has_room(struct mlx5e_skb_fifo *fifo) +{ + return (*fifo->pc - *fifo->cc) < fifo->mask; +} + +static inline bool mlx5e_wqc_has_room_for(struct mlx5_wq_cyc *wq, u16 cc, u16 pc, u16 n) { return (mlx5_wq_cyc_ctr2ix(wq, cc - pc) >= n) || (cc == pc); @@ -418,6 +445,8 @@ mlx5e_set_eseg_swp(struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg, static inline u16 mlx5e_stop_room_for_wqe(struct mlx5_core_dev *mdev, u16 wqe_size) { + WARN_ON_ONCE(PAGE_SIZE / MLX5_SEND_WQE_BB < mlx5e_get_max_sq_wqebbs(mdev)); + /* A WQE must not cross the page boundary, hence two conditions: * 1. Its size must not exceed the page size. * 2. If the WQE size is X, and the space remaining in a page is less @@ -430,7 +459,6 @@ static inline u16 mlx5e_stop_room_for_wqe(struct mlx5_core_dev *mdev, u16 wqe_si "wqe_size %u is greater than max SQ WQEBBs %u", wqe_size, mlx5e_get_max_sq_wqebbs(mdev)); - return MLX5E_STOP_ROOM(wqe_size); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c index 4685c652c97e..20507ef2f956 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c @@ -117,7 +117,7 @@ mlx5e_xmit_xdp_buff(struct mlx5e_xdpsq *sq, struct mlx5e_rq *rq, xdpi.page.rq = rq; dma_addr = page_pool_get_dma_addr(page) + (xdpf->data - (void *)xdpf); - dma_sync_single_for_device(sq->pdev, dma_addr, xdptxd.len, DMA_TO_DEVICE); + dma_sync_single_for_device(sq->pdev, dma_addr, xdptxd.len, DMA_BIDIRECTIONAL); if (unlikely(xdp_frame_has_frags(xdpf))) { sinfo = xdp_get_shared_info_from_frame(xdpf); @@ -131,7 +131,7 @@ mlx5e_xmit_xdp_buff(struct mlx5e_xdpsq *sq, struct mlx5e_rq *rq, skb_frag_off(frag); len = skb_frag_size(frag); dma_sync_single_for_device(sq->pdev, addr, len, - DMA_TO_DEVICE); + DMA_BIDIRECTIONAL); } } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c index 2a8fd7020622..a715601865d3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c @@ -101,7 +101,6 @@ static bool mlx5e_ipsec_update_esn_state(struct mlx5e_ipsec_sa_entry *sa_entry) struct xfrm_replay_state_esn *replay_esn; u32 seq_bottom = 0; u8 overlap; - u32 *esn; if (!(sa_entry->x->props.flags & XFRM_STATE_ESN)) { sa_entry->esn_state.trigger = 0; @@ -116,11 +115,9 @@ static bool mlx5e_ipsec_update_esn_state(struct mlx5e_ipsec_sa_entry *sa_entry) sa_entry->esn_state.esn = xfrm_replay_seqhi(sa_entry->x, htonl(seq_bottom)); - esn = &sa_entry->esn_state.esn; sa_entry->esn_state.trigger = 1; if (unlikely(overlap && seq_bottom < MLX5E_IPSEC_ESN_SCOPE_MID)) { - ++(*esn); sa_entry->esn_state.overlap = 0; return true; } else if (unlikely(!overlap && diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c index 41970067917b..2ef36cb9555a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c @@ -432,7 +432,7 @@ static 
int mlx5e_macsec_update_rx_sa(struct mlx5e_macsec *macsec, bool active) { struct mlx5_core_dev *mdev = macsec->mdev; - struct mlx5_macsec_obj_attrs attrs; + struct mlx5_macsec_obj_attrs attrs = {}; int err = 0; if (rx_sa->active != active) @@ -444,7 +444,7 @@ static int mlx5e_macsec_update_rx_sa(struct mlx5e_macsec *macsec, return 0; } - attrs.sci = rx_sa->sci; + attrs.sci = cpu_to_be64((__force u64)rx_sa->sci); attrs.enc_key_id = rx_sa->enc_key_id; err = mlx5e_macsec_create_object(mdev, &attrs, false, &rx_sa->macsec_obj_id); if (err) @@ -999,11 +999,11 @@ static int mlx5e_macsec_upd_rxsa(struct macsec_context *ctx) } rx_sa = rx_sc->rx_sa[assoc_num]; - if (rx_sa) { + if (!rx_sa) { netdev_err(ctx->netdev, - "MACsec offload rx_sc sci %lld rx_sa %d already exist\n", + "MACsec offload rx_sc sci %lld rx_sa %d doesn't exist\n", sci, assoc_num); - err = -EEXIST; + err = -EINVAL; goto out; } @@ -1055,11 +1055,11 @@ static int mlx5e_macsec_del_rxsa(struct macsec_context *ctx) } rx_sa = rx_sc->rx_sa[assoc_num]; - if (rx_sa) { + if (!rx_sa) { netdev_err(ctx->netdev, - "MACsec offload rx_sc sci %lld rx_sa %d already exist\n", + "MACsec offload rx_sc sci %lld rx_sa %d doesn't exist\n", sci, assoc_num); - err = -EEXIST; + err = -EINVAL; goto out; } @@ -1846,25 +1846,16 @@ err_hash: void mlx5e_macsec_cleanup(struct mlx5e_priv *priv) { struct mlx5e_macsec *macsec = priv->macsec; - struct mlx5_core_dev *mdev = macsec->mdev; + struct mlx5_core_dev *mdev = priv->mdev; if (!macsec) return; mlx5_notifier_unregister(mdev, &macsec->nb); - mlx5e_macsec_fs_cleanup(macsec->macsec_fs); - - /* Cleanup workqueue */ destroy_workqueue(macsec->wq); - mlx5e_macsec_aso_cleanup(&macsec->aso, mdev); - - priv->macsec = NULL; - rhashtable_destroy(&macsec->sci_hash); - mutex_destroy(&macsec->lock); - kfree(macsec); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec_fs.c index 13dc628b988a..1ac0cf04e811 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec_fs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec_fs.c @@ -1180,7 +1180,7 @@ macsec_fs_rx_add_rule(struct mlx5e_macsec_fs *macsec_fs, rx_rule->rule[0] = rule; /* Rx crypto table without SCI rule */ - if (cpu_to_be64((__force u64)attrs->sci) & ntohs(MACSEC_PORT_ES)) { + if ((cpu_to_be64((__force u64)attrs->sci) & 0xFFFF) == ntohs(MACSEC_PORT_ES)) { memset(spec, 0, sizeof(struct mlx5_flow_spec)); memset(&dest, 0, sizeof(struct mlx5_flow_destination)); memset(&flow_act, 0, sizeof(flow_act)); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 364f04309149..e3a4f01bcceb 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -5694,6 +5694,13 @@ int mlx5e_attach_netdev(struct mlx5e_priv *priv) mlx5e_fs_set_state_destroy(priv->fs, !test_bit(MLX5E_STATE_DESTROYING, &priv->state)); + /* Validate the max_wqe_size_sq capability. 
*/ + if (WARN_ON_ONCE(mlx5e_get_max_sq_wqebbs(priv->mdev) < MLX5E_MAX_TX_WQEBBS)) { + mlx5_core_warn(priv->mdev, "MLX5E: Max SQ WQEBBs firmware capability: %u, needed %lu\n", + mlx5e_get_max_sq_wqebbs(priv->mdev), MLX5E_MAX_TX_WQEBBS); + return -EIO; + } + /* max number of channels may have changed */ max_nch = mlx5e_calc_max_nch(priv->mdev, priv->netdev, profile); if (priv->channels.params.num_channels > max_nch) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index 58084650151f..a61a43fc8d5c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -266,7 +266,7 @@ static inline bool mlx5e_rx_cache_get(struct mlx5e_rq *rq, union mlx5e_alloc_uni addr = page_pool_get_dma_addr(au->page); /* Non-XSK always uses PAGE_SIZE. */ - dma_sync_single_for_device(rq->pdev, addr, PAGE_SIZE, DMA_FROM_DEVICE); + dma_sync_single_for_device(rq->pdev, addr, PAGE_SIZE, rq->buff.map_dir); return true; } @@ -282,8 +282,7 @@ static inline int mlx5e_page_alloc_pool(struct mlx5e_rq *rq, union mlx5e_alloc_u return -ENOMEM; /* Non-XSK always uses PAGE_SIZE. */ - addr = dma_map_page_attrs(rq->pdev, au->page, 0, PAGE_SIZE, - rq->buff.map_dir, DMA_ATTR_SKIP_CPU_SYNC); + addr = dma_map_page(rq->pdev, au->page, 0, PAGE_SIZE, rq->buff.map_dir); if (unlikely(dma_mapping_error(rq->pdev, addr))) { page_pool_recycle_direct(rq->page_pool, au->page); au->page = NULL; @@ -427,14 +426,15 @@ mlx5e_add_skb_frag(struct mlx5e_rq *rq, struct sk_buff *skb, { dma_addr_t addr = page_pool_get_dma_addr(au->page); - dma_sync_single_for_cpu(rq->pdev, addr + frag_offset, len, DMA_FROM_DEVICE); + dma_sync_single_for_cpu(rq->pdev, addr + frag_offset, len, + rq->buff.map_dir); page_ref_inc(au->page); skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, au->page, frag_offset, len, truesize); } static inline void -mlx5e_copy_skb_header(struct device *pdev, struct sk_buff *skb, +mlx5e_copy_skb_header(struct mlx5e_rq *rq, struct sk_buff *skb, struct page *page, dma_addr_t addr, int offset_from, int dma_offset, u32 headlen) { @@ -442,7 +442,8 @@ mlx5e_copy_skb_header(struct device *pdev, struct sk_buff *skb, /* Aligning len to sizeof(long) optimizes memcpy performance */ unsigned int len = ALIGN(headlen, sizeof(long)); - dma_sync_single_for_cpu(pdev, addr + dma_offset, len, DMA_FROM_DEVICE); + dma_sync_single_for_cpu(rq->pdev, addr + dma_offset, len, + rq->buff.map_dir); skb_copy_to_linear_data(skb, from, len); } @@ -1538,7 +1539,7 @@ mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi, addr = page_pool_get_dma_addr(au->page); dma_sync_single_range_for_cpu(rq->pdev, addr, wi->offset, - frag_size, DMA_FROM_DEVICE); + frag_size, rq->buff.map_dir); net_prefetch(data); prog = rcu_dereference(rq->xdp_prog); @@ -1587,7 +1588,7 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi addr = page_pool_get_dma_addr(au->page); dma_sync_single_range_for_cpu(rq->pdev, addr, wi->offset, - rq->buff.frame0_sz, DMA_FROM_DEVICE); + rq->buff.frame0_sz, rq->buff.map_dir); net_prefetchw(va); /* xdp_frame data area */ net_prefetch(va + rx_headroom); @@ -1608,7 +1609,7 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi addr = page_pool_get_dma_addr(au->page); dma_sync_single_for_cpu(rq->pdev, addr + wi->offset, - frag_consumed_bytes, DMA_FROM_DEVICE); + frag_consumed_bytes, rq->buff.map_dir); if (!xdp_buff_has_frags(&xdp)) { /* Init on the first fragment to avoid cold 
cache access @@ -1905,7 +1906,7 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w mlx5e_fill_skb_data(skb, rq, au, byte_cnt, frag_offset); /* copy header */ addr = page_pool_get_dma_addr(head_au->page); - mlx5e_copy_skb_header(rq->pdev, skb, head_au->page, addr, + mlx5e_copy_skb_header(rq, skb, head_au->page, addr, head_offset, head_offset, headlen); /* skb linear part was allocated with headlen and aligned to long */ skb->tail += headlen; @@ -1939,7 +1940,7 @@ mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi, addr = page_pool_get_dma_addr(au->page); dma_sync_single_range_for_cpu(rq->pdev, addr, head_offset, - frag_size, DMA_FROM_DEVICE); + frag_size, rq->buff.map_dir); net_prefetch(data); prog = rcu_dereference(rq->xdp_prog); @@ -1987,7 +1988,7 @@ mlx5e_skb_from_cqe_shampo(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi, if (likely(frag_size <= BIT(MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE))) { /* build SKB around header */ - dma_sync_single_range_for_cpu(rq->pdev, head->addr, 0, frag_size, DMA_FROM_DEVICE); + dma_sync_single_range_for_cpu(rq->pdev, head->addr, 0, frag_size, rq->buff.map_dir); prefetchw(hdr); prefetch(data); skb = mlx5e_build_linear_skb(rq, hdr, frag_size, rx_headroom, head_size, 0); @@ -2009,7 +2010,7 @@ mlx5e_skb_from_cqe_shampo(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi, } prefetchw(skb->data); - mlx5e_copy_skb_header(rq->pdev, skb, head->page, head->addr, + mlx5e_copy_skb_header(rq, skb, head->page, head->addr, head_offset + rx_headroom, rx_headroom, head_size); /* skb linear part was allocated with headlen and aligned to long */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index 70a7a61f9708..5a6aa61ec82a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -1405,8 +1405,13 @@ mlx5e_tc_offload_to_slow_path(struct mlx5_eswitch *esw, struct mlx5e_tc_flow *flow, struct mlx5_flow_spec *spec) { + struct mlx5e_tc_mod_hdr_acts mod_acts = {}; + struct mlx5e_mod_hdr_handle *mh = NULL; struct mlx5_flow_attr *slow_attr; struct mlx5_flow_handle *rule; + bool fwd_and_modify_cap; + u32 chain_mapping = 0; + int err; slow_attr = mlx5_alloc_flow_attr(MLX5_FLOW_NAMESPACE_FDB); if (!slow_attr) @@ -1417,13 +1422,56 @@ mlx5e_tc_offload_to_slow_path(struct mlx5_eswitch *esw, slow_attr->esw_attr->split_count = 0; slow_attr->flags |= MLX5_ATTR_FLAG_SLOW_PATH; + fwd_and_modify_cap = MLX5_CAP_ESW_FLOWTABLE((esw)->dev, fdb_modify_header_fwd_to_table); + if (!fwd_and_modify_cap) + goto skip_restore; + + err = mlx5_chains_get_chain_mapping(esw_chains(esw), flow->attr->chain, &chain_mapping); + if (err) + goto err_get_chain; + + err = mlx5e_tc_match_to_reg_set(esw->dev, &mod_acts, MLX5_FLOW_NAMESPACE_FDB, + CHAIN_TO_REG, chain_mapping); + if (err) + goto err_reg_set; + + mh = mlx5e_mod_hdr_attach(esw->dev, get_mod_hdr_table(flow->priv, flow), + MLX5_FLOW_NAMESPACE_FDB, &mod_acts); + if (IS_ERR(mh)) { + err = PTR_ERR(mh); + goto err_attach; + } + + slow_attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR; + slow_attr->modify_hdr = mlx5e_mod_hdr_get(mh); + +skip_restore: rule = mlx5e_tc_offload_fdb_rules(esw, flow, spec, slow_attr); - if (!IS_ERR(rule)) - flow_flag_set(flow, SLOW); + if (IS_ERR(rule)) { + err = PTR_ERR(rule); + goto err_offload; + } + + flow->slow_mh = mh; + flow->chain_mapping = chain_mapping; + flow_flag_set(flow, SLOW); + mlx5e_mod_hdr_dealloc(&mod_acts); kfree(slow_attr); return rule; + 
+err_offload: + if (fwd_and_modify_cap) + mlx5e_mod_hdr_detach(esw->dev, get_mod_hdr_table(flow->priv, flow), mh); +err_attach: +err_reg_set: + if (fwd_and_modify_cap) + mlx5_chains_put_chain_mapping(esw_chains(esw), chain_mapping); +err_get_chain: + mlx5e_mod_hdr_dealloc(&mod_acts); + kfree(slow_attr); + return ERR_PTR(err); } void mlx5e_tc_unoffload_from_slow_path(struct mlx5_eswitch *esw, @@ -1441,7 +1489,17 @@ void mlx5e_tc_unoffload_from_slow_path(struct mlx5_eswitch *esw, slow_attr->action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; slow_attr->esw_attr->split_count = 0; slow_attr->flags |= MLX5_ATTR_FLAG_SLOW_PATH; + if (flow->slow_mh) { + slow_attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR; + slow_attr->modify_hdr = mlx5e_mod_hdr_get(flow->slow_mh); + } mlx5e_tc_unoffload_fdb_rules(esw, flow, slow_attr); + if (flow->slow_mh) { + mlx5e_mod_hdr_detach(esw->dev, get_mod_hdr_table(flow->priv, flow), flow->slow_mh); + mlx5_chains_put_chain_mapping(esw_chains(esw), flow->chain_mapping); + flow->chain_mapping = 0; + flow->slow_mh = NULL; + } flow_flag_clear(flow, SLOW); kfree(slow_attr); } @@ -3575,6 +3633,14 @@ mlx5e_clone_flow_attr_for_post_act(struct mlx5_flow_attr *attr, attr2->action = 0; attr2->flags = 0; attr2->parse_attr = parse_attr; + attr2->dest_chain = 0; + attr2->dest_ft = NULL; + + if (ns_type == MLX5_FLOW_NAMESPACE_FDB) { + attr2->esw_attr->out_count = 0; + attr2->esw_attr->split_count = 0; + } + return attr2; } @@ -4008,6 +4074,7 @@ parse_tc_fdb_actions(struct mlx5e_priv *priv, struct mlx5e_tc_flow_parse_attr *parse_attr; struct mlx5_flow_attr *attr = flow->attr; struct mlx5_esw_flow_attr *esw_attr; + struct net_device *filter_dev; int err; err = flow_action_supported(flow_action, extack); @@ -4016,6 +4083,7 @@ parse_tc_fdb_actions(struct mlx5e_priv *priv, esw_attr = attr->esw_attr; parse_attr = attr->parse_attr; + filter_dev = parse_attr->filter_dev; parse_state = &parse_attr->parse_state; mlx5e_tc_act_init_parse_state(parse_state, flow, flow_action, extack); parse_state->ct_priv = get_ct_priv(priv); @@ -4025,13 +4093,21 @@ parse_tc_fdb_actions(struct mlx5e_priv *priv, return err; /* Forward to/from internal port can only have 1 dest */ - if ((netif_is_ovs_master(parse_attr->filter_dev) || esw_attr->dest_int_port) && + if ((netif_is_ovs_master(filter_dev) || esw_attr->dest_int_port) && esw_attr->out_count > 1) { NL_SET_ERR_MSG_MOD(extack, "Rules with internal port can have only one destination"); return -EOPNOTSUPP; } + /* Forward from tunnel/internal port to internal port is not supported */ + if ((mlx5e_get_tc_tun(filter_dev) || netif_is_ovs_master(filter_dev)) && + esw_attr->dest_int_port) { + NL_SET_ERR_MSG_MOD(extack, + "Forwarding from tunnel/internal port to internal port is not supported"); + return -EOPNOTSUPP; + } + err = actions_prepare_mod_hdr_actions(priv, flow, attr, extack); if (err) return err; @@ -4686,12 +4762,6 @@ int mlx5e_policer_validate(const struct flow_action *action, return -EOPNOTSUPP; } - if (act->police.rate_pkt_ps) { - NL_SET_ERR_MSG_MOD(extack, - "QoS offload not support packets per second"); - return -EOPNOTSUPP; - } - return 0; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c index bf2232a2a836..f7897ddb29c5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c @@ -305,6 +305,8 @@ static void mlx5e_sq_calc_wqe_attr(struct sk_buff *skb, const struct mlx5e_tx_at u16 ds_cnt_inl = 0; u16 ds_cnt_ids = 0; + /* Sync the 
calculation with MLX5E_MAX_TX_WQEBBS. */ + if (attr->insz) ds_cnt_ids = DIV_ROUND_UP(sizeof(struct mlx5_wqe_inline_seg) + attr->insz, MLX5_SEND_WQE_DS); @@ -317,6 +319,9 @@ static void mlx5e_sq_calc_wqe_attr(struct sk_buff *skb, const struct mlx5e_tx_at inl += VLAN_HLEN; ds_cnt_inl = DIV_ROUND_UP(inl, MLX5_SEND_WQE_DS); + if (WARN_ON_ONCE(ds_cnt_inl > MLX5E_MAX_TX_INLINE_DS)) + netdev_warn(skb->dev, "ds_cnt_inl = %u > max %u\n", ds_cnt_inl, + (u16)MLX5E_MAX_TX_INLINE_DS); ds_cnt += ds_cnt_inl; } @@ -392,6 +397,11 @@ mlx5e_txwqe_complete(struct mlx5e_txqsq *sq, struct sk_buff *skb, if (unlikely(sq->ptpsq)) { mlx5e_skb_cb_hwtstamp_init(skb); mlx5e_skb_fifo_push(&sq->ptpsq->skb_fifo, skb); + if (!netif_tx_queue_stopped(sq->txq) && + !mlx5e_skb_fifo_has_room(&sq->ptpsq->skb_fifo)) { + netif_tx_stop_queue(sq->txq); + sq->stats->stopped++; + } skb_get(skb); } @@ -868,6 +878,7 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq, int napi_budget) if (netif_tx_queue_stopped(sq->txq) && mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, sq->stop_room) && + mlx5e_ptpsq_fifo_has_room(sq) && !test_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state)) { netif_tx_wake_queue(sq->txq); stats->wake++; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c index c59107fa9e6d..2169486c4bfb 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c @@ -1387,12 +1387,14 @@ void mlx5_eswitch_disable_locked(struct mlx5_eswitch *esw) esw->mode == MLX5_ESWITCH_LEGACY ? "LEGACY" : "OFFLOADS", esw->esw_funcs.num_vfs, esw->enabled_vports); - esw->fdb_table.flags &= ~MLX5_ESW_FDB_CREATED; - if (esw->mode == MLX5_ESWITCH_OFFLOADS) - esw_offloads_disable(esw); - else if (esw->mode == MLX5_ESWITCH_LEGACY) - esw_legacy_disable(esw); - mlx5_esw_acls_ns_cleanup(esw); + if (esw->fdb_table.flags & MLX5_ESW_FDB_CREATED) { + esw->fdb_table.flags &= ~MLX5_ESW_FDB_CREATED; + if (esw->mode == MLX5_ESWITCH_OFFLOADS) + esw_offloads_disable(esw); + else if (esw->mode == MLX5_ESWITCH_LEGACY) + esw_legacy_disable(esw); + mlx5_esw_acls_ns_cleanup(esw); + } if (esw->mode == MLX5_ESWITCH_OFFLOADS) devl_rate_nodes_destroy(devlink); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index 4e50df3139c6..728ca9f2bb9d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c @@ -2310,7 +2310,7 @@ out_free: static int esw_offloads_start(struct mlx5_eswitch *esw, struct netlink_ext_ack *extack) { - int err, err1; + int err; esw->mode = MLX5_ESWITCH_OFFLOADS; err = mlx5_eswitch_enable_locked(esw, esw->dev->priv.sriov.num_vfs); @@ -2318,11 +2318,6 @@ static int esw_offloads_start(struct mlx5_eswitch *esw, NL_SET_ERR_MSG_MOD(extack, "Failed setting eswitch to offloads"); esw->mode = MLX5_ESWITCH_LEGACY; - err1 = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_IGNORE_NUM_VFS); - if (err1) { - NL_SET_ERR_MSG_MOD(extack, - "Failed setting eswitch back to legacy"); - } mlx5_rescan_drivers(esw->dev); } if (esw->offloads.inline_mode == MLX5_INLINE_MODE_NONE) { @@ -3389,19 +3384,12 @@ err_metadata: static int esw_offloads_stop(struct mlx5_eswitch *esw, struct netlink_ext_ack *extack) { - int err, err1; + int err; esw->mode = MLX5_ESWITCH_LEGACY; err = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_IGNORE_NUM_VFS); - if (err) { + if (err) NL_SET_ERR_MSG_MOD(extack, "Failed setting eswitch to legacy"); - 
esw->mode = MLX5_ESWITCH_OFFLOADS; - err1 = mlx5_eswitch_enable_locked(esw, MLX5_ESWITCH_IGNORE_NUM_VFS); - if (err1) { - NL_SET_ERR_MSG_MOD(extack, - "Failed setting eswitch back to offloads"); - } - } return err; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c index ee568bf34ae2..108a3503f413 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_termtbl.c @@ -30,9 +30,9 @@ mlx5_eswitch_termtbl_hash(struct mlx5_flow_act *flow_act, sizeof(dest->vport.num), hash); hash = jhash((const void *)&dest->vport.vhca_id, sizeof(dest->vport.num), hash); - if (dest->vport.pkt_reformat) - hash = jhash(dest->vport.pkt_reformat, - sizeof(*dest->vport.pkt_reformat), + if (flow_act->pkt_reformat) + hash = jhash(flow_act->pkt_reformat, + sizeof(*flow_act->pkt_reformat), hash); return hash; } @@ -53,9 +53,11 @@ mlx5_eswitch_termtbl_cmp(struct mlx5_flow_act *flow_act1, if (ret) return ret; - return dest1->vport.pkt_reformat && dest2->vport.pkt_reformat ? - memcmp(dest1->vport.pkt_reformat, dest2->vport.pkt_reformat, - sizeof(*dest1->vport.pkt_reformat)) : 0; + if (flow_act1->pkt_reformat && flow_act2->pkt_reformat) + return memcmp(flow_act1->pkt_reformat, flow_act2->pkt_reformat, + sizeof(*flow_act1->pkt_reformat)); + + return !(flow_act1->pkt_reformat == flow_act2->pkt_reformat); } static int diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c index e8896f368362..9d908a0ccfef 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c @@ -152,7 +152,8 @@ static void mlx5_fw_reset_complete_reload(struct mlx5_core_dev *dev) mlx5_unload_one(dev); if (mlx5_health_wait_pci_up(dev)) mlx5_core_err(dev, "reset reload flow aborted, PCI reads still not working\n"); - mlx5_load_one(dev, false); + else + mlx5_load_one(dev, false); devlink_remote_reload_actions_performed(priv_to_devlink(dev), 0, BIT(DEVLINK_RELOAD_ACTION_DRIVER_REINIT) | BIT(DEVLINK_RELOAD_ACTION_FW_ACTIVATE)); @@ -358,6 +359,23 @@ static int mlx5_pci_link_toggle(struct mlx5_core_dev *dev) err = -ETIMEDOUT; } + do { + err = pci_read_config_word(dev->pdev, PCI_DEVICE_ID, &reg16); + if (err) + return err; + if (reg16 == dev_id) + break; + msleep(20); + } while (!time_after(jiffies, timeout)); + + if (reg16 == dev_id) { + mlx5_core_info(dev, "Firmware responds to PCI config cycles again\n"); + } else { + mlx5_core_err(dev, "Firmware is not responsive (0x%04x) after %llu ms\n", + reg16, mlx5_tout_ms(dev, PCI_TOGGLE)); + err = -ETIMEDOUT; + } + restore: list_for_each_entry(sdev, &bridge_bus->devices, bus_list) { pci_cfg_access_unlock(sdev); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/aso.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/aso.c index baa8092f335e..c971ff04dd04 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/aso.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/aso.c @@ -3,6 +3,7 @@ #include <linux/mlx5/device.h> #include <linux/mlx5/transobj.h> +#include "clock.h" #include "aso.h" #include "wq.h" @@ -179,6 +180,7 @@ static int create_aso_sq(struct mlx5_core_dev *mdev, int pdn, { void *in, *sqc, *wq; int inlen, err; + u8 ts_format; inlen = MLX5_ST_SZ_BYTES(create_sq_in) + sizeof(u64) * sq->wq_ctrl.buf.npages; @@ -195,6 +197,11 @@ static int create_aso_sq(struct mlx5_core_dev *mdev, int pdn, MLX5_SET(sqc, sqc, 
state, MLX5_SQC_STATE_RST); MLX5_SET(sqc, sqc, flush_in_error_en, 1); + ts_format = mlx5_is_real_time_sq(mdev) ? + MLX5_TIMESTAMP_FORMAT_REAL_TIME : + MLX5_TIMESTAMP_FORMAT_FREE_RUNNING; + MLX5_SET(sqc, sqc, ts_format, ts_format); + MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC); MLX5_SET(wq, wq, uar_page, mdev->mlx5e_res.hw_objs.bfreg.index); MLX5_SET(wq, wq, log_wq_pg_sz, sq->wq_ctrl.buf.page_shift - diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c index 839a01da110f..8ff16318e32d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c @@ -122,7 +122,7 @@ void mlx5_mpfs_cleanup(struct mlx5_core_dev *dev) { struct mlx5_mpfs *mpfs = dev->priv.mpfs; - if (!MLX5_ESWITCH_MANAGER(dev)) + if (!mpfs) return; WARN_ON(!hlist_empty(mpfs->hash)); @@ -137,7 +137,7 @@ int mlx5_mpfs_add_mac(struct mlx5_core_dev *dev, u8 *mac) int err = 0; u32 index; - if (!MLX5_ESWITCH_MANAGER(dev)) + if (!mpfs) return 0; mutex_lock(&mpfs->lock); @@ -185,7 +185,7 @@ int mlx5_mpfs_del_mac(struct mlx5_core_dev *dev, u8 *mac) int err = 0; u32 index; - if (!MLX5_ESWITCH_MANAGER(dev)) + if (!mpfs) return 0; mutex_lock(&mpfs->lock); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index 0b459d841c3a..283c4cc28944 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -1872,6 +1872,10 @@ static void mlx5_pci_resume(struct pci_dev *pdev) err = mlx5_load_one(dev, false); + if (!err) + devlink_health_reporter_state_update(dev->priv.health.fw_fatal_reporter, + DEVLINK_HEALTH_REPORTER_STATE_HEALTHY); + mlx5_pci_trace(dev, "Done, err = %d, device %s\n", err, !err ? 
"recovered" : "Failed"); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c index ddfaf7891188..91ff19f67695 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_rule.c @@ -1200,7 +1200,8 @@ free_rule: } remove_from_nic_tbl: - mlx5dr_matcher_remove_from_tbl_nic(dmn, nic_matcher); + if (!nic_matcher->rules) + mlx5dr_matcher_remove_from_tbl_nic(dmn, nic_matcher); free_hw_ste: mlx5dr_domain_nic_unlock(nic_dmn); diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c index 4efccd942fb8..1290b2d3eae6 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c @@ -3470,6 +3470,8 @@ mlxsw_sp_switchdev_vxlan_fdb_del(struct mlxsw_sp *mlxsw_sp, u16 vid; vxlan_fdb_info = &switchdev_work->vxlan_fdb_info; + if (!vxlan_fdb_info->offloaded) + return; bridge_device = mlxsw_sp_bridge_device_find(mlxsw_sp->bridge, br_dev); if (!bridge_device) diff --git a/drivers/net/ethernet/micrel/ksz884x.c b/drivers/net/ethernet/micrel/ksz884x.c index 468520079c65..e6acd1e7b263 100644 --- a/drivers/net/ethernet/micrel/ksz884x.c +++ b/drivers/net/ethernet/micrel/ksz884x.c @@ -6851,7 +6851,7 @@ static int pcidev_init(struct pci_dev *pdev, const struct pci_device_id *id) char banner[sizeof(version)]; struct ksz_switch *sw = NULL; - result = pci_enable_device(pdev); + result = pcim_enable_device(pdev); if (result) return result; diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_ethtool.c b/drivers/net/ethernet/microchip/lan966x/lan966x_ethtool.c index e58a27fd8b50..06811c60d598 100644 --- a/drivers/net/ethernet/microchip/lan966x/lan966x_ethtool.c +++ b/drivers/net/ethernet/microchip/lan966x/lan966x_ethtool.c @@ -656,7 +656,15 @@ void lan966x_stats_get(struct net_device *dev, stats->rx_dropped = dev->stats.rx_dropped + lan966x->stats[idx + SYS_COUNT_RX_LONG] + lan966x->stats[idx + SYS_COUNT_DR_LOCAL] + - lan966x->stats[idx + SYS_COUNT_DR_TAIL]; + lan966x->stats[idx + SYS_COUNT_DR_TAIL] + + lan966x->stats[idx + SYS_COUNT_RX_RED_PRIO_0] + + lan966x->stats[idx + SYS_COUNT_RX_RED_PRIO_1] + + lan966x->stats[idx + SYS_COUNT_RX_RED_PRIO_2] + + lan966x->stats[idx + SYS_COUNT_RX_RED_PRIO_3] + + lan966x->stats[idx + SYS_COUNT_RX_RED_PRIO_4] + + lan966x->stats[idx + SYS_COUNT_RX_RED_PRIO_5] + + lan966x->stats[idx + SYS_COUNT_RX_RED_PRIO_6] + + lan966x->stats[idx + SYS_COUNT_RX_RED_PRIO_7]; for (i = 0; i < LAN966X_NUM_TC; i++) { stats->rx_dropped += @@ -708,6 +716,9 @@ int lan966x_stats_init(struct lan966x *lan966x) snprintf(queue_name, sizeof(queue_name), "%s-stats", dev_name(lan966x->dev)); lan966x->stats_queue = create_singlethread_workqueue(queue_name); + if (!lan966x->stats_queue) + return -ENOMEM; + INIT_DELAYED_WORK(&lan966x->stats_work, lan966x_check_stats_work); queue_delayed_work(lan966x->stats_queue, &lan966x->stats_work, LAN966X_STATS_CHECK_DELAY); diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c b/drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c index 7e4061c854f0..e6948939ccc2 100644 --- a/drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c +++ b/drivers/net/ethernet/microchip/lan966x/lan966x_fdma.c @@ -309,6 +309,7 @@ static void lan966x_fdma_tx_disable(struct lan966x_tx *tx) lan966x, FDMA_CH_DB_DISCARD); tx->activated = false; + tx->last_in_use = -1; } static void 
lan966x_fdma_tx_reload(struct lan966x_tx *tx) @@ -413,13 +414,15 @@ static struct sk_buff *lan966x_fdma_rx_get_frame(struct lan966x_rx *rx) /* Get the received frame and unmap it */ db = &rx->dcbs[rx->dcb_index].db[rx->db_index]; page = rx->page[rx->dcb_index][rx->db_index]; + + dma_sync_single_for_cpu(lan966x->dev, (dma_addr_t)db->dataptr, + FDMA_DCB_STATUS_BLOCKL(db->status), + DMA_FROM_DEVICE); + skb = build_skb(page_address(page), PAGE_SIZE << rx->page_order); if (unlikely(!skb)) goto unmap_page; - dma_unmap_single(lan966x->dev, (dma_addr_t)db->dataptr, - FDMA_DCB_STATUS_BLOCKL(db->status), - DMA_FROM_DEVICE); skb_put(skb, FDMA_DCB_STATUS_BLOCKL(db->status)); lan966x_ifh_get_src_port(skb->data, &src_port); @@ -428,6 +431,10 @@ static struct sk_buff *lan966x_fdma_rx_get_frame(struct lan966x_rx *rx) if (WARN_ON(src_port >= lan966x->num_phys_ports)) goto free_skb; + dma_unmap_single_attrs(lan966x->dev, (dma_addr_t)db->dataptr, + PAGE_SIZE << rx->page_order, DMA_FROM_DEVICE, + DMA_ATTR_SKIP_CPU_SYNC); + skb->dev = lan966x->ports[src_port]->dev; skb_pull(skb, IFH_LEN * sizeof(u32)); @@ -453,9 +460,9 @@ static struct sk_buff *lan966x_fdma_rx_get_frame(struct lan966x_rx *rx) free_skb: kfree_skb(skb); unmap_page: - dma_unmap_page(lan966x->dev, (dma_addr_t)db->dataptr, - FDMA_DCB_STATUS_BLOCKL(db->status), - DMA_FROM_DEVICE); + dma_unmap_single_attrs(lan966x->dev, (dma_addr_t)db->dataptr, + PAGE_SIZE << rx->page_order, DMA_FROM_DEVICE, + DMA_ATTR_SKIP_CPU_SYNC); __free_pages(page, rx->page_order); return NULL; @@ -667,12 +674,14 @@ static int lan966x_fdma_get_max_mtu(struct lan966x *lan966x) int i; for (i = 0; i < lan966x->num_phys_ports; ++i) { + struct lan966x_port *port; int mtu; - if (!lan966x->ports[i]) + port = lan966x->ports[i]; + if (!port) continue; - mtu = lan966x->ports[i]->dev->mtu; + mtu = lan_rd(lan966x, DEV_MAC_MAXLEN_CFG(port->chip_port)); if (mtu > max_mtu) max_mtu = mtu; } @@ -687,17 +696,14 @@ static int lan966x_qsys_sw_status(struct lan966x *lan966x) static int lan966x_fdma_reload(struct lan966x *lan966x, int new_mtu) { - void *rx_dcbs, *tx_dcbs, *tx_dcbs_buf; - dma_addr_t rx_dma, tx_dma; + dma_addr_t rx_dma; + void *rx_dcbs; u32 size; int err; /* Store these for later to free them */ rx_dma = lan966x->rx.dma; - tx_dma = lan966x->tx.dma; rx_dcbs = lan966x->rx.dcbs; - tx_dcbs = lan966x->tx.dcbs; - tx_dcbs_buf = lan966x->tx.dcbs_buf; napi_synchronize(&lan966x->napi); napi_disable(&lan966x->napi); @@ -715,17 +721,6 @@ static int lan966x_fdma_reload(struct lan966x *lan966x, int new_mtu) size = ALIGN(size, PAGE_SIZE); dma_free_coherent(lan966x->dev, size, rx_dcbs, rx_dma); - lan966x_fdma_tx_disable(&lan966x->tx); - err = lan966x_fdma_tx_alloc(&lan966x->tx); - if (err) - goto restore_tx; - - size = sizeof(struct lan966x_tx_dcb) * FDMA_DCB_MAX; - size = ALIGN(size, PAGE_SIZE); - dma_free_coherent(lan966x->dev, size, tx_dcbs, tx_dma); - - kfree(tx_dcbs_buf); - lan966x_fdma_wakeup_netdev(lan966x); napi_enable(&lan966x->napi); @@ -735,11 +730,6 @@ restore: lan966x->rx.dcbs = rx_dcbs; lan966x_fdma_rx_start(&lan966x->rx); -restore_tx: - lan966x->tx.dma = tx_dma; - lan966x->tx.dcbs = tx_dcbs; - lan966x->tx.dcbs_buf = tx_dcbs_buf; - return err; } @@ -751,6 +741,8 @@ int lan966x_fdma_change_mtu(struct lan966x *lan966x) max_mtu = lan966x_fdma_get_max_mtu(lan966x); max_mtu += IFH_LEN * sizeof(u32); + max_mtu += SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); + max_mtu += VLAN_HLEN * 2; if (round_up(max_mtu, PAGE_SIZE) / PAGE_SIZE - 1 == lan966x->rx.page_order) diff --git 
a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c index be2fd030cccb..20ee5b28f70a 100644 --- a/drivers/net/ethernet/microchip/lan966x/lan966x_main.c +++ b/drivers/net/ethernet/microchip/lan966x/lan966x_main.c @@ -386,7 +386,7 @@ static int lan966x_port_change_mtu(struct net_device *dev, int new_mtu) int old_mtu = dev->mtu; int err; - lan_wr(DEV_MAC_MAXLEN_CFG_MAX_LEN_SET(new_mtu), + lan_wr(DEV_MAC_MAXLEN_CFG_MAX_LEN_SET(LAN966X_HW_MTU(new_mtu)), lan966x, DEV_MAC_MAXLEN_CFG(port->chip_port)); dev->mtu = new_mtu; @@ -395,7 +395,7 @@ static int lan966x_port_change_mtu(struct net_device *dev, int new_mtu) err = lan966x_fdma_change_mtu(lan966x); if (err) { - lan_wr(DEV_MAC_MAXLEN_CFG_MAX_LEN_SET(old_mtu), + lan_wr(DEV_MAC_MAXLEN_CFG_MAX_LEN_SET(LAN966X_HW_MTU(old_mtu)), lan966x, DEV_MAC_MAXLEN_CFG(port->chip_port)); dev->mtu = old_mtu; } diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_main.h b/drivers/net/ethernet/microchip/lan966x/lan966x_main.h index 9656071b8289..4ec33999e4df 100644 --- a/drivers/net/ethernet/microchip/lan966x/lan966x_main.h +++ b/drivers/net/ethernet/microchip/lan966x/lan966x_main.h @@ -26,6 +26,8 @@ #define LAN966X_BUFFER_MEMORY (160 * 1024) #define LAN966X_BUFFER_MIN_SZ 60 +#define LAN966X_HW_MTU(mtu) ((mtu) + ETH_HLEN + ETH_FCS_LEN) + #define PGID_AGGR 64 #define PGID_SRC 80 #define PGID_ENTRIES 89 diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_regs.h b/drivers/net/ethernet/microchip/lan966x/lan966x_regs.h index 1d90b93dd417..fb5087fef22e 100644 --- a/drivers/net/ethernet/microchip/lan966x/lan966x_regs.h +++ b/drivers/net/ethernet/microchip/lan966x/lan966x_regs.h @@ -585,6 +585,21 @@ enum lan966x_target { #define DEV_MAC_MAXLEN_CFG_MAX_LEN_GET(x)\ FIELD_GET(DEV_MAC_MAXLEN_CFG_MAX_LEN, x) +/* DEV:MAC_CFG_STATUS:MAC_TAGS_CFG */ +#define DEV_MAC_TAGS_CFG(t) __REG(TARGET_DEV, t, 8, 28, 0, 1, 44, 12, 0, 1, 4) + +#define DEV_MAC_TAGS_CFG_VLAN_DBL_AWR_ENA BIT(1) +#define DEV_MAC_TAGS_CFG_VLAN_DBL_AWR_ENA_SET(x)\ + FIELD_PREP(DEV_MAC_TAGS_CFG_VLAN_DBL_AWR_ENA, x) +#define DEV_MAC_TAGS_CFG_VLAN_DBL_AWR_ENA_GET(x)\ + FIELD_GET(DEV_MAC_TAGS_CFG_VLAN_DBL_AWR_ENA, x) + +#define DEV_MAC_TAGS_CFG_VLAN_AWR_ENA BIT(0) +#define DEV_MAC_TAGS_CFG_VLAN_AWR_ENA_SET(x)\ + FIELD_PREP(DEV_MAC_TAGS_CFG_VLAN_AWR_ENA, x) +#define DEV_MAC_TAGS_CFG_VLAN_AWR_ENA_GET(x)\ + FIELD_GET(DEV_MAC_TAGS_CFG_VLAN_AWR_ENA, x) + /* DEV:MAC_CFG_STATUS:MAC_IFG_CFG */ #define DEV_MAC_IFG_CFG(t) __REG(TARGET_DEV, t, 8, 28, 0, 1, 44, 20, 0, 1, 4) diff --git a/drivers/net/ethernet/microchip/lan966x/lan966x_vlan.c b/drivers/net/ethernet/microchip/lan966x/lan966x_vlan.c index 8d7260cd7da9..3c44660128da 100644 --- a/drivers/net/ethernet/microchip/lan966x/lan966x_vlan.c +++ b/drivers/net/ethernet/microchip/lan966x/lan966x_vlan.c @@ -169,6 +169,12 @@ void lan966x_vlan_port_apply(struct lan966x_port *port) ANA_VLAN_CFG_VLAN_POP_CNT, lan966x, ANA_VLAN_CFG(port->chip_port)); + lan_rmw(DEV_MAC_TAGS_CFG_VLAN_AWR_ENA_SET(port->vlan_aware) | + DEV_MAC_TAGS_CFG_VLAN_DBL_AWR_ENA_SET(port->vlan_aware), + DEV_MAC_TAGS_CFG_VLAN_AWR_ENA | + DEV_MAC_TAGS_CFG_VLAN_DBL_AWR_ENA, + lan966x, DEV_MAC_TAGS_CFG(port->chip_port)); + /* Drop frames with multicast source address */ val = ANA_DROP_CFG_DROP_MC_SMAC_ENA_SET(1); if (port->vlan_aware && !pvid) diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_ethtool.c b/drivers/net/ethernet/microchip/sparx5/sparx5_ethtool.c index 6b0febcb7fa9..01f3a3a41cdb 100644 --- 
a/drivers/net/ethernet/microchip/sparx5/sparx5_ethtool.c +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_ethtool.c @@ -1253,6 +1253,9 @@ int sparx_stats_init(struct sparx5 *sparx5) snprintf(queue_name, sizeof(queue_name), "%s-stats", dev_name(sparx5->dev)); sparx5->stats_queue = create_singlethread_workqueue(queue_name); + if (!sparx5->stats_queue) + return -ENOMEM; + INIT_DELAYED_WORK(&sparx5->stats_work, sparx5_check_stats_work); queue_delayed_work(sparx5->stats_queue, &sparx5->stats_work, SPX5_STATS_CHECK_DELAY); diff --git a/drivers/net/ethernet/microchip/sparx5/sparx5_main.c b/drivers/net/ethernet/microchip/sparx5/sparx5_main.c index 62a325e96345..eeac04b84638 100644 --- a/drivers/net/ethernet/microchip/sparx5/sparx5_main.c +++ b/drivers/net/ethernet/microchip/sparx5/sparx5_main.c @@ -659,6 +659,9 @@ static int sparx5_start(struct sparx5 *sparx5) snprintf(queue_name, sizeof(queue_name), "%s-mact", dev_name(sparx5->dev)); sparx5->mact_queue = create_singlethread_workqueue(queue_name); + if (!sparx5->mact_queue) + return -ENOMEM; + INIT_DELAYED_WORK(&sparx5->mact_work, sparx5_mact_pull_work); queue_delayed_work(sparx5->mact_queue, &sparx5->mact_work, SPX5_MACT_PULL_DELAY); diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c index dcf8212119f9..1d3c4474b7cb 100644 --- a/drivers/net/ethernet/neterion/s2io.c +++ b/drivers/net/ethernet/neterion/s2io.c @@ -7128,9 +7128,8 @@ static int s2io_card_up(struct s2io_nic *sp) if (ret) { DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n", dev->name); - s2io_reset(sp); - free_rx_buffers(sp); - return -ENOMEM; + ret = -ENOMEM; + goto err_fill_buff; } DBG_PRINT(INFO_DBG, "Buf in ring:%d is %d:\n", i, ring->rx_bufs_left); @@ -7168,18 +7167,16 @@ static int s2io_card_up(struct s2io_nic *sp) /* Enable Rx Traffic and interrupts on the NIC */ if (start_nic(sp)) { DBG_PRINT(ERR_DBG, "%s: Starting NIC failed\n", dev->name); - s2io_reset(sp); - free_rx_buffers(sp); - return -ENODEV; + ret = -ENODEV; + goto err_out; } /* Add interrupt service routine */ if (s2io_add_isr(sp) != 0) { if (sp->config.intr_type == MSI_X) s2io_rem_isr(sp); - s2io_reset(sp); - free_rx_buffers(sp); - return -ENODEV; + ret = -ENODEV; + goto err_out; } timer_setup(&sp->alarm_timer, s2io_alarm_handle, 0); @@ -7199,6 +7196,20 @@ static int s2io_card_up(struct s2io_nic *sp) } return 0; + +err_out: + if (config->napi) { + if (config->intr_type == MSI_X) { + for (i = 0; i < sp->config.rx_ring_num; i++) + napi_disable(&sp->mac_control.rings[i].napi); + } else { + napi_disable(&sp->napi); + } + } +err_fill_buff: + s2io_reset(sp); + free_rx_buffers(sp); + return ret; } /** diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.c b/drivers/net/ethernet/netronome/nfp/nfp_main.c index e66e548919d4..71301dbd8fb5 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_main.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_main.c @@ -716,16 +716,26 @@ static u64 nfp_net_pf_get_app_cap(struct nfp_pf *pf) return val; } -static int nfp_pf_cfg_hwinfo(struct nfp_pf *pf, bool sp_indiff) +static void nfp_pf_cfg_hwinfo(struct nfp_pf *pf) { struct nfp_nsp *nsp; char hwinfo[32]; + bool sp_indiff; int err; nsp = nfp_nsp_open(pf->cpp); if (IS_ERR(nsp)) - return PTR_ERR(nsp); + return; + + if (!nfp_nsp_has_hwinfo_set(nsp)) + goto end; + sp_indiff = (nfp_net_pf_get_app_id(pf) == NFP_APP_FLOWER_NIC) || + (nfp_net_pf_get_app_cap(pf) & NFP_NET_APP_CAP_SP_INDIFF); + + /* No need to clean `sp_indiff` in driver, management firmware + * will do it when application firmware is unloaded. 
+ */ snprintf(hwinfo, sizeof(hwinfo), "sp_indiff=%d", sp_indiff); err = nfp_nsp_hwinfo_set(nsp, hwinfo, sizeof(hwinfo)); /* Not a fatal error, no need to return error to stop driver from loading */ @@ -739,21 +749,8 @@ static int nfp_pf_cfg_hwinfo(struct nfp_pf *pf, bool sp_indiff) pf->eth_tbl = __nfp_eth_read_ports(pf->cpp, nsp); } +end: nfp_nsp_close(nsp); - return 0; -} - -static int nfp_pf_nsp_cfg(struct nfp_pf *pf) -{ - bool sp_indiff = (nfp_net_pf_get_app_id(pf) == NFP_APP_FLOWER_NIC) || - (nfp_net_pf_get_app_cap(pf) & NFP_NET_APP_CAP_SP_INDIFF); - - return nfp_pf_cfg_hwinfo(pf, sp_indiff); -} - -static void nfp_pf_nsp_clean(struct nfp_pf *pf) -{ - nfp_pf_cfg_hwinfo(pf, false); } static int nfp_pci_probe(struct pci_dev *pdev, @@ -856,13 +853,11 @@ static int nfp_pci_probe(struct pci_dev *pdev, goto err_fw_unload; } - err = nfp_pf_nsp_cfg(pf); - if (err) - goto err_fw_unload; + nfp_pf_cfg_hwinfo(pf); err = nfp_net_pci_probe(pf); if (err) - goto err_nsp_clean; + goto err_fw_unload; err = nfp_hwmon_register(pf); if (err) { @@ -874,8 +869,6 @@ static int nfp_pci_probe(struct pci_dev *pdev, err_net_remove: nfp_net_pci_remove(pf); -err_nsp_clean: - nfp_pf_nsp_clean(pf); err_fw_unload: kfree(pf->rtbl); nfp_mip_close(pf->mip); @@ -915,7 +908,6 @@ static void __nfp_pci_shutdown(struct pci_dev *pdev, bool unload_fw) nfp_net_pci_remove(pf); - nfp_pf_nsp_clean(pf); vfree(pf->dumpspec); kfree(pf->rtbl); nfp_mip_close(pf->mip); diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c index 22a5d2419084..1775997f9c69 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c @@ -1477,15 +1477,15 @@ nfp_port_get_module_info(struct net_device *netdev, if (data < 0x3) { modinfo->type = ETH_MODULE_SFF_8436; - modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN; + modinfo->eeprom_len = ETH_MODULE_SFF_8436_MAX_LEN; } else { modinfo->type = ETH_MODULE_SFF_8636; - modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN; + modinfo->eeprom_len = ETH_MODULE_SFF_8636_MAX_LEN; } break; case NFP_INTERFACE_QSFP28: modinfo->type = ETH_MODULE_SFF_8636; - modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN; + modinfo->eeprom_len = ETH_MODULE_SFF_8636_MAX_LEN; break; default: netdev_err(netdev, "Unsupported module 0x%x detected\n", diff --git a/drivers/net/ethernet/ni/nixge.c b/drivers/net/ethernet/ni/nixge.c index 3db4a2431741..19d043b593cc 100644 --- a/drivers/net/ethernet/ni/nixge.c +++ b/drivers/net/ethernet/ni/nixge.c @@ -900,6 +900,7 @@ static int nixge_open(struct net_device *ndev) err_rx_irq: free_irq(priv->tx_irq, ndev); err_tx_irq: + napi_disable(&priv->napi); phy_stop(phy); phy_disconnect(phy); tasklet_kill(&priv->dma_err_tasklet); diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c index 5d58fd99be3c..19d4848df17d 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c @@ -2817,11 +2817,15 @@ err_out: * than the full array, but leave the qcq shells in place */ for (i = lif->nxqs; i < lif->ionic->ntxqs_per_lif; i++) { - lif->txqcqs[i]->flags &= ~IONIC_QCQ_F_INTR; - ionic_qcq_free(lif, lif->txqcqs[i]); + if (lif->txqcqs && lif->txqcqs[i]) { + lif->txqcqs[i]->flags &= ~IONIC_QCQ_F_INTR; + ionic_qcq_free(lif, lif->txqcqs[i]); + } - lif->rxqcqs[i]->flags &= ~IONIC_QCQ_F_INTR; - ionic_qcq_free(lif, lif->rxqcqs[i]); + if (lif->rxqcqs && lif->rxqcqs[i]) { + lif->rxqcqs[i]->flags &= 
~IONIC_QCQ_F_INTR; + ionic_qcq_free(lif, lif->rxqcqs[i]); + } } if (err) diff --git a/drivers/net/ethernet/pensando/ionic/ionic_main.c b/drivers/net/ethernet/pensando/ionic/ionic_main.c index 56f93b030551..5456c2b15d9b 100644 --- a/drivers/net/ethernet/pensando/ionic/ionic_main.c +++ b/drivers/net/ethernet/pensando/ionic/ionic_main.c @@ -687,8 +687,14 @@ int ionic_port_reset(struct ionic *ionic) static int __init ionic_init_module(void) { + int ret; + ionic_debugfs_create(); - return ionic_bus_register_driver(); + ret = ionic_bus_register_driver(); + if (ret) + ionic_debugfs_destroy(); + + return ret; } static void __exit ionic_cleanup_module(void) diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c index d1e1aa19a68e..7022fb2005a2 100644 --- a/drivers/net/ethernet/sfc/ef10.c +++ b/drivers/net/ethernet/sfc/ef10.c @@ -3277,6 +3277,30 @@ static int efx_ef10_set_mac_address(struct efx_nic *efx) bool was_enabled = efx->port_enabled; int rc; +#ifdef CONFIG_SFC_SRIOV + /* If this function is a VF and we have access to the parent PF, + * then use the PF control path to attempt to change the VF MAC address. + */ + if (efx->pci_dev->is_virtfn && efx->pci_dev->physfn) { + struct efx_nic *efx_pf = pci_get_drvdata(efx->pci_dev->physfn); + struct efx_ef10_nic_data *nic_data = efx->nic_data; + u8 mac[ETH_ALEN]; + + /* net_dev->dev_addr can be zeroed by efx_net_stop in + * efx_ef10_sriov_set_vf_mac, so pass in a copy. + */ + ether_addr_copy(mac, efx->net_dev->dev_addr); + + rc = efx_ef10_sriov_set_vf_mac(efx_pf, nic_data->vf_index, mac); + if (!rc) + return 0; + + netif_dbg(efx, drv, efx->net_dev, + "Updating VF mac via PF failed (%d), setting directly\n", + rc); + } +#endif + efx_device_detach_sync(efx); efx_net_stop(efx->net_dev); @@ -3297,40 +3321,6 @@ static int efx_ef10_set_mac_address(struct efx_nic *efx) efx_net_open(efx->net_dev); efx_device_attach_if_not_resetting(efx); -#ifdef CONFIG_SFC_SRIOV - if (efx->pci_dev->is_virtfn && efx->pci_dev->physfn) { - struct efx_ef10_nic_data *nic_data = efx->nic_data; - struct pci_dev *pci_dev_pf = efx->pci_dev->physfn; - - if (rc == -EPERM) { - struct efx_nic *efx_pf; - - /* Switch to PF and change MAC address on vport */ - efx_pf = pci_get_drvdata(pci_dev_pf); - - rc = efx_ef10_sriov_set_vf_mac(efx_pf, - nic_data->vf_index, - efx->net_dev->dev_addr); - } else if (!rc) { - struct efx_nic *efx_pf = pci_get_drvdata(pci_dev_pf); - struct efx_ef10_nic_data *nic_data = efx_pf->nic_data; - unsigned int i; - - /* MAC address successfully changed by VF (with MAC - * spoofing) so update the parent PF if possible. 
- */ - for (i = 0; i < efx_pf->vf_count; ++i) { - struct ef10_vf *vf = nic_data->vf + i; - - if (vf->efx == efx) { - ether_addr_copy(vf->mac, - efx->net_dev->dev_addr); - return 0; - } - } - } - } else -#endif if (rc == -EPERM) { netif_err(efx, drv, efx->net_dev, "Cannot change MAC address; use sfboot to enable" diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c index 054d5ce6029e..0556542d7a6b 100644 --- a/drivers/net/ethernet/sfc/efx.c +++ b/drivers/net/ethernet/sfc/efx.c @@ -1059,8 +1059,10 @@ static int efx_pci_probe(struct pci_dev *pci_dev, /* Allocate and initialise a struct net_device */ net_dev = alloc_etherdev_mq(sizeof(probe_data), EFX_MAX_CORE_TX_QUEUES); - if (!net_dev) - return -ENOMEM; + if (!net_dev) { + rc = -ENOMEM; + goto fail0; + } probe_ptr = netdev_priv(net_dev); *probe_ptr = probe_data; efx->net_dev = net_dev; @@ -1132,6 +1134,8 @@ static int efx_pci_probe(struct pci_dev *pci_dev, WARN_ON(rc > 0); netif_dbg(efx, drv, efx->net_dev, "initialisation failed. rc=%d\n", rc); free_netdev(net_dev); + fail0: + kfree(probe_data); return rc; } diff --git a/drivers/net/ethernet/sfc/filter.h b/drivers/net/ethernet/sfc/filter.h index be72e71da027..5f201a547e5b 100644 --- a/drivers/net/ethernet/sfc/filter.h +++ b/drivers/net/ethernet/sfc/filter.h @@ -162,9 +162,9 @@ struct efx_filter_spec { u32 priority:2; u32 flags:6; u32 dmaq_id:12; - u32 vport_id; u32 rss_context; - __be16 outer_vid __aligned(4); /* allow jhash2() of match values */ + u32 vport_id; + __be16 outer_vid; __be16 inner_vid; u8 loc_mac[ETH_ALEN]; u8 rem_mac[ETH_ALEN]; diff --git a/drivers/net/ethernet/sfc/rx_common.c b/drivers/net/ethernet/sfc/rx_common.c index 4826e6a7e4ce..9220afeddee8 100644 --- a/drivers/net/ethernet/sfc/rx_common.c +++ b/drivers/net/ethernet/sfc/rx_common.c @@ -660,17 +660,17 @@ bool efx_filter_spec_equal(const struct efx_filter_spec *left, (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX))) return false; - return memcmp(&left->outer_vid, &right->outer_vid, + return memcmp(&left->vport_id, &right->vport_id, sizeof(struct efx_filter_spec) - - offsetof(struct efx_filter_spec, outer_vid)) == 0; + offsetof(struct efx_filter_spec, vport_id)) == 0; } u32 efx_filter_spec_hash(const struct efx_filter_spec *spec) { - BUILD_BUG_ON(offsetof(struct efx_filter_spec, outer_vid) & 3); - return jhash2((const u32 *)&spec->outer_vid, + BUILD_BUG_ON(offsetof(struct efx_filter_spec, vport_id) & 3); + return jhash2((const u32 *)&spec->vport_id, (sizeof(struct efx_filter_spec) - - offsetof(struct efx_filter_spec, outer_vid)) / 4, + offsetof(struct efx_filter_spec, vport_id)) / 4, 0); } diff --git a/drivers/net/ethernet/socionext/netsec.c b/drivers/net/ethernet/socionext/netsec.c index 2240f6d0b89b..9b46579b5a10 100644 --- a/drivers/net/ethernet/socionext/netsec.c +++ b/drivers/net/ethernet/socionext/netsec.c @@ -1961,11 +1961,13 @@ static int netsec_register_mdio(struct netsec_priv *priv, u32 phy_addr) ret = PTR_ERR(priv->phydev); dev_err(priv->dev, "get_phy_device err(%d)\n", ret); priv->phydev = NULL; + mdiobus_unregister(bus); return -ENODEV; } ret = phy_device_register(priv->phydev); if (ret) { + phy_device_free(priv->phydev); mdiobus_unregister(bus); dev_err(priv->dev, "phy_device_register err(%d)\n", ret); diff --git a/drivers/net/ethernet/socionext/sni_ave.c b/drivers/net/ethernet/socionext/sni_ave.c index 1fa09b49ba7f..d2c6a5dfdc0e 100644 --- a/drivers/net/ethernet/socionext/sni_ave.c +++ b/drivers/net/ethernet/socionext/sni_ave.c @@ -1229,6 +1229,8 @@ static int ave_init(struct net_device 
*ndev) phy_support_asym_pause(phydev); + phydev->mac_managed_pm = true; + phy_attached_info(phydev); return 0; @@ -1756,6 +1758,10 @@ static int ave_resume(struct device *dev) ave_global_reset(ndev); + ret = phy_init_hw(ndev->phydev); + if (ret) + return ret; + ave_ethtool_get_wol(ndev, &wol); wol.wolopts = priv->wolopts; __ave_ethtool_set_wol(ndev, &wol); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c index 0a2afc1a3124..7deb1f817dac 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-intel.c @@ -629,7 +629,6 @@ static int ehl_common_data(struct pci_dev *pdev, { plat->rx_queues_to_use = 8; plat->tx_queues_to_use = 8; - plat->clk_ptp_rate = 200000000; plat->use_phy_wol = 1; plat->safety_feat_cfg->tsoee = 1; @@ -654,6 +653,8 @@ static int ehl_sgmii_data(struct pci_dev *pdev, plat->serdes_powerup = intel_serdes_powerup; plat->serdes_powerdown = intel_serdes_powerdown; + plat->clk_ptp_rate = 204800000; + return ehl_common_data(pdev, plat); } @@ -667,6 +668,8 @@ static int ehl_rgmii_data(struct pci_dev *pdev, plat->bus_id = 1; plat->phy_interface = PHY_INTERFACE_MODE_RGMII; + plat->clk_ptp_rate = 204800000; + return ehl_common_data(pdev, plat); } @@ -683,6 +686,8 @@ static int ehl_pse0_common_data(struct pci_dev *pdev, plat->bus_id = 2; plat->addr64 = 32; + plat->clk_ptp_rate = 200000000; + intel_mgbe_pse_crossts_adj(intel_priv, EHL_PSE_ART_MHZ); return ehl_common_data(pdev, plat); @@ -722,6 +727,8 @@ static int ehl_pse1_common_data(struct pci_dev *pdev, plat->bus_id = 3; plat->addr64 = 32; + plat->clk_ptp_rate = 200000000; + intel_mgbe_pse_crossts_adj(intel_priv, EHL_PSE_ART_MHZ); return ehl_common_data(pdev, plat); @@ -757,7 +764,7 @@ static int tgl_common_data(struct pci_dev *pdev, { plat->rx_queues_to_use = 6; plat->tx_queues_to_use = 4; - plat->clk_ptp_rate = 200000000; + plat->clk_ptp_rate = 204800000; plat->speed_mode_2500 = intel_speed_mode_2500; plat->safety_feat_cfg->tsoee = 1; diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c index 017dbbda0c1c..a25c187d3185 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-loongson.c @@ -51,7 +51,6 @@ static int loongson_dwmac_probe(struct pci_dev *pdev, const struct pci_device_id struct stmmac_resources res; struct device_node *np; int ret, i, phy_mode; - bool mdio = false; np = dev_of_node(&pdev->dev); @@ -69,29 +68,31 @@ static int loongson_dwmac_probe(struct pci_dev *pdev, const struct pci_device_id if (!plat) return -ENOMEM; + plat->mdio_node = of_get_child_by_name(np, "mdio"); if (plat->mdio_node) { - dev_err(&pdev->dev, "Found MDIO subnode\n"); - mdio = true; - } + dev_info(&pdev->dev, "Found MDIO subnode\n"); - if (mdio) { plat->mdio_bus_data = devm_kzalloc(&pdev->dev, sizeof(*plat->mdio_bus_data), GFP_KERNEL); - if (!plat->mdio_bus_data) - return -ENOMEM; + if (!plat->mdio_bus_data) { + ret = -ENOMEM; + goto err_put_node; + } plat->mdio_bus_data->needs_reset = true; } plat->dma_cfg = devm_kzalloc(&pdev->dev, sizeof(*plat->dma_cfg), GFP_KERNEL); - if (!plat->dma_cfg) - return -ENOMEM; + if (!plat->dma_cfg) { + ret = -ENOMEM; + goto err_put_node; + } /* Enable pci device */ ret = pci_enable_device(pdev); if (ret) { dev_err(&pdev->dev, "%s: ERROR: failed to enable device\n", __func__); - return ret; + goto err_put_node; } /* Get the base address of device */ @@ -100,7 +101,7 @@ static 
int loongson_dwmac_probe(struct pci_dev *pdev, const struct pci_device_id continue; ret = pcim_iomap_regions(pdev, BIT(0), pci_name(pdev)); if (ret) - return ret; + goto err_disable_device; break; } @@ -111,7 +112,8 @@ static int loongson_dwmac_probe(struct pci_dev *pdev, const struct pci_device_id phy_mode = device_get_phy_mode(&pdev->dev); if (phy_mode < 0) { dev_err(&pdev->dev, "phy_mode not found\n"); - return phy_mode; + ret = phy_mode; + goto err_disable_device; } plat->phy_interface = phy_mode; @@ -128,6 +130,7 @@ static int loongson_dwmac_probe(struct pci_dev *pdev, const struct pci_device_id if (res.irq < 0) { dev_err(&pdev->dev, "IRQ macirq not found\n"); ret = -ENODEV; + goto err_disable_msi; } res.wol_irq = of_irq_get_byname(np, "eth_wake_irq"); @@ -140,15 +143,31 @@ static int loongson_dwmac_probe(struct pci_dev *pdev, const struct pci_device_id if (res.lpi_irq < 0) { dev_err(&pdev->dev, "IRQ eth_lpi not found\n"); ret = -ENODEV; + goto err_disable_msi; } - return stmmac_dvr_probe(&pdev->dev, plat, &res); + ret = stmmac_dvr_probe(&pdev->dev, plat, &res); + if (ret) + goto err_disable_msi; + + return ret; + +err_disable_msi: + pci_disable_msi(pdev); +err_disable_device: + pci_disable_device(pdev); +err_put_node: + of_node_put(plat->mdio_node); + return ret; } static void loongson_dwmac_remove(struct pci_dev *pdev) { + struct net_device *ndev = dev_get_drvdata(&pdev->dev); + struct stmmac_priv *priv = netdev_priv(ndev); int i; + of_node_put(priv->plat->mdio_node); stmmac_dvr_remove(&pdev->dev); for (i = 0; i < PCI_STD_NUM_BARS; i++) { @@ -158,6 +177,7 @@ static void loongson_dwmac_remove(struct pci_dev *pdev) break; } + pci_disable_msi(pdev); pci_disable_device(pdev); } diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c index c7a6588d9398..e8b507f88fbc 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c @@ -272,11 +272,9 @@ static int meson8b_devm_clk_prepare_enable(struct meson8b_dwmac *dwmac, if (ret) return ret; - devm_add_action_or_reset(dwmac->dev, - (void(*)(void *))clk_disable_unprepare, - dwmac->rgmii_tx_clk); - - return 0; + return devm_add_action_or_reset(dwmac->dev, + (void(*)(void *))clk_disable_unprepare, + clk); } static int meson8b_init_rgmii_delays(struct meson8b_dwmac *dwmac) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c index f7269d79a385..6656d76b6766 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c @@ -1243,6 +1243,12 @@ static const struct rk_gmac_ops rk3588_ops = { .set_rgmii_speed = rk3588_set_gmac_speed, .set_rmii_speed = rk3588_set_gmac_speed, .set_clock_selection = rk3588_set_clock_selection, + .regs_valid = true, + .regs = { + 0xfe1b0000, /* gmac0 */ + 0xfe1c0000, /* gmac1 */ + 0x0, /* sentinel */ + }, }; #define RV1108_GRF_GMAC_CON0 0X0900 diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 65c96773c6d2..6b43da78cdf0 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -1214,6 +1214,7 @@ static int stmmac_phy_setup(struct stmmac_priv *priv) if (priv->plat->tx_queues_to_use > 1) priv->phylink_config.mac_capabilities &= ~(MAC_10HD | MAC_100HD | MAC_1000HD); + priv->phylink_config.mac_managed_pm = true; phylink = phylink_create(&priv->phylink_config, fwnode, 
mode, &stmmac_phylink_mac_ops); @@ -6547,6 +6548,9 @@ void stmmac_xdp_release(struct net_device *dev) struct stmmac_priv *priv = netdev_priv(dev); u32 chan; + /* Ensure tx function is not running */ + netif_tx_disable(dev); + /* Disable NAPI process */ stmmac_disable_all_queues(priv); diff --git a/drivers/net/ethernet/sun/sunhme.c b/drivers/net/ethernet/sun/sunhme.c index 91f10f746dff..1c16548415cd 100644 --- a/drivers/net/ethernet/sun/sunhme.c +++ b/drivers/net/ethernet/sun/sunhme.c @@ -1328,7 +1328,7 @@ static int happy_meal_init(struct happy_meal *hp) void __iomem *erxregs = hp->erxregs; void __iomem *bregs = hp->bigmacregs; void __iomem *tregs = hp->tcvregs; - const char *bursts; + const char *bursts = "64"; u32 regtmp, rxcfg; /* If auto-negotiation timer is running, kill it. */ diff --git a/drivers/net/ethernet/sunplus/spl2sw_driver.c b/drivers/net/ethernet/sunplus/spl2sw_driver.c index 9be585237277..c499a14314f1 100644 --- a/drivers/net/ethernet/sunplus/spl2sw_driver.c +++ b/drivers/net/ethernet/sunplus/spl2sw_driver.c @@ -287,7 +287,6 @@ static u32 spl2sw_init_netdev(struct platform_device *pdev, u8 *mac_addr, if (ret) { dev_err(&pdev->dev, "Failed to register net device \"%s\"!\n", ndev->name); - free_netdev(ndev); *r_ndev = NULL; return ret; } diff --git a/drivers/net/ethernet/ti/am65-cpsw-nuss.c b/drivers/net/ethernet/ti/am65-cpsw-nuss.c index 7f86068f3ff6..c50b137f92d7 100644 --- a/drivers/net/ethernet/ti/am65-cpsw-nuss.c +++ b/drivers/net/ethernet/ti/am65-cpsw-nuss.c @@ -2823,7 +2823,6 @@ static int am65_cpsw_nuss_remove(struct platform_device *pdev) if (ret < 0) return ret; - am65_cpsw_nuss_phylink_cleanup(common); am65_cpsw_unregister_devlink(common); am65_cpsw_unregister_notifiers(common); @@ -2831,6 +2830,7 @@ static int am65_cpsw_nuss_remove(struct platform_device *pdev) * dma_deconfigure(dev) before devres_release_all(dev) */ am65_cpsw_nuss_cleanup_ndev(common); + am65_cpsw_nuss_phylink_cleanup(common); of_platform_device_destroy(common->mdio_dev, NULL); diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index 709ca6dd6ecb..13c9c2d6b79b 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -854,6 +854,8 @@ static int cpsw_ndo_open(struct net_device *ndev) err_cleanup: if (!cpsw->usage_count) { + napi_disable(&cpsw->napi_rx); + napi_disable(&cpsw->napi_tx); cpdma_ctlr_stop(cpsw->dma); cpsw_destroy_xdp_rxqs(cpsw); } diff --git a/drivers/net/ethernet/tundra/tsi108_eth.c b/drivers/net/ethernet/tundra/tsi108_eth.c index 2cd2afc3fff0..d09d352e1c0a 100644 --- a/drivers/net/ethernet/tundra/tsi108_eth.c +++ b/drivers/net/ethernet/tundra/tsi108_eth.c @@ -1290,12 +1290,15 @@ static int tsi108_open(struct net_device *dev) data->rxring = dma_alloc_coherent(&data->pdev->dev, rxring_size, &data->rxdma, GFP_KERNEL); - if (!data->rxring) + if (!data->rxring) { + free_irq(data->irq_num, dev); return -ENOMEM; + } data->txring = dma_alloc_coherent(&data->pdev->dev, txring_size, &data->txdma, GFP_KERNEL); if (!data->txring) { + free_irq(data->irq_num, dev); dma_free_coherent(&data->pdev->dev, rxring_size, data->rxring, data->rxdma); return -ENOMEM; diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c index 05848ff15fb5..a3967f8de417 100644 --- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c +++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c @@ -108,7 +108,7 @@ * @next_tx_buf_to_use: next Tx buffer to write to * @next_rx_buf_to_use: next Rx buffer to read from * @base_addr: base 
address of the Emaclite device - * @reset_lock: lock used for synchronization + * @reset_lock: lock to serialize xmit and tx_timeout execution * @deferred_skb: holds an skb (for transmission at a later time) when the * Tx buffer is not free * @phy_dev: pointer to the PHY device diff --git a/drivers/net/hamradio/bpqether.c b/drivers/net/hamradio/bpqether.c index 30af0081e2be..83a16d10eedb 100644 --- a/drivers/net/hamradio/bpqether.c +++ b/drivers/net/hamradio/bpqether.c @@ -533,7 +533,7 @@ static int bpq_device_event(struct notifier_block *this, if (!net_eq(dev_net(dev), &init_net)) return NOTIFY_DONE; - if (!dev_is_ethdev(dev)) + if (!dev_is_ethdev(dev) && !bpq_get_ax25_dev(dev)) return NOTIFY_DONE; switch (event) { diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c index 11f767a20444..eea777ec2541 100644 --- a/drivers/net/hyperv/rndis_filter.c +++ b/drivers/net/hyperv/rndis_filter.c @@ -20,6 +20,7 @@ #include <linux/vmalloc.h> #include <linux/rtnetlink.h> #include <linux/ucs2_string.h> +#include <linux/string.h> #include "hyperv_net.h" #include "netvsc_trace.h" @@ -335,9 +336,10 @@ static void rndis_filter_receive_response(struct net_device *ndev, if (resp->msg_len <= sizeof(struct rndis_message) + RNDIS_EXT_LEN) { memcpy(&request->response_msg, resp, RNDIS_HEADER_SIZE + sizeof(*req_id)); - memcpy((void *)&request->response_msg + RNDIS_HEADER_SIZE + sizeof(*req_id), + unsafe_memcpy((void *)&request->response_msg + RNDIS_HEADER_SIZE + sizeof(*req_id), data + RNDIS_HEADER_SIZE + sizeof(*req_id), - resp->msg_len - RNDIS_HEADER_SIZE - sizeof(*req_id)); + resp->msg_len - RNDIS_HEADER_SIZE - sizeof(*req_id), + "request->response_msg is followed by a padding of RNDIS_EXT_LEN inside rndis_request"); if (request->request_msg.ndis_msg_type == RNDIS_MSG_QUERY && request->request_msg.msg. 
query_req.oid == RNDIS_OID_GEN_MEDIA_CONNECT_STATUS) diff --git a/drivers/net/ipa/data/ipa_data-v3.5.1.c b/drivers/net/ipa/data/ipa_data-v3.5.1.c index 383ef1890065..42f2c88a92d4 100644 --- a/drivers/net/ipa/data/ipa_data-v3.5.1.c +++ b/drivers/net/ipa/data/ipa_data-v3.5.1.c @@ -179,10 +179,10 @@ static const struct ipa_gsi_endpoint_data ipa_gsi_endpoint_data[] = { static const struct ipa_resource ipa_resource_src[] = { [IPA_RESOURCE_TYPE_SRC_PKT_CONTEXTS] = { .limits[IPA_RSRC_GROUP_SRC_LWA_DL] = { - .min = 1, .max = 255, + .min = 1, .max = 63, }, .limits[IPA_RSRC_GROUP_SRC_UL_DL] = { - .min = 1, .max = 255, + .min = 1, .max = 63, }, .limits[IPA_RSRC_GROUP_SRC_UC_RX_Q] = { .min = 1, .max = 63, diff --git a/drivers/net/ipa/ipa_main.c b/drivers/net/ipa/ipa_main.c index 3461ad3029ab..49537fccf6ad 100644 --- a/drivers/net/ipa/ipa_main.c +++ b/drivers/net/ipa/ipa_main.c @@ -434,6 +434,9 @@ static void ipa_idle_indication_cfg(struct ipa *ipa, const struct ipa_reg *reg; u32 val; + if (ipa->version < IPA_VERSION_3_5_1) + return; + reg = ipa_reg(ipa, IDLE_INDICATION_CFG); val = ipa_reg_encode(reg, ENTER_IDLE_DEBOUNCE_THRESH, enter_idle_debounce_thresh); diff --git a/drivers/net/ipa/reg/ipa_reg-v3.1.c b/drivers/net/ipa/reg/ipa_reg-v3.1.c index 116b27717e3d..0d002c3c38a2 100644 --- a/drivers/net/ipa/reg/ipa_reg-v3.1.c +++ b/drivers/net/ipa/reg/ipa_reg-v3.1.c @@ -127,112 +127,80 @@ static const u32 ipa_reg_counter_cfg_fmask[] = { IPA_REG_FIELDS(COUNTER_CFG, counter_cfg, 0x000001f0); static const u32 ipa_reg_src_rsrc_grp_01_rsrc_type_fmask[] = { - [X_MIN_LIM] = GENMASK(5, 0), - /* Bits 6-7 reserved */ - [X_MAX_LIM] = GENMASK(13, 8), - /* Bits 14-15 reserved */ - [Y_MIN_LIM] = GENMASK(21, 16), - /* Bits 22-23 reserved */ - [Y_MAX_LIM] = GENMASK(29, 24), - /* Bits 30-31 reserved */ + [X_MIN_LIM] = GENMASK(7, 0), + [X_MAX_LIM] = GENMASK(15, 8), + [Y_MIN_LIM] = GENMASK(23, 16), + [Y_MAX_LIM] = GENMASK(31, 24), }; IPA_REG_STRIDE_FIELDS(SRC_RSRC_GRP_01_RSRC_TYPE, src_rsrc_grp_01_rsrc_type, 0x00000400, 0x0020); static const u32 ipa_reg_src_rsrc_grp_23_rsrc_type_fmask[] = { - [X_MIN_LIM] = GENMASK(5, 0), - /* Bits 6-7 reserved */ - [X_MAX_LIM] = GENMASK(13, 8), - /* Bits 14-15 reserved */ - [Y_MIN_LIM] = GENMASK(21, 16), - /* Bits 22-23 reserved */ - [Y_MAX_LIM] = GENMASK(29, 24), - /* Bits 30-31 reserved */ + [X_MIN_LIM] = GENMASK(7, 0), + [X_MAX_LIM] = GENMASK(15, 8), + [Y_MIN_LIM] = GENMASK(23, 16), + [Y_MAX_LIM] = GENMASK(31, 24), }; IPA_REG_STRIDE_FIELDS(SRC_RSRC_GRP_23_RSRC_TYPE, src_rsrc_grp_23_rsrc_type, 0x00000404, 0x0020); static const u32 ipa_reg_src_rsrc_grp_45_rsrc_type_fmask[] = { - [X_MIN_LIM] = GENMASK(5, 0), - /* Bits 6-7 reserved */ - [X_MAX_LIM] = GENMASK(13, 8), - /* Bits 14-15 reserved */ - [Y_MIN_LIM] = GENMASK(21, 16), - /* Bits 22-23 reserved */ - [Y_MAX_LIM] = GENMASK(29, 24), - /* Bits 30-31 reserved */ + [X_MIN_LIM] = GENMASK(7, 0), + [X_MAX_LIM] = GENMASK(15, 8), + [Y_MIN_LIM] = GENMASK(23, 16), + [Y_MAX_LIM] = GENMASK(31, 24), }; IPA_REG_STRIDE_FIELDS(SRC_RSRC_GRP_45_RSRC_TYPE, src_rsrc_grp_45_rsrc_type, 0x00000408, 0x0020); static const u32 ipa_reg_src_rsrc_grp_67_rsrc_type_fmask[] = { - [X_MIN_LIM] = GENMASK(5, 0), - /* Bits 6-7 reserved */ - [X_MAX_LIM] = GENMASK(13, 8), - /* Bits 14-15 reserved */ - [Y_MIN_LIM] = GENMASK(21, 16), - /* Bits 22-23 reserved */ - [Y_MAX_LIM] = GENMASK(29, 24), - /* Bits 30-31 reserved */ + [X_MIN_LIM] = GENMASK(7, 0), + [X_MAX_LIM] = GENMASK(15, 8), + [Y_MIN_LIM] = GENMASK(23, 16), + [Y_MAX_LIM] = GENMASK(31, 24), }; 
IPA_REG_STRIDE_FIELDS(SRC_RSRC_GRP_67_RSRC_TYPE, src_rsrc_grp_67_rsrc_type, 0x0000040c, 0x0020); static const u32 ipa_reg_dst_rsrc_grp_01_rsrc_type_fmask[] = { - [X_MIN_LIM] = GENMASK(5, 0), - /* Bits 6-7 reserved */ - [X_MAX_LIM] = GENMASK(13, 8), - /* Bits 14-15 reserved */ - [Y_MIN_LIM] = GENMASK(21, 16), - /* Bits 22-23 reserved */ - [Y_MAX_LIM] = GENMASK(29, 24), - /* Bits 30-31 reserved */ + [X_MIN_LIM] = GENMASK(7, 0), + [X_MAX_LIM] = GENMASK(15, 8), + [Y_MIN_LIM] = GENMASK(23, 16), + [Y_MAX_LIM] = GENMASK(31, 24), }; IPA_REG_STRIDE_FIELDS(DST_RSRC_GRP_01_RSRC_TYPE, dst_rsrc_grp_01_rsrc_type, 0x00000500, 0x0020); static const u32 ipa_reg_dst_rsrc_grp_23_rsrc_type_fmask[] = { - [X_MIN_LIM] = GENMASK(5, 0), - /* Bits 6-7 reserved */ - [X_MAX_LIM] = GENMASK(13, 8), - /* Bits 14-15 reserved */ - [Y_MIN_LIM] = GENMASK(21, 16), - /* Bits 22-23 reserved */ - [Y_MAX_LIM] = GENMASK(29, 24), - /* Bits 30-31 reserved */ + [X_MIN_LIM] = GENMASK(7, 0), + [X_MAX_LIM] = GENMASK(15, 8), + [Y_MIN_LIM] = GENMASK(23, 16), + [Y_MAX_LIM] = GENMASK(31, 24), }; IPA_REG_STRIDE_FIELDS(DST_RSRC_GRP_23_RSRC_TYPE, dst_rsrc_grp_23_rsrc_type, 0x00000504, 0x0020); static const u32 ipa_reg_dst_rsrc_grp_45_rsrc_type_fmask[] = { - [X_MIN_LIM] = GENMASK(5, 0), - /* Bits 6-7 reserved */ - [X_MAX_LIM] = GENMASK(13, 8), - /* Bits 14-15 reserved */ - [Y_MIN_LIM] = GENMASK(21, 16), - /* Bits 22-23 reserved */ - [Y_MAX_LIM] = GENMASK(29, 24), - /* Bits 30-31 reserved */ + [X_MIN_LIM] = GENMASK(7, 0), + [X_MAX_LIM] = GENMASK(15, 8), + [Y_MIN_LIM] = GENMASK(23, 16), + [Y_MAX_LIM] = GENMASK(31, 24), }; IPA_REG_STRIDE_FIELDS(DST_RSRC_GRP_45_RSRC_TYPE, dst_rsrc_grp_45_rsrc_type, 0x00000508, 0x0020); static const u32 ipa_reg_dst_rsrc_grp_67_rsrc_type_fmask[] = { - [X_MIN_LIM] = GENMASK(5, 0), - /* Bits 6-7 reserved */ - [X_MAX_LIM] = GENMASK(13, 8), - /* Bits 14-15 reserved */ - [Y_MIN_LIM] = GENMASK(21, 16), - /* Bits 22-23 reserved */ - [Y_MAX_LIM] = GENMASK(29, 24), - /* Bits 30-31 reserved */ + [X_MIN_LIM] = GENMASK(7, 0), + [X_MAX_LIM] = GENMASK(15, 8), + [Y_MIN_LIM] = GENMASK(23, 16), + [Y_MAX_LIM] = GENMASK(31, 24), }; IPA_REG_STRIDE_FIELDS(DST_RSRC_GRP_67_RSRC_TYPE, dst_rsrc_grp_67_rsrc_type, diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c index c891b60937a7..85376d2f24ca 100644 --- a/drivers/net/macsec.c +++ b/drivers/net/macsec.c @@ -1413,7 +1413,8 @@ static struct macsec_rx_sc *del_rx_sc(struct macsec_secy *secy, sci_t sci) return NULL; } -static struct macsec_rx_sc *create_rx_sc(struct net_device *dev, sci_t sci) +static struct macsec_rx_sc *create_rx_sc(struct net_device *dev, sci_t sci, + bool active) { struct macsec_rx_sc *rx_sc; struct macsec_dev *macsec; @@ -1437,7 +1438,7 @@ static struct macsec_rx_sc *create_rx_sc(struct net_device *dev, sci_t sci) } rx_sc->sci = sci; - rx_sc->active = true; + rx_sc->active = active; refcount_set(&rx_sc->refcnt, 1); secy = &macsec_priv(dev)->secy; @@ -1838,6 +1839,7 @@ static int macsec_add_rxsa(struct sk_buff *skb, struct genl_info *info) secy->key_len); err = macsec_offload(ops->mdo_add_rxsa, &ctx); + memzero_explicit(ctx.sa.key, secy->key_len); if (err) goto cleanup; } @@ -1876,7 +1878,7 @@ static int macsec_add_rxsc(struct sk_buff *skb, struct genl_info *info) struct macsec_rx_sc *rx_sc; struct nlattr *tb_rxsc[MACSEC_RXSC_ATTR_MAX + 1]; struct macsec_secy *secy; - bool was_active; + bool active = true; int ret; if (!attrs[MACSEC_ATTR_IFINDEX]) @@ -1898,16 +1900,15 @@ static int macsec_add_rxsc(struct sk_buff *skb, struct genl_info *info) secy = 
&macsec_priv(dev)->secy; sci = nla_get_sci(tb_rxsc[MACSEC_RXSC_ATTR_SCI]); - rx_sc = create_rx_sc(dev, sci); + if (tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]) + active = nla_get_u8(tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]); + + rx_sc = create_rx_sc(dev, sci, active); if (IS_ERR(rx_sc)) { rtnl_unlock(); return PTR_ERR(rx_sc); } - was_active = rx_sc->active; - if (tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]) - rx_sc->active = !!nla_get_u8(tb_rxsc[MACSEC_RXSC_ATTR_ACTIVE]); - if (macsec_is_offloaded(netdev_priv(dev))) { const struct macsec_ops *ops; struct macsec_context ctx; @@ -1931,7 +1932,8 @@ static int macsec_add_rxsc(struct sk_buff *skb, struct genl_info *info) return 0; cleanup: - rx_sc->active = was_active; + del_rx_sc(secy, sci); + free_rx_sc(rx_sc); rtnl_unlock(); return ret; } @@ -2080,6 +2082,7 @@ static int macsec_add_txsa(struct sk_buff *skb, struct genl_info *info) secy->key_len); err = macsec_offload(ops->mdo_add_txsa, &ctx); + memzero_explicit(ctx.sa.key, secy->key_len); if (err) goto cleanup; } @@ -2570,7 +2573,7 @@ static bool macsec_is_configured(struct macsec_dev *macsec) struct macsec_tx_sc *tx_sc = &secy->tx_sc; int i; - if (secy->n_rx_sc > 0) + if (secy->rx_sc) return true; for (i = 0; i < MACSEC_NUM_AN; i++) @@ -2654,11 +2657,6 @@ static int macsec_upd_offload(struct sk_buff *skb, struct genl_info *info) if (ret) goto rollback; - /* Force features update, since they are different for SW MACSec and - * HW offloading cases. - */ - netdev_update_features(dev); - rtnl_unlock(); return 0; @@ -3432,16 +3430,9 @@ static netdev_tx_t macsec_start_xmit(struct sk_buff *skb, return ret; } -#define SW_MACSEC_FEATURES \ +#define MACSEC_FEATURES \ (NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST) -/* If h/w offloading is enabled, use real device features save for - * VLAN_FEATURES - they require additional ops - * HW_MACSEC - no reason to report it - */ -#define REAL_DEV_FEATURES(dev) \ - ((dev)->features & ~(NETIF_F_VLAN_FEATURES | NETIF_F_HW_MACSEC)) - static int macsec_dev_init(struct net_device *dev) { struct macsec_dev *macsec = macsec_priv(dev); @@ -3458,12 +3449,8 @@ static int macsec_dev_init(struct net_device *dev) return err; } - if (macsec_is_offloaded(macsec)) { - dev->features = REAL_DEV_FEATURES(real_dev); - } else { - dev->features = real_dev->features & SW_MACSEC_FEATURES; - dev->features |= NETIF_F_LLTX | NETIF_F_GSO_SOFTWARE; - } + dev->features = real_dev->features & MACSEC_FEATURES; + dev->features |= NETIF_F_LLTX | NETIF_F_GSO_SOFTWARE; dev->needed_headroom = real_dev->needed_headroom + MACSEC_NEEDED_HEADROOM; @@ -3495,10 +3482,7 @@ static netdev_features_t macsec_fix_features(struct net_device *dev, struct macsec_dev *macsec = macsec_priv(dev); struct net_device *real_dev = macsec->real_dev; - if (macsec_is_offloaded(macsec)) - return REAL_DEV_FEATURES(real_dev); - - features &= (real_dev->features & SW_MACSEC_FEATURES) | + features &= (real_dev->features & MACSEC_FEATURES) | NETIF_F_GSO_SOFTWARE | NETIF_F_SOFT_FEATURES; features |= NETIF_F_LLTX; diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index 8f8f73099de8..b8cc55b2d721 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c @@ -141,7 +141,7 @@ static struct macvlan_source_entry *macvlan_hash_lookup_source( u32 idx = macvlan_eth_hash(addr); struct hlist_head *h = &vlan->port->vlan_source_hash[idx]; - hlist_for_each_entry_rcu(entry, h, hlist) { + hlist_for_each_entry_rcu(entry, h, hlist, lockdep_rtnl_is_held()) { if (ether_addr_equal_64bits(entry->addr, addr) && entry->vlan == vlan) return entry; @@ -361,7 +361,7 @@ 
static void macvlan_broadcast_enqueue(struct macvlan_port *port, } spin_unlock(&port->bc_queue.lock); - schedule_work(&port->bc_work); + queue_work(system_unbound_wq, &port->bc_work); if (err) goto free_nskb; @@ -1533,8 +1533,10 @@ destroy_macvlan_port: /* the macvlan port may be freed by macvlan_uninit when fail to register. * so we destroy the macvlan port only when it's valid. */ - if (create && macvlan_port_get_rtnl(lowerdev)) + if (create && macvlan_port_get_rtnl(lowerdev)) { + macvlan_flush_sources(port, vlan); macvlan_port_destroy(port->dev); + } return err; } EXPORT_SYMBOL_GPL(macvlan_common_newlink); @@ -1645,7 +1647,7 @@ static int macvlan_fill_info_macaddr(struct sk_buff *skb, struct hlist_head *h = &vlan->port->vlan_source_hash[i]; struct macvlan_source_entry *entry; - hlist_for_each_entry_rcu(entry, h, hlist) { + hlist_for_each_entry_rcu(entry, h, hlist, lockdep_rtnl_is_held()) { if (entry->vlan != vlan) continue; if (nla_put(skb, IFLA_MACVLAN_MACADDR, ETH_ALEN, entry->addr)) diff --git a/drivers/net/mctp/mctp-i2c.c b/drivers/net/mctp/mctp-i2c.c index 0762c735dd8a..1d67a3ca1fd1 100644 --- a/drivers/net/mctp/mctp-i2c.c +++ b/drivers/net/mctp/mctp-i2c.c @@ -43,6 +43,7 @@ enum { MCTP_I2C_FLOW_STATE_NEW = 0, MCTP_I2C_FLOW_STATE_ACTIVE, + MCTP_I2C_FLOW_STATE_INVALID, }; /* List of all struct mctp_i2c_client @@ -374,12 +375,18 @@ mctp_i2c_get_tx_flow_state(struct mctp_i2c_dev *midev, struct sk_buff *skb) */ if (!key->valid) { state = MCTP_I2C_TX_FLOW_INVALID; - - } else if (key->dev_flow_state == MCTP_I2C_FLOW_STATE_NEW) { - key->dev_flow_state = MCTP_I2C_FLOW_STATE_ACTIVE; - state = MCTP_I2C_TX_FLOW_NEW; } else { - state = MCTP_I2C_TX_FLOW_EXISTING; + switch (key->dev_flow_state) { + case MCTP_I2C_FLOW_STATE_NEW: + key->dev_flow_state = MCTP_I2C_FLOW_STATE_ACTIVE; + state = MCTP_I2C_TX_FLOW_NEW; + break; + case MCTP_I2C_FLOW_STATE_ACTIVE: + state = MCTP_I2C_TX_FLOW_EXISTING; + break; + default: + state = MCTP_I2C_TX_FLOW_INVALID; + } } spin_unlock_irqrestore(&key->lock, flags); @@ -617,21 +624,31 @@ static void mctp_i2c_release_flow(struct mctp_dev *mdev, { struct mctp_i2c_dev *midev = netdev_priv(mdev->dev); + bool queue_release = false; unsigned long flags; spin_lock_irqsave(&midev->lock, flags); - midev->release_count++; - spin_unlock_irqrestore(&midev->lock, flags); - - /* Ensure we have a release operation queued, through the fake - * marker skb + /* if we have seen the flow/key previously, we need to pair the + * original lock with a release */ - spin_lock(&midev->tx_queue.lock); - if (!midev->unlock_marker.next) - __skb_queue_tail(&midev->tx_queue, &midev->unlock_marker); - spin_unlock(&midev->tx_queue.lock); + if (key->dev_flow_state == MCTP_I2C_FLOW_STATE_ACTIVE) { + midev->release_count++; + queue_release = true; + } + key->dev_flow_state = MCTP_I2C_FLOW_STATE_INVALID; + spin_unlock_irqrestore(&midev->lock, flags); - wake_up(&midev->tx_wq); + if (queue_release) { + /* Ensure we have a release operation queued, through the fake + * marker skb + */ + spin_lock(&midev->tx_queue.lock); + if (!midev->unlock_marker.next) + __skb_queue_tail(&midev->tx_queue, + &midev->unlock_marker); + spin_unlock(&midev->tx_queue.lock); + wake_up(&midev->tx_wq); + } } static const struct net_device_ops mctp_i2c_ops = { diff --git a/drivers/net/mhi_net.c b/drivers/net/mhi_net.c index 0b1b6f650104..0b9d37979133 100644 --- a/drivers/net/mhi_net.c +++ b/drivers/net/mhi_net.c @@ -343,6 +343,8 @@ static void mhi_net_dellink(struct mhi_device *mhi_dev, struct net_device *ndev) 
kfree_skb(mhi_netdev->skbagg_head); + free_netdev(ndev); + dev_set_drvdata(&mhi_dev->dev, NULL); } diff --git a/drivers/net/netdevsim/bus.c b/drivers/net/netdevsim/bus.c index b5f4df1a07a3..0052968e881e 100644 --- a/drivers/net/netdevsim/bus.c +++ b/drivers/net/netdevsim/bus.c @@ -117,6 +117,10 @@ static const struct attribute_group *nsim_bus_dev_attr_groups[] = { static void nsim_bus_dev_release(struct device *dev) { + struct nsim_bus_dev *nsim_bus_dev; + + nsim_bus_dev = container_of(dev, struct nsim_bus_dev, dev); + kfree(nsim_bus_dev); } static struct device_type nsim_bus_dev_type = { @@ -291,6 +295,8 @@ nsim_bus_dev_new(unsigned int id, unsigned int port_count, unsigned int num_queu err_nsim_bus_dev_id_free: ida_free(&nsim_bus_dev_ids, nsim_bus_dev->dev.id); + put_device(&nsim_bus_dev->dev); + nsim_bus_dev = NULL; err_nsim_bus_dev_free: kfree(nsim_bus_dev); return ERR_PTR(err); @@ -300,9 +306,8 @@ static void nsim_bus_dev_del(struct nsim_bus_dev *nsim_bus_dev) { /* Disallow using nsim_bus_dev */ smp_store_release(&nsim_bus_dev->init, false); - device_unregister(&nsim_bus_dev->dev); ida_free(&nsim_bus_dev_ids, nsim_bus_dev->dev.id); - kfree(nsim_bus_dev); + device_unregister(&nsim_bus_dev->dev); } static struct device_driver nsim_driver = { diff --git a/drivers/net/netdevsim/dev.c b/drivers/net/netdevsim/dev.c index 794fc0cc73b8..68e56e451b2b 100644 --- a/drivers/net/netdevsim/dev.c +++ b/drivers/net/netdevsim/dev.c @@ -309,8 +309,10 @@ static int nsim_dev_debugfs_init(struct nsim_dev *nsim_dev) if (IS_ERR(nsim_dev->ddir)) return PTR_ERR(nsim_dev->ddir); nsim_dev->ports_ddir = debugfs_create_dir("ports", nsim_dev->ddir); - if (IS_ERR(nsim_dev->ports_ddir)) - return PTR_ERR(nsim_dev->ports_ddir); + if (IS_ERR(nsim_dev->ports_ddir)) { + err = PTR_ERR(nsim_dev->ports_ddir); + goto err_ddir; + } debugfs_create_bool("fw_update_status", 0600, nsim_dev->ddir, &nsim_dev->fw_update_status); debugfs_create_u32("fw_update_overwrite_mask", 0600, nsim_dev->ddir, @@ -346,7 +348,7 @@ static int nsim_dev_debugfs_init(struct nsim_dev *nsim_dev) nsim_dev->nodes_ddir = debugfs_create_dir("rate_nodes", nsim_dev->ddir); if (IS_ERR(nsim_dev->nodes_ddir)) { err = PTR_ERR(nsim_dev->nodes_ddir); - goto err_out; + goto err_ports_ddir; } debugfs_create_bool("fail_trap_drop_counter_get", 0600, nsim_dev->ddir, @@ -354,8 +356,9 @@ static int nsim_dev_debugfs_init(struct nsim_dev *nsim_dev) nsim_udp_tunnels_debugfs_create(nsim_dev); return 0; -err_out: +err_ports_ddir: debugfs_remove_recursive(nsim_dev->ports_ddir); +err_ddir: debugfs_remove_recursive(nsim_dev->ddir); return err; } @@ -442,7 +445,7 @@ static int nsim_dev_resources_register(struct devlink *devlink) &params); if (err) { pr_err("Failed to register IPv4 top resource\n"); - goto out; + goto err_out; } err = devl_resource_register(devlink, "fib", (u64)-1, @@ -450,7 +453,7 @@ static int nsim_dev_resources_register(struct devlink *devlink) NSIM_RESOURCE_IPV4, &params); if (err) { pr_err("Failed to register IPv4 FIB resource\n"); - return err; + goto err_out; } err = devl_resource_register(devlink, "fib-rules", (u64)-1, @@ -458,7 +461,7 @@ static int nsim_dev_resources_register(struct devlink *devlink) NSIM_RESOURCE_IPV4, &params); if (err) { pr_err("Failed to register IPv4 FIB rules resource\n"); - return err; + goto err_out; } /* Resources for IPv6 */ @@ -468,7 +471,7 @@ static int nsim_dev_resources_register(struct devlink *devlink) &params); if (err) { pr_err("Failed to register IPv6 top resource\n"); - goto out; + goto err_out; } err = devl_resource_register(devlink, 
"fib", (u64)-1, @@ -476,7 +479,7 @@ static int nsim_dev_resources_register(struct devlink *devlink) NSIM_RESOURCE_IPV6, &params); if (err) { pr_err("Failed to register IPv6 FIB resource\n"); - return err; + goto err_out; } err = devl_resource_register(devlink, "fib-rules", (u64)-1, @@ -484,7 +487,7 @@ static int nsim_dev_resources_register(struct devlink *devlink) NSIM_RESOURCE_IPV6, &params); if (err) { pr_err("Failed to register IPv6 FIB rules resource\n"); - return err; + goto err_out; } /* Resources for nexthops */ @@ -492,8 +495,14 @@ static int nsim_dev_resources_register(struct devlink *devlink) NSIM_RESOURCE_NEXTHOPS, DEVLINK_RESOURCE_ID_PARENT_TOP, &params); + if (err) { + pr_err("Failed to register NEXTHOPS resource\n"); + goto err_out; + } + return 0; -out: +err_out: + devl_resources_unregister(devlink); return err; } @@ -1674,6 +1683,7 @@ void nsim_drv_remove(struct nsim_bus_dev *nsim_bus_dev) ARRAY_SIZE(nsim_devlink_params)); devl_resources_unregister(devlink); kfree(nsim_dev->vfconfigs); + kfree(nsim_dev->fa_cookie); devl_unlock(devlink); devlink_free(devlink); dev_set_drvdata(&nsim_bus_dev->dev, NULL); } diff --git a/drivers/net/phy/dp83822.c b/drivers/net/phy/dp83822.c index 8549e0e356c9..b60db8b6f477 100644 --- a/drivers/net/phy/dp83822.c +++ b/drivers/net/phy/dp83822.c @@ -254,8 +254,7 @@ static int dp83822_config_intr(struct phy_device *phydev) DP83822_EEE_ERROR_CHANGE_INT_EN); if (!dp83822->fx_enabled) - misr_status |= DP83822_MDI_XOVER_INT_EN | - DP83822_ANEG_ERR_INT_EN | + misr_status |= DP83822_ANEG_ERR_INT_EN | DP83822_WOL_PKT_INT_EN; err = phy_write(phydev, MII_DP83822_MISR2, misr_status); diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c index 6939563d3b7c..7446d5c6c714 100644 --- a/drivers/net/phy/dp83867.c +++ b/drivers/net/phy/dp83867.c @@ -682,6 +682,13 @@ static int dp83867_of_init(struct phy_device *phydev) */ dp83867->io_impedance = DP83867_IO_MUX_CFG_IO_IMPEDANCE_MIN / 2; + /* For non-OF device, the RX and TX FIFO depths are taken from + * default value. So, we init RX & TX FIFO depths here + * so that it is configured correctly later in dp83867_config_init(); + */ + dp83867->tx_fifo_depth = DP83867_PHYCR_FIFO_DEPTH_4_B_NIB; + dp83867->rx_fifo_depth = DP83867_PHYCR_FIFO_DEPTH_4_B_NIB; + return 0; } #endif /* CONFIG_OF_MDIO */ @@ -853,6 +860,14 @@ static int dp83867_config_init(struct phy_device *phydev) else val &= ~DP83867_SGMII_TYPE; phy_write_mmd(phydev, DP83867_DEVADDR, DP83867_SGMIICTL, val); + + /* This is a SW workaround for link instability if RX_CTRL is + * not strapped to mode 3 or 4 in HW. This is required for SGMII + * in addition to clearing bit 7, handled above. + */ + if (dp83867->rxctrl_strap_quirk) + phy_set_bits_mmd(phydev, DP83867_DEVADDR, DP83867_CFG4, + BIT(8)); } val = phy_read(phydev, DP83867_CFG3); diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c index 2810f4f9da0c..0d706ee266af 100644 --- a/drivers/net/phy/marvell.c +++ b/drivers/net/phy/marvell.c @@ -2015,14 +2015,16 @@ static int m88e1510_loopback(struct phy_device *phydev, bool enable) if (err < 0) return err; - /* FIXME: Based on trial and error test, it seem 1G need to have - * delay between soft reset and loopback enablement. - */ - if (phydev->speed == SPEED_1000) - msleep(1000); + err = phy_modify(phydev, MII_BMCR, BMCR_LOOPBACK, + BMCR_LOOPBACK); - return phy_modify(phydev, MII_BMCR, BMCR_LOOPBACK, - BMCR_LOOPBACK); + if (!err) { + /* It takes some time for PHY device to switch + * into/out-of loopback mode. 
+ */ + msleep(1000); + } + return err; } else { err = phy_modify(phydev, MII_BMCR, BMCR_LOOPBACK, 0); if (err < 0) diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c index f82090bdf7ab..1cd604cd1fa1 100644 --- a/drivers/net/phy/mdio_bus.c +++ b/drivers/net/phy/mdio_bus.c @@ -583,7 +583,7 @@ int __mdiobus_register(struct mii_bus *bus, struct module *owner) } for (i = 0; i < PHY_MAX_ADDR; i++) { - if ((bus->phy_mask & (1 << i)) == 0) { + if ((bus->phy_mask & BIT(i)) == 0) { struct phy_device *phydev; phydev = mdiobus_scan(bus, i); diff --git a/drivers/net/phy/mscc/mscc_macsec.c b/drivers/net/phy/mscc/mscc_macsec.c index ee5b17edca39..f81b077618f4 100644 --- a/drivers/net/phy/mscc/mscc_macsec.c +++ b/drivers/net/phy/mscc/mscc_macsec.c @@ -632,6 +632,7 @@ static void vsc8584_macsec_free_flow(struct vsc8531_private *priv, list_del(&flow->list); clear_bit(flow->index, bitmap); + memzero_explicit(flow->key, sizeof(flow->key)); kfree(flow); } diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c index 75464df191ef..6547b6cc6cbe 100644 --- a/drivers/net/phy/phylink.c +++ b/drivers/net/phy/phylink.c @@ -1661,6 +1661,9 @@ static int phylink_bringup_phy(struct phylink *pl, struct phy_device *phy, if (phy_interrupt_is_valid(phy)) phy_request_interrupt(phy); + if (pl->config->mac_managed_pm) + phy->mac_managed_pm = true; + return 0; } diff --git a/drivers/net/thunderbolt.c b/drivers/net/thunderbolt.c index 83fcaeb2ac5e..a52ee2bf5575 100644 --- a/drivers/net/thunderbolt.c +++ b/drivers/net/thunderbolt.c @@ -1391,12 +1391,21 @@ static int __init tbnet_init(void) tb_property_add_immediate(tbnet_dir, "prtcstns", flags); ret = tb_register_property_dir("network", tbnet_dir); - if (ret) { - tb_property_free_dir(tbnet_dir); - return ret; - } + if (ret) + goto err_free_dir; + + ret = tb_register_service_driver(&tbnet_driver); + if (ret) + goto err_unregister; - return tb_register_service_driver(&tbnet_driver); + return 0; + +err_unregister: + tb_unregister_property_dir("network", tbnet_dir); +err_free_dir: + tb_property_free_dir(tbnet_dir); + + return ret; } module_init(tbnet_init); diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 27c6d235cbda..7a3ab3427369 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -1459,7 +1459,8 @@ static struct sk_buff *tun_napi_alloc_frags(struct tun_file *tfile, int err; int i; - if (it->nr_segs > MAX_SKB_FRAGS + 1) + if (it->nr_segs > MAX_SKB_FRAGS + 1 || + len > (ETH_MAX_MTU - NET_SKB_PAD - NET_IP_ALIGN)) return ERR_PTR(-EMSGSIZE); local_bh_disable(); @@ -1966,17 +1967,25 @@ drop: skb_headlen(skb)); if (unlikely(headlen > skb_headlen(skb))) { + WARN_ON_ONCE(1); + err = -ENOMEM; dev_core_stats_rx_dropped_inc(tun->dev); +napi_busy: napi_free_frags(&tfile->napi); rcu_read_unlock(); mutex_unlock(&tfile->napi_mutex); - WARN_ON(1); - return -ENOMEM; + return err; } - local_bh_disable(); - napi_gro_frags(&tfile->napi); - local_bh_enable(); + if (likely(napi_schedule_prep(&tfile->napi))) { + local_bh_disable(); + napi_gro_frags(&tfile->napi); + napi_complete(&tfile->napi); + local_bh_enable(); + } else { + err = -EBUSY; + goto napi_busy; + } mutex_unlock(&tfile->napi_mutex); } else if (tfile->napi_enabled) { struct sk_buff_head *queue = &tfile->sk.sk_write_queue; diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index 26c34a7c21bd..afd6faa4c2ec 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -1357,6 +1357,7 @@ static const struct usb_device_id products[] = { {QMI_FIXED_INTF(0x2357, 0x0201, 4)}, 
/* TP-LINK HSUPA Modem MA180 */ {QMI_FIXED_INTF(0x2357, 0x9000, 4)}, /* TP-LINK MA260 */ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1031, 3)}, /* Telit LE910C1-EUX */ + {QMI_QUIRK_SET_DTR(0x1bc7, 0x103a, 0)}, /* Telit LE910C4-WWX */ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1040, 2)}, /* Telit LE922A */ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1050, 2)}, /* Telit FN980 */ {QMI_QUIRK_SET_DTR(0x1bc7, 0x1057, 2)}, /* Telit FN980 */ diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c index bfb58c91db04..32d2c60d334d 100644 --- a/drivers/net/usb/smsc95xx.c +++ b/drivers/net/usb/smsc95xx.c @@ -66,6 +66,7 @@ struct smsc95xx_priv { spinlock_t mac_cr_lock; u8 features; u8 suspend_flags; + bool is_internal_phy; struct irq_chip irqchip; struct irq_domain *irqdomain; struct fwnode_handle *irqfwnode; @@ -252,6 +253,43 @@ done: mutex_unlock(&dev->phy_mutex); } +static int smsc95xx_mdiobus_reset(struct mii_bus *bus) +{ + struct smsc95xx_priv *pdata; + struct usbnet *dev; + u32 val; + int ret; + + dev = bus->priv; + pdata = dev->driver_priv; + + if (pdata->is_internal_phy) + return 0; + + mutex_lock(&dev->phy_mutex); + + ret = smsc95xx_read_reg(dev, PM_CTRL, &val); + if (ret < 0) + goto reset_out; + + val |= PM_CTL_PHY_RST_; + + ret = smsc95xx_write_reg(dev, PM_CTRL, val); + if (ret < 0) + goto reset_out; + + /* Driver has no knowledge at this point about the external PHY. + * The 802.3 specifies that the reset process shall + * be completed within 0.5 s. + */ + fsleep(500000); + +reset_out: + mutex_unlock(&dev->phy_mutex); + + return 0; +} + static int smsc95xx_mdiobus_read(struct mii_bus *bus, int phy_id, int idx) { struct usbnet *dev = bus->priv; @@ -1052,7 +1090,6 @@ static void smsc95xx_handle_link_change(struct net_device *net) static int smsc95xx_bind(struct usbnet *dev, struct usb_interface *intf) { struct smsc95xx_priv *pdata; - bool is_internal_phy; char usb_path[64]; int ret, phy_irq; u32 val; @@ -1133,13 +1170,14 @@ static int smsc95xx_bind(struct usbnet *dev, struct usb_interface *intf) if (ret < 0) goto free_mdio; - is_internal_phy = !(val & HW_CFG_PSEL_); - if (is_internal_phy) + pdata->is_internal_phy = !(val & HW_CFG_PSEL_); + if (pdata->is_internal_phy) pdata->mdiobus->phy_mask = ~(1u << SMSC95XX_INTERNAL_PHY_ID); pdata->mdiobus->priv = dev; pdata->mdiobus->read = smsc95xx_mdiobus_read; pdata->mdiobus->write = smsc95xx_mdiobus_write; + pdata->mdiobus->reset = smsc95xx_mdiobus_reset; pdata->mdiobus->name = "smsc95xx-mdiobus"; pdata->mdiobus->parent = &dev->udev->dev; @@ -1160,7 +1198,7 @@ static int smsc95xx_bind(struct usbnet *dev, struct usb_interface *intf) } pdata->phydev->irq = phy_irq; - pdata->phydev->is_internal = is_internal_phy; + pdata->phydev->is_internal = pdata->is_internal_phy; /* detect device revision as different features may be available */ ret = smsc95xx_read_reg(dev, ID_REV, &val); diff --git a/drivers/net/wan/lapbether.c b/drivers/net/wan/lapbether.c index 960f1393595c..d62a904d2e42 100644 --- a/drivers/net/wan/lapbether.c +++ b/drivers/net/wan/lapbether.c @@ -325,6 +325,7 @@ static int lapbeth_open(struct net_device *dev) err = lapb_register(dev, &lapbeth_callbacks); if (err != LAPB_OK) { + napi_disable(&lapbeth->napi); pr_err("lapb_register error: %d\n", err); return -ENODEV; } @@ -446,7 +447,7 @@ static int lapbeth_device_event(struct notifier_block *this, if (dev_net(dev) != &init_net) return NOTIFY_DONE; - if (!dev_is_ethdev(dev)) + if (!dev_is_ethdev(dev) && !lapbeth_get_x25_dev(dev)) return NOTIFY_DONE; switch (event) { diff --git a/drivers/net/wireless/ath/ath11k/qmi.h 
b/drivers/net/wireless/ath/ath11k/qmi.h index 2ec56a34fa81..0909d53cefeb 100644 --- a/drivers/net/wireless/ath/ath11k/qmi.h +++ b/drivers/net/wireless/ath/ath11k/qmi.h @@ -27,7 +27,7 @@ #define ATH11K_QMI_WLANFW_MAX_NUM_MEM_SEG_V01 52 #define ATH11K_QMI_CALDB_SIZE 0x480000 #define ATH11K_QMI_BDF_EXT_STR_LENGTH 0x20 -#define ATH11K_QMI_FW_MEM_REQ_SEGMENT_CNT 3 +#define ATH11K_QMI_FW_MEM_REQ_SEGMENT_CNT 5 #define QMI_WLFW_REQUEST_MEM_IND_V01 0x0035 #define QMI_WLFW_FW_MEM_READY_IND_V01 0x0037 diff --git a/drivers/net/wireless/ath/ath11k/reg.c b/drivers/net/wireless/ath/ath11k/reg.c index 7ee3ff69dfc8..6fae4e61ede7 100644 --- a/drivers/net/wireless/ath/ath11k/reg.c +++ b/drivers/net/wireless/ath/ath11k/reg.c @@ -287,11 +287,7 @@ int ath11k_regd_update(struct ath11k *ar) goto err; } - rtnl_lock(); - wiphy_lock(ar->hw->wiphy); - ret = regulatory_set_wiphy_regd_sync(ar->hw->wiphy, regd_copy); - wiphy_unlock(ar->hw->wiphy); - rtnl_unlock(); + ret = regulatory_set_wiphy_regd(ar->hw->wiphy, regd_copy); kfree(regd_copy); diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c index bc3f4e4edcdf..dac7eb77799b 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/fweh.c @@ -228,6 +228,10 @@ static void brcmf_fweh_event_worker(struct work_struct *work) brcmf_fweh_event_name(event->code), event->code, event->emsg.ifidx, event->emsg.bsscfgidx, event->emsg.addr); + if (event->emsg.bsscfgidx >= BRCMF_MAX_IFS) { + bphy_err(drvr, "invalid bsscfg index: %u\n", event->emsg.bsscfgidx); + goto event_free; + } /* convert event message */ emsg_be = &event->emsg; diff --git a/drivers/net/wireless/cisco/airo.c b/drivers/net/wireless/cisco/airo.c index 10daef81c355..fb2c35bd73bb 100644 --- a/drivers/net/wireless/cisco/airo.c +++ b/drivers/net/wireless/cisco/airo.c @@ -5232,7 +5232,7 @@ static int get_wep_tx_idx(struct airo_info *ai) return -1; } -static int set_wep_key(struct airo_info *ai, u16 index, const char *key, +static int set_wep_key(struct airo_info *ai, u16 index, const u8 *key, u16 keylen, int perm, int lock) { static const unsigned char macaddr[ETH_ALEN] = { 0x01, 0, 0, 0, 0, 0 }; @@ -5283,7 +5283,7 @@ static void proc_wepkey_on_close(struct inode *inode, struct file *file) struct net_device *dev = pde_data(inode); struct airo_info *ai = dev->ml_priv; int i, rc; - char key[16]; + u8 key[16]; u16 index = 0; int j = 0; @@ -5311,12 +5311,22 @@ static void proc_wepkey_on_close(struct inode *inode, struct file *file) } for (i = 0; i < 16*3 && data->wbuffer[i+j]; i++) { + int val; + + if (i % 3 == 2) + continue; + + val = hex_to_bin(data->wbuffer[i+j]); + if (val < 0) { + airo_print_err(ai->dev->name, "WebKey passed invalid key hex"); + return; + } switch(i%3) { case 0: - key[i/3] = hex_to_bin(data->wbuffer[i+j])<<4; + key[i/3] = (u8)val << 4; break; case 1: - key[i/3] |= hex_to_bin(data->wbuffer[i+j]); + key[i/3] |= (u8)val; break; } } diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c index a40636c90ec3..0d81098c7b45 100644 --- a/drivers/net/wireless/mac80211_hwsim.c +++ b/drivers/net/wireless/mac80211_hwsim.c @@ -910,6 +910,7 @@ static void hwsim_send_nullfunc(struct mac80211_hwsim_data *data, u8 *mac, struct hwsim_vif_priv *vp = (void *)vif->drv_priv; struct sk_buff *skb; struct ieee80211_hdr *hdr; + struct ieee80211_tx_info *cb; if (!vp->assoc) return; @@ -931,6 +932,10 @@ static void hwsim_send_nullfunc(struct 
mac80211_hwsim_data *data, u8 *mac, memcpy(hdr->addr2, mac, ETH_ALEN); memcpy(hdr->addr3, vp->bssid, ETH_ALEN); + cb = IEEE80211_SKB_CB(skb); + cb->control.rates[0].count = 1; + cb->control.rates[1].idx = -1; + rcu_read_lock(); mac80211_hwsim_tx_frame(data->hw, skb, rcu_dereference(vif->bss_conf.chanctx_conf)->def.chan); diff --git a/drivers/net/wireless/ralink/rt2x00/rt2400pci.c b/drivers/net/wireless/ralink/rt2x00/rt2400pci.c index 273c5eac3362..ddfc16de1b26 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2400pci.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2400pci.c @@ -1023,9 +1023,9 @@ static int rt2400pci_set_state(struct rt2x00_dev *rt2x00dev, { u32 reg, reg2; unsigned int i; - char put_to_sleep; - char bbp_state; - char rf_state; + bool put_to_sleep; + u8 bbp_state; + u8 rf_state; put_to_sleep = (state != STATE_AWAKE); @@ -1561,7 +1561,7 @@ static int rt2400pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev) { struct hw_mode_spec *spec = &rt2x00dev->spec; struct channel_info *info; - char *tx_power; + u8 *tx_power; unsigned int i; /* diff --git a/drivers/net/wireless/ralink/rt2x00/rt2400pci.h b/drivers/net/wireless/ralink/rt2x00/rt2400pci.h index b8187b6de143..979d5fd8babf 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2400pci.h +++ b/drivers/net/wireless/ralink/rt2x00/rt2400pci.h @@ -939,7 +939,7 @@ #define DEFAULT_TXPOWER 39 #define __CLAMP_TX(__txpower) \ - clamp_t(char, (__txpower), MIN_TXPOWER, MAX_TXPOWER) + clamp_t(u8, (__txpower), MIN_TXPOWER, MAX_TXPOWER) #define TXPOWER_FROM_DEV(__txpower) \ ((__CLAMP_TX(__txpower) - MAX_TXPOWER) + MIN_TXPOWER) diff --git a/drivers/net/wireless/ralink/rt2x00/rt2500pci.c b/drivers/net/wireless/ralink/rt2x00/rt2500pci.c index 8faa0a80e73a..cd6371e25062 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2500pci.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2500pci.c @@ -1176,9 +1176,9 @@ static int rt2500pci_set_state(struct rt2x00_dev *rt2x00dev, { u32 reg, reg2; unsigned int i; - char put_to_sleep; - char bbp_state; - char rf_state; + bool put_to_sleep; + u8 bbp_state; + u8 rf_state; put_to_sleep = (state != STATE_AWAKE); @@ -1856,7 +1856,7 @@ static int rt2500pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev) { struct hw_mode_spec *spec = &rt2x00dev->spec; struct channel_info *info; - char *tx_power; + u8 *tx_power; unsigned int i; /* diff --git a/drivers/net/wireless/ralink/rt2x00/rt2500pci.h b/drivers/net/wireless/ralink/rt2x00/rt2500pci.h index 7e64aee2a172..ba362675c52c 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2500pci.h +++ b/drivers/net/wireless/ralink/rt2x00/rt2500pci.h @@ -1219,6 +1219,6 @@ (((u8)(__txpower)) > MAX_TXPOWER) ? 
DEFAULT_TXPOWER : (__txpower) #define TXPOWER_TO_DEV(__txpower) \ - clamp_t(char, __txpower, MIN_TXPOWER, MAX_TXPOWER) + clamp_t(u8, __txpower, MIN_TXPOWER, MAX_TXPOWER) #endif /* RT2500PCI_H */ diff --git a/drivers/net/wireless/ralink/rt2x00/rt2500usb.c b/drivers/net/wireless/ralink/rt2x00/rt2500usb.c index bb5ed6630645..4f3b0e6c6256 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2500usb.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2500usb.c @@ -984,9 +984,9 @@ static int rt2500usb_set_state(struct rt2x00_dev *rt2x00dev, u16 reg; u16 reg2; unsigned int i; - char put_to_sleep; - char bbp_state; - char rf_state; + bool put_to_sleep; + u8 bbp_state; + u8 rf_state; put_to_sleep = (state != STATE_AWAKE); @@ -1663,7 +1663,7 @@ static int rt2500usb_probe_hw_mode(struct rt2x00_dev *rt2x00dev) { struct hw_mode_spec *spec = &rt2x00dev->spec; struct channel_info *info; - char *tx_power; + u8 *tx_power; unsigned int i; /* diff --git a/drivers/net/wireless/ralink/rt2x00/rt2500usb.h b/drivers/net/wireless/ralink/rt2x00/rt2500usb.h index 0c070288a140..746f0e950b76 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2500usb.h +++ b/drivers/net/wireless/ralink/rt2x00/rt2500usb.h @@ -839,6 +839,6 @@ (((u8)(__txpower)) > MAX_TXPOWER) ? DEFAULT_TXPOWER : (__txpower) #define TXPOWER_TO_DEV(__txpower) \ - clamp_t(char, __txpower, MIN_TXPOWER, MAX_TXPOWER) + clamp_t(u8, __txpower, MIN_TXPOWER, MAX_TXPOWER) #endif /* RT2500USB_H */ diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c index cbbb1a4849cf..12b700c7b9c3 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2800lib.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2800lib.c @@ -3372,10 +3372,10 @@ static void rt2800_config_channel_rf53xx(struct rt2x00_dev *rt2x00dev, if (rt2x00_has_cap_bt_coexist(rt2x00dev)) { if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F)) { /* r55/r59 value array of channel 1~14 */ - static const char r55_bt_rev[] = {0x83, 0x83, + static const u8 r55_bt_rev[] = {0x83, 0x83, 0x83, 0x73, 0x73, 0x63, 0x53, 0x53, 0x53, 0x43, 0x43, 0x43, 0x43, 0x43}; - static const char r59_bt_rev[] = {0x0e, 0x0e, + static const u8 r59_bt_rev[] = {0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0b, 0x0a, 0x09, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07}; @@ -3384,7 +3384,7 @@ static void rt2800_config_channel_rf53xx(struct rt2x00_dev *rt2x00dev, rt2800_rfcsr_write(rt2x00dev, 59, r59_bt_rev[idx]); } else { - static const char r59_bt[] = {0x8b, 0x8b, 0x8b, + static const u8 r59_bt[] = {0x8b, 0x8b, 0x8b, 0x8b, 0x8b, 0x8b, 0x8b, 0x8a, 0x89, 0x88, 0x88, 0x86, 0x85, 0x84}; @@ -3392,10 +3392,10 @@ static void rt2800_config_channel_rf53xx(struct rt2x00_dev *rt2x00dev, } } else { if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F)) { - static const char r55_nonbt_rev[] = {0x23, 0x23, + static const u8 r55_nonbt_rev[] = {0x23, 0x23, 0x23, 0x23, 0x13, 0x13, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03}; - static const char r59_nonbt_rev[] = {0x07, 0x07, + static const u8 r59_nonbt_rev[] = {0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x06, 0x05, 0x04, 0x04}; @@ -3406,14 +3406,14 @@ static void rt2800_config_channel_rf53xx(struct rt2x00_dev *rt2x00dev, } else if (rt2x00_rt(rt2x00dev, RT5390) || rt2x00_rt(rt2x00dev, RT5392) || rt2x00_rt(rt2x00dev, RT6352)) { - static const char r59_non_bt[] = {0x8f, 0x8f, + static const u8 r59_non_bt[] = {0x8f, 0x8f, 0x8f, 0x8f, 0x8f, 0x8f, 0x8f, 0x8d, 0x8a, 0x88, 0x88, 0x87, 0x87, 0x86}; rt2800_rfcsr_write(rt2x00dev, 59, r59_non_bt[idx]); } else if (rt2x00_rt(rt2x00dev, RT5350)) { - 
static const char r59_non_bt[] = {0x0b, 0x0b, + static const u8 r59_non_bt[] = {0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0a, 0x0a, 0x09, 0x08, 0x07, 0x07, 0x06}; @@ -4035,23 +4035,23 @@ static void rt2800_iq_calibrate(struct rt2x00_dev *rt2x00dev, int channel) rt2800_bbp_write(rt2x00dev, 159, cal != 0xff ? cal : 0); } -static char rt2800_txpower_to_dev(struct rt2x00_dev *rt2x00dev, +static s8 rt2800_txpower_to_dev(struct rt2x00_dev *rt2x00dev, unsigned int channel, - char txpower) + s8 txpower) { if (rt2x00_rt(rt2x00dev, RT3593) || rt2x00_rt(rt2x00dev, RT3883)) txpower = rt2x00_get_field8(txpower, EEPROM_TXPOWER_ALC); if (channel <= 14) - return clamp_t(char, txpower, MIN_G_TXPOWER, MAX_G_TXPOWER); + return clamp_t(s8, txpower, MIN_G_TXPOWER, MAX_G_TXPOWER); if (rt2x00_rt(rt2x00dev, RT3593) || rt2x00_rt(rt2x00dev, RT3883)) - return clamp_t(char, txpower, MIN_A_TXPOWER_3593, + return clamp_t(s8, txpower, MIN_A_TXPOWER_3593, MAX_A_TXPOWER_3593); else - return clamp_t(char, txpower, MIN_A_TXPOWER, MAX_A_TXPOWER); + return clamp_t(s8, txpower, MIN_A_TXPOWER, MAX_A_TXPOWER); } static void rt3883_bbp_adjust(struct rt2x00_dev *rt2x00dev, @@ -8530,7 +8530,7 @@ static void rt2800_r_calibration(struct rt2x00_dev *rt2x00dev) u8 bytevalue = 0; int rcalcode; u8 r_cal_code = 0; - char d1 = 0, d2 = 0; + s8 d1 = 0, d2 = 0; u8 rfvalue; u32 MAC_RF_BYPASS0, MAC_RF_CONTROL0, MAC_PWR_PIN_CFG; u32 maccfg; @@ -8591,7 +8591,7 @@ static void rt2800_r_calibration(struct rt2x00_dev *rt2x00dev) if (bytevalue > 128) d1 = bytevalue - 256; else - d1 = (char)bytevalue; + d1 = (s8)bytevalue; rt2800_bbp_write(rt2x00dev, 22, 0x0); rt2800_rfcsr_write_bank(rt2x00dev, 0, 35, 0x01); @@ -8601,7 +8601,7 @@ static void rt2800_r_calibration(struct rt2x00_dev *rt2x00dev) if (bytevalue > 128) d2 = bytevalue - 256; else - d2 = (char)bytevalue; + d2 = (s8)bytevalue; rt2800_bbp_write(rt2x00dev, 22, 0x0); rcalcode = rt2800_calcrcalibrationcode(rt2x00dev, d1, d2); @@ -8703,7 +8703,7 @@ static void rt2800_rxdcoc_calibration(struct rt2x00_dev *rt2x00dev) static u32 rt2800_do_sqrt_accumulation(u32 si) { u32 root, root_pre, bit; - char i; + s8 i; bit = 1 << 15; root = 0; @@ -9330,11 +9330,11 @@ static void rt2800_loft_search(struct rt2x00_dev *rt2x00dev, u8 ch_idx, u8 alc_idx, u8 dc_result[][RF_ALC_NUM][2]) { u32 p0 = 0, p1 = 0, pf = 0; - char idx0 = 0, idx1 = 0; + s8 idx0 = 0, idx1 = 0; u8 idxf[] = {0x00, 0x00}; u8 ibit = 0x20; u8 iorq; - char bidx; + s8 bidx; rt2800_bbp_write(rt2x00dev, 158, 0xb0); rt2800_bbp_write(rt2x00dev, 159, 0x80); @@ -9384,17 +9384,17 @@ static void rt2800_loft_search(struct rt2x00_dev *rt2x00dev, u8 ch_idx, static void rt2800_iq_search(struct rt2x00_dev *rt2x00dev, u8 ch_idx, u8 *ges, u8 *pes) { u32 p0 = 0, p1 = 0, pf = 0; - char perr = 0, gerr = 0, iq_err = 0; - char pef = 0, gef = 0; - char psta, pend; - char gsta, gend; + s8 perr = 0, gerr = 0, iq_err = 0; + s8 pef = 0, gef = 0; + s8 psta, pend; + s8 gsta, gend; u8 ibit = 0x20; u8 first_search = 0x00, touch_neg_max = 0x00; - char idx0 = 0, idx1 = 0; + s8 idx0 = 0, idx1 = 0; u8 gop; u8 bbp = 0; - char bidx; + s8 bidx; for (bidx = 5; bidx >= 1; bidx--) { for (gop = 0; gop < 2; gop++) { @@ -10043,11 +10043,11 @@ static int rt2800_rf_lp_config(struct rt2x00_dev *rt2x00dev, bool btxcal) return 0; } -static char rt2800_lp_tx_filter_bw_cal(struct rt2x00_dev *rt2x00dev) +static s8 rt2800_lp_tx_filter_bw_cal(struct rt2x00_dev *rt2x00dev) { unsigned int cnt; u8 bbp_val; - char cal_val; + s8 cal_val; rt2800_bbp_dcoc_write(rt2x00dev, 0, 0x82); @@ -10079,7 +10079,7 @@ 
static void rt2800_bw_filter_calibration(struct rt2x00_dev *rt2x00dev, u8 rx_filter_target_20m = 0x27, rx_filter_target_40m = 0x31; int loop = 0, is_ht40, cnt; u8 bbp_val, rf_val; - char cal_r32_init, cal_r32_val, cal_diff; + s8 cal_r32_init, cal_r32_val, cal_diff; u8 saverfb5r00, saverfb5r01, saverfb5r03, saverfb5r04, saverfb5r05; u8 saverfb5r06, saverfb5r07; u8 saverfb5r08, saverfb5r17, saverfb5r18, saverfb5r19, saverfb5r20; @@ -11550,9 +11550,9 @@ static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev) { struct hw_mode_spec *spec = &rt2x00dev->spec; struct channel_info *info; - char *default_power1; - char *default_power2; - char *default_power3; + s8 *default_power1; + s8 *default_power2; + s8 *default_power3; unsigned int i, tx_chains, rx_chains; u32 reg; diff --git a/drivers/net/wireless/ralink/rt2x00/rt2800lib.h b/drivers/net/wireless/ralink/rt2x00/rt2800lib.h index 3cbef77b4bd3..194de676df8f 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2800lib.h +++ b/drivers/net/wireless/ralink/rt2x00/rt2800lib.h @@ -32,10 +32,10 @@ struct rf_reg_pair { struct rt2800_drv_data { u8 calibration_bw20; u8 calibration_bw40; - char rx_calibration_bw20; - char rx_calibration_bw40; - char tx_calibration_bw20; - char tx_calibration_bw40; + s8 rx_calibration_bw20; + s8 rx_calibration_bw40; + s8 tx_calibration_bw20; + s8 tx_calibration_bw40; u8 bbp25; u8 bbp26; u8 txmixer_gain_24g; diff --git a/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c b/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c index 0827bc860bf8..8fd22c69855f 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c +++ b/drivers/net/wireless/ralink/rt2x00/rt2x00usb.c @@ -117,12 +117,12 @@ int rt2x00usb_vendor_request_buff(struct rt2x00_dev *rt2x00dev, const u16 buffer_length) { int status = 0; - unsigned char *tb; + u8 *tb; u16 off, len, bsize; mutex_lock(&rt2x00dev->csr_mutex); - tb = (char *)buffer; + tb = (u8 *)buffer; off = offset; len = buffer_length; while (len && !status) { @@ -215,7 +215,7 @@ void rt2x00usb_register_read_async(struct rt2x00_dev *rt2x00dev, rd->cr.wLength = cpu_to_le16(sizeof(u32)); usb_fill_control_urb(urb, usb_dev, usb_rcvctrlpipe(usb_dev, 0), - (unsigned char *)(&rd->cr), &rd->reg, sizeof(rd->reg), + (u8 *)(&rd->cr), &rd->reg, sizeof(rd->reg), rt2x00usb_register_read_async_cb, rd); usb_anchor_urb(urb, rt2x00dev->anchor); if (usb_submit_urb(urb, GFP_ATOMIC) < 0) { diff --git a/drivers/net/wireless/ralink/rt2x00/rt61pci.c b/drivers/net/wireless/ralink/rt2x00/rt61pci.c index d92f9eb07dc9..81db7f57c7e4 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt61pci.c +++ b/drivers/net/wireless/ralink/rt2x00/rt61pci.c @@ -1709,7 +1709,7 @@ static int rt61pci_set_state(struct rt2x00_dev *rt2x00dev, enum dev_state state) { u32 reg, reg2; unsigned int i; - char put_to_sleep; + bool put_to_sleep; put_to_sleep = (state != STATE_AWAKE); @@ -2656,7 +2656,7 @@ static int rt61pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev) { struct hw_mode_spec *spec = &rt2x00dev->spec; struct channel_info *info; - char *tx_power; + u8 *tx_power; unsigned int i; /* diff --git a/drivers/net/wireless/ralink/rt2x00/rt61pci.h b/drivers/net/wireless/ralink/rt2x00/rt61pci.h index 5f208ad509bd..d72d0ffd1127 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt61pci.h +++ b/drivers/net/wireless/ralink/rt2x00/rt61pci.h @@ -1484,6 +1484,6 @@ struct hw_pairwise_ta_entry { (((u8)(__txpower)) > MAX_TXPOWER) ? 
DEFAULT_TXPOWER : (__txpower) #define TXPOWER_TO_DEV(__txpower) \ - clamp_t(char, __txpower, MIN_TXPOWER, MAX_TXPOWER) + clamp_t(u8, __txpower, MIN_TXPOWER, MAX_TXPOWER) #endif /* RT61PCI_H */ diff --git a/drivers/net/wireless/ralink/rt2x00/rt73usb.c b/drivers/net/wireless/ralink/rt2x00/rt73usb.c index e3269fd7c59e..861035444374 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt73usb.c +++ b/drivers/net/wireless/ralink/rt2x00/rt73usb.c @@ -1378,7 +1378,7 @@ static int rt73usb_set_state(struct rt2x00_dev *rt2x00dev, enum dev_state state) { u32 reg, reg2; unsigned int i; - char put_to_sleep; + bool put_to_sleep; put_to_sleep = (state != STATE_AWAKE); @@ -2090,7 +2090,7 @@ static int rt73usb_probe_hw_mode(struct rt2x00_dev *rt2x00dev) { struct hw_mode_spec *spec = &rt2x00dev->spec; struct channel_info *info; - char *tx_power; + u8 *tx_power; unsigned int i; /* diff --git a/drivers/net/wireless/ralink/rt2x00/rt73usb.h b/drivers/net/wireless/ralink/rt2x00/rt73usb.h index 1b56d285c34b..bb0a68516c08 100644 --- a/drivers/net/wireless/ralink/rt2x00/rt73usb.h +++ b/drivers/net/wireless/ralink/rt2x00/rt73usb.h @@ -1063,6 +1063,6 @@ struct hw_pairwise_ta_entry { (((u8)(__txpower)) > MAX_TXPOWER) ? DEFAULT_TXPOWER : (__txpower) #define TXPOWER_TO_DEV(__txpower) \ - clamp_t(char, __txpower, MIN_TXPOWER, MAX_TXPOWER) + clamp_t(u8, __txpower, MIN_TXPOWER, MAX_TXPOWER) #endif /* RT73USB_H */ diff --git a/drivers/net/wwan/Kconfig b/drivers/net/wwan/Kconfig index 3486ffe94ac4..ac4d73b5626f 100644 --- a/drivers/net/wwan/Kconfig +++ b/drivers/net/wwan/Kconfig @@ -94,7 +94,7 @@ config RPMSG_WWAN_CTRL config IOSM tristate "IOSM Driver for Intel M.2 WWAN Device" - depends on INTEL_IOMMU + depends on PCI select NET_DEVLINK select RELAY if WWAN_DEBUGFS help diff --git a/drivers/net/wwan/iosm/iosm_ipc_coredump.c b/drivers/net/wwan/iosm/iosm_ipc_coredump.c index 9acd87724c9d..26ca30476f40 100644 --- a/drivers/net/wwan/iosm/iosm_ipc_coredump.c +++ b/drivers/net/wwan/iosm/iosm_ipc_coredump.c @@ -2,6 +2,7 @@ /* * Copyright (C) 2020-2021 Intel Corporation. */ +#include <linux/vmalloc.h> #include "iosm_ipc_coredump.h" diff --git a/drivers/net/wwan/iosm/iosm_ipc_devlink.c b/drivers/net/wwan/iosm/iosm_ipc_devlink.c index 17da85a8f337..2fe724d623c0 100644 --- a/drivers/net/wwan/iosm/iosm_ipc_devlink.c +++ b/drivers/net/wwan/iosm/iosm_ipc_devlink.c @@ -2,6 +2,7 @@ /* * Copyright (C) 2020-2021 Intel Corporation. 
*/ +#include <linux/vmalloc.h> #include "iosm_ipc_chnl_cfg.h" #include "iosm_ipc_coredump.h" diff --git a/drivers/net/wwan/iosm/iosm_ipc_imem_ops.c b/drivers/net/wwan/iosm/iosm_ipc_imem_ops.c index b7f9237dedf7..66b90cc4c346 100644 --- a/drivers/net/wwan/iosm/iosm_ipc_imem_ops.c +++ b/drivers/net/wwan/iosm/iosm_ipc_imem_ops.c @@ -91,6 +91,14 @@ void ipc_imem_wwan_channel_init(struct iosm_imem *ipc_imem, } ipc_chnl_cfg_get(&chnl_cfg, ipc_imem->nr_of_channels); + + if (ipc_imem->mmio->mux_protocol == MUX_AGGREGATION && + ipc_imem->nr_of_channels == IPC_MEM_IP_CHL_ID_0) { + chnl_cfg.ul_nr_of_entries = IPC_MEM_MAX_TDS_MUX_AGGR_UL; + chnl_cfg.dl_nr_of_entries = IPC_MEM_MAX_TDS_MUX_AGGR_DL; + chnl_cfg.dl_buf_size = IPC_MEM_MAX_ADB_BUF_SIZE; + } + ipc_imem_channel_init(ipc_imem, IPC_CTYPE_WWAN, chnl_cfg, IRQ_MOD_OFF); diff --git a/drivers/net/wwan/iosm/iosm_ipc_mux.h b/drivers/net/wwan/iosm/iosm_ipc_mux.h index cd9d74cc097f..9968bb885c1f 100644 --- a/drivers/net/wwan/iosm/iosm_ipc_mux.h +++ b/drivers/net/wwan/iosm/iosm_ipc_mux.h @@ -10,6 +10,7 @@ #define IPC_MEM_MAX_UL_DG_ENTRIES 100 #define IPC_MEM_MAX_TDS_MUX_AGGR_UL 60 +#define IPC_MEM_MAX_TDS_MUX_AGGR_DL 60 #define IPC_MEM_MAX_ADB_BUF_SIZE (16 * 1024) #define IPC_MEM_MAX_UL_ADB_BUF_SIZE IPC_MEM_MAX_ADB_BUF_SIZE diff --git a/drivers/net/wwan/iosm/iosm_ipc_pcie.c b/drivers/net/wwan/iosm/iosm_ipc_pcie.c index 31f57b986df2..d3d34d1c4704 100644 --- a/drivers/net/wwan/iosm/iosm_ipc_pcie.c +++ b/drivers/net/wwan/iosm/iosm_ipc_pcie.c @@ -232,6 +232,7 @@ static void ipc_pcie_config_init(struct iosm_pcie *ipc_pcie) */ static enum ipc_pcie_sleep_state ipc_pcie_read_bios_cfg(struct device *dev) { + enum ipc_pcie_sleep_state sleep_state = IPC_PCIE_D0L12; union acpi_object *object; acpi_handle handle_acpi; @@ -242,18 +243,23 @@ static enum ipc_pcie_sleep_state ipc_pcie_read_bios_cfg(struct device *dev) } object = acpi_evaluate_dsm(handle_acpi, &wwan_acpi_guid, 0, 3, NULL); + if (!object) + goto default_ret; + + if (object->integer.value == 3) + sleep_state = IPC_PCIE_D3L2; - if (object && object->integer.value == 3) - return IPC_PCIE_D3L2; + kfree(object); default_ret: - return IPC_PCIE_D0L12; + return sleep_state; } static int ipc_pcie_probe(struct pci_dev *pci, const struct pci_device_id *pci_id) { struct iosm_pcie *ipc_pcie = kzalloc(sizeof(*ipc_pcie), GFP_KERNEL); + int ret; pr_debug("Probing device 0x%X from the vendor 0x%X", pci_id->device, pci_id->vendor); @@ -286,6 +292,12 @@ static int ipc_pcie_probe(struct pci_dev *pci, goto pci_enable_fail; } + ret = dma_set_mask(ipc_pcie->dev, DMA_BIT_MASK(64)); + if (ret) { + dev_err(ipc_pcie->dev, "Could not set PCI DMA mask: %d", ret); + return ret; + } + ipc_pcie_config_aspm(ipc_pcie); dev_dbg(ipc_pcie->dev, "PCIe device enabled."); diff --git a/drivers/net/wwan/iosm/iosm_ipc_wwan.c b/drivers/net/wwan/iosm/iosm_ipc_wwan.c index 2f1f8b5d5b59..4c9022a93e01 100644 --- a/drivers/net/wwan/iosm/iosm_ipc_wwan.c +++ b/drivers/net/wwan/iosm/iosm_ipc_wwan.c @@ -40,13 +40,11 @@ struct iosm_netdev_priv { * @ipc_imem: Pointer to imem data-struct * @sub_netlist: List of active netdevs * @dev: Pointer device structure - * @if_mutex: Mutex used for add and remove interface id */ struct iosm_wwan { struct iosm_imem *ipc_imem; struct iosm_netdev_priv __rcu *sub_netlist[IP_MUX_SESSION_END + 1]; struct device *dev; - struct mutex if_mutex; /* Mutex used for add and remove interface id */ }; /* Bring-up the wwan net link */ @@ -55,14 +53,11 @@ static int ipc_wwan_link_open(struct net_device *netdev) struct iosm_netdev_priv 
*priv = wwan_netdev_drvpriv(netdev); struct iosm_wwan *ipc_wwan = priv->ipc_wwan; int if_id = priv->if_id; - int ret; if (if_id < IP_MUX_SESSION_START || if_id >= ARRAY_SIZE(ipc_wwan->sub_netlist)) return -EINVAL; - mutex_lock(&ipc_wwan->if_mutex); - /* get channel id */ priv->ch_id = ipc_imem_sys_wwan_open(ipc_wwan->ipc_imem, if_id); @@ -70,8 +65,7 @@ static int ipc_wwan_link_open(struct net_device *netdev) dev_err(ipc_wwan->dev, "cannot connect wwan0 & id %d to the IPC mem layer", if_id); - ret = -ENODEV; - goto out; + return -ENODEV; } /* enable tx path, DL data may follow */ @@ -80,10 +74,7 @@ static int ipc_wwan_link_open(struct net_device *netdev) dev_dbg(ipc_wwan->dev, "Channel id %d allocated to if_id %d", priv->ch_id, priv->if_id); - ret = 0; -out: - mutex_unlock(&ipc_wwan->if_mutex); - return ret; + return 0; } /* Bring-down the wwan net link */ @@ -93,11 +84,9 @@ static int ipc_wwan_link_stop(struct net_device *netdev) netif_stop_queue(netdev); - mutex_lock(&priv->ipc_wwan->if_mutex); ipc_imem_sys_wwan_close(priv->ipc_wwan->ipc_imem, priv->if_id, priv->ch_id); priv->ch_id = -1; - mutex_unlock(&priv->ipc_wwan->if_mutex); return 0; } @@ -168,6 +157,7 @@ static void ipc_wwan_setup(struct net_device *iosm_dev) iosm_dev->max_mtu = ETH_MAX_MTU; iosm_dev->flags = IFF_POINTOPOINT | IFF_NOARP; + iosm_dev->needs_free_netdev = true; iosm_dev->netdev_ops = &ipc_inm_ops; } @@ -189,26 +179,17 @@ static int ipc_wwan_newlink(void *ctxt, struct net_device *dev, priv->netdev = dev; priv->ipc_wwan = ipc_wwan; - mutex_lock(&ipc_wwan->if_mutex); - if (rcu_access_pointer(ipc_wwan->sub_netlist[if_id])) { - err = -EBUSY; - goto out_unlock; - } + if (rcu_access_pointer(ipc_wwan->sub_netlist[if_id])) + return -EBUSY; err = register_netdevice(dev); if (err) - goto out_unlock; + return err; rcu_assign_pointer(ipc_wwan->sub_netlist[if_id], priv); - mutex_unlock(&ipc_wwan->if_mutex); - netif_device_attach(dev); return 0; - -out_unlock: - mutex_unlock(&ipc_wwan->if_mutex); - return err; } static void ipc_wwan_dellink(void *ctxt, struct net_device *dev, @@ -222,17 +203,12 @@ static void ipc_wwan_dellink(void *ctxt, struct net_device *dev, if_id >= ARRAY_SIZE(ipc_wwan->sub_netlist))) return; - mutex_lock(&ipc_wwan->if_mutex); - if (WARN_ON(rcu_access_pointer(ipc_wwan->sub_netlist[if_id]) != priv)) - goto unlock; + return; RCU_INIT_POINTER(ipc_wwan->sub_netlist[if_id], NULL); /* unregistering includes synchronize_net() */ unregister_netdevice_queue(dev, head); - -unlock: - mutex_unlock(&ipc_wwan->if_mutex); } static const struct wwan_ops iosm_wwan_ops = { @@ -323,12 +299,9 @@ struct iosm_wwan *ipc_wwan_init(struct iosm_imem *ipc_imem, struct device *dev) ipc_wwan->dev = dev; ipc_wwan->ipc_imem = ipc_imem; - mutex_init(&ipc_wwan->if_mutex); - /* WWAN core will create a netdev for the default IP MUX channel */ if (wwan_register_ops(ipc_wwan->dev, &iosm_wwan_ops, ipc_wwan, IP_MUX_SESSION_DEFAULT)) { - mutex_destroy(&ipc_wwan->if_mutex); kfree(ipc_wwan); return NULL; } @@ -341,7 +314,5 @@ void ipc_wwan_deinit(struct iosm_wwan *ipc_wwan) /* This call will remove all child netdev(s) */ wwan_unregister_ops(ipc_wwan->dev); - mutex_destroy(&ipc_wwan->if_mutex); - kfree(ipc_wwan); } diff --git a/drivers/net/wwan/mhi_wwan_mbim.c b/drivers/net/wwan/mhi_wwan_mbim.c index 6872782e8dd8..ef70bb7c88ad 100644 --- a/drivers/net/wwan/mhi_wwan_mbim.c +++ b/drivers/net/wwan/mhi_wwan_mbim.c @@ -582,6 +582,7 @@ static void mhi_mbim_setup(struct net_device *ndev) ndev->min_mtu = ETH_MIN_MTU; ndev->max_mtu = MHI_MAX_BUF_SZ - 
ndev->needed_headroom; ndev->tx_queue_len = 1000; + ndev->needs_free_netdev = true; } static const struct wwan_ops mhi_mbim_wwan_ops = { diff --git a/drivers/net/wwan/wwan_hwsim.c b/drivers/net/wwan/wwan_hwsim.c index ff09a8cedf93..2397a903d8f5 100644 --- a/drivers/net/wwan/wwan_hwsim.c +++ b/drivers/net/wwan/wwan_hwsim.c @@ -311,7 +311,7 @@ err_unreg_dev: return ERR_PTR(err); err_free_dev: - kfree(dev); + put_device(&dev->dev); return ERR_PTR(err); } diff --git a/drivers/nfc/fdp/fdp.c b/drivers/nfc/fdp/fdp.c index c6b3334f24c9..f12f903a9dd1 100644 --- a/drivers/nfc/fdp/fdp.c +++ b/drivers/nfc/fdp/fdp.c @@ -249,11 +249,19 @@ static int fdp_nci_close(struct nci_dev *ndev) static int fdp_nci_send(struct nci_dev *ndev, struct sk_buff *skb) { struct fdp_nci_info *info = nci_get_drvdata(ndev); + int ret; if (atomic_dec_and_test(&info->data_pkt_counter)) info->data_pkt_counter_cb(ndev); - return info->phy_ops->write(info->phy, skb); + ret = info->phy_ops->write(info->phy, skb); + if (ret < 0) { + kfree_skb(skb); + return ret; + } + + consume_skb(skb); + return 0; } static int fdp_nci_request_firmware(struct nci_dev *ndev) diff --git a/drivers/nfc/nfcmrvl/i2c.c b/drivers/nfc/nfcmrvl/i2c.c index acef0cfd76af..24436c9e54c9 100644 --- a/drivers/nfc/nfcmrvl/i2c.c +++ b/drivers/nfc/nfcmrvl/i2c.c @@ -132,10 +132,15 @@ static int nfcmrvl_i2c_nci_send(struct nfcmrvl_private *priv, ret = -EREMOTEIO; } else ret = 0; + } + + if (ret) { kfree_skb(skb); + return ret; } - return ret; + consume_skb(skb); + return 0; } static void nfcmrvl_i2c_nci_update_config(struct nfcmrvl_private *priv, diff --git a/drivers/nfc/nxp-nci/core.c b/drivers/nfc/nxp-nci/core.c index 7c93d484dc1b..580cb6ecffee 100644 --- a/drivers/nfc/nxp-nci/core.c +++ b/drivers/nfc/nxp-nci/core.c @@ -80,10 +80,13 @@ static int nxp_nci_send(struct nci_dev *ndev, struct sk_buff *skb) return -EINVAL; r = info->phy_ops->write(info->phy_id, skb); - if (r < 0) + if (r < 0) { kfree_skb(skb); + return r; + } - return r; + consume_skb(skb); + return 0; } static int nxp_nci_rf_pll_unlocked_ntf(struct nci_dev *ndev, diff --git a/drivers/nfc/s3fwrn5/core.c b/drivers/nfc/s3fwrn5/core.c index 1c412007fabb..0270e05b68df 100644 --- a/drivers/nfc/s3fwrn5/core.c +++ b/drivers/nfc/s3fwrn5/core.c @@ -110,11 +110,15 @@ static int s3fwrn5_nci_send(struct nci_dev *ndev, struct sk_buff *skb) } ret = s3fwrn5_write(info, skb); - if (ret < 0) + if (ret < 0) { kfree_skb(skb); + mutex_unlock(&info->mutex); + return ret; + } + consume_skb(skb); mutex_unlock(&info->mutex); - return ret; + return 0; } static int s3fwrn5_nci_post_setup(struct nci_dev *ndev) diff --git a/drivers/nfc/virtual_ncidev.c b/drivers/nfc/virtual_ncidev.c index f577449e4935..85c06dbb2c44 100644 --- a/drivers/nfc/virtual_ncidev.c +++ b/drivers/nfc/virtual_ncidev.c @@ -54,16 +54,19 @@ static int virtual_nci_send(struct nci_dev *ndev, struct sk_buff *skb) mutex_lock(&nci_mutex); if (state != virtual_ncidev_enabled) { mutex_unlock(&nci_mutex); + kfree_skb(skb); return 0; } if (send_buff) { mutex_unlock(&nci_mutex); + kfree_skb(skb); return -1; } send_buff = skb_copy(skb, GFP_KERNEL); mutex_unlock(&nci_mutex); wake_up_interruptible(&wq); + consume_skb(skb); return 0; } diff --git a/drivers/nvme/host/apple.c b/drivers/nvme/host/apple.c index 5fc5ea196b40..ff8b083dc5c6 100644 --- a/drivers/nvme/host/apple.c +++ b/drivers/nvme/host/apple.c @@ -1039,6 +1039,8 @@ static void apple_nvme_reset_work(struct work_struct *work) dma_max_mapping_size(anv->dev) >> 9); anv->ctrl.max_segments = NVME_MAX_SEGS; + 
dma_set_max_seg_size(anv->dev, 0xffffffff); + /* * Enable NVMMU and linear submission queues. * While we could keep those disabled and pretend this is slightly diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index 059737c1a2c1..da55ce45ac70 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -675,6 +675,7 @@ void nvme_init_request(struct request *req, struct nvme_command *cmd) if (req->mq_hctx->type == HCTX_TYPE_POLL) req->cmd_flags |= REQ_POLLED; nvme_clear_nvme_request(req); + req->rq_flags |= RQF_QUIET; memcpy(nvme_req(req)->cmd, cmd, sizeof(*cmd)); } EXPORT_SYMBOL_GPL(nvme_init_request); @@ -1037,7 +1038,6 @@ int __nvme_submit_sync_cmd(struct request_queue *q, struct nvme_command *cmd, goto out; } - req->rq_flags |= RQF_QUIET; ret = nvme_execute_rq(req, at_head); if (result && ret >= 0) *result = nvme_req(req)->result; @@ -1227,7 +1227,6 @@ static void nvme_keep_alive_work(struct work_struct *work) rq->timeout = ctrl->kato * HZ; rq->end_io = nvme_keep_alive_end_io; rq->end_io_data = ctrl; - rq->rq_flags |= RQF_QUIET; blk_execute_rq_nowait(rq, false); } @@ -3262,8 +3261,12 @@ int nvme_init_ctrl_finish(struct nvme_ctrl *ctrl) return ret; if (!ctrl->identified && !nvme_discovery_ctrl(ctrl)) { + /* + * Do not return errors unless we are in a controller reset, + * the controller works perfectly fine without hwmon. + */ ret = nvme_hwmon_init(ctrl); - if (ret < 0) + if (ret == -EINTR) return ret; } @@ -4846,7 +4849,7 @@ int nvme_alloc_admin_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set, return 0; out_cleanup_admin_q: - blk_mq_destroy_queue(ctrl->fabrics_q); + blk_mq_destroy_queue(ctrl->admin_q); out_free_tagset: blk_mq_free_tag_set(ctrl->admin_tagset); return ret; diff --git a/drivers/nvme/host/hwmon.c b/drivers/nvme/host/hwmon.c index 0a586d712920..9e6e56c20ec9 100644 --- a/drivers/nvme/host/hwmon.c +++ b/drivers/nvme/host/hwmon.c @@ -12,7 +12,7 @@ struct nvme_hwmon_data { struct nvme_ctrl *ctrl; - struct nvme_smart_log log; + struct nvme_smart_log *log; struct mutex read_lock; }; @@ -60,14 +60,14 @@ static int nvme_set_temp_thresh(struct nvme_ctrl *ctrl, int sensor, bool under, static int nvme_hwmon_get_smart_log(struct nvme_hwmon_data *data) { return nvme_get_log(data->ctrl, NVME_NSID_ALL, NVME_LOG_SMART, 0, - NVME_CSI_NVM, &data->log, sizeof(data->log), 0); + NVME_CSI_NVM, data->log, sizeof(*data->log), 0); } static int nvme_hwmon_read(struct device *dev, enum hwmon_sensor_types type, u32 attr, int channel, long *val) { struct nvme_hwmon_data *data = dev_get_drvdata(dev); - struct nvme_smart_log *log = &data->log; + struct nvme_smart_log *log = data->log; int temp; int err; @@ -163,7 +163,7 @@ static umode_t nvme_hwmon_is_visible(const void *_data, case hwmon_temp_max: case hwmon_temp_min: if ((!channel && data->ctrl->wctemp) || - (channel && data->log.temp_sensor[channel - 1])) { + (channel && data->log->temp_sensor[channel - 1])) { if (data->ctrl->quirks & NVME_QUIRK_NO_TEMP_THRESH_CHANGE) return 0444; @@ -176,7 +176,7 @@ static umode_t nvme_hwmon_is_visible(const void *_data, break; case hwmon_temp_input: case hwmon_temp_label: - if (!channel || data->log.temp_sensor[channel - 1]) + if (!channel || data->log->temp_sensor[channel - 1]) return 0444; break; default: @@ -230,7 +230,13 @@ int nvme_hwmon_init(struct nvme_ctrl *ctrl) data = kzalloc(sizeof(*data), GFP_KERNEL); if (!data) - return 0; + return -ENOMEM; + + data->log = kzalloc(sizeof(*data->log), GFP_KERNEL); + if (!data->log) { + err = -ENOMEM; + goto err_free_data; + } 
data->ctrl = ctrl; mutex_init(&data->read_lock); @@ -238,8 +244,7 @@ int nvme_hwmon_init(struct nvme_ctrl *ctrl) err = nvme_hwmon_get_smart_log(data); if (err) { dev_warn(dev, "Failed to read smart log (error %d)\n", err); - kfree(data); - return err; + goto err_free_log; } hwmon = hwmon_device_register_with_info(dev, "nvme", @@ -247,11 +252,17 @@ int nvme_hwmon_init(struct nvme_ctrl *ctrl) NULL); if (IS_ERR(hwmon)) { dev_warn(dev, "Failed to instantiate hwmon device\n"); - kfree(data); - return PTR_ERR(hwmon); + err = PTR_ERR(hwmon); + goto err_free_log; } ctrl->hwmon_device = hwmon; return 0; + +err_free_log: + kfree(data->log); +err_free_data: + kfree(data); + return err; } void nvme_hwmon_exit(struct nvme_ctrl *ctrl) @@ -262,6 +273,7 @@ void nvme_hwmon_exit(struct nvme_ctrl *ctrl) hwmon_device_unregister(ctrl->hwmon_device); ctrl->hwmon_device = NULL; + kfree(data->log); kfree(data); } } diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c index 0ea7e441e080..93e2138a8b42 100644 --- a/drivers/nvme/host/multipath.c +++ b/drivers/nvme/host/multipath.c @@ -516,6 +516,7 @@ int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl, struct nvme_ns_head *head) /* set to a default value of 512 until the disk is validated */ blk_queue_logical_block_size(head->disk->queue, 512); blk_set_stacking_limits(&head->disk->queue->limits); + blk_queue_dma_alignment(head->disk->queue, 3); /* we need to propagate up the VMC settings */ if (ctrl->vwc & NVME_CTRL_VWC_PRESENT) diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index bcbef6bc5672..f4335519399d 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -1436,7 +1436,6 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req) abort_req->end_io = abort_endio; abort_req->end_io_data = NULL; - abort_req->rq_flags |= RQF_QUIET; blk_execute_rq_nowait(abort_req, false); /* @@ -2490,7 +2489,6 @@ static int nvme_delete_queue(struct nvme_queue *nvmeq, u8 opcode) req->end_io_data = nvmeq; init_completion(&nvmeq->delete_done); - req->rq_flags |= RQF_QUIET; blk_execute_rq_nowait(req, false); return 0; } @@ -3491,6 +3489,8 @@ static const struct pci_device_id nvme_id_table[] = { NVME_QUIRK_IGNORE_DEV_SUBNQN, }, { PCI_DEVICE(0x1344, 0x5407), /* Micron Technology Inc NVMe SSD */ .driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN }, + { PCI_DEVICE(0x1344, 0x6001), /* Micron Nitro NVMe */ + .driver_data = NVME_QUIRK_BOGUS_NID, }, { PCI_DEVICE(0x1c5c, 0x1504), /* SK Hynix PC400 */ .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, }, { PCI_DEVICE(0x1c5c, 0x174a), /* SK Hynix P31 SSD */ @@ -3511,6 +3511,18 @@ static const struct pci_device_id nvme_id_table[] = { .driver_data = NVME_QUIRK_NO_DEEPEST_PS, }, { PCI_DEVICE(0x2646, 0x2263), /* KINGSTON A2000 NVMe SSD */ .driver_data = NVME_QUIRK_NO_DEEPEST_PS, }, + { PCI_DEVICE(0x2646, 0x5018), /* KINGSTON OM8SFP4xxxxP OS21012 NVMe SSD */ + .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, }, + { PCI_DEVICE(0x2646, 0x5016), /* KINGSTON OM3PGP4xxxxP OS21011 NVMe SSD */ + .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, }, + { PCI_DEVICE(0x2646, 0x501A), /* KINGSTON OM8PGP4xxxxP OS21005 NVMe SSD */ + .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, }, + { PCI_DEVICE(0x2646, 0x501B), /* KINGSTON OM8PGP4xxxxQ OS21005 NVMe SSD */ + .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, }, + { PCI_DEVICE(0x2646, 0x501E), /* KINGSTON OM3PGP4xxxxQ OS21011 NVMe SSD */ + .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, }, + { PCI_DEVICE(0x1f40, 0x5236), /* Netac Technologies Co. 
NV7000 NVMe SSD */ + .driver_data = NVME_QUIRK_BOGUS_NID, }, { PCI_DEVICE(0x1e4B, 0x1001), /* MAXIO MAP1001 */ .driver_data = NVME_QUIRK_BOGUS_NID, }, { PCI_DEVICE(0x1e4B, 0x1002), /* MAXIO MAP1002 */ diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c index 1eed0fc26b3a..9b47dcb2a7d9 100644 --- a/drivers/nvme/host/tcp.c +++ b/drivers/nvme/host/tcp.c @@ -387,7 +387,7 @@ static inline void nvme_tcp_ddgst_update(struct ahash_request *hash, { struct scatterlist sg; - sg_init_marker(&sg, 1); + sg_init_table(&sg, 1); sg_set_page(&sg, page, len, off); ahash_request_set_crypt(hash, &sg, NULL, len); crypto_ahash_update(hash); @@ -1141,6 +1141,7 @@ static int nvme_tcp_try_send_ddgst(struct nvme_tcp_request *req) static int nvme_tcp_try_send(struct nvme_tcp_queue *queue) { struct nvme_tcp_request *req; + unsigned int noreclaim_flag; int ret = 1; if (!queue->request) { @@ -1150,12 +1151,13 @@ static int nvme_tcp_try_send(struct nvme_tcp_queue *queue) } req = queue->request; + noreclaim_flag = memalloc_noreclaim_save(); if (req->state == NVME_TCP_SEND_CMD_PDU) { ret = nvme_tcp_try_send_cmd_pdu(req); if (ret <= 0) goto done; if (!nvme_tcp_has_inline_data(req)) - return ret; + goto out; } if (req->state == NVME_TCP_SEND_H2C_PDU) { @@ -1181,6 +1183,8 @@ done: nvme_tcp_fail_request(queue->request); nvme_tcp_done_send_req(queue); } +out: + memalloc_noreclaim_restore(noreclaim_flag); return ret; } @@ -1296,6 +1300,7 @@ static void nvme_tcp_free_queue(struct nvme_ctrl *nctrl, int qid) struct page *page; struct nvme_tcp_ctrl *ctrl = to_tcp_ctrl(nctrl); struct nvme_tcp_queue *queue = &ctrl->queues[qid]; + unsigned int noreclaim_flag; if (!test_and_clear_bit(NVME_TCP_Q_ALLOCATED, &queue->flags)) return; @@ -1308,7 +1313,11 @@ static void nvme_tcp_free_queue(struct nvme_ctrl *nctrl, int qid) __page_frag_cache_drain(page, queue->pf_cache.pagecnt_bias); queue->pf_cache.va = NULL; } + + noreclaim_flag = memalloc_noreclaim_save(); sock_release(queue->sock); + memalloc_noreclaim_restore(noreclaim_flag); + kfree(queue->pdu); mutex_destroy(&queue->send_mutex); mutex_destroy(&queue->queue_lock); diff --git a/drivers/nvme/target/auth.c b/drivers/nvme/target/auth.c index c4113b43dbfe..4dcddcf95279 100644 --- a/drivers/nvme/target/auth.c +++ b/drivers/nvme/target/auth.c @@ -45,9 +45,11 @@ int nvmet_auth_set_key(struct nvmet_host *host, const char *secret, if (!dhchap_secret) return -ENOMEM; if (set_ctrl) { + kfree(host->dhchap_ctrl_secret); host->dhchap_ctrl_secret = strim(dhchap_secret); host->dhchap_ctrl_key_hash = key_hash; } else { + kfree(host->dhchap_secret); host->dhchap_secret = strim(dhchap_secret); host->dhchap_key_hash = key_hash; } diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c index e34a2896fedb..6a2816f3b4e8 100644 --- a/drivers/nvme/target/configfs.c +++ b/drivers/nvme/target/configfs.c @@ -1215,6 +1215,7 @@ static ssize_t nvmet_subsys_attr_model_store_locked(struct nvmet_subsys *subsys, const char *page, size_t count) { int pos = 0, len; + char *val; if (subsys->subsys_discovered) { pr_err("Can't set model number. 
%s is already assigned\n", @@ -1237,9 +1238,11 @@ static ssize_t nvmet_subsys_attr_model_store_locked(struct nvmet_subsys *subsys, return -EINVAL; } - subsys->model_number = kmemdup_nul(page, len, GFP_KERNEL); - if (!subsys->model_number) + val = kmemdup_nul(page, len, GFP_KERNEL); + if (!val) return -ENOMEM; + kfree(subsys->model_number); + subsys->model_number = val; return count; } @@ -1290,12 +1293,8 @@ static ssize_t nvmet_subsys_attr_qid_max_show(struct config_item *item, static ssize_t nvmet_subsys_attr_qid_max_store(struct config_item *item, const char *page, size_t cnt) { - struct nvmet_port *port = to_nvmet_port(item); u16 qid_max; - if (nvmet_is_port_enabled(port, __func__)) - return -EACCES; - if (sscanf(page, "%hu\n", &qid_max) != 1) return -EINVAL; @@ -1840,6 +1839,7 @@ static void nvmet_host_release(struct config_item *item) #ifdef CONFIG_NVME_TARGET_AUTH kfree(host->dhchap_secret); + kfree(host->dhchap_ctrl_secret); #endif kfree(host); } diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c index 14677145bbba..aecb5853f8da 100644 --- a/drivers/nvme/target/core.c +++ b/drivers/nvme/target/core.c @@ -1176,7 +1176,7 @@ static void nvmet_start_ctrl(struct nvmet_ctrl *ctrl) * reset the keep alive timer when the controller is enabled. */ if (ctrl->kato) - mod_delayed_work(system_wq, &ctrl->ka_work, ctrl->kato * HZ); + mod_delayed_work(nvmet_wq, &ctrl->ka_work, ctrl->kato * HZ); } static void nvmet_clear_ctrl(struct nvmet_ctrl *ctrl) diff --git a/drivers/nvmem/lan9662-otpc.c b/drivers/nvmem/lan9662-otpc.c index f6732fd216d8..377bf34c2946 100644 --- a/drivers/nvmem/lan9662-otpc.c +++ b/drivers/nvmem/lan9662-otpc.c @@ -203,7 +203,7 @@ static int lan9662_otp_probe(struct platform_device *pdev) } static const struct of_device_id lan9662_otp_match[] = { - { .compatible = "microchip,lan9662-otp", }, + { .compatible = "microchip,lan9662-otpc", }, { }, }; MODULE_DEVICE_TABLE(of, lan9662_otp_match); diff --git a/drivers/nvmem/u-boot-env.c b/drivers/nvmem/u-boot-env.c index 8e72d1bbd649..4fdbdccebda1 100644 --- a/drivers/nvmem/u-boot-env.c +++ b/drivers/nvmem/u-boot-env.c @@ -135,7 +135,7 @@ static int u_boot_env_parse(struct u_boot_env *priv) break; case U_BOOT_FORMAT_REDUNDANT: crc32_offset = offsetof(struct u_boot_env_image_redundant, crc32); - crc32_data_offset = offsetof(struct u_boot_env_image_redundant, mark); + crc32_data_offset = offsetof(struct u_boot_env_image_redundant, data); data_offset = offsetof(struct u_boot_env_image_redundant, data); break; } diff --git a/drivers/parisc/iosapic.c b/drivers/parisc/iosapic.c index bdef7a8d6ab8..bcc1dae00780 100644 --- a/drivers/parisc/iosapic.c +++ b/drivers/parisc/iosapic.c @@ -866,6 +866,7 @@ int iosapic_serial_irq(struct parisc_device *dev) return vi->txn_irq; } +EXPORT_SYMBOL(iosapic_serial_irq); #endif diff --git a/drivers/parisc/pdc_stable.c b/drivers/parisc/pdc_stable.c index d9e51036a4fa..d6af5726ddf3 100644 --- a/drivers/parisc/pdc_stable.c +++ b/drivers/parisc/pdc_stable.c @@ -14,7 +14,7 @@ * all) PA-RISC machines should have them. Anyway, for safety reasons, the * following code can deal with just 96 bytes of Stable Storage, and all * sizes between 96 and 192 bytes (provided they are multiple of struct - * device_path size, eg: 128, 160 and 192) to provide full information. + * pdc_module_path size, eg: 128, 160 and 192) to provide full information. * One last word: there's one path we can always count on: the primary path. * Anything above 224 bytes is used for 'osdep2' OS-dependent storage area. 
* @@ -88,7 +88,7 @@ struct pdcspath_entry { short ready; /* entry record is valid if != 0 */ unsigned long addr; /* entry address in stable storage */ char *name; /* entry name */ - struct device_path devpath; /* device path in parisc representation */ + struct pdc_module_path devpath; /* device path in parisc representation */ struct device *dev; /* corresponding device */ struct kobject kobj; }; @@ -138,7 +138,7 @@ struct pdcspath_attribute paths_attr_##_name = { \ static int pdcspath_fetch(struct pdcspath_entry *entry) { - struct device_path *devpath; + struct pdc_module_path *devpath; if (!entry) return -EINVAL; @@ -153,7 +153,7 @@ pdcspath_fetch(struct pdcspath_entry *entry) return -EIO; /* Find the matching device. - NOTE: hardware_path overlays with device_path, so the nice cast can + NOTE: hardware_path overlays with pdc_module_path, so the nice cast can be used */ entry->dev = hwpath_to_device((struct hardware_path *)devpath); @@ -179,7 +179,7 @@ pdcspath_fetch(struct pdcspath_entry *entry) static void pdcspath_store(struct pdcspath_entry *entry) { - struct device_path *devpath; + struct pdc_module_path *devpath; BUG_ON(!entry); @@ -221,7 +221,7 @@ static ssize_t pdcspath_hwpath_read(struct pdcspath_entry *entry, char *buf) { char *out = buf; - struct device_path *devpath; + struct pdc_module_path *devpath; short i; if (!entry || !buf) @@ -236,11 +236,11 @@ pdcspath_hwpath_read(struct pdcspath_entry *entry, char *buf) return -ENODATA; for (i = 0; i < 6; i++) { - if (devpath->bc[i] >= 128) + if (devpath->path.bc[i] < 0) continue; - out += sprintf(out, "%u/", (unsigned char)devpath->bc[i]); + out += sprintf(out, "%d/", devpath->path.bc[i]); } - out += sprintf(out, "%u\n", (unsigned char)devpath->mod); + out += sprintf(out, "%u\n", (unsigned char)devpath->path.mod); return out - buf; } @@ -296,12 +296,12 @@ pdcspath_hwpath_write(struct pdcspath_entry *entry, const char *buf, size_t coun for (i=5; ((temp = strrchr(in, '/'))) && (temp-in > 0) && (likely(i)); i--) { hwpath.bc[i] = simple_strtoul(temp+1, NULL, 10); in[temp-in] = '\0'; - DPRINTK("%s: bc[%d]: %d\n", __func__, i, hwpath.bc[i]); + DPRINTK("%s: bc[%d]: %d\n", __func__, i, hwpath.path.bc[i]); } /* Store the final field */ hwpath.bc[i] = simple_strtoul(in, NULL, 10); - DPRINTK("%s: bc[%d]: %d\n", __func__, i, hwpath.bc[i]); + DPRINTK("%s: bc[%d]: %d\n", __func__, i, hwpath.path.bc[i]); /* Now we check that the user isn't trying to lure us */ if (!(dev = hwpath_to_device((struct hardware_path *)&hwpath))) { @@ -342,7 +342,7 @@ static ssize_t pdcspath_layer_read(struct pdcspath_entry *entry, char *buf) { char *out = buf; - struct device_path *devpath; + struct pdc_module_path *devpath; short i; if (!entry || !buf) @@ -547,7 +547,7 @@ static ssize_t pdcs_auto_read(struct kobject *kobj, pathentry = &pdcspath_entry_primary; read_lock(&pathentry->rw_lock); - out += sprintf(out, "%s\n", (pathentry->devpath.flags & knob) ? + out += sprintf(out, "%s\n", (pathentry->devpath.path.flags & knob) ? "On" : "Off"); read_unlock(&pathentry->rw_lock); @@ -594,8 +594,8 @@ static ssize_t pdcs_timer_read(struct kobject *kobj, /* print the timer value in seconds */ read_lock(&pathentry->rw_lock); - out += sprintf(out, "%u\n", (pathentry->devpath.flags & PF_TIMER) ? - (1 << (pathentry->devpath.flags & PF_TIMER)) : 0); + out += sprintf(out, "%u\n", (pathentry->devpath.path.flags & PF_TIMER) ? 
+ (1 << (pathentry->devpath.path.flags & PF_TIMER)) : 0); read_unlock(&pathentry->rw_lock); return out - buf; @@ -764,7 +764,7 @@ static ssize_t pdcs_auto_write(struct kobject *kobj, /* Be nice to the existing flag record */ read_lock(&pathentry->rw_lock); - flags = pathentry->devpath.flags; + flags = pathentry->devpath.path.flags; read_unlock(&pathentry->rw_lock); DPRINTK("%s: flags before: 0x%X\n", __func__, flags); @@ -785,7 +785,7 @@ static ssize_t pdcs_auto_write(struct kobject *kobj, write_lock(&pathentry->rw_lock); /* Change the path entry flags first */ - pathentry->devpath.flags = flags; + pathentry->devpath.path.flags = flags; /* Now, dive in. Write back to the hardware */ pdcspath_store(pathentry); diff --git a/drivers/parport/parport_pc.c b/drivers/parport/parport_pc.c index 7c45927e2131..5784dc20fb38 100644 --- a/drivers/parport/parport_pc.c +++ b/drivers/parport/parport_pc.c @@ -468,7 +468,7 @@ static size_t parport_pc_fifo_write_block_pio(struct parport *port, const unsigned char *bufp = buf; size_t left = length; unsigned long expire = jiffies + port->physport->cad->timeout; - const int fifo = FIFO(port); + const unsigned long fifo = FIFO(port); int poll_for = 8; /* 80 usecs */ const struct parport_pc_private *priv = port->physport->private_data; const int fifo_depth = priv->fifo_depth; diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c index e7c6f6629e7c..ba64284eaf9f 100644 --- a/drivers/pci/controller/pci-hyperv.c +++ b/drivers/pci/controller/pci-hyperv.c @@ -1614,7 +1614,7 @@ out: static u32 hv_compose_msi_req_v1( struct pci_create_interrupt *int_pkt, const struct cpumask *affinity, - u32 slot, u8 vector, u8 vector_count) + u32 slot, u8 vector, u16 vector_count) { int_pkt->message_type.type = PCI_CREATE_INTERRUPT_MESSAGE; int_pkt->wslot.slot = slot; @@ -1642,7 +1642,7 @@ static int hv_compose_msi_req_get_cpu(const struct cpumask *affinity) static u32 hv_compose_msi_req_v2( struct pci_create_interrupt2 *int_pkt, const struct cpumask *affinity, - u32 slot, u8 vector, u8 vector_count) + u32 slot, u8 vector, u16 vector_count) { int cpu; @@ -1661,7 +1661,7 @@ static u32 hv_compose_msi_req_v2( static u32 hv_compose_msi_req_v3( struct pci_create_interrupt3 *int_pkt, const struct cpumask *affinity, - u32 slot, u32 vector, u8 vector_count) + u32 slot, u32 vector, u16 vector_count) { int cpu; @@ -1701,7 +1701,12 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) struct compose_comp_ctxt comp; struct tran_int_desc *int_desc; struct msi_desc *msi_desc; - u8 vector, vector_count; + /* + * vector_count should be u16: see hv_msi_desc, hv_msi_desc2 + * and hv_msi_desc3. vector must be u32: see hv_msi_desc3. + */ + u16 vector_count; + u32 vector; struct { struct pci_packet pci_pkt; union { @@ -1767,6 +1772,11 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) vector_count = 1; } + /* + * hv_compose_msi_req_v1 and v2 are for x86 only, meaning 'vector' + * can't exceed u8. Cast 'vector' down to u8 for v1/v2 explicitly + * for better readability. 
+ */ memset(&ctxt, 0, sizeof(ctxt)); init_completion(&comp.comp_pkt.host_event); ctxt.pci_pkt.completion_func = hv_pci_compose_compl; @@ -1777,7 +1787,7 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) size = hv_compose_msi_req_v1(&ctxt.int_pkts.v1, dest, hpdev->desc.win_slot.slot, - vector, + (u8)vector, vector_count); break; @@ -1786,7 +1796,7 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) size = hv_compose_msi_req_v2(&ctxt.int_pkts.v2, dest, hpdev->desc.win_slot.slot, - vector, + (u8)vector, vector_count); break; diff --git a/drivers/pci/controller/pci-tegra.c b/drivers/pci/controller/pci-tegra.c index 24478ae5a345..8e323e93be91 100644 --- a/drivers/pci/controller/pci-tegra.c +++ b/drivers/pci/controller/pci-tegra.c @@ -415,6 +415,13 @@ static inline u32 pads_readl(struct tegra_pcie *pcie, unsigned long offset) * address (access to which generates correct config transaction) falls in * this 4 KiB region. */ +static unsigned int tegra_pcie_conf_offset(u8 bus, unsigned int devfn, + unsigned int where) +{ + return ((where & 0xf00) << 16) | (bus << 16) | (PCI_SLOT(devfn) << 11) | + (PCI_FUNC(devfn) << 8) | (where & 0xff); +} + static void __iomem *tegra_pcie_map_bus(struct pci_bus *bus, unsigned int devfn, int where) @@ -436,9 +443,7 @@ static void __iomem *tegra_pcie_map_bus(struct pci_bus *bus, unsigned int offset; u32 base; - offset = PCI_CONF1_EXT_ADDRESS(bus->number, PCI_SLOT(devfn), - PCI_FUNC(devfn), where) & - ~PCI_CONF1_ENABLE; + offset = tegra_pcie_conf_offset(bus->number, devfn, where); /* move 4 KiB window to offset within the FPCI region */ base = 0xfe100000 + ((offset & ~(SZ_4K - 1)) >> 8); diff --git a/drivers/phy/qualcomm/phy-qcom-qmp-combo.c b/drivers/phy/qualcomm/phy-qcom-qmp-combo.c index 9807c4d935cd..ba9d761ec49a 100644 --- a/drivers/phy/qualcomm/phy-qcom-qmp-combo.c +++ b/drivers/phy/qualcomm/phy-qcom-qmp-combo.c @@ -2240,7 +2240,7 @@ static void qmp_combo_enable_autonomous_mode(struct qmp_phy *qphy) static void qmp_combo_disable_autonomous_mode(struct qmp_phy *qphy) { const struct qmp_phy_cfg *cfg = qphy->cfg; - void __iomem *pcs_usb = qphy->pcs_usb ?: qphy->pcs_usb; + void __iomem *pcs_usb = qphy->pcs_usb ?: qphy->pcs; void __iomem *pcs_misc = qphy->pcs_misc; /* Disable i/o clamp_n on resume for normal mode */ diff --git a/drivers/phy/ralink/phy-mt7621-pci.c b/drivers/phy/ralink/phy-mt7621-pci.c index 5e6530f545b5..85888ab2d307 100644 --- a/drivers/phy/ralink/phy-mt7621-pci.c +++ b/drivers/phy/ralink/phy-mt7621-pci.c @@ -280,7 +280,8 @@ static struct phy *mt7621_pcie_phy_of_xlate(struct device *dev, } static const struct soc_device_attribute mt7621_pci_quirks_match[] = { - { .soc_id = "mt7621", .revision = "E2" } + { .soc_id = "mt7621", .revision = "E2" }, + { /* sentinel */ } }; static const struct regmap_config mt7621_pci_phy_regmap_config = { diff --git a/drivers/phy/st/phy-stm32-usbphyc.c b/drivers/phy/st/phy-stm32-usbphyc.c index a98c911cc37a..5bb9647b078f 100644 --- a/drivers/phy/st/phy-stm32-usbphyc.c +++ b/drivers/phy/st/phy-stm32-usbphyc.c @@ -710,6 +710,8 @@ static int stm32_usbphyc_probe(struct platform_device *pdev) ret = of_property_read_u32(child, "reg", &index); if (ret || index > usbphyc->nphys) { dev_err(&phy->dev, "invalid reg property: %d\n", ret); + if (!ret) + ret = -EINVAL; goto put_child; } diff --git a/drivers/phy/sunplus/phy-sunplus-usb2.c b/drivers/phy/sunplus/phy-sunplus-usb2.c index b932087c55b2..e827b79f6d49 100644 --- a/drivers/phy/sunplus/phy-sunplus-usb2.c +++ 
b/drivers/phy/sunplus/phy-sunplus-usb2.c @@ -256,8 +256,8 @@ static int sp_usb_phy_probe(struct platform_device *pdev) usbphy->moon4_res_mem = platform_get_resource_byname(pdev, IORESOURCE_MEM, "moon4"); usbphy->moon4_regs = devm_ioremap(&pdev->dev, usbphy->moon4_res_mem->start, resource_size(usbphy->moon4_res_mem)); - if (IS_ERR(usbphy->moon4_regs)) - return PTR_ERR(usbphy->moon4_regs); + if (!usbphy->moon4_regs) + return -ENOMEM; usbphy->phy_clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(usbphy->phy_clk)) diff --git a/drivers/phy/tegra/xusb.c b/drivers/phy/tegra/xusb.c index 95091876c422..dce45fbbd699 100644 --- a/drivers/phy/tegra/xusb.c +++ b/drivers/phy/tegra/xusb.c @@ -1461,8 +1461,14 @@ EXPORT_SYMBOL_GPL(tegra_phy_xusb_utmi_port_reset); void tegra_phy_xusb_utmi_pad_power_on(struct phy *phy) { - struct tegra_xusb_lane *lane = phy_get_drvdata(phy); - struct tegra_xusb_padctl *padctl = lane->pad->padctl; + struct tegra_xusb_lane *lane; + struct tegra_xusb_padctl *padctl; + + if (!phy) + return; + + lane = phy_get_drvdata(phy); + padctl = lane->pad->padctl; if (padctl->soc->ops->utmi_pad_power_on) padctl->soc->ops->utmi_pad_power_on(phy); @@ -1471,8 +1477,14 @@ EXPORT_SYMBOL_GPL(tegra_phy_xusb_utmi_pad_power_on); void tegra_phy_xusb_utmi_pad_power_down(struct phy *phy) { - struct tegra_xusb_lane *lane = phy_get_drvdata(phy); - struct tegra_xusb_padctl *padctl = lane->pad->padctl; + struct tegra_xusb_lane *lane; + struct tegra_xusb_padctl *padctl; + + if (!phy) + return; + + lane = phy_get_drvdata(phy); + padctl = lane->pad->padctl; if (padctl->soc->ops->utmi_pad_power_down) padctl->soc->ops->utmi_pad_power_down(phy); diff --git a/drivers/pinctrl/devicetree.c b/drivers/pinctrl/devicetree.c index ef898ee8ca6b..6e0a40962f38 100644 --- a/drivers/pinctrl/devicetree.c +++ b/drivers/pinctrl/devicetree.c @@ -220,6 +220,8 @@ int pinctrl_dt_to_map(struct pinctrl *p, struct pinctrl_dev *pctldev) for (state = 0; ; state++) { /* Retrieve the pinctrl-* property */ propname = kasprintf(GFP_KERNEL, "pinctrl-%d", state); + if (!propname) + return -ENOMEM; prop = of_find_property(np, propname, &size); kfree(propname); if (!prop) { diff --git a/drivers/pinctrl/mediatek/mtk-eint.c b/drivers/pinctrl/mediatek/mtk-eint.c index f7b54a551764..65d312967619 100644 --- a/drivers/pinctrl/mediatek/mtk-eint.c +++ b/drivers/pinctrl/mediatek/mtk-eint.c @@ -24,6 +24,7 @@ #define MTK_EINT_EDGE_SENSITIVE 0 #define MTK_EINT_LEVEL_SENSITIVE 1 #define MTK_EINT_DBNC_SET_DBNC_BITS 4 +#define MTK_EINT_DBNC_MAX 16 #define MTK_EINT_DBNC_RST_BIT (0x1 << 1) #define MTK_EINT_DBNC_SET_EN (0x1 << 0) @@ -48,6 +49,21 @@ static const struct mtk_eint_regs mtk_generic_eint_regs = { .dbnc_clr = 0x700, }; +const unsigned int debounce_time_mt2701[] = { + 500, 1000, 16000, 32000, 64000, 128000, 256000, 0 +}; +EXPORT_SYMBOL_GPL(debounce_time_mt2701); + +const unsigned int debounce_time_mt6765[] = { + 125, 250, 500, 1000, 16000, 32000, 64000, 128000, 256000, 512000, 0 +}; +EXPORT_SYMBOL_GPL(debounce_time_mt6765); + +const unsigned int debounce_time_mt6795[] = { + 500, 1000, 16000, 32000, 64000, 128000, 256000, 512000, 0 +}; +EXPORT_SYMBOL_GPL(debounce_time_mt6795); + static void __iomem *mtk_eint_get_offset(struct mtk_eint *eint, unsigned int eint_num, unsigned int offset) @@ -404,10 +420,11 @@ int mtk_eint_set_debounce(struct mtk_eint *eint, unsigned long eint_num, int virq, eint_offset; unsigned int set_offset, bit, clr_bit, clr_offset, rst, i, unmask, dbnc; - static const unsigned int debounce_time[] = {500, 1000, 16000, 32000, - 64000, 
128000, 256000}; struct irq_data *d; + if (!eint->hw->db_time) + return -EOPNOTSUPP; + virq = irq_find_mapping(eint->domain, eint_num); eint_offset = (eint_num % 4) * 8; d = irq_get_irq_data(virq); @@ -418,9 +435,9 @@ int mtk_eint_set_debounce(struct mtk_eint *eint, unsigned long eint_num, if (!mtk_eint_can_en_debounce(eint, eint_num)) return -EINVAL; - dbnc = ARRAY_SIZE(debounce_time); - for (i = 0; i < ARRAY_SIZE(debounce_time); i++) { - if (debounce <= debounce_time[i]) { + dbnc = eint->num_db_time; + for (i = 0; i < eint->num_db_time; i++) { + if (debounce <= eint->hw->db_time[i]) { dbnc = i; break; } @@ -494,6 +511,13 @@ int mtk_eint_do_init(struct mtk_eint *eint) if (!eint->domain) return -ENOMEM; + if (eint->hw->db_time) { + for (i = 0; i < MTK_EINT_DBNC_MAX; i++) + if (eint->hw->db_time[i] == 0) + break; + eint->num_db_time = i; + } + mtk_eint_hw_init(eint); for (i = 0; i < eint->hw->ap_num; i++) { int virq = irq_create_mapping(eint->domain, i); diff --git a/drivers/pinctrl/mediatek/mtk-eint.h b/drivers/pinctrl/mediatek/mtk-eint.h index 48468d0fae68..6139b16cd225 100644 --- a/drivers/pinctrl/mediatek/mtk-eint.h +++ b/drivers/pinctrl/mediatek/mtk-eint.h @@ -37,8 +37,13 @@ struct mtk_eint_hw { u8 ports; unsigned int ap_num; unsigned int db_cnt; + const unsigned int *db_time; }; +extern const unsigned int debounce_time_mt2701[]; +extern const unsigned int debounce_time_mt6765[]; +extern const unsigned int debounce_time_mt6795[]; + struct mtk_eint; struct mtk_eint_xt { @@ -62,6 +67,7 @@ struct mtk_eint { /* Used to fit into various EINT device */ const struct mtk_eint_hw *hw; const struct mtk_eint_regs *regs; + u16 num_db_time; /* Used to fit into various pinctrl device */ void *pctl; diff --git a/drivers/pinctrl/mediatek/pinctrl-mt2701.c b/drivers/pinctrl/mediatek/pinctrl-mt2701.c index d1583b4fdd9d..b185538452a0 100644 --- a/drivers/pinctrl/mediatek/pinctrl-mt2701.c +++ b/drivers/pinctrl/mediatek/pinctrl-mt2701.c @@ -518,6 +518,7 @@ static const struct mtk_pinctrl_devdata mt2701_pinctrl_data = { .ports = 6, .ap_num = 169, .db_cnt = 16, + .db_time = debounce_time_mt2701, }, }; diff --git a/drivers/pinctrl/mediatek/pinctrl-mt2712.c b/drivers/pinctrl/mediatek/pinctrl-mt2712.c index b921068f9e69..730a496848dc 100644 --- a/drivers/pinctrl/mediatek/pinctrl-mt2712.c +++ b/drivers/pinctrl/mediatek/pinctrl-mt2712.c @@ -567,6 +567,7 @@ static const struct mtk_pinctrl_devdata mt2712_pinctrl_data = { .ports = 8, .ap_num = 229, .db_cnt = 40, + .db_time = debounce_time_mt2701, }, }; diff --git a/drivers/pinctrl/mediatek/pinctrl-mt6765.c b/drivers/pinctrl/mediatek/pinctrl-mt6765.c index c57b19fcda03..f6ec41eb6e0c 100644 --- a/drivers/pinctrl/mediatek/pinctrl-mt6765.c +++ b/drivers/pinctrl/mediatek/pinctrl-mt6765.c @@ -1062,6 +1062,7 @@ static const struct mtk_eint_hw mt6765_eint_hw = { .ports = 6, .ap_num = 160, .db_cnt = 13, + .db_time = debounce_time_mt6765, }; static const struct mtk_pin_soc mt6765_data = { diff --git a/drivers/pinctrl/mediatek/pinctrl-mt6779.c b/drivers/pinctrl/mediatek/pinctrl-mt6779.c index 4ddf8bda6827..62d4f5ad6737 100644 --- a/drivers/pinctrl/mediatek/pinctrl-mt6779.c +++ b/drivers/pinctrl/mediatek/pinctrl-mt6779.c @@ -737,6 +737,7 @@ static const struct mtk_eint_hw mt6779_eint_hw = { .ports = 6, .ap_num = 195, .db_cnt = 13, + .db_time = debounce_time_mt2701, }; static const struct mtk_pin_soc mt6779_data = { diff --git a/drivers/pinctrl/mediatek/pinctrl-mt6795.c b/drivers/pinctrl/mediatek/pinctrl-mt6795.c index f90152261a0f..01e855ccd4dd 100644 --- 
a/drivers/pinctrl/mediatek/pinctrl-mt6795.c +++ b/drivers/pinctrl/mediatek/pinctrl-mt6795.c @@ -475,6 +475,7 @@ static const struct mtk_eint_hw mt6795_eint_hw = { .ports = 7, .ap_num = 224, .db_cnt = 32, + .db_time = debounce_time_mt6795, }; static const unsigned int mt6795_pull_type[] = { diff --git a/drivers/pinctrl/mediatek/pinctrl-mt7622.c b/drivers/pinctrl/mediatek/pinctrl-mt7622.c index 68eee881ee3d..3c1148d59eff 100644 --- a/drivers/pinctrl/mediatek/pinctrl-mt7622.c +++ b/drivers/pinctrl/mediatek/pinctrl-mt7622.c @@ -846,6 +846,7 @@ static const struct mtk_eint_hw mt7622_eint_hw = { .ports = 7, .ap_num = ARRAY_SIZE(mt7622_pins), .db_cnt = 20, + .db_time = debounce_time_mt6765, }; static const struct mtk_pin_soc mt7622_data = { diff --git a/drivers/pinctrl/mediatek/pinctrl-mt7623.c b/drivers/pinctrl/mediatek/pinctrl-mt7623.c index b8d9d31db74f..699977074697 100644 --- a/drivers/pinctrl/mediatek/pinctrl-mt7623.c +++ b/drivers/pinctrl/mediatek/pinctrl-mt7623.c @@ -1369,6 +1369,7 @@ static const struct mtk_eint_hw mt7623_eint_hw = { .ports = 6, .ap_num = 169, .db_cnt = 20, + .db_time = debounce_time_mt2701, }; static struct mtk_pin_soc mt7623_data = { diff --git a/drivers/pinctrl/mediatek/pinctrl-mt7629.c b/drivers/pinctrl/mediatek/pinctrl-mt7629.c index b5f0fa43245f..2ce411cb9c6e 100644 --- a/drivers/pinctrl/mediatek/pinctrl-mt7629.c +++ b/drivers/pinctrl/mediatek/pinctrl-mt7629.c @@ -402,6 +402,7 @@ static const struct mtk_eint_hw mt7629_eint_hw = { .ports = 7, .ap_num = ARRAY_SIZE(mt7629_pins), .db_cnt = 16, + .db_time = debounce_time_mt2701, }; static struct mtk_pin_soc mt7629_data = { diff --git a/drivers/pinctrl/mediatek/pinctrl-mt7986.c b/drivers/pinctrl/mediatek/pinctrl-mt7986.c index f26869f1a367..50cb736f9f11 100644 --- a/drivers/pinctrl/mediatek/pinctrl-mt7986.c +++ b/drivers/pinctrl/mediatek/pinctrl-mt7986.c @@ -826,6 +826,7 @@ static const struct mtk_eint_hw mt7986a_eint_hw = { .ports = 7, .ap_num = ARRAY_SIZE(mt7986a_pins), .db_cnt = 16, + .db_time = debounce_time_mt6765, }; static const struct mtk_eint_hw mt7986b_eint_hw = { @@ -833,6 +834,7 @@ static const struct mtk_eint_hw mt7986b_eint_hw = { .ports = 7, .ap_num = ARRAY_SIZE(mt7986b_pins), .db_cnt = 16, + .db_time = debounce_time_mt6765, }; static struct mtk_pin_soc mt7986a_data = { diff --git a/drivers/pinctrl/mediatek/pinctrl-mt8127.c b/drivers/pinctrl/mediatek/pinctrl-mt8127.c index 91c530e7b00e..e8772dcfe69e 100644 --- a/drivers/pinctrl/mediatek/pinctrl-mt8127.c +++ b/drivers/pinctrl/mediatek/pinctrl-mt8127.c @@ -286,6 +286,7 @@ static const struct mtk_pinctrl_devdata mt8127_pinctrl_data = { .ports = 6, .ap_num = 143, .db_cnt = 16, + .db_time = debounce_time_mt2701, }, }; diff --git a/drivers/pinctrl/mediatek/pinctrl-mt8135.c b/drivers/pinctrl/mediatek/pinctrl-mt8135.c index 562846756517..cdb0252071fb 100644 --- a/drivers/pinctrl/mediatek/pinctrl-mt8135.c +++ b/drivers/pinctrl/mediatek/pinctrl-mt8135.c @@ -315,6 +315,7 @@ static const struct mtk_pinctrl_devdata mt8135_pinctrl_data = { .ports = 6, .ap_num = 192, .db_cnt = 16, + .db_time = debounce_time_mt2701, }, }; diff --git a/drivers/pinctrl/mediatek/pinctrl-mt8167.c b/drivers/pinctrl/mediatek/pinctrl-mt8167.c index 825167f5d020..866da2c4a890 100644 --- a/drivers/pinctrl/mediatek/pinctrl-mt8167.c +++ b/drivers/pinctrl/mediatek/pinctrl-mt8167.c @@ -319,6 +319,7 @@ static const struct mtk_pinctrl_devdata mt8167_pinctrl_data = { .ports = 6, .ap_num = 169, .db_cnt = 64, + .db_time = debounce_time_mt6795, }, }; diff --git a/drivers/pinctrl/mediatek/pinctrl-mt8173.c 
b/drivers/pinctrl/mediatek/pinctrl-mt8173.c index 1d7d11a32e7d..37d8cec1c3ce 100644 --- a/drivers/pinctrl/mediatek/pinctrl-mt8173.c +++ b/drivers/pinctrl/mediatek/pinctrl-mt8173.c @@ -327,6 +327,7 @@ static const struct mtk_pinctrl_devdata mt8173_pinctrl_data = { .ports = 6, .ap_num = 224, .db_cnt = 16, + .db_time = debounce_time_mt2701, }, }; diff --git a/drivers/pinctrl/mediatek/pinctrl-mt8183.c b/drivers/pinctrl/mediatek/pinctrl-mt8183.c index fecb1e64fff4..ddc48b725c22 100644 --- a/drivers/pinctrl/mediatek/pinctrl-mt8183.c +++ b/drivers/pinctrl/mediatek/pinctrl-mt8183.c @@ -545,6 +545,7 @@ static const struct mtk_eint_hw mt8183_eint_hw = { .ports = 6, .ap_num = 212, .db_cnt = 13, + .db_time = debounce_time_mt6765, }; static const struct mtk_pin_soc mt8183_data = { diff --git a/drivers/pinctrl/mediatek/pinctrl-mt8186.c b/drivers/pinctrl/mediatek/pinctrl-mt8186.c index a4dd5197abc1..a02f7c326970 100644 --- a/drivers/pinctrl/mediatek/pinctrl-mt8186.c +++ b/drivers/pinctrl/mediatek/pinctrl-mt8186.c @@ -1222,6 +1222,7 @@ static const struct mtk_eint_hw mt8186_eint_hw = { .ports = 7, .ap_num = 217, .db_cnt = 32, + .db_time = debounce_time_mt6765, }; static const struct mtk_pin_soc mt8186_data = { diff --git a/drivers/pinctrl/mediatek/pinctrl-mt8188.c b/drivers/pinctrl/mediatek/pinctrl-mt8188.c index d0e75c1b4417..6a3d0126288e 100644 --- a/drivers/pinctrl/mediatek/pinctrl-mt8188.c +++ b/drivers/pinctrl/mediatek/pinctrl-mt8188.c @@ -1625,6 +1625,7 @@ static const struct mtk_eint_hw mt8188_eint_hw = { .ports = 7, .ap_num = 225, .db_cnt = 32, + .db_time = debounce_time_mt6765, }; static const struct mtk_pin_soc mt8188_data = { diff --git a/drivers/pinctrl/mediatek/pinctrl-mt8192.c b/drivers/pinctrl/mediatek/pinctrl-mt8192.c index 78c02b7c81f0..9695f4ec6aba 100644 --- a/drivers/pinctrl/mediatek/pinctrl-mt8192.c +++ b/drivers/pinctrl/mediatek/pinctrl-mt8192.c @@ -1371,6 +1371,7 @@ static const struct mtk_eint_hw mt8192_eint_hw = { .ports = 7, .ap_num = 224, .db_cnt = 32, + .db_time = debounce_time_mt6765, }; static const struct mtk_pin_reg_calc mt8192_reg_cals[PINCTRL_PIN_REG_MAX] = { diff --git a/drivers/pinctrl/mediatek/pinctrl-mt8195.c b/drivers/pinctrl/mediatek/pinctrl-mt8195.c index 563693d3d4c2..89557c7ed2ab 100644 --- a/drivers/pinctrl/mediatek/pinctrl-mt8195.c +++ b/drivers/pinctrl/mediatek/pinctrl-mt8195.c @@ -935,6 +935,7 @@ static const struct mtk_eint_hw mt8195_eint_hw = { .ports = 7, .ap_num = 225, .db_cnt = 32, + .db_time = debounce_time_mt6765, }; static const struct mtk_pin_soc mt8195_data = { diff --git a/drivers/pinctrl/mediatek/pinctrl-mt8365.c b/drivers/pinctrl/mediatek/pinctrl-mt8365.c index 57f37a294063..e31b89b226b7 100644 --- a/drivers/pinctrl/mediatek/pinctrl-mt8365.c +++ b/drivers/pinctrl/mediatek/pinctrl-mt8365.c @@ -453,6 +453,7 @@ static const struct mtk_pinctrl_devdata mt8365_pinctrl_data = { .ports = 5, .ap_num = 160, .db_cnt = 160, + .db_time = debounce_time_mt6765, }, }; diff --git a/drivers/pinctrl/mediatek/pinctrl-mt8516.c b/drivers/pinctrl/mediatek/pinctrl-mt8516.c index 939a1932b8dc..e929339dd2cb 100644 --- a/drivers/pinctrl/mediatek/pinctrl-mt8516.c +++ b/drivers/pinctrl/mediatek/pinctrl-mt8516.c @@ -319,6 +319,7 @@ static const struct mtk_pinctrl_devdata mt8516_pinctrl_data = { .ports = 6, .ap_num = 169, .db_cnt = 64, + .db_time = debounce_time_mt6795, }, }; diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c b/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c index e1ae3beb9f72..b7921b59eb7b 100644 --- 
a/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c +++ b/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c @@ -709,6 +709,9 @@ static int mtk_pinconf_bias_set_rsel(struct mtk_pinctrl *hw, { int err, rsel_val; + if (!pullup && arg == MTK_DISABLE) + return 0; + if (hw->rsel_si_unit) { /* find pin rsel_index from pin_rsel array*/ err = mtk_hw_pin_rsel_lookup(hw, desc, pullup, arg, &rsel_val); diff --git a/drivers/pinctrl/pinctrl-ingenic.c b/drivers/pinctrl/pinctrl-ingenic.c index 7e732076dedf..9e46d83e5138 100644 --- a/drivers/pinctrl/pinctrl-ingenic.c +++ b/drivers/pinctrl/pinctrl-ingenic.c @@ -667,7 +667,7 @@ static u8 jz4755_lcd_24bit_funcs[] = { 1, 1, 1, 1, 0, 0, }; static const struct group_desc jz4755_groups[] = { INGENIC_PIN_GROUP("uart0-data", jz4755_uart0_data, 0), INGENIC_PIN_GROUP("uart0-hwflow", jz4755_uart0_hwflow, 0), - INGENIC_PIN_GROUP("uart1-data", jz4755_uart1_data, 0), + INGENIC_PIN_GROUP("uart1-data", jz4755_uart1_data, 1), INGENIC_PIN_GROUP("uart2-data", jz4755_uart2_data, 1), INGENIC_PIN_GROUP("ssi-dt-b", jz4755_ssi_dt_b, 0), INGENIC_PIN_GROUP("ssi-dt-f", jz4755_ssi_dt_f, 0), @@ -721,7 +721,7 @@ static const char *jz4755_ssi_groups[] = { "ssi-ce1-b", "ssi-ce1-f", }; static const char *jz4755_mmc0_groups[] = { "mmc0-1bit", "mmc0-4bit", }; -static const char *jz4755_mmc1_groups[] = { "mmc0-1bit", "mmc0-4bit", }; +static const char *jz4755_mmc1_groups[] = { "mmc1-1bit", "mmc1-4bit", }; static const char *jz4755_i2c_groups[] = { "i2c-data", }; static const char *jz4755_cim_groups[] = { "cim-data", }; static const char *jz4755_lcd_groups[] = { diff --git a/drivers/pinctrl/pinctrl-ocelot.c b/drivers/pinctrl/pinctrl-ocelot.c index 62ce3957abe4..687aaa601555 100644 --- a/drivers/pinctrl/pinctrl-ocelot.c +++ b/drivers/pinctrl/pinctrl-ocelot.c @@ -1864,19 +1864,28 @@ static void ocelot_irq_unmask_level(struct irq_data *data) if (val & bit) ack = true; + /* Try to clear any rising edges */ + if (!active && ack) + regmap_write_bits(info->map, REG(OCELOT_GPIO_INTR, info, gpio), + bit, bit); + /* Enable the interrupt now */ gpiochip_enable_irq(chip, gpio); regmap_update_bits(info->map, REG(OCELOT_GPIO_INTR_ENA, info, gpio), bit, bit); /* - * In case the interrupt line is still active and the interrupt - * controller has not seen any changes in the interrupt line, then it - * means that there happen another interrupt while the line was active. + * In case the interrupt line is still active then it means that + * there happen another interrupt while the line was active. * So we missed that one, so we need to kick the interrupt again * handler. 
*/ - if (active && !ack) { + regmap_read(info->map, REG(OCELOT_GPIO_IN, info, gpio), &val); + if ((!(val & bit) && trigger_level == IRQ_TYPE_LEVEL_LOW) || + (val & bit && trigger_level == IRQ_TYPE_LEVEL_HIGH)) + active = true; + + if (active) { struct ocelot_irq_work *work; work = kmalloc(sizeof(*work), GFP_ATOMIC); diff --git a/drivers/pinctrl/pinctrl-rockchip.c b/drivers/pinctrl/pinctrl-rockchip.c index 53bdfc40f055..da974ff2d75d 100644 --- a/drivers/pinctrl/pinctrl-rockchip.c +++ b/drivers/pinctrl/pinctrl-rockchip.c @@ -679,14 +679,54 @@ static void rockchip_get_recalced_mux(struct rockchip_pin_bank *bank, int pin, } static struct rockchip_mux_route_data px30_mux_route_data[] = { + RK_MUXROUTE_SAME(2, RK_PB4, 1, 0x184, BIT(16 + 7)), /* cif-d0m0 */ + RK_MUXROUTE_SAME(3, RK_PA1, 3, 0x184, BIT(16 + 7) | BIT(7)), /* cif-d0m1 */ + RK_MUXROUTE_SAME(2, RK_PB6, 1, 0x184, BIT(16 + 7)), /* cif-d1m0 */ + RK_MUXROUTE_SAME(3, RK_PA2, 3, 0x184, BIT(16 + 7) | BIT(7)), /* cif-d1m1 */ RK_MUXROUTE_SAME(2, RK_PA0, 1, 0x184, BIT(16 + 7)), /* cif-d2m0 */ RK_MUXROUTE_SAME(3, RK_PA3, 3, 0x184, BIT(16 + 7) | BIT(7)), /* cif-d2m1 */ + RK_MUXROUTE_SAME(2, RK_PA1, 1, 0x184, BIT(16 + 7)), /* cif-d3m0 */ + RK_MUXROUTE_SAME(3, RK_PA5, 3, 0x184, BIT(16 + 7) | BIT(7)), /* cif-d3m1 */ + RK_MUXROUTE_SAME(2, RK_PA2, 1, 0x184, BIT(16 + 7)), /* cif-d4m0 */ + RK_MUXROUTE_SAME(3, RK_PA7, 3, 0x184, BIT(16 + 7) | BIT(7)), /* cif-d4m1 */ + RK_MUXROUTE_SAME(2, RK_PA3, 1, 0x184, BIT(16 + 7)), /* cif-d5m0 */ + RK_MUXROUTE_SAME(3, RK_PB0, 3, 0x184, BIT(16 + 7) | BIT(7)), /* cif-d5m1 */ + RK_MUXROUTE_SAME(2, RK_PA4, 1, 0x184, BIT(16 + 7)), /* cif-d6m0 */ + RK_MUXROUTE_SAME(3, RK_PB1, 3, 0x184, BIT(16 + 7) | BIT(7)), /* cif-d6m1 */ + RK_MUXROUTE_SAME(2, RK_PA5, 1, 0x184, BIT(16 + 7)), /* cif-d7m0 */ + RK_MUXROUTE_SAME(3, RK_PB4, 3, 0x184, BIT(16 + 7) | BIT(7)), /* cif-d7m1 */ + RK_MUXROUTE_SAME(2, RK_PA6, 1, 0x184, BIT(16 + 7)), /* cif-d8m0 */ + RK_MUXROUTE_SAME(3, RK_PB6, 3, 0x184, BIT(16 + 7) | BIT(7)), /* cif-d8m1 */ + RK_MUXROUTE_SAME(2, RK_PA7, 1, 0x184, BIT(16 + 7)), /* cif-d9m0 */ + RK_MUXROUTE_SAME(3, RK_PB7, 3, 0x184, BIT(16 + 7) | BIT(7)), /* cif-d9m1 */ + RK_MUXROUTE_SAME(2, RK_PB7, 1, 0x184, BIT(16 + 7)), /* cif-d10m0 */ + RK_MUXROUTE_SAME(3, RK_PC6, 3, 0x184, BIT(16 + 7) | BIT(7)), /* cif-d10m1 */ + RK_MUXROUTE_SAME(2, RK_PC0, 1, 0x184, BIT(16 + 7)), /* cif-d11m0 */ + RK_MUXROUTE_SAME(3, RK_PC7, 3, 0x184, BIT(16 + 7) | BIT(7)), /* cif-d11m1 */ + RK_MUXROUTE_SAME(2, RK_PB0, 1, 0x184, BIT(16 + 7)), /* cif-vsyncm0 */ + RK_MUXROUTE_SAME(3, RK_PD1, 3, 0x184, BIT(16 + 7) | BIT(7)), /* cif-vsyncm1 */ + RK_MUXROUTE_SAME(2, RK_PB1, 1, 0x184, BIT(16 + 7)), /* cif-hrefm0 */ + RK_MUXROUTE_SAME(3, RK_PD2, 3, 0x184, BIT(16 + 7) | BIT(7)), /* cif-hrefm1 */ + RK_MUXROUTE_SAME(2, RK_PB2, 1, 0x184, BIT(16 + 7)), /* cif-clkinm0 */ + RK_MUXROUTE_SAME(3, RK_PD3, 3, 0x184, BIT(16 + 7) | BIT(7)), /* cif-clkinm1 */ + RK_MUXROUTE_SAME(2, RK_PB3, 1, 0x184, BIT(16 + 7)), /* cif-clkoutm0 */ + RK_MUXROUTE_SAME(3, RK_PD0, 3, 0x184, BIT(16 + 7) | BIT(7)), /* cif-clkoutm1 */ RK_MUXROUTE_SAME(3, RK_PC6, 2, 0x184, BIT(16 + 8)), /* pdm-m0 */ RK_MUXROUTE_SAME(2, RK_PC6, 1, 0x184, BIT(16 + 8) | BIT(8)), /* pdm-m1 */ + RK_MUXROUTE_SAME(3, RK_PD3, 2, 0x184, BIT(16 + 8)), /* pdm-sdi0m0 */ + RK_MUXROUTE_SAME(2, RK_PC5, 2, 0x184, BIT(16 + 8) | BIT(8)), /* pdm-sdi0m1 */ RK_MUXROUTE_SAME(1, RK_PD3, 2, 0x184, BIT(16 + 10)), /* uart2-rxm0 */ RK_MUXROUTE_SAME(2, RK_PB6, 2, 0x184, BIT(16 + 10) | BIT(10)), /* uart2-rxm1 */ + RK_MUXROUTE_SAME(1, RK_PD2, 2, 0x184, BIT(16 
+ 10)), /* uart2-txm0 */ + RK_MUXROUTE_SAME(2, RK_PB4, 2, 0x184, BIT(16 + 10) | BIT(10)), /* uart2-txm1 */ RK_MUXROUTE_SAME(0, RK_PC1, 2, 0x184, BIT(16 + 9)), /* uart3-rxm0 */ RK_MUXROUTE_SAME(1, RK_PB7, 2, 0x184, BIT(16 + 9) | BIT(9)), /* uart3-rxm1 */ + RK_MUXROUTE_SAME(0, RK_PC0, 2, 0x184, BIT(16 + 9)), /* uart3-txm0 */ + RK_MUXROUTE_SAME(1, RK_PB6, 2, 0x184, BIT(16 + 9) | BIT(9)), /* uart3-txm1 */ + RK_MUXROUTE_SAME(0, RK_PC2, 2, 0x184, BIT(16 + 9)), /* uart3-ctsm0 */ + RK_MUXROUTE_SAME(1, RK_PB4, 2, 0x184, BIT(16 + 9) | BIT(9)), /* uart3-ctsm1 */ + RK_MUXROUTE_SAME(0, RK_PC3, 2, 0x184, BIT(16 + 9)), /* uart3-rtsm0 */ + RK_MUXROUTE_SAME(1, RK_PB5, 2, 0x184, BIT(16 + 9) | BIT(9)), /* uart3-rtsm1 */ }; static struct rockchip_mux_route_data rv1126_mux_route_data[] = { diff --git a/drivers/pinctrl/pinctrl-zynqmp.c b/drivers/pinctrl/pinctrl-zynqmp.c index 7d2fbf8a02cd..c98f35ad8921 100644 --- a/drivers/pinctrl/pinctrl-zynqmp.c +++ b/drivers/pinctrl/pinctrl-zynqmp.c @@ -412,10 +412,6 @@ static int zynqmp_pinconf_cfg_set(struct pinctrl_dev *pctldev, break; case PIN_CONFIG_BIAS_HIGH_IMPEDANCE: - param = PM_PINCTRL_CONFIG_TRI_STATE; - arg = PM_PINCTRL_TRI_STATE_ENABLE; - ret = zynqmp_pm_pinctrl_set_config(pin, param, arg); - break; case PIN_CONFIG_MODE_LOW_POWER: /* * These cases are mentioned in dts but configurable @@ -424,11 +420,6 @@ static int zynqmp_pinconf_cfg_set(struct pinctrl_dev *pctldev, */ ret = 0; break; - case PIN_CONFIG_OUTPUT_ENABLE: - param = PM_PINCTRL_CONFIG_TRI_STATE; - arg = PM_PINCTRL_TRI_STATE_DISABLE; - ret = zynqmp_pm_pinctrl_set_config(pin, param, arg); - break; default: dev_warn(pctldev->dev, "unsupported configuration parameter '%u'\n", diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c index a2abfe987ab1..8bf8b21954fe 100644 --- a/drivers/pinctrl/qcom/pinctrl-msm.c +++ b/drivers/pinctrl/qcom/pinctrl-msm.c @@ -51,6 +51,7 @@ * detection. * @skip_wake_irqs: Skip IRQs that are handled by wakeup interrupt controller * @disabled_for_mux: These IRQs were disabled because we muxed away. + * @ever_gpio: This bit is set the first time we mux a pin to gpio_func. * @soc: Reference to soc_data of platform specific data. * @regs: Base addresses for the TLMM tiles. * @phys_base: Physical base address @@ -72,6 +73,7 @@ struct msm_pinctrl { DECLARE_BITMAP(enabled_irqs, MAX_NR_GPIO); DECLARE_BITMAP(skip_wake_irqs, MAX_NR_GPIO); DECLARE_BITMAP(disabled_for_mux, MAX_NR_GPIO); + DECLARE_BITMAP(ever_gpio, MAX_NR_GPIO); const struct msm_pinctrl_soc_data *soc; void __iomem *regs[MAX_NR_TILES]; @@ -218,6 +220,25 @@ static int msm_pinmux_set_mux(struct pinctrl_dev *pctldev, val = msm_readl_ctl(pctrl, g); + /* + * If this is the first time muxing to GPIO and the direction is + * output, make sure that we're not going to be glitching the pin + * by reading the current state of the pin and setting it as the + * output. 
+ */ + if (i == gpio_func && (val & BIT(g->oe_bit)) && + !test_and_set_bit(group, pctrl->ever_gpio)) { + u32 io_val = msm_readl_io(pctrl, g); + + if (io_val & BIT(g->in_bit)) { + if (!(io_val & BIT(g->out_bit))) + msm_writel_io(io_val | BIT(g->out_bit), pctrl, g); + } else { + if (io_val & BIT(g->out_bit)) + msm_writel_io(io_val & ~BIT(g->out_bit), pctrl, g); + } + } + if (egpio_func && i == egpio_func) { if (val & BIT(g->egpio_present)) val &= ~BIT(g->egpio_enable); diff --git a/drivers/pinctrl/qcom/pinctrl-sc8280xp.c b/drivers/pinctrl/qcom/pinctrl-sc8280xp.c index aa2075390f3e..e96c00686a25 100644 --- a/drivers/pinctrl/qcom/pinctrl-sc8280xp.c +++ b/drivers/pinctrl/qcom/pinctrl-sc8280xp.c @@ -1873,8 +1873,8 @@ static const struct msm_pingroup sc8280xp_groups[] = { [225] = PINGROUP(225, hs3_mi2s, phase_flag, _, _, _, _, egpio), [226] = PINGROUP(226, hs3_mi2s, phase_flag, _, _, _, _, egpio), [227] = PINGROUP(227, hs3_mi2s, phase_flag, _, _, _, _, egpio), - [228] = UFS_RESET(ufs_reset, 0xf1004), - [229] = UFS_RESET(ufs1_reset, 0xf3004), + [228] = UFS_RESET(ufs_reset, 0xf1000), + [229] = UFS_RESET(ufs1_reset, 0xf3000), [230] = SDC_QDSD_PINGROUP(sdc2_clk, 0xe8000, 14, 6), [231] = SDC_QDSD_PINGROUP(sdc2_cmd, 0xe8000, 11, 3), [232] = SDC_QDSD_PINGROUP(sdc2_data, 0xe8000, 9, 0), diff --git a/drivers/platform/loongarch/loongson-laptop.c b/drivers/platform/loongarch/loongson-laptop.c index f0166ad5d2c2..99203584949d 100644 --- a/drivers/platform/loongarch/loongson-laptop.c +++ b/drivers/platform/loongarch/loongson-laptop.c @@ -199,6 +199,13 @@ static int loongson_hotkey_resume(struct device *dev) struct key_entry ke; struct backlight_device *bd; + bd = backlight_device_get_by_type(BACKLIGHT_PLATFORM); + if (bd) { + loongson_laptop_backlight_update(bd) ? + pr_warn("Loongson_backlight: resume brightness failed") : + pr_info("Loongson_backlight: resume brightness %d\n", bd->props.brightness); + } + /* * Only if the firmware supports SW_LID event model, we can handle the * event. This is for the consideration of development board without EC. @@ -228,13 +235,6 @@ static int loongson_hotkey_resume(struct device *dev) } } - bd = backlight_device_get_by_type(BACKLIGHT_PLATFORM); - if (bd) { - loongson_laptop_backlight_update(bd) ? - pr_warn("Loongson_backlight: resume brightness failed") : - pr_info("Loongson_backlight: resume brightness %d\n", bd->props.brightness); - } - return 0; } @@ -448,6 +448,7 @@ static int __init event_init(struct generic_sub_driver *sub_driver) if (ret < 0) { pr_err("Failed to setup input device keymap\n"); input_free_device(generic_inputdev); + generic_inputdev = NULL; return ret; } @@ -502,8 +503,11 @@ static int __init generic_subdriver_init(struct generic_sub_driver *sub_driver) if (ret) return -EINVAL; - if (sub_driver->init) - sub_driver->init(sub_driver); + if (sub_driver->init) { + ret = sub_driver->init(sub_driver); + if (ret) + goto err_out; + } if (sub_driver->notify) { ret = setup_acpi_notify(sub_driver); @@ -519,7 +523,7 @@ static int __init generic_subdriver_init(struct generic_sub_driver *sub_driver) err_out: generic_subdriver_exit(sub_driver); - return (ret < 0) ? 
ret : 0; + return ret; } static void generic_subdriver_exit(struct generic_sub_driver *sub_driver) diff --git a/drivers/platform/surface/aggregator/ssh_packet_layer.c b/drivers/platform/surface/aggregator/ssh_packet_layer.c index 6748fe4ac5d5..def8d7ac541f 100644 --- a/drivers/platform/surface/aggregator/ssh_packet_layer.c +++ b/drivers/platform/surface/aggregator/ssh_packet_layer.c @@ -1596,16 +1596,32 @@ static void ssh_ptl_timeout_reap(struct work_struct *work) ssh_ptl_tx_wakeup_packet(ptl); } -static bool ssh_ptl_rx_retransmit_check(struct ssh_ptl *ptl, u8 seq) +static bool ssh_ptl_rx_retransmit_check(struct ssh_ptl *ptl, const struct ssh_frame *frame) { int i; /* + * Ignore unsequenced packets. On some devices (notably Surface Pro 9), + * unsequenced events will always be sent with SEQ=0x00. Attempting to + * detect retransmission would thus just block all events. + * + * While sequence numbers would also allow detection of retransmitted + * packets in unsequenced communication, they have only ever been used + * to cover edge-cases in sequenced transmission. In particular, the + * only instance of packets being retransmitted (that we are aware of) + * is due to an ACK timeout. As this does not happen in unsequenced + * communication, skip the retransmission check for those packets + * entirely. + */ + if (frame->type == SSH_FRAME_TYPE_DATA_NSQ) + return false; + + /* * Check if SEQ has been seen recently (i.e. packet was * re-transmitted and we should ignore it). */ for (i = 0; i < ARRAY_SIZE(ptl->rx.blocked.seqs); i++) { - if (likely(ptl->rx.blocked.seqs[i] != seq)) + if (likely(ptl->rx.blocked.seqs[i] != frame->seq)) continue; ptl_dbg(ptl, "ptl: ignoring repeated data packet\n"); @@ -1613,7 +1629,7 @@ static bool ssh_ptl_rx_retransmit_check(struct ssh_ptl *ptl, u8 seq) } /* Update list of blocked sequence IDs. */ - ptl->rx.blocked.seqs[ptl->rx.blocked.offset] = seq; + ptl->rx.blocked.seqs[ptl->rx.blocked.offset] = frame->seq; ptl->rx.blocked.offset = (ptl->rx.blocked.offset + 1) % ARRAY_SIZE(ptl->rx.blocked.seqs); @@ -1624,7 +1640,7 @@ static void ssh_ptl_rx_dataframe(struct ssh_ptl *ptl, const struct ssh_frame *frame, const struct ssam_span *payload) { - if (ssh_ptl_rx_retransmit_check(ptl, frame->seq)) + if (ssh_ptl_rx_retransmit_check(ptl, frame)) return; ptl->ops.data_received(ptl, payload); diff --git a/drivers/platform/surface/surface_aggregator_registry.c b/drivers/platform/surface/surface_aggregator_registry.c index 585911020cea..023f126121d7 100644 --- a/drivers/platform/surface/surface_aggregator_registry.c +++ b/drivers/platform/surface/surface_aggregator_registry.c @@ -234,6 +234,19 @@ static const struct software_node *ssam_node_group_sl3[] = { NULL, }; +/* Devices for Surface Laptop 5. */ +static const struct software_node *ssam_node_group_sl5[] = { + &ssam_node_root, + &ssam_node_bat_ac, + &ssam_node_bat_main, + &ssam_node_tmp_pprof, + &ssam_node_hid_main_keyboard, + &ssam_node_hid_main_touchpad, + &ssam_node_hid_main_iid5, + &ssam_node_hid_sam_ucm_ucsi, + NULL, +}; + /* Devices for Surface Laptop Studio. 
*/ static const struct software_node *ssam_node_group_sls[] = { &ssam_node_root, @@ -268,6 +281,7 @@ static const struct software_node *ssam_node_group_sp7[] = { NULL, }; +/* Devices for Surface Pro 8 */ static const struct software_node *ssam_node_group_sp8[] = { &ssam_node_root, &ssam_node_hub_kip, @@ -284,6 +298,23 @@ static const struct software_node *ssam_node_group_sp8[] = { NULL, }; +/* Devices for Surface Pro 9 */ +static const struct software_node *ssam_node_group_sp9[] = { + &ssam_node_root, + &ssam_node_hub_kip, + &ssam_node_bat_ac, + &ssam_node_bat_main, + &ssam_node_tmp_pprof, + /* TODO: Tablet mode switch (via POS subsystem) */ + &ssam_node_hid_kip_keyboard, + &ssam_node_hid_kip_penstash, + &ssam_node_hid_kip_touchpad, + &ssam_node_hid_kip_fwupd, + &ssam_node_hid_sam_sensors, + &ssam_node_hid_sam_ucm_ucsi, + NULL, +}; + /* -- SSAM platform/meta-hub driver. ---------------------------------------- */ @@ -303,6 +334,9 @@ static const struct acpi_device_id ssam_platform_hub_match[] = { /* Surface Pro 8 */ { "MSHW0263", (unsigned long)ssam_node_group_sp8 }, + /* Surface Pro 9 */ + { "MSHW0343", (unsigned long)ssam_node_group_sp9 }, + /* Surface Book 2 */ { "MSHW0107", (unsigned long)ssam_node_group_gen5 }, @@ -324,6 +358,9 @@ static const struct acpi_device_id ssam_platform_hub_match[] = { /* Surface Laptop 4 (13", Intel) */ { "MSHW0250", (unsigned long)ssam_node_group_sl3 }, + /* Surface Laptop 5 */ + { "MSHW0350", (unsigned long)ssam_node_group_sl5 }, + /* Surface Laptop Go 1 */ { "MSHW0118", (unsigned long)ssam_node_group_slg1 }, diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c index 18224f9a5bc0..ee67efdd5499 100644 --- a/drivers/platform/x86/acer-wmi.c +++ b/drivers/platform/x86/acer-wmi.c @@ -566,6 +566,15 @@ static const struct dmi_system_id acer_quirks[] __initconst = { }, { .callback = set_force_caps, + .ident = "Acer Aspire Switch V 10 SW5-017", + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Acer"), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "SW5-017"), + }, + .driver_data = (void *)ACER_CAP_KBD_DOCK, + }, + { + .callback = set_force_caps, .ident = "Acer One 10 (S1003)", .matches = { DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Acer"), diff --git a/drivers/platform/x86/amd/pmc.c b/drivers/platform/x86/amd/pmc.c index ce859b300712..ef4ae977b8e0 100644 --- a/drivers/platform/x86/amd/pmc.c +++ b/drivers/platform/x86/amd/pmc.c @@ -276,7 +276,6 @@ static const struct file_operations amd_pmc_stb_debugfs_fops_v2 = { .release = amd_pmc_stb_debugfs_release_v2, }; -#if defined(CONFIG_SUSPEND) || defined(CONFIG_DEBUG_FS) static int amd_pmc_setup_smu_logging(struct amd_pmc_dev *dev) { if (dev->cpu_id == AMD_CPU_ID_PCO) { @@ -351,7 +350,6 @@ static int get_metrics_table(struct amd_pmc_dev *pdev, struct smu_metrics *table memcpy_fromio(table, pdev->smu_virt_addr, sizeof(struct smu_metrics)); return 0; } -#endif /* CONFIG_SUSPEND || CONFIG_DEBUG_FS */ #ifdef CONFIG_SUSPEND static void amd_pmc_validate_deepest(struct amd_pmc_dev *pdev) @@ -663,6 +661,13 @@ static int amd_pmc_verify_czn_rtc(struct amd_pmc_dev *pdev, u32 *arg) struct rtc_time tm; int rc; + /* we haven't yet read SMU version */ + if (!pdev->major) { + rc = amd_pmc_get_smu_version(pdev); + if (rc) + return rc; + } + if (pdev->major < 64 || (pdev->major == 64 && pdev->minor < 53)) return 0; @@ -957,6 +962,7 @@ static const struct acpi_device_id amd_pmc_acpi_ids[] = { {"AMDI0006", 0}, {"AMDI0007", 0}, {"AMDI0008", 0}, + {"AMDI0009", 0}, {"AMD0004", 0}, {"AMD0005", 0}, { } diff --git 
a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c index 613c45c9fbe3..c685a705b73d 100644 --- a/drivers/platform/x86/asus-nb-wmi.c +++ b/drivers/platform/x86/asus-nb-wmi.c @@ -464,6 +464,15 @@ static const struct dmi_system_id asus_quirks[] = { }, .driver_data = &quirk_asus_tablet_mode, }, + { + .callback = dmi_matched, + .ident = "ASUS ROG FLOW X16", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_PRODUCT_NAME, "GV601R"), + }, + .driver_data = &quirk_asus_tablet_mode, + }, {}, }; diff --git a/drivers/platform/x86/asus-wmi.c b/drivers/platform/x86/asus-wmi.c index 6e8e093f96b3..872efc1d5b36 100644 --- a/drivers/platform/x86/asus-wmi.c +++ b/drivers/platform/x86/asus-wmi.c @@ -1738,6 +1738,8 @@ static void asus_wmi_set_xusb2pr(struct asus_wmi *asus) pci_write_config_dword(xhci_pdev, USB_INTEL_XUSB2PR, cpu_to_le32(ports_available)); + pci_dev_put(xhci_pdev); + pr_info("set USB_INTEL_XUSB2PR old: 0x%04x, new: 0x%04x\n", orig_ports_available, ports_available); } diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c index 627a6d0eaf83..0a99058be813 100644 --- a/drivers/platform/x86/hp-wmi.c +++ b/drivers/platform/x86/hp-wmi.c @@ -90,6 +90,7 @@ enum hp_wmi_event_ids { HPWMI_PEAKSHIFT_PERIOD = 0x0F, HPWMI_BATTERY_CHARGE_PERIOD = 0x10, HPWMI_SANITIZATION_MODE = 0x17, + HPWMI_SMART_EXPERIENCE_APP = 0x21, }; /* @@ -859,6 +860,8 @@ static void hp_wmi_notify(u32 value, void *context) break; case HPWMI_SANITIZATION_MODE: break; + case HPWMI_SMART_EXPERIENCE_APP: + break; default: pr_info("Unknown event_id - %d - 0x%x\n", event_id, event_data); break; @@ -1300,8 +1303,16 @@ static int __init hp_wmi_bios_setup(struct platform_device *device) wwan_rfkill = NULL; rfkill2_count = 0; - if (hp_wmi_rfkill_setup(device)) - hp_wmi_rfkill2_setup(device); + /* + * In pre-2009 BIOS, command 1Bh return 0x4 to indicate that + * BIOS no longer controls the power for the wireless + * devices. All features supported by this command will no + * longer be supported. + */ + if (!hp_wmi_bios_2009_later()) { + if (hp_wmi_rfkill_setup(device)) + hp_wmi_rfkill2_setup(device); + } err = hp_wmi_hwmon_init(); diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c index abd0c81d62c4..3ea8fc6a9ca3 100644 --- a/drivers/platform/x86/ideapad-laptop.c +++ b/drivers/platform/x86/ideapad-laptop.c @@ -136,6 +136,7 @@ struct ideapad_private { bool dytc : 1; bool fan_mode : 1; bool fn_lock : 1; + bool set_fn_lock_led : 1; bool hw_rfkill_switch : 1; bool kbd_bl : 1; bool touchpad_ctrl_via_ec : 1; @@ -154,7 +155,21 @@ MODULE_PARM_DESC(no_bt_rfkill, "No rfkill for bluetooth."); static bool allow_v4_dytc; module_param(allow_v4_dytc, bool, 0444); -MODULE_PARM_DESC(allow_v4_dytc, "Enable DYTC version 4 platform-profile support."); +MODULE_PARM_DESC(allow_v4_dytc, + "Enable DYTC version 4 platform-profile support. " + "If you need this please report this to: platform-driver-x86@vger.kernel.org"); + +static bool hw_rfkill_switch; +module_param(hw_rfkill_switch, bool, 0444); +MODULE_PARM_DESC(hw_rfkill_switch, + "Enable rfkill support for laptops with a hw on/off wifi switch/slider. " + "If you need this please report this to: platform-driver-x86@vger.kernel.org"); + +static bool set_fn_lock_led; +module_param(set_fn_lock_led, bool, 0444); +MODULE_PARM_DESC(set_fn_lock_led, + "Enable driver based updates of the fn-lock LED on fn-lock changes. 
" + "If you need this please report this to: platform-driver-x86@vger.kernel.org"); /* * ACPI Helpers @@ -1501,6 +1516,9 @@ static void ideapad_wmi_notify(u32 value, void *context) ideapad_input_report(priv, value); break; case 208: + if (!priv->features.set_fn_lock_led) + break; + if (!eval_hals(priv->adev->handle, &result)) { bool state = test_bit(HALS_FNLOCK_STATE_BIT, &result); @@ -1514,6 +1532,18 @@ static void ideapad_wmi_notify(u32 value, void *context) } #endif +/* On some models we need to call exec_sals(SALS_FNLOCK_ON/OFF) to set the LED */ +static const struct dmi_system_id set_fn_lock_led_list[] = { + { + /* https://bugzilla.kernel.org/show_bug.cgi?id=212671 */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo Legion R7000P2020H"), + } + }, + {} +}; + /* * Some ideapads have a hardware rfkill switch, but most do not have one. * Reading VPCCMD_R_RF always results in 0 on models without a hardware rfkill, @@ -1533,15 +1563,41 @@ static const struct dmi_system_id hw_rfkill_list[] = { {} }; +static const struct dmi_system_id no_touchpad_switch_list[] = { + { + .ident = "Lenovo Yoga 3 Pro 1370", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo YOGA 3"), + }, + }, + { + .ident = "ZhaoYang K4e-IML", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_VERSION, "ZhaoYang K4e-IML"), + }, + }, + {} +}; + static void ideapad_check_features(struct ideapad_private *priv) { acpi_handle handle = priv->adev->handle; unsigned long val; - priv->features.hw_rfkill_switch = dmi_check_system(hw_rfkill_list); + priv->features.set_fn_lock_led = + set_fn_lock_led || dmi_check_system(set_fn_lock_led_list); + priv->features.hw_rfkill_switch = + hw_rfkill_switch || dmi_check_system(hw_rfkill_list); /* Most ideapads with ELAN0634 touchpad don't use EC touchpad switch */ - priv->features.touchpad_ctrl_via_ec = !acpi_dev_present("ELAN0634", NULL, -1); + if (acpi_dev_present("ELAN0634", NULL, -1)) + priv->features.touchpad_ctrl_via_ec = 0; + else if (dmi_check_system(no_touchpad_switch_list)) + priv->features.touchpad_ctrl_via_ec = 0; + else + priv->features.touchpad_ctrl_via_ec = 1; if (!read_ec_data(handle, VPCCMD_R_FAN, &val)) priv->features.fan_mode = true; diff --git a/drivers/platform/x86/intel/hid.c b/drivers/platform/x86/intel/hid.c index 79cff1fc675c..b6313ecd190c 100644 --- a/drivers/platform/x86/intel/hid.c +++ b/drivers/platform/x86/intel/hid.c @@ -27,6 +27,9 @@ static const struct acpi_device_id intel_hid_ids[] = { {"INTC1051", 0}, {"INTC1054", 0}, {"INTC1070", 0}, + {"INTC1076", 0}, + {"INTC1077", 0}, + {"INTC1078", 0}, {"", 0}, }; MODULE_DEVICE_TABLE(acpi, intel_hid_ids); diff --git a/drivers/platform/x86/intel/pmc/core.c b/drivers/platform/x86/intel/pmc/core.c index a1fe1e0dcf4a..17ec5825d13d 100644 --- a/drivers/platform/x86/intel/pmc/core.c +++ b/drivers/platform/x86/intel/pmc/core.c @@ -1914,6 +1914,8 @@ static const struct x86_cpu_id intel_pmc_core_ids[] = { X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_N, &tgl_reg_map), X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE, &adl_reg_map), X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_P, &tgl_reg_map), + X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE, &adl_reg_map), + X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_S, &adl_reg_map), {} }; diff --git a/drivers/platform/x86/intel/pmc/pltdrv.c b/drivers/platform/x86/intel/pmc/pltdrv.c index 15ca8afdd973..ddfba38c2104 100644 --- a/drivers/platform/x86/intel/pmc/pltdrv.c +++ b/drivers/platform/x86/intel/pmc/pltdrv.c @@ -18,6 +18,8 
@@ #include <asm/cpu_device_id.h> #include <asm/intel-family.h> +#include <xen/xen.h> + static void intel_pmc_core_release(struct device *dev) { kfree(dev); @@ -53,6 +55,13 @@ static int __init pmc_core_platform_init(void) if (acpi_dev_present("INT33A1", NULL, -1)) return -ENODEV; + /* + * Skip forcefully attaching the device for VMs. Make an exception for + * Xen dom0, which does have full hardware access. + */ + if (cpu_feature_enabled(X86_FEATURE_HYPERVISOR) && !xen_initial_domain()) + return -ENODEV; + if (!x86_match_cpu(intel_pmc_core_platform_ids)) return -ENODEV; diff --git a/drivers/platform/x86/intel/pmt/class.c b/drivers/platform/x86/intel/pmt/class.c index 53d7fd2943b4..46598dcb634a 100644 --- a/drivers/platform/x86/intel/pmt/class.c +++ b/drivers/platform/x86/intel/pmt/class.c @@ -9,6 +9,7 @@ */ #include <linux/kernel.h> +#include <linux/io-64-nonatomic-lo-hi.h> #include <linux/module.h> #include <linux/mm.h> #include <linux/pci.h> @@ -19,6 +20,7 @@ #define PMT_XA_START 0 #define PMT_XA_MAX INT_MAX #define PMT_XA_LIMIT XA_LIMIT(PMT_XA_START, PMT_XA_MAX) +#define GUID_SPR_PUNIT 0x9956f43f bool intel_pmt_is_early_client_hw(struct device *dev) { @@ -33,6 +35,29 @@ bool intel_pmt_is_early_client_hw(struct device *dev) } EXPORT_SYMBOL_GPL(intel_pmt_is_early_client_hw); +static inline int +pmt_memcpy64_fromio(void *to, const u64 __iomem *from, size_t count) +{ + int i, remain; + u64 *buf = to; + + if (!IS_ALIGNED((unsigned long)from, 8)) + return -EFAULT; + + for (i = 0; i < count/8; i++) + buf[i] = readq(&from[i]); + + /* Copy any remaining bytes */ + remain = count % 8; + if (remain) { + u64 tmp = readq(&from[i]); + + memcpy(&buf[i], &tmp, remain); + } + + return count; +} + /* * sysfs */ @@ -54,7 +79,11 @@ intel_pmt_read(struct file *filp, struct kobject *kobj, if (count > entry->size - off) count = entry->size - off; - memcpy_fromio(buf, entry->base + off, count); + if (entry->guid == GUID_SPR_PUNIT) + /* PUNIT on SPR only supports aligned 64-bit read */ + count = pmt_memcpy64_fromio(buf, entry->base + off, count); + else + memcpy_fromio(buf, entry->base + off, count); return count; } diff --git a/drivers/platform/x86/p2sb.c b/drivers/platform/x86/p2sb.c index 384d0962ae93..1cf2471d54dd 100644 --- a/drivers/platform/x86/p2sb.c +++ b/drivers/platform/x86/p2sb.c @@ -19,26 +19,23 @@ #define P2SBC 0xe0 #define P2SBC_HIDE BIT(8) +#define P2SB_DEVFN_DEFAULT PCI_DEVFN(31, 1) + static const struct x86_cpu_id p2sb_cpu_ids[] = { X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT, PCI_DEVFN(13, 0)), - X86_MATCH_INTEL_FAM6_MODEL(ATOM_GOLDMONT_D, PCI_DEVFN(31, 1)), - X86_MATCH_INTEL_FAM6_MODEL(ATOM_SILVERMONT_D, PCI_DEVFN(31, 1)), - X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE, PCI_DEVFN(31, 1)), - X86_MATCH_INTEL_FAM6_MODEL(KABYLAKE_L, PCI_DEVFN(31, 1)), - X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE, PCI_DEVFN(31, 1)), - X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_L, PCI_DEVFN(31, 1)), {} }; static int p2sb_get_devfn(unsigned int *devfn) { + unsigned int fn = P2SB_DEVFN_DEFAULT; const struct x86_cpu_id *id; id = x86_match_cpu(p2sb_cpu_ids); - if (!id) - return -ENODEV; + if (id) + fn = (unsigned int)id->driver_data; - *devfn = (unsigned int)id->driver_data; + *devfn = fn; return 0; } diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c index 6a823b850a77..8476dfef4e62 100644 --- a/drivers/platform/x86/thinkpad_acpi.c +++ b/drivers/platform/x86/thinkpad_acpi.c @@ -263,6 +263,8 @@ enum tpacpi_hkey_event_t { #define TPACPI_DBG_BRGHT 0x0020 #define TPACPI_DBG_MIXER 0x0040 +#define FAN_NOT_PRESENT 
65535 + #define strlencmp(a, b) (strncmp((a), (b), strlen(b))) @@ -4495,6 +4497,14 @@ static const struct dmi_system_id fwbug_list[] __initconst = { DMI_MATCH(DMI_PRODUCT_NAME, "21A0"), } }, + { + .ident = "P14s Gen2 AMD", + .driver_data = &quirk_s2idle_bug, + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "LENOVO"), + DMI_MATCH(DMI_PRODUCT_NAME, "21A1"), + } + }, {} }; @@ -8876,7 +8886,7 @@ static int __init fan_init(struct ibm_init_struct *iibm) /* Try and probe the 2nd fan */ tp_features.second_fan = 1; /* needed for get_speed to work */ res = fan2_get_speed(&speed); - if (res >= 0) { + if (res >= 0 && speed != FAN_NOT_PRESENT) { /* It responded - so let's assume it's there */ tp_features.second_fan = 1; tp_features.second_fan_ctl = 1; diff --git a/drivers/platform/x86/touchscreen_dmi.c b/drivers/platform/x86/touchscreen_dmi.c index bc97bfa8e8a6..baae3120efd0 100644 --- a/drivers/platform/x86/touchscreen_dmi.c +++ b/drivers/platform/x86/touchscreen_dmi.c @@ -770,6 +770,22 @@ static const struct ts_dmi_data predia_basic_data = { .properties = predia_basic_props, }; +static const struct property_entry rca_cambio_w101_v2_props[] = { + PROPERTY_ENTRY_U32("touchscreen-min-x", 4), + PROPERTY_ENTRY_U32("touchscreen-min-y", 20), + PROPERTY_ENTRY_U32("touchscreen-size-x", 1644), + PROPERTY_ENTRY_U32("touchscreen-size-y", 874), + PROPERTY_ENTRY_BOOL("touchscreen-swapped-x-y"), + PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-rca-cambio-w101-v2.fw"), + PROPERTY_ENTRY_U32("silead,max-fingers", 10), + { } +}; + +static const struct ts_dmi_data rca_cambio_w101_v2_data = { + .acpi_name = "MSSL1680:00", + .properties = rca_cambio_w101_v2_props, +}; + static const struct property_entry rwc_nanote_p8_props[] = { PROPERTY_ENTRY_U32("touchscreen-min-y", 46), PROPERTY_ENTRY_U32("touchscreen-size-x", 1728), @@ -1410,6 +1426,15 @@ const struct dmi_system_id touchscreen_dmi_table[] = { }, }, { + /* RCA Cambio W101 v2 */ + /* https://github.com/onitake/gsl-firmware/discussions/193 */ + .driver_data = (void *)&rca_cambio_w101_v2_data, + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "RCA"), + DMI_MATCH(DMI_PRODUCT_NAME, "W101SA23T1"), + }, + }, + { /* RWC NANOTE P8 */ .driver_data = (void *)&rwc_nanote_p8_data, .matches = { diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c index 610413b4e9ca..58cc2bae2f8a 100644 --- a/drivers/rtc/rtc-cmos.c +++ b/drivers/rtc/rtc-cmos.c @@ -1233,6 +1233,9 @@ static u32 rtc_handler(void *context) static inline void rtc_wake_setup(struct device *dev) { + if (acpi_disabled) + return; + acpi_install_fixed_event_handler(ACPI_EVENT_RTC, rtc_handler, dev); /* * After the RTC handler is installed, the Fixed_RTC event should @@ -1286,7 +1289,6 @@ static void cmos_wake_setup(struct device *dev) use_acpi_alarm_quirks(); - rtc_wake_setup(dev); acpi_rtc_info.wake_on = rtc_wake_on; acpi_rtc_info.wake_off = rtc_wake_off; @@ -1344,6 +1346,9 @@ static void cmos_check_acpi_rtc_status(struct device *dev, { } +static void rtc_wake_setup(struct device *dev) +{ +} #endif #ifdef CONFIG_PNP @@ -1354,6 +1359,8 @@ static int cmos_pnp_probe(struct pnp_dev *pnp, const struct pnp_device_id *id) { int irq, ret; + cmos_wake_setup(&pnp->dev); + if (pnp_port_start(pnp, 0) == 0x70 && !pnp_irq_valid(pnp, 0)) { irq = 0; #ifdef CONFIG_X86 @@ -1372,7 +1379,7 @@ static int cmos_pnp_probe(struct pnp_dev *pnp, const struct pnp_device_id *id) if (ret) return ret; - cmos_wake_setup(&pnp->dev); + rtc_wake_setup(&pnp->dev); return 0; } @@ -1461,6 +1468,7 @@ static int __init cmos_platform_probe(struct platform_device 
*pdev) int irq, ret; cmos_of_init(pdev); + cmos_wake_setup(&pdev->dev); if (RTC_IOMAPPED) resource = platform_get_resource(pdev, IORESOURCE_IO, 0); @@ -1474,7 +1482,7 @@ static int __init cmos_platform_probe(struct platform_device *pdev) if (ret) return ret; - cmos_wake_setup(&pdev->dev); + rtc_wake_setup(&pdev->dev); return 0; } diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c index 93b80da60277..b392b9f5482e 100644 --- a/drivers/s390/block/dcssblk.c +++ b/drivers/s390/block/dcssblk.c @@ -636,6 +636,7 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char dev_info->gd->minors = DCSSBLK_MINORS_PER_DISK; dev_info->gd->fops = &dcssblk_devops; dev_info->gd->private_data = dev_info; + dev_info->gd->flags |= GENHD_FL_NO_PART; blk_queue_logical_block_size(dev_info->gd->queue, 4096); blk_queue_flag_set(QUEUE_FLAG_DAX, dev_info->gd->queue); diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c index 913b6ddd040b..c7db95398500 100644 --- a/drivers/s390/cio/css.c +++ b/drivers/s390/cio/css.c @@ -753,13 +753,9 @@ static int __unset_online(struct device *dev, void *data) { struct idset *set = data; struct subchannel *sch = to_subchannel(dev); - struct ccw_device *cdev; - if (sch->st == SUBCHANNEL_TYPE_IO) { - cdev = sch_get_cdev(sch); - if (cdev && cdev->online) - idset_sch_del(set, sch->schid); - } + if (sch->st == SUBCHANNEL_TYPE_IO && sch->config.ena) + idset_sch_del(set, sch->schid); return 0; } diff --git a/drivers/s390/crypto/vfio_ap_private.h b/drivers/s390/crypto/vfio_ap_private.h index 2eddd5f34ed3..976a65f32e7d 100644 --- a/drivers/s390/crypto/vfio_ap_private.h +++ b/drivers/s390/crypto/vfio_ap_private.h @@ -52,7 +52,7 @@ struct ap_matrix_dev { struct mutex guests_lock; /* serializes access to each KVM guest */ struct mdev_parent parent; struct mdev_type mdev_type; - struct mdev_type *mdev_types[]; + struct mdev_type *mdev_types[1]; }; extern struct ap_matrix_dev *matrix_dev; diff --git a/drivers/s390/crypto/zcrypt_msgtype6.c b/drivers/s390/crypto/zcrypt_msgtype6.c index 8fb34b8eeb18..5ad251477593 100644 --- a/drivers/s390/crypto/zcrypt_msgtype6.c +++ b/drivers/s390/crypto/zcrypt_msgtype6.c @@ -342,7 +342,10 @@ static int xcrb_msg_to_type6cprb_msgx(bool userspace, struct ap_message *ap_msg, }; struct { struct type6_hdr hdr; - struct CPRBX cprbx; + union { + struct CPRBX cprbx; + DECLARE_FLEX_ARRAY(u8, userdata); + }; } __packed * msg = ap_msg->msg; int rcblen = CEIL4(xcrb->request_control_blk_length); @@ -403,7 +406,8 @@ static int xcrb_msg_to_type6cprb_msgx(bool userspace, struct ap_message *ap_msg, msg->hdr.fromcardlen2 = xcrb->reply_data_length; /* prepare CPRB */ - if (z_copy_from_user(userspace, &msg->cprbx, xcrb->request_control_blk_addr, + if (z_copy_from_user(userspace, msg->userdata, + xcrb->request_control_blk_addr, xcrb->request_control_blk_length)) return -EFAULT; if (msg->cprbx.cprb_len + sizeof(msg->hdr.function_code) > @@ -469,9 +473,14 @@ static int xcrb_msg_to_type6_ep11cprb_msgx(bool userspace, struct ap_message *ap struct { struct type6_hdr hdr; - struct ep11_cprb cprbx; - unsigned char pld_tag; /* fixed value 0x30 */ - unsigned char pld_lenfmt; /* payload length format */ + union { + struct { + struct ep11_cprb cprbx; + unsigned char pld_tag; /* fixed value 0x30 */ + unsigned char pld_lenfmt; /* length format */ + } __packed; + DECLARE_FLEX_ARRAY(u8, userdata); + }; } __packed * msg = ap_msg->msg; struct pld_hdr { @@ -500,7 +509,7 @@ static int xcrb_msg_to_type6_ep11cprb_msgx(bool userspace, struct ap_message *ap 
msg->hdr.fromcardlen1 = xcrb->resp_len; /* Import CPRB data from the ioctl input parameter */ - if (z_copy_from_user(userspace, &msg->cprbx.cprb_len, + if (z_copy_from_user(userspace, msg->userdata, (char __force __user *)xcrb->req, xcrb->req_len)) { return -EFAULT; } diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c index 19223b075568..ab3ea529cca7 100644 --- a/drivers/s390/scsi/zfcp_fsf.c +++ b/drivers/s390/scsi/zfcp_fsf.c @@ -884,7 +884,7 @@ static int zfcp_fsf_req_send(struct zfcp_fsf_req *req) const bool is_srb = zfcp_fsf_req_is_status_read_buffer(req); struct zfcp_adapter *adapter = req->adapter; struct zfcp_qdio *qdio = adapter->qdio; - int req_id = req->req_id; + unsigned long req_id = req->req_id; zfcp_reqlist_add(adapter->req_list, req); diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c index 00684e11976b..1a0c0b7289d2 100644 --- a/drivers/scsi/ibmvscsi/ibmvfc.c +++ b/drivers/scsi/ibmvscsi/ibmvfc.c @@ -708,8 +708,13 @@ static void ibmvfc_init_host(struct ibmvfc_host *vhost) memset(vhost->async_crq.msgs.async, 0, PAGE_SIZE); vhost->async_crq.cur = 0; - list_for_each_entry(tgt, &vhost->targets, queue) - ibmvfc_del_tgt(tgt); + list_for_each_entry(tgt, &vhost->targets, queue) { + if (vhost->client_migrated) + tgt->need_login = 1; + else + ibmvfc_del_tgt(tgt); + } + scsi_block_requests(vhost->host); ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT); vhost->job_step = ibmvfc_npiv_login; @@ -3235,9 +3240,12 @@ static void ibmvfc_handle_crq(struct ibmvfc_crq *crq, struct ibmvfc_host *vhost, /* We need to re-setup the interpartition connection */ dev_info(vhost->dev, "Partition migrated, Re-enabling adapter\n"); vhost->client_migrated = 1; + + scsi_block_requests(vhost->host); ibmvfc_purge_requests(vhost, DID_REQUEUE); - ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN); + ibmvfc_set_host_state(vhost, IBMVFC_LINK_DOWN); ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_REENABLE); + wake_up(&vhost->work_wait_q); } else if (crq->format == IBMVFC_PARTNER_FAILED || crq->format == IBMVFC_PARTNER_DEREGISTER) { dev_err(vhost->dev, "Host partner adapter deregistered or failed (rc=%d)\n", crq->format); ibmvfc_purge_requests(vhost, DID_ERROR); diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c index ac0c7ccf2eae..852b025e2fec 100644 --- a/drivers/scsi/lpfc/lpfc_bsg.c +++ b/drivers/scsi/lpfc/lpfc_bsg.c @@ -2582,7 +2582,7 @@ static int lpfcdiag_loop_self_unreg(struct lpfc_hba *phba, uint16_t rpi) * * This function obtains the transmit and receive ids required to send * an unsolicited ct command with a payload. A special lpfc FsType and CmdRsp - * flags are used to the unsolicted response handler is able to process + * flags are used to the unsolicited response handler is able to process * the ct command sent on the same port. **/ static int lpfcdiag_loop_get_xri(struct lpfc_hba *phba, uint16_t rpi, @@ -2874,7 +2874,7 @@ out: * @len: Number of data bytes * * This function allocates and posts a data buffer of sufficient size to receive - * an unsolicted CT command. + * an unsolicited CT command. 
**/ static int lpfcdiag_sli3_loop_post_rxbufs(struct lpfc_hba *phba, uint16_t rxxri, size_t len) diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c index 75fd2bfc212b..e941a99aa965 100644 --- a/drivers/scsi/lpfc/lpfc_ct.c +++ b/drivers/scsi/lpfc/lpfc_ct.c @@ -90,7 +90,7 @@ lpfc_ct_ignore_hbq_buffer(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq, get_job_ulpstatus(phba, piocbq)); } lpfc_printf_log(phba, KERN_INFO, LOG_ELS, - "0145 Ignoring unsolicted CT HBQ Size:%d " + "0145 Ignoring unsolicited CT HBQ Size:%d " "status = x%x\n", size, get_job_ulpstatus(phba, piocbq)); } diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index b49c39569386..b535f1fd3010 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c @@ -4812,7 +4812,7 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev) rc = lpfc_vmid_res_alloc(phba, vport); if (rc) - goto out; + goto out_put_shost; /* Initialize all internally managed lists. */ INIT_LIST_HEAD(&vport->fc_nodes); @@ -4830,16 +4830,17 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev) error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev); if (error) - goto out_put_shost; + goto out_free_vmid; spin_lock_irq(&phba->port_list_lock); list_add_tail(&vport->listentry, &phba->port_list); spin_unlock_irq(&phba->port_list_lock); return vport; -out_put_shost: +out_free_vmid: kfree(vport->vmid); bitmap_free(vport->vmid_priority_range); +out_put_shost: scsi_host_put(shost); out: return NULL; diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c index 9be4ba61a076..d265a2d9d082 100644 --- a/drivers/scsi/megaraid/megaraid_sas_base.c +++ b/drivers/scsi/megaraid/megaraid_sas_base.c @@ -5874,10 +5874,6 @@ fallback: static int megasas_get_device_list(struct megasas_instance *instance) { - memset(instance->pd_list, 0, - (MEGASAS_MAX_PD * sizeof(struct megasas_pd_list))); - memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS); - if (instance->enable_fw_dev_list) { if (megasas_host_device_list_query(instance, true)) return FAILED; @@ -7220,7 +7216,7 @@ int megasas_alloc_ctrl_dma_buffers(struct megasas_instance *instance) if (!fusion->ioc_init_request) { dev_err(&pdev->dev, - "Failed to allocate PD list buffer\n"); + "Failed to allocate ioc init request\n"); return -ENOMEM; } @@ -7439,7 +7435,6 @@ static inline void megasas_init_ctrl_params(struct megasas_instance *instance) (instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY)) instance->flag_ieee = 1; - megasas_dbg_lvl = 0; instance->flag = 0; instance->unload = 1; instance->last_time = 0; @@ -8762,33 +8757,26 @@ static int megasas_update_device_list(struct megasas_instance *instance, int event_type) { - int dcmd_ret = DCMD_SUCCESS; + int dcmd_ret; if (instance->enable_fw_dev_list) { - dcmd_ret = megasas_host_device_list_query(instance, false); - if (dcmd_ret != DCMD_SUCCESS) - goto out; + return megasas_host_device_list_query(instance, false); } else { if (event_type & SCAN_PD_CHANNEL) { dcmd_ret = megasas_get_pd_list(instance); - if (dcmd_ret != DCMD_SUCCESS) - goto out; + return dcmd_ret; } if (event_type & SCAN_VD_CHANNEL) { if (!instance->requestorId || megasas_get_ld_vf_affiliation(instance, 0)) { - dcmd_ret = megasas_ld_list_query(instance, + return megasas_ld_list_query(instance, MR_LD_QUERY_TYPE_EXPOSED_TO_HOST); - if (dcmd_ret != DCMD_SUCCESS) - goto out; } } } - -out: - return dcmd_ret; + return DCMD_SUCCESS; } /** @@ -8918,7 +8906,7 @@ 
megasas_aen_polling(struct work_struct *work) sdev1 = scsi_device_lookup(instance->host, MEGASAS_MAX_PD_CHANNELS + (ld_target_id / MEGASAS_MAX_DEV_PER_CHANNEL), - (ld_target_id - MEGASAS_MAX_DEV_PER_CHANNEL), + (ld_target_id % MEGASAS_MAX_DEV_PER_CHANNEL), 0); if (sdev1) megasas_remove_scsi_device(sdev1); @@ -9016,6 +9004,7 @@ static int __init megasas_init(void) */ pr_info("megasas: %s\n", MEGASAS_VERSION); + megasas_dbg_lvl = 0; support_poll_for_event = 2; support_device_change = 1; support_nvme_encapsulation = true; diff --git a/drivers/scsi/mpi3mr/Kconfig b/drivers/scsi/mpi3mr/Kconfig index 8997531940c2..f48740cd5b95 100644 --- a/drivers/scsi/mpi3mr/Kconfig +++ b/drivers/scsi/mpi3mr/Kconfig @@ -4,5 +4,6 @@ config SCSI_MPI3MR tristate "Broadcom MPI3 Storage Controller Device Driver" depends on PCI && SCSI select BLK_DEV_BSGLIB + select SCSI_SAS_ATTRS help MPI3 based Storage & RAID Controllers Driver. diff --git a/drivers/scsi/mpi3mr/mpi3mr_os.c b/drivers/scsi/mpi3mr/mpi3mr_os.c index f77ee4051b00..3306de7170f6 100644 --- a/drivers/scsi/mpi3mr/mpi3mr_os.c +++ b/drivers/scsi/mpi3mr/mpi3mr_os.c @@ -3265,7 +3265,8 @@ void mpi3mr_process_op_reply_desc(struct mpi3mr_ioc *mrioc, } if (scmd->result != (DID_OK << 16) && (scmd->cmnd[0] != ATA_12) && - (scmd->cmnd[0] != ATA_16)) { + (scmd->cmnd[0] != ATA_16) && + mrioc->logging_level & MPI3_DEBUG_SCSI_ERROR) { ioc_info(mrioc, "%s :scmd->result 0x%x\n", __func__, scmd->result); scsi_print_command(scmd); diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c index 8b22df8c1792..4e981ccaac41 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_base.c +++ b/drivers/scsi/mpt3sas/mpt3sas_base.c @@ -2993,7 +2993,7 @@ _base_config_dma_addressing(struct MPT3SAS_ADAPTER *ioc, struct pci_dev *pdev) u64 coherent_dma_mask, dma_mask; if (ioc->is_mcpu_endpoint || sizeof(dma_addr_t) == 4 || - dma_get_required_mask(&pdev->dev) <= 32) { + dma_get_required_mask(&pdev->dev) <= DMA_BIT_MASK(32)) { ioc->dma_mask = 32; coherent_dma_mask = dma_mask = DMA_BIT_MASK(32); /* Set 63 bit DMA mask for all SAS3 and SAS35 controllers */ diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c index 2ff2fac1e403..7a7d63aa90e2 100644 --- a/drivers/scsi/pm8001/pm8001_init.c +++ b/drivers/scsi/pm8001/pm8001_init.c @@ -99,6 +99,7 @@ static void pm8001_map_queues(struct Scsi_Host *shost) static struct scsi_host_template pm8001_sht = { .module = THIS_MODULE, .name = DRV_NAME, + .proc_name = DRV_NAME, .queuecommand = sas_queuecommand, .dma_need_drain = ata_scsi_dma_need_drain, .target_alloc = sas_target_alloc, diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c index fa1fcbfb946f..b67ad30d56e6 100644 --- a/drivers/scsi/qla2xxx/qla_attr.c +++ b/drivers/scsi/qla2xxx/qla_attr.c @@ -951,9 +951,9 @@ qla2x00_sysfs_read_dcbx_tlv(struct file *filp, struct kobject *kobj, if (!capable(CAP_SYS_ADMIN) || off != 0 || count > DCBX_TLV_DATA_SIZE) return 0; + mutex_lock(&vha->hw->optrom_mutex); if (ha->dcbx_tlv) goto do_read; - mutex_lock(&vha->hw->optrom_mutex); if (qla2x00_chip_is_down(vha)) { mutex_unlock(&vha->hw->optrom_mutex); return 0; @@ -3330,11 +3330,34 @@ struct fc_function_template qla2xxx_transport_vport_functions = { .bsg_timeout = qla24xx_bsg_timeout, }; +static uint +qla2x00_get_host_supported_speeds(scsi_qla_host_t *vha, uint speeds) +{ + uint supported_speeds = FC_PORTSPEED_UNKNOWN; + + if (speeds & FDMI_PORT_SPEED_64GB) + supported_speeds |= FC_PORTSPEED_64GBIT; + if (speeds & FDMI_PORT_SPEED_32GB) + 
supported_speeds |= FC_PORTSPEED_32GBIT; + if (speeds & FDMI_PORT_SPEED_16GB) + supported_speeds |= FC_PORTSPEED_16GBIT; + if (speeds & FDMI_PORT_SPEED_8GB) + supported_speeds |= FC_PORTSPEED_8GBIT; + if (speeds & FDMI_PORT_SPEED_4GB) + supported_speeds |= FC_PORTSPEED_4GBIT; + if (speeds & FDMI_PORT_SPEED_2GB) + supported_speeds |= FC_PORTSPEED_2GBIT; + if (speeds & FDMI_PORT_SPEED_1GB) + supported_speeds |= FC_PORTSPEED_1GBIT; + + return supported_speeds; +} + void qla2x00_init_host_attr(scsi_qla_host_t *vha) { struct qla_hw_data *ha = vha->hw; - u32 speeds = FC_PORTSPEED_UNKNOWN; + u32 speeds = 0, fdmi_speed = 0; fc_host_dev_loss_tmo(vha->host) = ha->port_down_retry_count; fc_host_node_name(vha->host) = wwn_to_u64(vha->node_name); @@ -3344,7 +3367,8 @@ qla2x00_init_host_attr(scsi_qla_host_t *vha) fc_host_max_npiv_vports(vha->host) = ha->max_npiv_vports; fc_host_npiv_vports_inuse(vha->host) = ha->cur_vport_count; - speeds = qla25xx_fdmi_port_speed_capability(ha); + fdmi_speed = qla25xx_fdmi_port_speed_capability(ha); + speeds = qla2x00_get_host_supported_speeds(vha, fdmi_speed); fc_host_supported_speeds(vha->host) = speeds; } diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c index 697fc57bc711..bebda917b138 100644 --- a/drivers/scsi/scsi_debug.c +++ b/drivers/scsi/scsi_debug.c @@ -1899,6 +1899,13 @@ static int resp_readcap16(struct scsi_cmnd *scp, arr[14] |= 0x40; } + /* + * Since the scsi_debug READ CAPACITY implementation always reports the + * total disk capacity, set RC BASIS = 1 for host-managed ZBC devices. + */ + if (devip->zmodel == BLK_ZONED_HM) + arr[12] |= 1 << 4; + arr[15] = sdebug_lowest_aligned & 0xff; if (have_dif_prot) { @@ -7316,8 +7323,12 @@ static int sdebug_add_host_helper(int per_host_idx) dev_set_name(&sdbg_host->dev, "adapter%d", sdebug_num_hosts); error = device_register(&sdbg_host->dev); - if (error) + if (error) { + spin_lock(&sdebug_host_list_lock); + list_del(&sdbg_host->host_list); + spin_unlock(&sdebug_host_list_lock); goto clean; + } ++sdebug_num_hosts; return 0; diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c index c95177ca6ed2..cac7c902cf70 100644 --- a/drivers/scsi/scsi_sysfs.c +++ b/drivers/scsi/scsi_sysfs.c @@ -828,6 +828,14 @@ store_state_field(struct device *dev, struct device_attribute *attr, } mutex_lock(&sdev->state_mutex); + switch (sdev->sdev_state) { + case SDEV_RUNNING: + case SDEV_OFFLINE: + break; + default: + mutex_unlock(&sdev->state_mutex); + return -EINVAL; + } if (sdev->sdev_state == SDEV_RUNNING && state == SDEV_RUNNING) { ret = 0; } else { diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c index cd3db9684e52..f473c002fa4d 100644 --- a/drivers/scsi/scsi_transport_iscsi.c +++ b/drivers/scsi/scsi_transport_iscsi.c @@ -231,7 +231,7 @@ iscsi_create_endpoint(int dd_size) dev_set_name(&ep->dev, "ep-%d", id); err = device_register(&ep->dev); if (err) - goto free_id; + goto put_dev; err = sysfs_create_group(&ep->dev.kobj, &iscsi_endpoint_group); if (err) @@ -245,10 +245,12 @@ unregister_dev: device_unregister(&ep->dev); return NULL; -free_id: +put_dev: mutex_lock(&iscsi_ep_idr_mutex); idr_remove(&iscsi_ep_idr, id); mutex_unlock(&iscsi_ep_idr_mutex); + put_device(&ep->dev); + return NULL; free_ep: kfree(ep); return NULL; @@ -766,7 +768,7 @@ iscsi_create_iface(struct Scsi_Host *shost, struct iscsi_transport *transport, err = device_register(&iface->dev); if (err) - goto free_iface; + goto put_dev; err = sysfs_create_group(&iface->dev.kobj, &iscsi_iface_group); if (err) @@ 
-780,9 +782,8 @@ unreg_iface: device_unregister(&iface->dev); return NULL; -free_iface: - put_device(iface->dev.parent); - kfree(iface); +put_dev: + put_device(&iface->dev); return NULL; } EXPORT_SYMBOL_GPL(iscsi_create_iface); @@ -1251,15 +1252,15 @@ iscsi_create_flashnode_sess(struct Scsi_Host *shost, int index, err = device_register(&fnode_sess->dev); if (err) - goto free_fnode_sess; + goto put_dev; if (dd_size) fnode_sess->dd_data = &fnode_sess[1]; return fnode_sess; -free_fnode_sess: - kfree(fnode_sess); +put_dev: + put_device(&fnode_sess->dev); return NULL; } EXPORT_SYMBOL_GPL(iscsi_create_flashnode_sess); @@ -1299,15 +1300,15 @@ iscsi_create_flashnode_conn(struct Scsi_Host *shost, err = device_register(&fnode_conn->dev); if (err) - goto free_fnode_conn; + goto put_dev; if (dd_size) fnode_conn->dd_data = &fnode_conn[1]; return fnode_conn; -free_fnode_conn: - kfree(fnode_conn); +put_dev: + put_device(&fnode_conn->dev); return NULL; } EXPORT_SYMBOL_GPL(iscsi_create_flashnode_conn); @@ -4815,7 +4816,7 @@ iscsi_register_transport(struct iscsi_transport *tt) dev_set_name(&priv->dev, "%s", tt->name); err = device_register(&priv->dev); if (err) - goto free_priv; + goto put_dev; err = sysfs_create_group(&priv->dev.kobj, &iscsi_transport_group); if (err) @@ -4850,8 +4851,8 @@ iscsi_register_transport(struct iscsi_transport *tt) unregister_dev: device_unregister(&priv->dev); return NULL; -free_priv: - kfree(priv); +put_dev: + put_device(&priv->dev); return NULL; } EXPORT_SYMBOL_GPL(iscsi_register_transport); diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c index 2f88c61216ee..74b99f2b0b74 100644 --- a/drivers/scsi/scsi_transport_sas.c +++ b/drivers/scsi/scsi_transport_sas.c @@ -722,12 +722,17 @@ int sas_phy_add(struct sas_phy *phy) int error; error = device_add(&phy->dev); - if (!error) { - transport_add_device(&phy->dev); - transport_configure_device(&phy->dev); + if (error) + return error; + + error = transport_add_device(&phy->dev); + if (error) { + device_del(&phy->dev); + return error; } + transport_configure_device(&phy->dev); - return error; + return 0; } EXPORT_SYMBOL(sas_phy_add); diff --git a/drivers/siox/siox-core.c b/drivers/siox/siox-core.c index 7c4f32d76966..561408583b2b 100644 --- a/drivers/siox/siox-core.c +++ b/drivers/siox/siox-core.c @@ -839,6 +839,8 @@ static struct siox_device *siox_device_add(struct siox_master *smaster, err_device_register: /* don't care to make the buffer smaller again */ + put_device(&sdevice->dev); + sdevice = NULL; err_buf_alloc: siox_master_unlock(smaster); diff --git a/drivers/slimbus/Kconfig b/drivers/slimbus/Kconfig index 2ed821f75816..a0fdf9d792cb 100644 --- a/drivers/slimbus/Kconfig +++ b/drivers/slimbus/Kconfig @@ -23,7 +23,7 @@ config SLIM_QCOM_CTRL config SLIM_QCOM_NGD_CTRL tristate "Qualcomm SLIMbus Satellite Non-Generic Device Component" depends on HAS_IOMEM && DMA_ENGINE && NET - depends on QCOM_RPROC_COMMON || COMPILE_TEST + depends on QCOM_RPROC_COMMON || (COMPILE_TEST && !QCOM_RPROC_COMMON) depends on ARCH_QCOM || COMPILE_TEST select QCOM_QMI_HELPERS select QCOM_PDR_HELPERS diff --git a/drivers/slimbus/stream.c b/drivers/slimbus/stream.c index 75f87b3d8b95..73a2aa362957 100644 --- a/drivers/slimbus/stream.c +++ b/drivers/slimbus/stream.c @@ -67,10 +67,10 @@ static const int slim_presence_rate_table[] = { 384000, 768000, 0, /* Reserved */ - 110250, - 220500, - 441000, - 882000, + 11025, + 22050, + 44100, + 88200, 176400, 352800, 705600, diff --git a/drivers/soc/imx/imx93-pd.c 
b/drivers/soc/imx/imx93-pd.c index 1f3d7039c1de..4d235c8c4924 100644 --- a/drivers/soc/imx/imx93-pd.c +++ b/drivers/soc/imx/imx93-pd.c @@ -135,11 +135,24 @@ static int imx93_pd_probe(struct platform_device *pdev) ret = pm_genpd_init(&domain->genpd, NULL, domain->init_off); if (ret) - return ret; + goto err_clk_unprepare; platform_set_drvdata(pdev, domain); - return of_genpd_add_provider_simple(np, &domain->genpd); + ret = of_genpd_add_provider_simple(np, &domain->genpd); + if (ret) + goto err_genpd_remove; + + return 0; + +err_genpd_remove: + pm_genpd_remove(&domain->genpd); + +err_clk_unprepare: + if (!domain->init_off) + clk_bulk_disable_unprepare(domain->num_clks, domain->clks); + + return ret; } static const struct of_device_id imx93_pd_ids[] = { diff --git a/drivers/soc/imx/soc-imx8m.c b/drivers/soc/imx/soc-imx8m.c index cc57a384d74d..28144c699b0c 100644 --- a/drivers/soc/imx/soc-imx8m.c +++ b/drivers/soc/imx/soc-imx8m.c @@ -11,6 +11,7 @@ #include <linux/platform_device.h> #include <linux/arm-smccc.h> #include <linux/of.h> +#include <linux/clk.h> #define REV_B1 0x21 @@ -56,6 +57,7 @@ static u32 __init imx8mq_soc_revision(void) void __iomem *ocotp_base; u32 magic; u32 rev; + struct clk *clk; np = of_find_compatible_node(NULL, NULL, "fsl,imx8mq-ocotp"); if (!np) @@ -63,6 +65,13 @@ static u32 __init imx8mq_soc_revision(void) ocotp_base = of_iomap(np, 0); WARN_ON(!ocotp_base); + clk = of_clk_get_by_name(np, NULL); + if (!clk) { + WARN_ON(!clk); + return 0; + } + + clk_prepare_enable(clk); /* * SOC revision on older imx8mq is not available in fuses so query @@ -79,6 +88,8 @@ static u32 __init imx8mq_soc_revision(void) soc_uid <<= 32; soc_uid |= readl_relaxed(ocotp_base + OCOTP_UID_LOW); + clk_disable_unprepare(clk); + clk_put(clk); iounmap(ocotp_base); of_node_put(np); diff --git a/drivers/soundwire/intel.c b/drivers/soundwire/intel.c index 244209358784..8c76541d553f 100644 --- a/drivers/soundwire/intel.c +++ b/drivers/soundwire/intel.c @@ -1513,6 +1513,7 @@ static int intel_link_probe(struct auxiliary_device *auxdev, bus->link_id = auxdev->id; bus->dev_num_ida_min = INTEL_DEV_NUM_IDA_MIN; + bus->clk_stop_timeout = 1; sdw_cdns_probe(cdns); diff --git a/drivers/soundwire/qcom.c b/drivers/soundwire/qcom.c index b33d5db494a5..cee2b2223141 100644 --- a/drivers/soundwire/qcom.c +++ b/drivers/soundwire/qcom.c @@ -344,6 +344,9 @@ static int qcom_swrm_cmd_fifo_wr_cmd(struct qcom_swrm_ctrl *swrm, u8 cmd_data, if (swrm_wait_for_wr_fifo_avail(swrm)) return SDW_CMD_FAIL_OTHER; + if (cmd_id == SWR_BROADCAST_CMD_ID) + reinit_completion(&swrm->broadcast); + /* Its assumed that write is okay as we do not get any status back */ swrm->reg_write(swrm, SWRM_CMD_FIFO_WR_CMD, val); @@ -377,6 +380,12 @@ static int qcom_swrm_cmd_fifo_rd_cmd(struct qcom_swrm_ctrl *swrm, val = swrm_get_packed_reg_val(&swrm->rcmd_id, len, dev_addr, reg_addr); + /* + * Check for outstanding cmd wrt. write fifo depth to avoid + * overflow as read will also increase write fifo cnt. 
+ */ + swrm_wait_for_wr_fifo_avail(swrm); + /* wait for FIFO RD to complete to avoid overflow */ usleep_range(100, 105); swrm->reg_write(swrm, SWRM_CMD_FIFO_RD_CMD, val); diff --git a/drivers/spi/spi-amd.c b/drivers/spi/spi-amd.c index e23121456c70..bfc3ab5f39ea 100644 --- a/drivers/spi/spi-amd.c +++ b/drivers/spi/spi-amd.c @@ -65,7 +65,7 @@ enum amd_spi_speed { F_16_66MHz, F_100MHz, F_800KHz, - SPI_SPD7, + SPI_SPD7 = 0x7, F_50MHz = 0x4, F_4MHz = 0x32, F_3_17MHz = 0x3F diff --git a/drivers/spi/spi-aspeed-smc.c b/drivers/spi/spi-aspeed-smc.c index a334e89add86..b90571396a60 100644 --- a/drivers/spi/spi-aspeed-smc.c +++ b/drivers/spi/spi-aspeed-smc.c @@ -398,7 +398,7 @@ static void aspeed_spi_get_windows(struct aspeed_spi *aspi, windows[cs].cs = cs; windows[cs].size = data->segment_end(aspi, reg_val) - data->segment_start(aspi, reg_val); - windows[cs].offset = cs ? windows[cs - 1].offset + windows[cs - 1].size : 0; + windows[cs].offset = data->segment_start(aspi, reg_val) - aspi->ahb_base_phy; dev_vdbg(aspi->dev, "CE%d offset=0x%.8x size=0x%x\n", cs, windows[cs].offset, windows[cs].size); } @@ -1163,7 +1163,7 @@ static const struct aspeed_spi_data ast2500_spi_data = { static const struct aspeed_spi_data ast2600_fmc_data = { .max_cs = 3, .hastype = false, - .mode_bits = SPI_RX_QUAD | SPI_RX_QUAD, + .mode_bits = SPI_RX_QUAD | SPI_TX_QUAD, .we0 = 16, .ctl0 = CE0_CTRL_REG, .timing = CE0_TIMING_COMPENSATION_REG, @@ -1178,7 +1178,7 @@ static const struct aspeed_spi_data ast2600_fmc_data = { static const struct aspeed_spi_data ast2600_spi_data = { .max_cs = 2, .hastype = false, - .mode_bits = SPI_RX_QUAD | SPI_RX_QUAD, + .mode_bits = SPI_RX_QUAD | SPI_TX_QUAD, .we0 = 16, .ctl0 = CE0_CTRL_REG, .timing = CE0_TIMING_COMPENSATION_REG, diff --git a/drivers/spi/spi-gxp.c b/drivers/spi/spi-gxp.c index 15b110183839..c900c2f39b57 100644 --- a/drivers/spi/spi-gxp.c +++ b/drivers/spi/spi-gxp.c @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0=or-later +// SPDX-License-Identifier: GPL-2.0-or-later /* Copyright (C) 2022 Hewlett-Packard Development Company, L.P. 
*/ #include <linux/iopoll.h> diff --git a/drivers/spi/spi-intel.c b/drivers/spi/spi-intel.c index 55f4ee2db002..3ac73691fbb5 100644 --- a/drivers/spi/spi-intel.c +++ b/drivers/spi/spi-intel.c @@ -52,17 +52,17 @@ #define FRACC 0x50 #define FREG(n) (0x54 + ((n) * 4)) -#define FREG_BASE_MASK 0x3fff +#define FREG_BASE_MASK GENMASK(14, 0) #define FREG_LIMIT_SHIFT 16 -#define FREG_LIMIT_MASK (0x03fff << FREG_LIMIT_SHIFT) +#define FREG_LIMIT_MASK GENMASK(30, 16) /* Offset is from @ispi->pregs */ #define PR(n) ((n) * 4) #define PR_WPE BIT(31) #define PR_LIMIT_SHIFT 16 -#define PR_LIMIT_MASK (0x3fff << PR_LIMIT_SHIFT) +#define PR_LIMIT_MASK GENMASK(30, 16) #define PR_RPE BIT(15) -#define PR_BASE_MASK 0x3fff +#define PR_BASE_MASK GENMASK(14, 0) /* Offsets are from @ispi->sregs */ #define SSFSTS_CTL 0x00 @@ -114,7 +114,7 @@ #define ERASE_OPCODE_SHIFT 8 #define ERASE_OPCODE_MASK (0xff << ERASE_OPCODE_SHIFT) #define ERASE_64K_OPCODE_SHIFT 16 -#define ERASE_64K_OPCODE_MASK (0xff << ERASE_OPCODE_SHIFT) +#define ERASE_64K_OPCODE_MASK (0xff << ERASE_64K_OPCODE_SHIFT) /* Flash descriptor fields */ #define FLVALSIG_MAGIC 0x0ff0a55a diff --git a/drivers/spi/spi-meson-spicc.c b/drivers/spi/spi-meson-spicc.c index bad201510a99..1b4195c54ee2 100644 --- a/drivers/spi/spi-meson-spicc.c +++ b/drivers/spi/spi-meson-spicc.c @@ -160,6 +160,7 @@ struct meson_spicc_device { struct clk *clk; struct spi_message *message; struct spi_transfer *xfer; + struct completion done; const struct meson_spicc_data *data; u8 *tx_buf; u8 *rx_buf; @@ -282,7 +283,7 @@ static irqreturn_t meson_spicc_irq(int irq, void *data) /* Disable all IRQs */ writel(0, spicc->base + SPICC_INTREG); - spi_finalize_current_transfer(spicc->master); + complete(&spicc->done); return IRQ_HANDLED; } @@ -386,6 +387,7 @@ static int meson_spicc_transfer_one(struct spi_master *master, struct spi_transfer *xfer) { struct meson_spicc_device *spicc = spi_master_get_devdata(master); + uint64_t timeout; /* Store current transfer */ spicc->xfer = xfer; @@ -410,13 +412,29 @@ static int meson_spicc_transfer_one(struct spi_master *master, /* Setup burst */ meson_spicc_setup_burst(spicc); + /* Setup wait for completion */ + reinit_completion(&spicc->done); + + /* For each byte we wait for 8 cycles of the SPI clock */ + timeout = 8LL * MSEC_PER_SEC * xfer->len; + do_div(timeout, xfer->speed_hz); + + /* Add 10us delay between each fifo bursts */ + timeout += ((xfer->len >> 4) * 10) / MSEC_PER_SEC; + + /* Increase it twice and add 200 ms tolerance */ + timeout += timeout + 200; + /* Start burst */ writel_bits_relaxed(SPICC_XCH, SPICC_XCH, spicc->base + SPICC_CONREG); /* Enable interrupts */ writel_relaxed(SPICC_TC_EN, spicc->base + SPICC_INTREG); - return 1; + if (!wait_for_completion_timeout(&spicc->done, msecs_to_jiffies(timeout))) + return -ETIMEDOUT; + + return 0; } static int meson_spicc_prepare_message(struct spi_master *master, @@ -743,6 +761,8 @@ static int meson_spicc_probe(struct platform_device *pdev) spicc->pdev = pdev; platform_set_drvdata(pdev, spicc); + init_completion(&spicc->done); + spicc->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(spicc->base)) { dev_err(&pdev->dev, "io resource mapping failed\n"); diff --git a/drivers/spi/spi-mpc52xx.c b/drivers/spi/spi-mpc52xx.c index cb075c1acbee..7b64e64c65cf 100644 --- a/drivers/spi/spi-mpc52xx.c +++ b/drivers/spi/spi-mpc52xx.c @@ -151,7 +151,7 @@ mpc52xx_spi_fsmstate_idle(int irq, struct mpc52xx_spi *ms, u8 status, u8 data) int spr, sppr; u8 ctrl1; - if (status && (irq != NO_IRQ)) + if (status && irq) 
dev_err(&ms->master->dev, "spurious irq, status=0x%.2x\n", status); diff --git a/drivers/spi/spi-mt65xx.c b/drivers/spi/spi-mt65xx.c index 11aeae7fe7fc..a33c9a3de395 100644 --- a/drivers/spi/spi-mt65xx.c +++ b/drivers/spi/spi-mt65xx.c @@ -551,14 +551,17 @@ static void mtk_spi_enable_transfer(struct spi_master *master) writel(cmd, mdata->base + SPI_CMD_REG); } -static int mtk_spi_get_mult_delta(u32 xfer_len) +static int mtk_spi_get_mult_delta(struct mtk_spi *mdata, u32 xfer_len) { - u32 mult_delta; + u32 mult_delta = 0; - if (xfer_len > MTK_SPI_PACKET_SIZE) - mult_delta = xfer_len % MTK_SPI_PACKET_SIZE; - else - mult_delta = 0; + if (mdata->dev_comp->ipm_design) { + if (xfer_len > MTK_SPI_IPM_PACKET_SIZE) + mult_delta = xfer_len % MTK_SPI_IPM_PACKET_SIZE; + } else { + if (xfer_len > MTK_SPI_PACKET_SIZE) + mult_delta = xfer_len % MTK_SPI_PACKET_SIZE; + } return mult_delta; } @@ -570,22 +573,22 @@ static void mtk_spi_update_mdata_len(struct spi_master *master) if (mdata->tx_sgl_len && mdata->rx_sgl_len) { if (mdata->tx_sgl_len > mdata->rx_sgl_len) { - mult_delta = mtk_spi_get_mult_delta(mdata->rx_sgl_len); + mult_delta = mtk_spi_get_mult_delta(mdata, mdata->rx_sgl_len); mdata->xfer_len = mdata->rx_sgl_len - mult_delta; mdata->rx_sgl_len = mult_delta; mdata->tx_sgl_len -= mdata->xfer_len; } else { - mult_delta = mtk_spi_get_mult_delta(mdata->tx_sgl_len); + mult_delta = mtk_spi_get_mult_delta(mdata, mdata->tx_sgl_len); mdata->xfer_len = mdata->tx_sgl_len - mult_delta; mdata->tx_sgl_len = mult_delta; mdata->rx_sgl_len -= mdata->xfer_len; } } else if (mdata->tx_sgl_len) { - mult_delta = mtk_spi_get_mult_delta(mdata->tx_sgl_len); + mult_delta = mtk_spi_get_mult_delta(mdata, mdata->tx_sgl_len); mdata->xfer_len = mdata->tx_sgl_len - mult_delta; mdata->tx_sgl_len = mult_delta; } else if (mdata->rx_sgl_len) { - mult_delta = mtk_spi_get_mult_delta(mdata->rx_sgl_len); + mult_delta = mtk_spi_get_mult_delta(mdata, mdata->rx_sgl_len); mdata->xfer_len = mdata->rx_sgl_len - mult_delta; mdata->rx_sgl_len = mult_delta; } diff --git a/drivers/spi/spi-qup.c b/drivers/spi/spi-qup.c index 7d89510dc3f0..678dc51ef017 100644 --- a/drivers/spi/spi-qup.c +++ b/drivers/spi/spi-qup.c @@ -1057,6 +1057,8 @@ static int spi_qup_probe(struct platform_device *pdev) else master->num_chipselect = num_cs; + master->use_gpio_descriptors = true; + master->max_native_cs = SPI_NUM_CHIPSELECTS; master->bus_num = pdev->id; master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LOOP; master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32); diff --git a/drivers/spi/spi-stm32.c b/drivers/spi/spi-stm32.c index 6fe617b445a5..def09cf0dc14 100644 --- a/drivers/spi/spi-stm32.c +++ b/drivers/spi/spi-stm32.c @@ -434,7 +434,7 @@ static int stm32_spi_prepare_mbr(struct stm32_spi *spi, u32 speed_hz, u32 div, mbrdiv; /* Ensure spi->clk_rate is even */ - div = DIV_ROUND_UP(spi->clk_rate & ~0x1, speed_hz); + div = DIV_ROUND_CLOSEST(spi->clk_rate & ~0x1, speed_hz); /* * SPI framework set xfer->speed_hz to master->max_speed_hz if @@ -886,6 +886,7 @@ static irqreturn_t stm32h7_spi_irq_thread(int irq, void *dev_id) static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL * 10, 1); + ratelimit_set_flags(&rs, RATELIMIT_MSG_ON_RELEASE); if (__ratelimit(&rs)) dev_dbg_ratelimited(spi->dev, "Communication suspended\n"); if (!spi->cur_usedma && (spi->rx_buf && (spi->rx_len > 0))) diff --git a/drivers/spi/spi-tegra210-quad.c b/drivers/spi/spi-tegra210-quad.c index c89592b21ffc..10f0c5a6e0dc 100644 --- a/drivers/spi/spi-tegra210-quad.c +++ 
b/drivers/spi/spi-tegra210-quad.c @@ -720,6 +720,9 @@ static int tegra_qspi_start_cpu_based_transfer(struct tegra_qspi *qspi, struct s static void tegra_qspi_deinit_dma(struct tegra_qspi *tqspi) { + if (!tqspi->soc_data->has_dma) + return; + if (tqspi->tx_dma_buf) { dma_free_coherent(tqspi->dev, tqspi->dma_buf_size, tqspi->tx_dma_buf, tqspi->tx_dma_phys); @@ -750,6 +753,9 @@ static int tegra_qspi_init_dma(struct tegra_qspi *tqspi) u32 *dma_buf; int err; + if (!tqspi->soc_data->has_dma) + return 0; + dma_chan = dma_request_chan(tqspi->dev, "rx"); if (IS_ERR(dma_chan)) { err = PTR_ERR(dma_chan); @@ -1157,6 +1163,11 @@ static int tegra_qspi_combined_seq_xfer(struct tegra_qspi *tqspi, msg->actual_length += xfer->len; transfer_phase++; } + if (!xfer->cs_change) { + tegra_qspi_transfer_end(spi); + spi_transfer_delay_exec(xfer); + } + ret = 0; exit: msg->status = ret; diff --git a/drivers/staging/media/atomisp/Makefile b/drivers/staging/media/atomisp/Makefile index fb7b406f50bf..532e12ed72e6 100644 --- a/drivers/staging/media/atomisp/Makefile +++ b/drivers/staging/media/atomisp/Makefile @@ -17,7 +17,6 @@ atomisp-objs += \ pci/atomisp_compat_css20.o \ pci/atomisp_csi2.o \ pci/atomisp_drvfs.o \ - pci/atomisp_file.o \ pci/atomisp_fops.o \ pci/atomisp_ioctl.o \ pci/atomisp_subdev.o \ diff --git a/drivers/staging/media/atomisp/i2c/atomisp-ov2680.c b/drivers/staging/media/atomisp/i2c/atomisp-ov2680.c index 8f48b23be3aa..fa1de45b7a2d 100644 --- a/drivers/staging/media/atomisp/i2c/atomisp-ov2680.c +++ b/drivers/staging/media/atomisp/i2c/atomisp-ov2680.c @@ -841,8 +841,6 @@ static int ov2680_set_fmt(struct v4l2_subdev *sd, if (!ov2680_info) return -EINVAL; - mutex_lock(&dev->input_lock); - res = v4l2_find_nearest_size(ov2680_res_preview, ARRAY_SIZE(ov2680_res_preview), width, height, fmt->width, fmt->height); @@ -855,19 +853,22 @@ static int ov2680_set_fmt(struct v4l2_subdev *sd, fmt->code = MEDIA_BUS_FMT_SBGGR10_1X10; if (format->which == V4L2_SUBDEV_FORMAT_TRY) { sd_state->pads->try_fmt = *fmt; - mutex_unlock(&dev->input_lock); return 0; } dev_dbg(&client->dev, "%s: %dx%d\n", __func__, fmt->width, fmt->height); + mutex_lock(&dev->input_lock); + /* s_power has not been called yet for std v4l2 clients (camorama) */ power_up(sd); ret = ov2680_write_reg_array(client, dev->res->regs); - if (ret) + if (ret) { dev_err(&client->dev, "ov2680 write resolution register err: %d\n", ret); + goto err; + } vts = dev->res->lines_per_frame; @@ -876,8 +877,10 @@ static int ov2680_set_fmt(struct v4l2_subdev *sd, vts = dev->exposure + OV2680_INTEGRATION_TIME_MARGIN; ret = ov2680_write_reg(client, 2, OV2680_TIMING_VTS_H, vts); - if (ret) + if (ret) { dev_err(&client->dev, "ov2680 write vts err: %d\n", ret); + goto err; + } ret = ov2680_get_intg_factor(client, ov2680_info, res); if (ret) { @@ -894,11 +897,7 @@ static int ov2680_set_fmt(struct v4l2_subdev *sd, if (v_flag) ov2680_v_flip(sd, v_flag); - /* - * ret = startup(sd); - * if (ret) - * dev_err(&client->dev, "ov2680 startup err\n"); - */ + dev->res = res; err: mutex_unlock(&dev->input_lock); return ret; diff --git a/drivers/staging/media/atomisp/include/hmm/hmm_bo.h b/drivers/staging/media/atomisp/include/hmm/hmm_bo.h index 385e22fc4a46..c5cbae1d9cf9 100644 --- a/drivers/staging/media/atomisp/include/hmm/hmm_bo.h +++ b/drivers/staging/media/atomisp/include/hmm/hmm_bo.h @@ -65,9 +65,6 @@ #define check_bo_null_return_void(bo) \ check_null_return_void(bo, "NULL hmm buffer object.\n") -#define HMM_MAX_ORDER 3 -#define HMM_MIN_ORDER 0 - #define ISP_VM_START 0x0 #define 
ISP_VM_SIZE (0x7FFFFFFF) /* 2G address space */ #define ISP_PTR_NULL NULL @@ -89,8 +86,6 @@ enum hmm_bo_type { #define HMM_BO_VMAPED 0x10 #define HMM_BO_VMAPED_CACHED 0x20 #define HMM_BO_ACTIVE 0x1000 -#define HMM_BO_MEM_TYPE_USER 0x1 -#define HMM_BO_MEM_TYPE_PFN 0x2 struct hmm_bo_device { struct isp_mmu mmu; @@ -126,7 +121,6 @@ struct hmm_buffer_object { enum hmm_bo_type type; int mmap_count; int status; - int mem_type; void *vmap_addr; /* kernel virtual address by vmap */ struct rb_node node; diff --git a/drivers/staging/media/atomisp/include/linux/atomisp.h b/drivers/staging/media/atomisp/include/linux/atomisp.h index f96f5adbd9de..3f602b5aaff9 100644 --- a/drivers/staging/media/atomisp/include/linux/atomisp.h +++ b/drivers/staging/media/atomisp/include/linux/atomisp.h @@ -740,20 +740,6 @@ enum atomisp_frame_status { ATOMISP_FRAME_STATUS_FLASH_FAILED, }; -/* ISP memories, isp2400 */ -enum atomisp_acc_memory { - ATOMISP_ACC_MEMORY_PMEM0 = 0, - ATOMISP_ACC_MEMORY_DMEM0, - /* for backward compatibility */ - ATOMISP_ACC_MEMORY_DMEM = ATOMISP_ACC_MEMORY_DMEM0, - ATOMISP_ACC_MEMORY_VMEM0, - ATOMISP_ACC_MEMORY_VAMEM0, - ATOMISP_ACC_MEMORY_VAMEM1, - ATOMISP_ACC_MEMORY_VAMEM2, - ATOMISP_ACC_MEMORY_HMEM0, - ATOMISP_ACC_NR_MEMORY -}; - enum atomisp_ext_isp_id { EXT_ISP_CID_ISO = 0, EXT_ISP_CID_CAPTURE_HDR, diff --git a/drivers/staging/media/atomisp/include/linux/atomisp_gmin_platform.h b/drivers/staging/media/atomisp/include/linux/atomisp_gmin_platform.h index 58e0ea5355a3..5463d11d4295 100644 --- a/drivers/staging/media/atomisp/include/linux/atomisp_gmin_platform.h +++ b/drivers/staging/media/atomisp/include/linux/atomisp_gmin_platform.h @@ -26,8 +26,6 @@ struct v4l2_subdev *atomisp_gmin_find_subdev(struct i2c_adapter *adapter, int atomisp_gmin_remove_subdev(struct v4l2_subdev *sd); int gmin_get_var_int(struct device *dev, bool is_gmin, const char *var, int def); -int camera_sensor_csi(struct v4l2_subdev *sd, u32 port, - u32 lanes, u32 format, u32 bayer_order, int flag); struct camera_sensor_platform_data * gmin_camera_platform_data( struct v4l2_subdev *subdev, diff --git a/drivers/staging/media/atomisp/include/linux/atomisp_platform.h b/drivers/staging/media/atomisp/include/linux/atomisp_platform.h index 8c65733e0255..0253661d4332 100644 --- a/drivers/staging/media/atomisp/include/linux/atomisp_platform.h +++ b/drivers/staging/media/atomisp/include/linux/atomisp_platform.h @@ -141,23 +141,6 @@ struct atomisp_platform_data { struct intel_v4l2_subdev_table *subdevs; }; -/* Describe the capacities of one single sensor. */ -struct atomisp_sensor_caps { - /* The number of streams this sensor can output. */ - int stream_num; - bool is_slave; -}; - -/* Describe the capacities of sensors connected to one camera port. */ -struct atomisp_camera_caps { - /* The number of sensors connected to this camera port. */ - int sensor_num; - /* The capacities of each sensor. */ - struct atomisp_sensor_caps sensor[MAX_SENSORS_PER_PORT]; - /* Define whether stream control is required for multiple streams. */ - bool multi_stream_ctrl; -}; - /* * Sensor of external ISP can send multiple steams with different mipi data * type in the same virtual channel. 
This information needs to come from the @@ -235,7 +218,6 @@ struct camera_mipi_info { }; const struct atomisp_platform_data *atomisp_get_platform_data(void); -const struct atomisp_camera_caps *atomisp_get_default_camera_caps(void); /* API from old platform_camera.h, new CPUID implementation */ #define __IS_SOC(x) (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL && \ diff --git a/drivers/staging/media/atomisp/notes.txt b/drivers/staging/media/atomisp/notes.txt index d128b792e05f..d3cf6ed547ae 100644 --- a/drivers/staging/media/atomisp/notes.txt +++ b/drivers/staging/media/atomisp/notes.txt @@ -28,3 +28,22 @@ Since getting a picture requires multiple processing steps, this means that unlike in fixed pipelines the soft pipelines on the ISP can do multiple processing steps in a single pipeline element (in a single binary). + +### + +The sensor drivers use of v4l2_get_subdev_hostdata(), which returns +a camera_mipi_info struct. This struct is allocated/managed by +the core atomisp code. The most important parts of the struct +are filled by the atomisp core itself, like e.g. the port number. + +The sensor drivers on a set_fmt call do fill in camera_mipi_info.data +which is a atomisp_sensor_mode_data struct. This gets filled from +a function called <sensor_name>_get_intg_factor(). This struct is not +used by the atomisp code at all. It is returned to userspace by +a ATOMISP_IOC_G_SENSOR_MODE_DATA and the Android userspace does use this. + +Other members of camera_mipi_info which are set by some drivers are: +-metadata_width, metadata_height, metadata_effective_width, set by + the ov5693 driver (and used by the atomisp core) +-raw_bayer_order, adjusted by the ov2680 driver when flipping since + flipping can change the bayer order diff --git a/drivers/staging/media/atomisp/pci/atomisp_cmd.c b/drivers/staging/media/atomisp/pci/atomisp_cmd.c index c932f340068f..c72d0e344671 100644 --- a/drivers/staging/media/atomisp/pci/atomisp_cmd.c +++ b/drivers/staging/media/atomisp/pci/atomisp_cmd.c @@ -80,6 +80,8 @@ union host { } ptr; }; +static int atomisp_set_raw_buffer_bitmap(struct atomisp_sub_device *asd, int exp_id); + /* * get sensor:dis71430/ov2720 related info from v4l2_subdev->priv data field. 
* subdev->priv is set in mrst.c @@ -98,15 +100,6 @@ struct atomisp_video_pipe *atomisp_to_video_pipe(struct video_device *dev) container_of(dev, struct atomisp_video_pipe, vdev); } -/* - * get struct atomisp_acc_pipe from v4l2 video_device - */ -struct atomisp_acc_pipe *atomisp_to_acc_pipe(struct video_device *dev) -{ - return (struct atomisp_acc_pipe *) - container_of(dev, struct atomisp_acc_pipe, vdev); -} - static unsigned short atomisp_get_sensor_fps(struct atomisp_sub_device *asd) { struct v4l2_subdev_frame_interval fi = { 0 }; @@ -777,24 +770,6 @@ static struct atomisp_video_pipe *__atomisp_get_pipe( enum ia_css_pipe_id css_pipe_id, enum ia_css_buffer_type buf_type) { - struct atomisp_device *isp = asd->isp; - - if (css_pipe_id == IA_CSS_PIPE_ID_COPY && - isp->inputs[asd->input_curr].camera_caps-> - sensor[asd->sensor_curr].stream_num > 1) { - switch (stream_id) { - case ATOMISP_INPUT_STREAM_PREVIEW: - return &asd->video_out_preview; - case ATOMISP_INPUT_STREAM_POSTVIEW: - return &asd->video_out_vf; - case ATOMISP_INPUT_STREAM_VIDEO: - return &asd->video_out_video_capture; - case ATOMISP_INPUT_STREAM_CAPTURE: - default: - return &asd->video_out_capture; - } - } - /* video is same in online as in continuouscapture mode */ if (asd->vfpp->val == ATOMISP_VFPP_DISABLE_LOWLAT) { /* @@ -906,7 +881,8 @@ void atomisp_buf_done(struct atomisp_sub_device *asd, int error, enum atomisp_metadata_type md_type; struct atomisp_device *isp = asd->isp; struct v4l2_control ctrl; - bool reset_wdt_timer = false; + + lockdep_assert_held(&isp->mutex); if ( buf_type != IA_CSS_BUFFER_TYPE_METADATA && @@ -1013,9 +989,6 @@ void atomisp_buf_done(struct atomisp_sub_device *asd, int error, break; case IA_CSS_BUFFER_TYPE_VF_OUTPUT_FRAME: case IA_CSS_BUFFER_TYPE_SEC_VF_OUTPUT_FRAME: - if (IS_ISP2401) - reset_wdt_timer = true; - pipe->buffers_in_css--; frame = buffer.css_buffer.data.frame; if (!frame) { @@ -1068,9 +1041,6 @@ void atomisp_buf_done(struct atomisp_sub_device *asd, int error, break; case IA_CSS_BUFFER_TYPE_OUTPUT_FRAME: case IA_CSS_BUFFER_TYPE_SEC_OUTPUT_FRAME: - if (IS_ISP2401) - reset_wdt_timer = true; - pipe->buffers_in_css--; frame = buffer.css_buffer.data.frame; if (!frame) { @@ -1238,8 +1208,6 @@ void atomisp_buf_done(struct atomisp_sub_device *asd, int error, */ wake_up(&vb->done); } - if (IS_ISP2401) - atomic_set(&pipe->wdt_count, 0); /* * Requeue should only be done for 3a and dis buffers. @@ -1256,19 +1224,6 @@ void atomisp_buf_done(struct atomisp_sub_device *asd, int error, } if (!error && q_buffers) atomisp_qbuffers_to_css(asd); - - if (IS_ISP2401) { - /* If there are no buffers queued then - * delete wdt timer. */ - if (asd->streaming != ATOMISP_DEVICE_STREAMING_ENABLED) - return; - if (!atomisp_buffers_queued_pipe(pipe)) - atomisp_wdt_stop_pipe(pipe, false); - else if (reset_wdt_timer) - /* SOF irq should not reset wdt timer. 
*/ - atomisp_wdt_refresh_pipe(pipe, - ATOMISP_WDT_KEEP_CURRENT_DELAY); - } } void atomisp_delayed_init_work(struct work_struct *work) @@ -1307,10 +1262,14 @@ static void __atomisp_css_recover(struct atomisp_device *isp, bool isp_timeout) bool stream_restart[MAX_STREAM_NUM] = {0}; bool depth_mode = false; int i, ret, depth_cnt = 0; + unsigned long flags; - if (!isp->sw_contex.file_input) - atomisp_css_irq_enable(isp, - IA_CSS_IRQ_INFO_CSS_RECEIVER_SOF, false); + lockdep_assert_held(&isp->mutex); + + if (!atomisp_streaming_count(isp)) + return; + + atomisp_css_irq_enable(isp, IA_CSS_IRQ_INFO_CSS_RECEIVER_SOF, false); BUG_ON(isp->num_of_streams > MAX_STREAM_NUM); @@ -1331,7 +1290,9 @@ static void __atomisp_css_recover(struct atomisp_device *isp, bool isp_timeout) stream_restart[asd->index] = true; + spin_lock_irqsave(&isp->lock, flags); asd->streaming = ATOMISP_DEVICE_STREAMING_STOPPING; + spin_unlock_irqrestore(&isp->lock, flags); /* stream off sensor */ ret = v4l2_subdev_call( @@ -1346,7 +1307,9 @@ static void __atomisp_css_recover(struct atomisp_device *isp, bool isp_timeout) css_pipe_id = atomisp_get_css_pipe_id(asd); atomisp_css_stop(asd, css_pipe_id, true); + spin_lock_irqsave(&isp->lock, flags); asd->streaming = ATOMISP_DEVICE_STREAMING_DISABLED; + spin_unlock_irqrestore(&isp->lock, flags); asd->preview_exp_id = 1; asd->postview_exp_id = 1; @@ -1387,25 +1350,23 @@ static void __atomisp_css_recover(struct atomisp_device *isp, bool isp_timeout) IA_CSS_INPUT_MODE_BUFFERED_SENSOR); css_pipe_id = atomisp_get_css_pipe_id(asd); - if (atomisp_css_start(asd, css_pipe_id, true)) + if (atomisp_css_start(asd, css_pipe_id, true)) { dev_warn(isp->dev, "start SP failed, so do not set streaming to be enable!\n"); - else + } else { + spin_lock_irqsave(&isp->lock, flags); asd->streaming = ATOMISP_DEVICE_STREAMING_ENABLED; + spin_unlock_irqrestore(&isp->lock, flags); + } atomisp_csi2_configure(asd); } - if (!isp->sw_contex.file_input) { - atomisp_css_irq_enable(isp, IA_CSS_IRQ_INFO_CSS_RECEIVER_SOF, - atomisp_css_valid_sof(isp)); + atomisp_css_irq_enable(isp, IA_CSS_IRQ_INFO_CSS_RECEIVER_SOF, + atomisp_css_valid_sof(isp)); - if (atomisp_freq_scaling(isp, ATOMISP_DFS_MODE_AUTO, true) < 0) - dev_dbg(isp->dev, "DFS auto failed while recovering!\n"); - } else { - if (atomisp_freq_scaling(isp, ATOMISP_DFS_MODE_MAX, true) < 0) - dev_dbg(isp->dev, "DFS max failed while recovering!\n"); - } + if (atomisp_freq_scaling(isp, ATOMISP_DFS_MODE_AUTO, true) < 0) + dev_dbg(isp->dev, "DFS auto failed while recovering!\n"); for (i = 0; i < isp->num_of_streams; i++) { struct atomisp_sub_device *asd; @@ -1454,361 +1415,24 @@ static void __atomisp_css_recover(struct atomisp_device *isp, bool isp_timeout) } } -void atomisp_wdt_work(struct work_struct *work) +void atomisp_assert_recovery_work(struct work_struct *work) { struct atomisp_device *isp = container_of(work, struct atomisp_device, - wdt_work); - int i; - unsigned int pipe_wdt_cnt[MAX_STREAM_NUM][4] = { {0} }; - bool css_recover = true; - - rt_mutex_lock(&isp->mutex); - if (!atomisp_streaming_count(isp)) { - atomic_set(&isp->wdt_work_queued, 0); - rt_mutex_unlock(&isp->mutex); - return; - } - - if (!IS_ISP2401) { - dev_err(isp->dev, "timeout %d of %d\n", - atomic_read(&isp->wdt_count) + 1, - ATOMISP_ISP_MAX_TIMEOUT_COUNT); - } else { - for (i = 0; i < isp->num_of_streams; i++) { - struct atomisp_sub_device *asd = &isp->asd[i]; - - pipe_wdt_cnt[i][0] += - atomic_read(&asd->video_out_capture.wdt_count); - pipe_wdt_cnt[i][1] += - atomic_read(&asd->video_out_vf.wdt_count); 
- pipe_wdt_cnt[i][2] += - atomic_read(&asd->video_out_preview.wdt_count); - pipe_wdt_cnt[i][3] += - atomic_read(&asd->video_out_video_capture.wdt_count); - css_recover = - (pipe_wdt_cnt[i][0] <= ATOMISP_ISP_MAX_TIMEOUT_COUNT && - pipe_wdt_cnt[i][1] <= ATOMISP_ISP_MAX_TIMEOUT_COUNT && - pipe_wdt_cnt[i][2] <= ATOMISP_ISP_MAX_TIMEOUT_COUNT && - pipe_wdt_cnt[i][3] <= ATOMISP_ISP_MAX_TIMEOUT_COUNT) - ? true : false; - dev_err(isp->dev, - "pipe on asd%d timeout cnt: (%d, %d, %d, %d) of %d, recover = %d\n", - asd->index, pipe_wdt_cnt[i][0], pipe_wdt_cnt[i][1], - pipe_wdt_cnt[i][2], pipe_wdt_cnt[i][3], - ATOMISP_ISP_MAX_TIMEOUT_COUNT, css_recover); - } - } - - if (css_recover) { - ia_css_debug_dump_sp_sw_debug_info(); - ia_css_debug_dump_debug_info(__func__); - for (i = 0; i < isp->num_of_streams; i++) { - struct atomisp_sub_device *asd = &isp->asd[i]; - - if (asd->streaming != ATOMISP_DEVICE_STREAMING_ENABLED) - continue; - dev_err(isp->dev, "%s, vdev %s buffers in css: %d\n", - __func__, - asd->video_out_capture.vdev.name, - asd->video_out_capture. - buffers_in_css); - dev_err(isp->dev, - "%s, vdev %s buffers in css: %d\n", - __func__, - asd->video_out_vf.vdev.name, - asd->video_out_vf. - buffers_in_css); - dev_err(isp->dev, - "%s, vdev %s buffers in css: %d\n", - __func__, - asd->video_out_preview.vdev.name, - asd->video_out_preview. - buffers_in_css); - dev_err(isp->dev, - "%s, vdev %s buffers in css: %d\n", - __func__, - asd->video_out_video_capture.vdev.name, - asd->video_out_video_capture. - buffers_in_css); - dev_err(isp->dev, - "%s, s3a buffers in css preview pipe:%d\n", - __func__, - asd->s3a_bufs_in_css[IA_CSS_PIPE_ID_PREVIEW]); - dev_err(isp->dev, - "%s, s3a buffers in css capture pipe:%d\n", - __func__, - asd->s3a_bufs_in_css[IA_CSS_PIPE_ID_CAPTURE]); - dev_err(isp->dev, - "%s, s3a buffers in css video pipe:%d\n", - __func__, - asd->s3a_bufs_in_css[IA_CSS_PIPE_ID_VIDEO]); - dev_err(isp->dev, - "%s, dis buffers in css: %d\n", - __func__, asd->dis_bufs_in_css); - dev_err(isp->dev, - "%s, metadata buffers in css preview pipe:%d\n", - __func__, - asd->metadata_bufs_in_css - [ATOMISP_INPUT_STREAM_GENERAL] - [IA_CSS_PIPE_ID_PREVIEW]); - dev_err(isp->dev, - "%s, metadata buffers in css capture pipe:%d\n", - __func__, - asd->metadata_bufs_in_css - [ATOMISP_INPUT_STREAM_GENERAL] - [IA_CSS_PIPE_ID_CAPTURE]); - dev_err(isp->dev, - "%s, metadata buffers in css video pipe:%d\n", - __func__, - asd->metadata_bufs_in_css - [ATOMISP_INPUT_STREAM_GENERAL] - [IA_CSS_PIPE_ID_VIDEO]); - if (asd->enable_raw_buffer_lock->val) { - unsigned int j; - - dev_err(isp->dev, "%s, raw_buffer_locked_count %d\n", - __func__, asd->raw_buffer_locked_count); - for (j = 0; j <= ATOMISP_MAX_EXP_ID / 32; j++) - dev_err(isp->dev, "%s, raw_buffer_bitmap[%d]: 0x%x\n", - __func__, j, - asd->raw_buffer_bitmap[j]); - } - } - - /*sh_css_dump_sp_state();*/ - /*sh_css_dump_isp_state();*/ - } else { - for (i = 0; i < isp->num_of_streams; i++) { - struct atomisp_sub_device *asd = &isp->asd[i]; - - if (asd->streaming == - ATOMISP_DEVICE_STREAMING_ENABLED) { - atomisp_clear_css_buffer_counters(asd); - atomisp_flush_bufs_and_wakeup(asd); - complete(&asd->init_done); - } - if (IS_ISP2401) - atomisp_wdt_stop(asd, false); - } - - if (!IS_ISP2401) { - atomic_set(&isp->wdt_count, 0); - } else { - isp->isp_fatal_error = true; - atomic_set(&isp->wdt_work_queued, 0); - - rt_mutex_unlock(&isp->mutex); - return; - } - } + assert_recovery_work); + mutex_lock(&isp->mutex); __atomisp_css_recover(isp, true); - if (IS_ISP2401) { - for (i = 0; i < 
isp->num_of_streams; i++) { - struct atomisp_sub_device *asd = &isp->asd[i]; - - if (asd->streaming != ATOMISP_DEVICE_STREAMING_ENABLED) - continue; - - atomisp_wdt_refresh(asd, - isp->sw_contex.file_input ? - ATOMISP_ISP_FILE_TIMEOUT_DURATION : - ATOMISP_ISP_TIMEOUT_DURATION); - } - } - - dev_err(isp->dev, "timeout recovery handling done\n"); - atomic_set(&isp->wdt_work_queued, 0); - - rt_mutex_unlock(&isp->mutex); + mutex_unlock(&isp->mutex); } void atomisp_css_flush(struct atomisp_device *isp) { - int i; - - if (!atomisp_streaming_count(isp)) - return; - - /* Disable wdt */ - for (i = 0; i < isp->num_of_streams; i++) { - struct atomisp_sub_device *asd = &isp->asd[i]; - - atomisp_wdt_stop(asd, true); - } - /* Start recover */ __atomisp_css_recover(isp, false); - /* Restore wdt */ - for (i = 0; i < isp->num_of_streams; i++) { - struct atomisp_sub_device *asd = &isp->asd[i]; - - if (asd->streaming != - ATOMISP_DEVICE_STREAMING_ENABLED) - continue; - atomisp_wdt_refresh(asd, - isp->sw_contex.file_input ? - ATOMISP_ISP_FILE_TIMEOUT_DURATION : - ATOMISP_ISP_TIMEOUT_DURATION); - } dev_dbg(isp->dev, "atomisp css flush done\n"); } -void atomisp_wdt(struct timer_list *t) -{ - struct atomisp_sub_device *asd; - struct atomisp_device *isp; - - if (!IS_ISP2401) { - asd = from_timer(asd, t, wdt); - isp = asd->isp; - } else { - struct atomisp_video_pipe *pipe = from_timer(pipe, t, wdt); - - asd = pipe->asd; - isp = asd->isp; - - atomic_inc(&pipe->wdt_count); - dev_warn(isp->dev, - "[WARNING]asd %d pipe %s ISP timeout %d!\n", - asd->index, pipe->vdev.name, - atomic_read(&pipe->wdt_count)); - } - - if (atomic_read(&isp->wdt_work_queued)) { - dev_dbg(isp->dev, "ISP watchdog was put into workqueue\n"); - return; - } - atomic_set(&isp->wdt_work_queued, 1); - queue_work(isp->wdt_work_queue, &isp->wdt_work); -} - -/* ISP2400 */ -void atomisp_wdt_start(struct atomisp_sub_device *asd) -{ - atomisp_wdt_refresh(asd, ATOMISP_ISP_TIMEOUT_DURATION); -} - -/* ISP2401 */ -void atomisp_wdt_refresh_pipe(struct atomisp_video_pipe *pipe, - unsigned int delay) -{ - unsigned long next; - - if (!pipe->asd) { - dev_err(pipe->isp->dev, "%s(): asd is NULL, device is %s\n", - __func__, pipe->vdev.name); - return; - } - - if (delay != ATOMISP_WDT_KEEP_CURRENT_DELAY) - pipe->wdt_duration = delay; - - next = jiffies + pipe->wdt_duration; - - /* Override next if it has been pushed beyon the "next" time */ - if (atomisp_is_wdt_running(pipe) && time_after(pipe->wdt_expires, next)) - next = pipe->wdt_expires; - - pipe->wdt_expires = next; - - if (atomisp_is_wdt_running(pipe)) - dev_dbg(pipe->asd->isp->dev, "WDT will hit after %d ms (%s)\n", - ((int)(next - jiffies) * 1000 / HZ), pipe->vdev.name); - else - dev_dbg(pipe->asd->isp->dev, "WDT starts with %d ms period (%s)\n", - ((int)(next - jiffies) * 1000 / HZ), pipe->vdev.name); - - mod_timer(&pipe->wdt, next); -} - -void atomisp_wdt_refresh(struct atomisp_sub_device *asd, unsigned int delay) -{ - if (!IS_ISP2401) { - unsigned long next; - - if (delay != ATOMISP_WDT_KEEP_CURRENT_DELAY) - asd->wdt_duration = delay; - - next = jiffies + asd->wdt_duration; - - /* Override next if it has been pushed beyon the "next" time */ - if (atomisp_is_wdt_running(asd) && time_after(asd->wdt_expires, next)) - next = asd->wdt_expires; - - asd->wdt_expires = next; - - if (atomisp_is_wdt_running(asd)) - dev_dbg(asd->isp->dev, "WDT will hit after %d ms\n", - ((int)(next - jiffies) * 1000 / HZ)); - else - dev_dbg(asd->isp->dev, "WDT starts with %d ms period\n", - ((int)(next - jiffies) * 1000 / HZ)); - - 
mod_timer(&asd->wdt, next); - atomic_set(&asd->isp->wdt_count, 0); - } else { - dev_dbg(asd->isp->dev, "WDT refresh all:\n"); - if (atomisp_is_wdt_running(&asd->video_out_capture)) - atomisp_wdt_refresh_pipe(&asd->video_out_capture, delay); - if (atomisp_is_wdt_running(&asd->video_out_preview)) - atomisp_wdt_refresh_pipe(&asd->video_out_preview, delay); - if (atomisp_is_wdt_running(&asd->video_out_vf)) - atomisp_wdt_refresh_pipe(&asd->video_out_vf, delay); - if (atomisp_is_wdt_running(&asd->video_out_video_capture)) - atomisp_wdt_refresh_pipe(&asd->video_out_video_capture, delay); - } -} - -/* ISP2401 */ -void atomisp_wdt_stop_pipe(struct atomisp_video_pipe *pipe, bool sync) -{ - if (!pipe->asd) { - dev_err(pipe->isp->dev, "%s(): asd is NULL, device is %s\n", - __func__, pipe->vdev.name); - return; - } - - if (!atomisp_is_wdt_running(pipe)) - return; - - dev_dbg(pipe->asd->isp->dev, - "WDT stop asd %d (%s)\n", pipe->asd->index, pipe->vdev.name); - - if (sync) { - del_timer_sync(&pipe->wdt); - cancel_work_sync(&pipe->asd->isp->wdt_work); - } else { - del_timer(&pipe->wdt); - } -} - -/* ISP 2401 */ -void atomisp_wdt_start_pipe(struct atomisp_video_pipe *pipe) -{ - atomisp_wdt_refresh_pipe(pipe, ATOMISP_ISP_TIMEOUT_DURATION); -} - -void atomisp_wdt_stop(struct atomisp_sub_device *asd, bool sync) -{ - dev_dbg(asd->isp->dev, "WDT stop:\n"); - - if (!IS_ISP2401) { - if (sync) { - del_timer_sync(&asd->wdt); - cancel_work_sync(&asd->isp->wdt_work); - } else { - del_timer(&asd->wdt); - } - } else { - atomisp_wdt_stop_pipe(&asd->video_out_capture, sync); - atomisp_wdt_stop_pipe(&asd->video_out_preview, sync); - atomisp_wdt_stop_pipe(&asd->video_out_vf, sync); - atomisp_wdt_stop_pipe(&asd->video_out_video_capture, sync); - } -} - void atomisp_setup_flash(struct atomisp_sub_device *asd) { struct atomisp_device *isp = asd->isp; @@ -1884,7 +1508,7 @@ irqreturn_t atomisp_isr_thread(int irq, void *isp_ptr) * For CSS2.0: we change the way to not dequeue all the event at one * time, instead, dequue one and process one, then another */ - rt_mutex_lock(&isp->mutex); + mutex_lock(&isp->mutex); if (atomisp_css_isr_thread(isp, frame_done_found, css_pipe_done)) goto out; @@ -1895,15 +1519,7 @@ irqreturn_t atomisp_isr_thread(int irq, void *isp_ptr) atomisp_setup_flash(asd); } out: - rt_mutex_unlock(&isp->mutex); - for (i = 0; i < isp->num_of_streams; i++) { - asd = &isp->asd[i]; - if (asd->streaming == ATOMISP_DEVICE_STREAMING_ENABLED - && css_pipe_done[asd->index] - && isp->sw_contex.file_input) - v4l2_subdev_call(isp->inputs[asd->input_curr].camera, - video, s_stream, 1); - } + mutex_unlock(&isp->mutex); dev_dbg(isp->dev, "<%s\n", __func__); return IRQ_HANDLED; @@ -2322,7 +1938,6 @@ static void atomisp_update_grid_info(struct atomisp_sub_device *asd, { struct atomisp_device *isp = asd->isp; int err; - u16 stream_id = atomisp_source_pad_to_stream_id(asd, source_pad); if (atomisp_css_get_grid_info(asd, pipe_id, source_pad)) return; @@ -2331,7 +1946,7 @@ static void atomisp_update_grid_info(struct atomisp_sub_device *asd, the grid size. 
*/ atomisp_css_free_stat_buffers(asd); - err = atomisp_alloc_css_stat_bufs(asd, stream_id); + err = atomisp_alloc_css_stat_bufs(asd, ATOMISP_INPUT_STREAM_GENERAL); if (err) { dev_err(isp->dev, "stat_buf allocate error\n"); goto err; @@ -4077,6 +3692,8 @@ void atomisp_handle_parameter_and_buffer(struct atomisp_video_pipe *pipe) unsigned long irqflags; bool need_to_enqueue_buffer = false; + lockdep_assert_held(&asd->isp->mutex); + if (!asd) { dev_err(pipe->isp->dev, "%s(): asd is NULL, device is %s\n", __func__, pipe->vdev.name); @@ -4143,19 +3760,6 @@ void atomisp_handle_parameter_and_buffer(struct atomisp_video_pipe *pipe) return; atomisp_qbuffers_to_css(asd); - - if (!IS_ISP2401) { - if (!atomisp_is_wdt_running(asd) && atomisp_buffers_queued(asd)) - atomisp_wdt_start(asd); - } else { - if (atomisp_buffers_queued_pipe(pipe)) { - if (!atomisp_is_wdt_running(pipe)) - atomisp_wdt_start_pipe(pipe); - else - atomisp_wdt_refresh_pipe(pipe, - ATOMISP_WDT_KEEP_CURRENT_DELAY); - } - } } /* @@ -4170,6 +3774,8 @@ int atomisp_set_parameters(struct video_device *vdev, struct atomisp_css_params *css_param = &asd->params.css_param; int ret; + lockdep_assert_held(&asd->isp->mutex); + if (!asd) { dev_err(pipe->isp->dev, "%s(): asd is NULL, device is %s\n", __func__, vdev->name); @@ -4824,8 +4430,6 @@ int atomisp_try_fmt(struct video_device *vdev, struct v4l2_pix_format *f, const struct atomisp_format_bridge *fmt; struct atomisp_input_stream_info *stream_info = (struct atomisp_input_stream_info *)snr_mbus_fmt->reserved; - u16 stream_index; - int source_pad = atomisp_subdev_source_pad(vdev); int ret; if (!asd) { @@ -4837,7 +4441,6 @@ int atomisp_try_fmt(struct video_device *vdev, struct v4l2_pix_format *f, if (!isp->inputs[asd->input_curr].camera) return -EINVAL; - stream_index = atomisp_source_pad_to_stream_id(asd, source_pad); fmt = atomisp_get_format_bridge(f->pixelformat); if (!fmt) { dev_err(isp->dev, "unsupported pixelformat!\n"); @@ -4851,7 +4454,7 @@ int atomisp_try_fmt(struct video_device *vdev, struct v4l2_pix_format *f, snr_mbus_fmt->width = f->width; snr_mbus_fmt->height = f->height; - __atomisp_init_stream_info(stream_index, stream_info); + __atomisp_init_stream_info(ATOMISP_INPUT_STREAM_GENERAL, stream_info); dev_dbg(isp->dev, "try_mbus_fmt: asking for %ux%u\n", snr_mbus_fmt->width, snr_mbus_fmt->height); @@ -4886,8 +4489,8 @@ int atomisp_try_fmt(struct video_device *vdev, struct v4l2_pix_format *f, return 0; } - if (snr_mbus_fmt->width < f->width - && snr_mbus_fmt->height < f->height) { + if (!res_overflow || (snr_mbus_fmt->width < f->width && + snr_mbus_fmt->height < f->height)) { f->width = snr_mbus_fmt->width; f->height = snr_mbus_fmt->height; /* Set the flag when resolution requested is @@ -4906,41 +4509,6 @@ int atomisp_try_fmt(struct video_device *vdev, struct v4l2_pix_format *f, return 0; } -static int -atomisp_try_fmt_file(struct atomisp_device *isp, struct v4l2_format *f) -{ - u32 width = f->fmt.pix.width; - u32 height = f->fmt.pix.height; - u32 pixelformat = f->fmt.pix.pixelformat; - enum v4l2_field field = f->fmt.pix.field; - u32 depth; - - if (!atomisp_get_format_bridge(pixelformat)) { - dev_err(isp->dev, "Wrong output pixelformat\n"); - return -EINVAL; - } - - depth = atomisp_get_pixel_depth(pixelformat); - - if (field == V4L2_FIELD_ANY) { - field = V4L2_FIELD_NONE; - } else if (field != V4L2_FIELD_NONE) { - dev_err(isp->dev, "Wrong output field\n"); - return -EINVAL; - } - - f->fmt.pix.field = field; - f->fmt.pix.width = clamp_t(u32, - rounddown(width, (u32)ATOM_ISP_STEP_WIDTH), 
- ATOM_ISP_MIN_WIDTH, ATOM_ISP_MAX_WIDTH); - f->fmt.pix.height = clamp_t(u32, rounddown(height, - (u32)ATOM_ISP_STEP_HEIGHT), - ATOM_ISP_MIN_HEIGHT, ATOM_ISP_MAX_HEIGHT); - f->fmt.pix.bytesperline = (width * depth) >> 3; - - return 0; -} - enum mipi_port_id __get_mipi_port(struct atomisp_device *isp, enum atomisp_camera_port port) { @@ -5171,7 +4739,6 @@ static int atomisp_set_fmt_to_isp(struct video_device *vdev, int (*configure_pp_input)(struct atomisp_sub_device *asd, unsigned int width, unsigned int height) = configure_pp_input_nop; - u16 stream_index; const struct atomisp_in_fmt_conv *fc; int ret, i; @@ -5180,7 +4747,6 @@ static int atomisp_set_fmt_to_isp(struct video_device *vdev, __func__, vdev->name); return -EINVAL; } - stream_index = atomisp_source_pad_to_stream_id(asd, source_pad); v4l2_fh_init(&fh.vfh, vdev); @@ -5200,7 +4766,7 @@ static int atomisp_set_fmt_to_isp(struct video_device *vdev, dev_err(isp->dev, "mipi_info is NULL\n"); return -EINVAL; } - if (atomisp_set_sensor_mipi_to_isp(asd, stream_index, + if (atomisp_set_sensor_mipi_to_isp(asd, ATOMISP_INPUT_STREAM_GENERAL, mipi_info)) return -EINVAL; fc = atomisp_find_in_fmt_conv_by_atomisp_in_fmt( @@ -5284,7 +4850,7 @@ static int atomisp_set_fmt_to_isp(struct video_device *vdev, /* ISP2401 new input system need to use copy pipe */ if (asd->copy_mode) { pipe_id = IA_CSS_PIPE_ID_COPY; - atomisp_css_capture_enable_online(asd, stream_index, false); + atomisp_css_capture_enable_online(asd, ATOMISP_INPUT_STREAM_GENERAL, false); } else if (asd->vfpp->val == ATOMISP_VFPP_DISABLE_SCALER) { /* video same in continuouscapture and online modes */ configure_output = atomisp_css_video_configure_output; @@ -5316,7 +4882,9 @@ static int atomisp_set_fmt_to_isp(struct video_device *vdev, pipe_id = IA_CSS_PIPE_ID_CAPTURE; atomisp_update_capture_mode(asd); - atomisp_css_capture_enable_online(asd, stream_index, false); + atomisp_css_capture_enable_online(asd, + ATOMISP_INPUT_STREAM_GENERAL, + false); } } } else if (source_pad == ATOMISP_SUBDEV_PAD_SOURCE_PREVIEW) { @@ -5341,7 +4909,7 @@ static int atomisp_set_fmt_to_isp(struct video_device *vdev, if (!asd->continuous_mode->val) /* in case of ANR, force capture pipe to offline mode */ - atomisp_css_capture_enable_online(asd, stream_index, + atomisp_css_capture_enable_online(asd, ATOMISP_INPUT_STREAM_GENERAL, asd->params.low_light ? false : asd->params.online_process); @@ -5372,7 +4940,7 @@ static int atomisp_set_fmt_to_isp(struct video_device *vdev, pipe_id = IA_CSS_PIPE_ID_YUVPP; if (asd->copy_mode) - ret = atomisp_css_copy_configure_output(asd, stream_index, + ret = atomisp_css_copy_configure_output(asd, ATOMISP_INPUT_STREAM_GENERAL, pix->width, pix->height, format->planar ? 
pix->bytesperline : pix->bytesperline * 8 / format->depth, @@ -5396,8 +4964,9 @@ static int atomisp_set_fmt_to_isp(struct video_device *vdev, return -EINVAL; } if (asd->copy_mode) - ret = atomisp_css_copy_get_output_frame_info(asd, stream_index, - output_info); + ret = atomisp_css_copy_get_output_frame_info(asd, + ATOMISP_INPUT_STREAM_GENERAL, + output_info); else ret = get_frame_info(asd, output_info); if (ret) { @@ -5412,8 +4981,7 @@ static int atomisp_set_fmt_to_isp(struct video_device *vdev, ia_css_frame_free(asd->raw_output_frame); asd->raw_output_frame = NULL; - if (!asd->continuous_mode->val && - !asd->params.online_process && !isp->sw_contex.file_input && + if (!asd->continuous_mode->val && !asd->params.online_process && ia_css_frame_allocate_from_info(&asd->raw_output_frame, raw_output_info)) return -ENOMEM; @@ -5462,12 +5030,7 @@ static void atomisp_check_copy_mode(struct atomisp_sub_device *asd, src = atomisp_subdev_get_ffmt(&asd->subdev, NULL, V4L2_SUBDEV_FORMAT_ACTIVE, source_pad); - if ((sink->code == src->code && - sink->width == f->width && - sink->height == f->height) || - ((asd->isp->inputs[asd->input_curr].type == SOC_CAMERA) && - (asd->isp->inputs[asd->input_curr].camera_caps-> - sensor[asd->sensor_curr].stream_num > 1))) + if (sink->code == src->code && sink->width == f->width && sink->height == f->height) asd->copy_mode = true; else asd->copy_mode = false; @@ -5495,7 +5058,6 @@ static int atomisp_set_fmt_to_snr(struct video_device *vdev, struct atomisp_device *isp; struct atomisp_input_stream_info *stream_info = (struct atomisp_input_stream_info *)ffmt->reserved; - u16 stream_index = ATOMISP_INPUT_STREAM_GENERAL; int source_pad = atomisp_subdev_source_pad(vdev); struct v4l2_subdev_fh fh; int ret; @@ -5510,8 +5072,6 @@ static int atomisp_set_fmt_to_snr(struct video_device *vdev, v4l2_fh_init(&fh.vfh, vdev); - stream_index = atomisp_source_pad_to_stream_id(asd, source_pad); - format = atomisp_get_format_bridge(pixelformat); if (!format) return -EINVAL; @@ -5524,7 +5084,7 @@ static int atomisp_set_fmt_to_snr(struct video_device *vdev, ffmt->width, ffmt->height, padding_w, padding_h, dvs_env_w, dvs_env_h); - __atomisp_init_stream_info(stream_index, stream_info); + __atomisp_init_stream_info(ATOMISP_INPUT_STREAM_GENERAL, stream_info); req_ffmt = ffmt; @@ -5556,7 +5116,7 @@ static int atomisp_set_fmt_to_snr(struct video_device *vdev, if (ret) return ret; - __atomisp_update_stream_env(asd, stream_index, stream_info); + __atomisp_update_stream_env(asd, ATOMISP_INPUT_STREAM_GENERAL, stream_info); dev_dbg(isp->dev, "sensor width: %d, height: %d\n", ffmt->width, ffmt->height); @@ -5580,8 +5140,9 @@ static int atomisp_set_fmt_to_snr(struct video_device *vdev, return css_input_resolution_changed(asd, ffmt); } -int atomisp_set_fmt(struct video_device *vdev, struct v4l2_format *f) +int atomisp_set_fmt(struct file *file, void *unused, struct v4l2_format *f) { + struct video_device *vdev = video_devdata(file); struct atomisp_device *isp = video_get_drvdata(vdev); struct atomisp_video_pipe *pipe = atomisp_to_video_pipe(vdev); struct atomisp_sub_device *asd = pipe->asd; @@ -5604,20 +5165,13 @@ int atomisp_set_fmt(struct video_device *vdev, struct v4l2_format *f) struct v4l2_subdev_fh fh; int ret; - if (!asd) { - dev_err(isp->dev, "%s(): asd is NULL, device is %s\n", - __func__, vdev->name); - return -EINVAL; - } + ret = atomisp_pipe_check(pipe, true); + if (ret) + return ret; if (source_pad >= ATOMISP_SUBDEV_PADS_NUM) return -EINVAL; - if (asd->streaming == 
ATOMISP_DEVICE_STREAMING_ENABLED) { - dev_warn(isp->dev, "ISP does not support set format while at streaming!\n"); - return -EBUSY; - } - dev_dbg(isp->dev, "setting resolution %ux%u on pad %u for asd%d, bytesperline %u\n", f->fmt.pix.width, f->fmt.pix.height, source_pad, @@ -5699,58 +5253,7 @@ int atomisp_set_fmt(struct video_device *vdev, struct v4l2_format *f) f->fmt.pix.height = r.height; } - if (source_pad == ATOMISP_SUBDEV_PAD_SOURCE_PREVIEW && - (asd->isp->inputs[asd->input_curr].type == SOC_CAMERA) && - (asd->isp->inputs[asd->input_curr].camera_caps-> - sensor[asd->sensor_curr].stream_num > 1)) { - /* For M10MO outputing YUV preview images. */ - u16 video_index = - atomisp_source_pad_to_stream_id(asd, - ATOMISP_SUBDEV_PAD_SOURCE_VIDEO); - - ret = atomisp_css_copy_get_output_frame_info(asd, - video_index, &output_info); - if (ret) { - dev_err(isp->dev, - "copy_get_output_frame_info ret %i", ret); - return -EINVAL; - } - if (!asd->yuvpp_mode) { - /* - * If viewfinder was configured into copy_mode, - * we switch to using yuvpp pipe instead. - */ - asd->yuvpp_mode = true; - ret = atomisp_css_copy_configure_output( - asd, video_index, 0, 0, 0, 0); - if (ret) { - dev_err(isp->dev, - "failed to disable copy pipe"); - return -EINVAL; - } - ret = atomisp_css_yuvpp_configure_output( - asd, video_index, - output_info.res.width, - output_info.res.height, - output_info.padded_width, - output_info.format); - if (ret) { - dev_err(isp->dev, - "failed to set up yuvpp pipe\n"); - return -EINVAL; - } - atomisp_css_video_enable_online(asd, false); - atomisp_css_preview_enable_online(asd, - ATOMISP_INPUT_STREAM_GENERAL, false); - } - atomisp_css_yuvpp_configure_viewfinder(asd, video_index, - f->fmt.pix.width, f->fmt.pix.height, - format_bridge->planar ? f->fmt.pix.bytesperline - : f->fmt.pix.bytesperline * 8 - / format_bridge->depth, format_bridge->sh_fmt); - atomisp_css_yuvpp_get_viewfinder_frame_info( - asd, video_index, &output_info); - } else if (source_pad == ATOMISP_SUBDEV_PAD_SOURCE_PREVIEW) { + if (source_pad == ATOMISP_SUBDEV_PAD_SOURCE_PREVIEW) { atomisp_css_video_configure_viewfinder(asd, f->fmt.pix.width, f->fmt.pix.height, format_bridge->planar ? f->fmt.pix.bytesperline @@ -6078,55 +5581,6 @@ done: return 0; } -int atomisp_set_fmt_file(struct video_device *vdev, struct v4l2_format *f) -{ - struct atomisp_device *isp = video_get_drvdata(vdev); - struct atomisp_video_pipe *pipe = atomisp_to_video_pipe(vdev); - struct atomisp_sub_device *asd = pipe->asd; - struct v4l2_mbus_framefmt ffmt = {0}; - const struct atomisp_format_bridge *format_bridge; - struct v4l2_subdev_fh fh; - int ret; - - if (!asd) { - dev_err(isp->dev, "%s(): asd is NULL, device is %s\n", - __func__, vdev->name); - return -EINVAL; - } - - v4l2_fh_init(&fh.vfh, vdev); - - dev_dbg(isp->dev, "setting fmt %ux%u 0x%x for file inject\n", - f->fmt.pix.width, f->fmt.pix.height, f->fmt.pix.pixelformat); - ret = atomisp_try_fmt_file(isp, f); - if (ret) { - dev_err(isp->dev, "atomisp_try_fmt_file err: %d\n", ret); - return ret; - } - - format_bridge = atomisp_get_format_bridge(f->fmt.pix.pixelformat); - if (!format_bridge) { - dev_dbg(isp->dev, "atomisp_get_format_bridge err! 
fmt:0x%x\n", - f->fmt.pix.pixelformat); - return -EINVAL; - } - - pipe->pix = f->fmt.pix; - atomisp_css_input_set_mode(asd, IA_CSS_INPUT_MODE_FIFO); - atomisp_css_input_configure_port(asd, - __get_mipi_port(isp, ATOMISP_CAMERA_PORT_PRIMARY), 2, 0xffff4, - 0, 0, 0, 0); - ffmt.width = f->fmt.pix.width; - ffmt.height = f->fmt.pix.height; - ffmt.code = format_bridge->mbus_code; - - atomisp_subdev_set_ffmt(&asd->subdev, fh.state, - V4L2_SUBDEV_FORMAT_ACTIVE, - ATOMISP_SUBDEV_PAD_SINK, &ffmt); - - return 0; -} - int atomisp_set_shading_table(struct atomisp_sub_device *asd, struct atomisp_shading_table *user_shading_table) { @@ -6275,6 +5729,8 @@ int atomisp_offline_capture_configure(struct atomisp_sub_device *asd, { struct v4l2_ctrl *c; + lockdep_assert_held(&asd->isp->mutex); + /* * In case of M10MO ZSL capture case, we need to issue a separate * capture request to M10MO which will output captured jpeg image @@ -6379,36 +5835,6 @@ int atomisp_flash_enable(struct atomisp_sub_device *asd, int num_frames) return 0; } -int atomisp_source_pad_to_stream_id(struct atomisp_sub_device *asd, - uint16_t source_pad) -{ - int stream_id; - struct atomisp_device *isp = asd->isp; - - if (isp->inputs[asd->input_curr].camera_caps-> - sensor[asd->sensor_curr].stream_num == 1) - return ATOMISP_INPUT_STREAM_GENERAL; - - switch (source_pad) { - case ATOMISP_SUBDEV_PAD_SOURCE_CAPTURE: - stream_id = ATOMISP_INPUT_STREAM_CAPTURE; - break; - case ATOMISP_SUBDEV_PAD_SOURCE_VF: - stream_id = ATOMISP_INPUT_STREAM_POSTVIEW; - break; - case ATOMISP_SUBDEV_PAD_SOURCE_PREVIEW: - stream_id = ATOMISP_INPUT_STREAM_PREVIEW; - break; - case ATOMISP_SUBDEV_PAD_SOURCE_VIDEO: - stream_id = ATOMISP_INPUT_STREAM_VIDEO; - break; - default: - stream_id = ATOMISP_INPUT_STREAM_GENERAL; - } - - return stream_id; -} - bool atomisp_is_vf_pipe(struct atomisp_video_pipe *pipe) { struct atomisp_sub_device *asd = pipe->asd; @@ -6459,7 +5885,7 @@ void atomisp_init_raw_buffer_bitmap(struct atomisp_sub_device *asd) spin_unlock_irqrestore(&asd->raw_buffer_bitmap_lock, flags); } -int atomisp_set_raw_buffer_bitmap(struct atomisp_sub_device *asd, int exp_id) +static int atomisp_set_raw_buffer_bitmap(struct atomisp_sub_device *asd, int exp_id) { int *bitmap, bit; unsigned long flags; @@ -6549,6 +5975,8 @@ int atomisp_exp_id_capture(struct atomisp_sub_device *asd, int *exp_id) int value = *exp_id; int ret; + lockdep_assert_held(&isp->mutex); + ret = __is_raw_buffer_locked(asd, value); if (ret) { dev_err(isp->dev, "%s exp_id %d invalid %d.\n", __func__, value, ret); @@ -6570,6 +5998,8 @@ int atomisp_exp_id_unlock(struct atomisp_sub_device *asd, int *exp_id) int value = *exp_id; int ret; + lockdep_assert_held(&isp->mutex); + ret = __clear_raw_buffer_bitmap(asd, value); if (ret) { dev_err(isp->dev, "%s exp_id %d invalid %d.\n", __func__, value, ret); @@ -6605,6 +6035,8 @@ int atomisp_inject_a_fake_event(struct atomisp_sub_device *asd, int *event) if (!event || asd->streaming != ATOMISP_DEVICE_STREAMING_ENABLED) return -EINVAL; + lockdep_assert_held(&asd->isp->mutex); + dev_dbg(asd->isp->dev, "%s: trying to inject a fake event 0x%x\n", __func__, *event); @@ -6675,19 +6107,6 @@ int atomisp_get_invalid_frame_num(struct video_device *vdev, struct ia_css_pipe_info p_info; int ret; - if (!asd) { - dev_err(pipe->isp->dev, "%s(): asd is NULL, device is %s\n", - __func__, vdev->name); - return -EINVAL; - } - - if (asd->isp->inputs[asd->input_curr].camera_caps-> - sensor[asd->sensor_curr].stream_num > 1) { - /* External ISP */ - *invalid_frame_num = 0; - return 0; - } 
- pipe_id = atomisp_get_pipe_id(pipe); if (!asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].pipes[pipe_id]) { dev_warn(asd->isp->dev, diff --git a/drivers/staging/media/atomisp/pci/atomisp_cmd.h b/drivers/staging/media/atomisp/pci/atomisp_cmd.h index ebc729468f87..c9f92f1326b6 100644 --- a/drivers/staging/media/atomisp/pci/atomisp_cmd.h +++ b/drivers/staging/media/atomisp/pci/atomisp_cmd.h @@ -54,7 +54,6 @@ void dump_sp_dmem(struct atomisp_device *isp, unsigned int addr, unsigned int size); struct camera_mipi_info *atomisp_to_sensor_mipi_info(struct v4l2_subdev *sd); struct atomisp_video_pipe *atomisp_to_video_pipe(struct video_device *dev); -struct atomisp_acc_pipe *atomisp_to_acc_pipe(struct video_device *dev); int atomisp_reset(struct atomisp_device *isp); void atomisp_flush_bufs_and_wakeup(struct atomisp_sub_device *asd); void atomisp_clear_css_buffer_counters(struct atomisp_sub_device *asd); @@ -66,8 +65,7 @@ bool atomisp_buffers_queued_pipe(struct atomisp_video_pipe *pipe); /* Interrupt functions */ void atomisp_msi_irq_init(struct atomisp_device *isp); void atomisp_msi_irq_uninit(struct atomisp_device *isp); -void atomisp_wdt_work(struct work_struct *work); -void atomisp_wdt(struct timer_list *t); +void atomisp_assert_recovery_work(struct work_struct *work); void atomisp_setup_flash(struct atomisp_sub_device *asd); irqreturn_t atomisp_isr(int irq, void *dev); irqreturn_t atomisp_isr_thread(int irq, void *isp_ptr); @@ -268,8 +266,7 @@ int atomisp_get_sensor_mode_data(struct atomisp_sub_device *asd, int atomisp_try_fmt(struct video_device *vdev, struct v4l2_pix_format *f, bool *res_overflow); -int atomisp_set_fmt(struct video_device *vdev, struct v4l2_format *f); -int atomisp_set_fmt_file(struct video_device *vdev, struct v4l2_format *f); +int atomisp_set_fmt(struct file *file, void *fh, struct v4l2_format *f); int atomisp_set_shading_table(struct atomisp_sub_device *asd, struct atomisp_shading_table *shading_table); @@ -300,8 +297,6 @@ void atomisp_buf_done(struct atomisp_sub_device *asd, int error, bool q_buffers, enum atomisp_input_stream_id stream_id); void atomisp_css_flush(struct atomisp_device *isp); -int atomisp_source_pad_to_stream_id(struct atomisp_sub_device *asd, - uint16_t source_pad); /* Events. Only one event has to be exported for now. 
*/ void atomisp_eof_event(struct atomisp_sub_device *asd, uint8_t exp_id); @@ -324,8 +319,6 @@ void atomisp_flush_params_queue(struct atomisp_video_pipe *asd); int atomisp_exp_id_unlock(struct atomisp_sub_device *asd, int *exp_id); int atomisp_exp_id_capture(struct atomisp_sub_device *asd, int *exp_id); -/* Function to update Raw Buffer bitmap */ -int atomisp_set_raw_buffer_bitmap(struct atomisp_sub_device *asd, int exp_id); void atomisp_init_raw_buffer_bitmap(struct atomisp_sub_device *asd); /* Function to enable/disable zoom for capture pipe */ diff --git a/drivers/staging/media/atomisp/pci/atomisp_compat.h b/drivers/staging/media/atomisp/pci/atomisp_compat.h index 3393ae6824f0..a6d85d0f9ae5 100644 --- a/drivers/staging/media/atomisp/pci/atomisp_compat.h +++ b/drivers/staging/media/atomisp/pci/atomisp_compat.h @@ -129,10 +129,6 @@ int atomisp_alloc_metadata_output_buf(struct atomisp_sub_device *asd); void atomisp_free_metadata_output_buf(struct atomisp_sub_device *asd); -void atomisp_css_get_dis_statistics(struct atomisp_sub_device *asd, - struct atomisp_css_buffer *isp_css_buffer, - struct ia_css_isp_dvs_statistics_map *dvs_map); - void atomisp_css_temp_pipe_to_pipe_id(struct atomisp_sub_device *asd, struct atomisp_css_event *current_event); @@ -434,17 +430,11 @@ void atomisp_css_get_morph_table(struct atomisp_sub_device *asd, void atomisp_css_morph_table_free(struct ia_css_morph_table *table); -void atomisp_css_set_cont_prev_start_time(struct atomisp_device *isp, - unsigned int overlap); - int atomisp_css_get_dis_stat(struct atomisp_sub_device *asd, struct atomisp_dis_statistics *stats); int atomisp_css_update_stream(struct atomisp_sub_device *asd); -struct atomisp_acc_fw; -int atomisp_css_set_acc_parameters(struct atomisp_acc_fw *acc_fw); - int atomisp_css_isr_thread(struct atomisp_device *isp, bool *frame_done_found, bool *css_pipe_done); diff --git a/drivers/staging/media/atomisp/pci/atomisp_compat_css20.c b/drivers/staging/media/atomisp/pci/atomisp_compat_css20.c index 5aa108a1724c..fdc05548d972 100644 --- a/drivers/staging/media/atomisp/pci/atomisp_compat_css20.c +++ b/drivers/staging/media/atomisp/pci/atomisp_compat_css20.c @@ -1427,7 +1427,6 @@ int atomisp_css_get_grid_info(struct atomisp_sub_device *asd, struct ia_css_pipe_info p_info; struct ia_css_grid_info old_info; struct atomisp_device *isp = asd->isp; - int stream_index = atomisp_source_pad_to_stream_id(asd, source_pad); int md_width = asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL]. 
stream_config.metadata_config.resolution.width; @@ -1435,7 +1434,7 @@ int atomisp_css_get_grid_info(struct atomisp_sub_device *asd, memset(&old_info, 0, sizeof(struct ia_css_grid_info)); if (ia_css_pipe_get_info( - asd->stream_env[stream_index].pipes[pipe_id], + asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].pipes[pipe_id], &p_info) != 0) { dev_err(isp->dev, "ia_css_pipe_get_info failed\n"); return -EINVAL; @@ -1574,20 +1573,6 @@ void atomisp_free_metadata_output_buf(struct atomisp_sub_device *asd) } } -void atomisp_css_get_dis_statistics(struct atomisp_sub_device *asd, - struct atomisp_css_buffer *isp_css_buffer, - struct ia_css_isp_dvs_statistics_map *dvs_map) -{ - if (asd->params.dvs_stat) { - if (dvs_map) - ia_css_translate_dvs2_statistics( - asd->params.dvs_stat, dvs_map); - else - ia_css_get_dvs2_statistics(asd->params.dvs_stat, - isp_css_buffer->css_buffer.data.stats_dvs); - } -} - void atomisp_css_temp_pipe_to_pipe_id(struct atomisp_sub_device *asd, struct atomisp_css_event *current_event) { @@ -2694,11 +2679,11 @@ int atomisp_get_css_frame_info(struct atomisp_sub_device *asd, struct atomisp_device *isp = asd->isp; if (ATOMISP_SOC_CAMERA(asd)) { - stream_index = atomisp_source_pad_to_stream_id(asd, source_pad); + stream_index = ATOMISP_INPUT_STREAM_GENERAL; } else { stream_index = (pipe_index == IA_CSS_PIPE_ID_YUVPP) ? ATOMISP_INPUT_STREAM_VIDEO : - atomisp_source_pad_to_stream_id(asd, source_pad); + ATOMISP_INPUT_STREAM_GENERAL; } if (0 != ia_css_pipe_get_info(asd->stream_env[stream_index] @@ -3626,6 +3611,8 @@ int atomisp_css_get_dis_stat(struct atomisp_sub_device *asd, struct atomisp_dis_buf *dis_buf; unsigned long flags; + lockdep_assert_held(&isp->mutex); + if (!asd->params.dvs_stat->hor_prod.odd_real || !asd->params.dvs_stat->hor_prod.odd_imag || !asd->params.dvs_stat->hor_prod.even_real || @@ -3637,12 +3624,8 @@ int atomisp_css_get_dis_stat(struct atomisp_sub_device *asd, return -EINVAL; /* isp needs to be streaming to get DIS statistics */ - spin_lock_irqsave(&isp->lock, flags); - if (asd->streaming != ATOMISP_DEVICE_STREAMING_ENABLED) { - spin_unlock_irqrestore(&isp->lock, flags); + if (asd->streaming != ATOMISP_DEVICE_STREAMING_ENABLED) return -EINVAL; - } - spin_unlock_irqrestore(&isp->lock, flags); if (atomisp_compare_dvs_grid(asd, &stats->dvs2_stat.grid_info) != 0) /* If the grid info in the argument differs from the current @@ -3763,32 +3746,6 @@ void atomisp_css_morph_table_free(struct ia_css_morph_table *table) ia_css_morph_table_free(table); } -void atomisp_css_set_cont_prev_start_time(struct atomisp_device *isp, - unsigned int overlap) -{ - /* CSS 2.0 doesn't support this API. 
*/ - dev_dbg(isp->dev, "set cont prev start time is not supported.\n"); - return; -} - -/* Set the ACC binary arguments */ -int atomisp_css_set_acc_parameters(struct atomisp_acc_fw *acc_fw) -{ - unsigned int mem; - - for (mem = 0; mem < ATOMISP_ACC_NR_MEMORY; mem++) { - if (acc_fw->args[mem].length == 0) - continue; - - ia_css_isp_param_set_css_mem_init(&acc_fw->fw->mem_initializers, - IA_CSS_PARAM_CLASS_PARAM, mem, - acc_fw->args[mem].css_ptr, - acc_fw->args[mem].length); - } - - return 0; -} - static struct atomisp_sub_device *__get_atomisp_subdev( struct ia_css_pipe *css_pipe, struct atomisp_device *isp, @@ -3824,8 +3781,8 @@ int atomisp_css_isr_thread(struct atomisp_device *isp, enum atomisp_input_stream_id stream_id = 0; struct atomisp_css_event current_event; struct atomisp_sub_device *asd; - bool reset_wdt_timer[MAX_STREAM_NUM] = {false}; - int i; + + lockdep_assert_held(&isp->mutex); while (!ia_css_dequeue_psys_event(¤t_event.event)) { if (current_event.event.type == @@ -3839,14 +3796,8 @@ int atomisp_css_isr_thread(struct atomisp_device *isp, __func__, current_event.event.fw_assert_module_id, current_event.event.fw_assert_line_no); - for (i = 0; i < isp->num_of_streams; i++) - atomisp_wdt_stop(&isp->asd[i], 0); - - if (!IS_ISP2401) - atomisp_wdt(&isp->asd[0].wdt); - else - queue_work(isp->wdt_work_queue, &isp->wdt_work); + queue_work(system_long_wq, &isp->assert_recovery_work); return -EINVAL; } else if (current_event.event.type == IA_CSS_EVENT_TYPE_FW_WARNING) { dev_warn(isp->dev, "%s: ISP reports warning, code is %d, exp_id %d\n", @@ -3875,20 +3826,12 @@ int atomisp_css_isr_thread(struct atomisp_device *isp, frame_done_found[asd->index] = true; atomisp_buf_done(asd, 0, IA_CSS_BUFFER_TYPE_OUTPUT_FRAME, current_event.pipe, true, stream_id); - - if (!IS_ISP2401) - reset_wdt_timer[asd->index] = true; /* ISP running */ - break; case IA_CSS_EVENT_TYPE_SECOND_OUTPUT_FRAME_DONE: dev_dbg(isp->dev, "event: Second output frame done"); frame_done_found[asd->index] = true; atomisp_buf_done(asd, 0, IA_CSS_BUFFER_TYPE_SEC_OUTPUT_FRAME, current_event.pipe, true, stream_id); - - if (!IS_ISP2401) - reset_wdt_timer[asd->index] = true; /* ISP running */ - break; case IA_CSS_EVENT_TYPE_3A_STATISTICS_DONE: dev_dbg(isp->dev, "event: 3A stats frame done"); @@ -3909,19 +3852,12 @@ int atomisp_css_isr_thread(struct atomisp_device *isp, atomisp_buf_done(asd, 0, IA_CSS_BUFFER_TYPE_VF_OUTPUT_FRAME, current_event.pipe, true, stream_id); - - if (!IS_ISP2401) - reset_wdt_timer[asd->index] = true; /* ISP running */ - break; case IA_CSS_EVENT_TYPE_SECOND_VF_OUTPUT_FRAME_DONE: dev_dbg(isp->dev, "event: second VF output frame done"); atomisp_buf_done(asd, 0, IA_CSS_BUFFER_TYPE_SEC_VF_OUTPUT_FRAME, current_event.pipe, true, stream_id); - if (!IS_ISP2401) - reset_wdt_timer[asd->index] = true; /* ISP running */ - break; case IA_CSS_EVENT_TYPE_DIS_STATISTICS_DONE: dev_dbg(isp->dev, "event: dis stats frame done"); @@ -3944,24 +3880,6 @@ int atomisp_css_isr_thread(struct atomisp_device *isp, } } - if (IS_ISP2401) - return 0; - - /* ISP2400: If there are no buffers queued then delete wdt timer. */ - for (i = 0; i < isp->num_of_streams; i++) { - asd = &isp->asd[i]; - if (!asd) - continue; - if (asd->streaming != ATOMISP_DEVICE_STREAMING_ENABLED) - continue; - if (!atomisp_buffers_queued(asd)) - atomisp_wdt_stop(asd, false); - else if (reset_wdt_timer[i]) - /* SOF irq should not reset wdt timer. 
*/ - atomisp_wdt_refresh(asd, - ATOMISP_WDT_KEEP_CURRENT_DELAY); - } - return 0; } diff --git a/drivers/staging/media/atomisp/pci/atomisp_file.c b/drivers/staging/media/atomisp/pci/atomisp_file.c deleted file mode 100644 index 4570a9ab100b..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp_file.c +++ /dev/null @@ -1,229 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 -/* - * Support for Medifield PNW Camera Imaging ISP subsystem. - * - * Copyright (c) 2010 Intel Corporation. All Rights Reserved. - * - * Copyright (c) 2010 Silicon Hive www.siliconhive.com. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License version - * 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * - */ - -#include <media/v4l2-event.h> -#include <media/v4l2-mediabus.h> - -#include <media/videobuf-vmalloc.h> -#include <linux/delay.h> - -#include "ia_css.h" - -#include "atomisp_cmd.h" -#include "atomisp_common.h" -#include "atomisp_file.h" -#include "atomisp_internal.h" -#include "atomisp_ioctl.h" - -static void file_work(struct work_struct *work) -{ - struct atomisp_file_device *file_dev = - container_of(work, struct atomisp_file_device, work); - struct atomisp_device *isp = file_dev->isp; - /* only support file injection on subdev0 */ - struct atomisp_sub_device *asd = &isp->asd[0]; - struct atomisp_video_pipe *out_pipe = &asd->video_in; - unsigned short *buf = videobuf_to_vmalloc(out_pipe->outq.bufs[0]); - struct v4l2_mbus_framefmt isp_sink_fmt; - - if (asd->streaming != ATOMISP_DEVICE_STREAMING_ENABLED) - return; - - dev_dbg(isp->dev, ">%s: ready to start streaming\n", __func__); - isp_sink_fmt = *atomisp_subdev_get_ffmt(&asd->subdev, NULL, - V4L2_SUBDEV_FORMAT_ACTIVE, - ATOMISP_SUBDEV_PAD_SINK); - - while (!ia_css_isp_has_started()) - usleep_range(1000, 1500); - - ia_css_stream_send_input_frame(asd->stream_env[ATOMISP_INPUT_STREAM_GENERAL].stream, - buf, isp_sink_fmt.width, - isp_sink_fmt.height); - dev_dbg(isp->dev, "<%s: streaming done\n", __func__); -} - -static int file_input_s_stream(struct v4l2_subdev *sd, int enable) -{ - struct atomisp_file_device *file_dev = v4l2_get_subdevdata(sd); - struct atomisp_device *isp = file_dev->isp; - /* only support file injection on subdev0 */ - struct atomisp_sub_device *asd = &isp->asd[0]; - - dev_dbg(isp->dev, "%s: enable %d\n", __func__, enable); - if (enable) { - if (asd->streaming != ATOMISP_DEVICE_STREAMING_ENABLED) - return 0; - - queue_work(file_dev->work_queue, &file_dev->work); - return 0; - } - cancel_work_sync(&file_dev->work); - return 0; -} - -static int file_input_get_fmt(struct v4l2_subdev *sd, - struct v4l2_subdev_state *sd_state, - struct v4l2_subdev_format *format) -{ - struct v4l2_mbus_framefmt *fmt = &format->format; - struct atomisp_file_device *file_dev = v4l2_get_subdevdata(sd); - struct atomisp_device *isp = file_dev->isp; - /* only support file injection on subdev0 */ - struct atomisp_sub_device *asd = &isp->asd[0]; - struct v4l2_mbus_framefmt *isp_sink_fmt; - - if (format->pad) - return -EINVAL; - isp_sink_fmt = atomisp_subdev_get_ffmt(&asd->subdev, NULL, - V4L2_SUBDEV_FORMAT_ACTIVE, - ATOMISP_SUBDEV_PAD_SINK); - - fmt->width = isp_sink_fmt->width; - fmt->height = isp_sink_fmt->height; - fmt->code = 
isp_sink_fmt->code; - - return 0; -} - -static int file_input_set_fmt(struct v4l2_subdev *sd, - struct v4l2_subdev_state *sd_state, - struct v4l2_subdev_format *format) -{ - struct v4l2_mbus_framefmt *fmt = &format->format; - - if (format->pad) - return -EINVAL; - file_input_get_fmt(sd, sd_state, format); - if (format->which == V4L2_SUBDEV_FORMAT_TRY) - sd_state->pads->try_fmt = *fmt; - return 0; -} - -static int file_input_log_status(struct v4l2_subdev *sd) -{ - /*to fake*/ - return 0; -} - -static int file_input_s_power(struct v4l2_subdev *sd, int on) -{ - /* to fake */ - return 0; -} - -static int file_input_enum_mbus_code(struct v4l2_subdev *sd, - struct v4l2_subdev_state *sd_state, - struct v4l2_subdev_mbus_code_enum *code) -{ - /*to fake*/ - return 0; -} - -static int file_input_enum_frame_size(struct v4l2_subdev *sd, - struct v4l2_subdev_state *sd_state, - struct v4l2_subdev_frame_size_enum *fse) -{ - /*to fake*/ - return 0; -} - -static int file_input_enum_frame_ival(struct v4l2_subdev *sd, - struct v4l2_subdev_state *sd_state, - struct v4l2_subdev_frame_interval_enum - *fie) -{ - /*to fake*/ - return 0; -} - -static const struct v4l2_subdev_video_ops file_input_video_ops = { - .s_stream = file_input_s_stream, -}; - -static const struct v4l2_subdev_core_ops file_input_core_ops = { - .log_status = file_input_log_status, - .s_power = file_input_s_power, -}; - -static const struct v4l2_subdev_pad_ops file_input_pad_ops = { - .enum_mbus_code = file_input_enum_mbus_code, - .enum_frame_size = file_input_enum_frame_size, - .enum_frame_interval = file_input_enum_frame_ival, - .get_fmt = file_input_get_fmt, - .set_fmt = file_input_set_fmt, -}; - -static const struct v4l2_subdev_ops file_input_ops = { - .core = &file_input_core_ops, - .video = &file_input_video_ops, - .pad = &file_input_pad_ops, -}; - -void -atomisp_file_input_unregister_entities(struct atomisp_file_device *file_dev) -{ - media_entity_cleanup(&file_dev->sd.entity); - v4l2_device_unregister_subdev(&file_dev->sd); -} - -int atomisp_file_input_register_entities(struct atomisp_file_device *file_dev, - struct v4l2_device *vdev) -{ - /* Register the subdev and video nodes. 
*/ - return v4l2_device_register_subdev(vdev, &file_dev->sd); -} - -void atomisp_file_input_cleanup(struct atomisp_device *isp) -{ - struct atomisp_file_device *file_dev = &isp->file_dev; - - if (file_dev->work_queue) { - destroy_workqueue(file_dev->work_queue); - file_dev->work_queue = NULL; - } -} - -int atomisp_file_input_init(struct atomisp_device *isp) -{ - struct atomisp_file_device *file_dev = &isp->file_dev; - struct v4l2_subdev *sd = &file_dev->sd; - struct media_pad *pads = file_dev->pads; - struct media_entity *me = &sd->entity; - - file_dev->isp = isp; - file_dev->work_queue = alloc_workqueue(isp->v4l2_dev.name, 0, 1); - if (!file_dev->work_queue) { - dev_err(isp->dev, "Failed to initialize file inject workq\n"); - return -ENOMEM; - } - - INIT_WORK(&file_dev->work, file_work); - - v4l2_subdev_init(sd, &file_input_ops); - sd->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE; - strscpy(sd->name, "file_input_subdev", sizeof(sd->name)); - v4l2_set_subdevdata(sd, file_dev); - - pads[0].flags = MEDIA_PAD_FL_SINK; - me->function = MEDIA_ENT_F_V4L2_SUBDEV_UNKNOWN; - - return media_entity_pads_init(me, 1, pads); -} diff --git a/drivers/staging/media/atomisp/pci/atomisp_file.h b/drivers/staging/media/atomisp/pci/atomisp_file.h deleted file mode 100644 index f166a2aefff1..000000000000 --- a/drivers/staging/media/atomisp/pci/atomisp_file.h +++ /dev/null @@ -1,44 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * Support for Medifield PNW Camera Imaging ISP subsystem. - * - * Copyright (c) 2010 Intel Corporation. All Rights Reserved. - * - * Copyright (c) 2010 Silicon Hive www.siliconhive.com. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License version - * 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. 
- * - * - */ - -#ifndef __ATOMISP_FILE_H__ -#define __ATOMISP_FILE_H__ - -#include <media/media-entity.h> -#include <media/v4l2-subdev.h> - -struct atomisp_device; - -struct atomisp_file_device { - struct v4l2_subdev sd; - struct atomisp_device *isp; - struct media_pad pads[1]; - - struct workqueue_struct *work_queue; - struct work_struct work; -}; - -void atomisp_file_input_cleanup(struct atomisp_device *isp); -int atomisp_file_input_init(struct atomisp_device *isp); -void atomisp_file_input_unregister_entities( - struct atomisp_file_device *file_dev); -int atomisp_file_input_register_entities(struct atomisp_file_device *file_dev, - struct v4l2_device *vdev); -#endif /* __ATOMISP_FILE_H__ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp_fops.c b/drivers/staging/media/atomisp/pci/atomisp_fops.c index 77150e4ae144..84a84e0cdeef 100644 --- a/drivers/staging/media/atomisp/pci/atomisp_fops.c +++ b/drivers/staging/media/atomisp/pci/atomisp_fops.c @@ -369,45 +369,6 @@ static int atomisp_get_css_buf_type(struct atomisp_sub_device *asd, return IA_CSS_BUFFER_TYPE_VF_OUTPUT_FRAME; } -static int atomisp_qbuffers_to_css_for_all_pipes(struct atomisp_sub_device *asd) -{ - enum ia_css_buffer_type buf_type; - enum ia_css_pipe_id css_capture_pipe_id = IA_CSS_PIPE_ID_COPY; - enum ia_css_pipe_id css_preview_pipe_id = IA_CSS_PIPE_ID_COPY; - enum ia_css_pipe_id css_video_pipe_id = IA_CSS_PIPE_ID_COPY; - enum atomisp_input_stream_id input_stream_id; - struct atomisp_video_pipe *capture_pipe; - struct atomisp_video_pipe *preview_pipe; - struct atomisp_video_pipe *video_pipe; - - capture_pipe = &asd->video_out_capture; - preview_pipe = &asd->video_out_preview; - video_pipe = &asd->video_out_video_capture; - - buf_type = atomisp_get_css_buf_type( - asd, css_preview_pipe_id, - atomisp_subdev_source_pad(&preview_pipe->vdev)); - input_stream_id = ATOMISP_INPUT_STREAM_PREVIEW; - atomisp_q_video_buffers_to_css(asd, preview_pipe, - input_stream_id, - buf_type, css_preview_pipe_id); - - buf_type = atomisp_get_css_buf_type(asd, css_capture_pipe_id, - atomisp_subdev_source_pad(&capture_pipe->vdev)); - input_stream_id = ATOMISP_INPUT_STREAM_GENERAL; - atomisp_q_video_buffers_to_css(asd, capture_pipe, - input_stream_id, - buf_type, css_capture_pipe_id); - - buf_type = atomisp_get_css_buf_type(asd, css_video_pipe_id, - atomisp_subdev_source_pad(&video_pipe->vdev)); - input_stream_id = ATOMISP_INPUT_STREAM_VIDEO; - atomisp_q_video_buffers_to_css(asd, video_pipe, - input_stream_id, - buf_type, css_video_pipe_id); - return 0; -} - /* queue all available buffers to css */ int atomisp_qbuffers_to_css(struct atomisp_sub_device *asd) { @@ -423,11 +384,6 @@ int atomisp_qbuffers_to_css(struct atomisp_sub_device *asd) bool raw_mode = atomisp_is_mbuscode_raw( asd->fmt[asd->capture_pad].fmt.code); - if (asd->isp->inputs[asd->input_curr].camera_caps-> - sensor[asd->sensor_curr].stream_num == 2 && - !asd->yuvpp_mode) - return atomisp_qbuffers_to_css_for_all_pipes(asd); - if (asd->vfpp->val == ATOMISP_VFPP_DISABLE_SCALER) { video_pipe = &asd->video_out_video_capture; css_video_pipe_id = IA_CSS_PIPE_ID_VIDEO; @@ -593,47 +549,6 @@ static void atomisp_buf_release(struct videobuf_queue *vq, atomisp_videobuf_free_buf(vb); } -static int atomisp_buf_setup_output(struct videobuf_queue *vq, - unsigned int *count, unsigned int *size) -{ - struct atomisp_video_pipe *pipe = vq->priv_data; - - *size = pipe->pix.sizeimage; - - return 0; -} - -static int atomisp_buf_prepare_output(struct videobuf_queue *vq, - struct videobuf_buffer *vb, - enum 
v4l2_field field) -{ - struct atomisp_video_pipe *pipe = vq->priv_data; - - vb->size = pipe->pix.sizeimage; - vb->width = pipe->pix.width; - vb->height = pipe->pix.height; - vb->field = field; - vb->state = VIDEOBUF_PREPARED; - - return 0; -} - -static void atomisp_buf_queue_output(struct videobuf_queue *vq, - struct videobuf_buffer *vb) -{ - struct atomisp_video_pipe *pipe = vq->priv_data; - - list_add_tail(&vb->queue, &pipe->activeq_out); - vb->state = VIDEOBUF_QUEUED; -} - -static void atomisp_buf_release_output(struct videobuf_queue *vq, - struct videobuf_buffer *vb) -{ - videobuf_vmalloc_free(vb); - vb->state = VIDEOBUF_NEEDS_INIT; -} - static const struct videobuf_queue_ops videobuf_qops = { .buf_setup = atomisp_buf_setup, .buf_prepare = atomisp_buf_prepare, @@ -641,13 +556,6 @@ static const struct videobuf_queue_ops videobuf_qops = { .buf_release = atomisp_buf_release, }; -static const struct videobuf_queue_ops videobuf_qops_output = { - .buf_setup = atomisp_buf_setup_output, - .buf_prepare = atomisp_buf_prepare_output, - .buf_queue = atomisp_buf_queue_output, - .buf_release = atomisp_buf_release_output, -}; - static int atomisp_init_pipe(struct atomisp_video_pipe *pipe) { /* init locks */ @@ -660,15 +568,7 @@ static int atomisp_init_pipe(struct atomisp_video_pipe *pipe) sizeof(struct atomisp_buffer), pipe, NULL); /* ext_lock: NULL */ - videobuf_queue_vmalloc_init(&pipe->outq, &videobuf_qops_output, NULL, - &pipe->irq_lock, - V4L2_BUF_TYPE_VIDEO_OUTPUT, - V4L2_FIELD_NONE, - sizeof(struct atomisp_buffer), pipe, - NULL); /* ext_lock: NULL */ - INIT_LIST_HEAD(&pipe->activeq); - INIT_LIST_HEAD(&pipe->activeq_out); INIT_LIST_HEAD(&pipe->buffers_waiting_for_param); INIT_LIST_HEAD(&pipe->per_frame_params); memset(pipe->frame_request_config_id, 0, @@ -684,7 +584,6 @@ static void atomisp_dev_init_struct(struct atomisp_device *isp) { unsigned int i; - isp->sw_contex.file_input = false; isp->need_gfx_throttle = true; isp->isp_fatal_error = false; isp->mipi_frame_size = 0; @@ -741,9 +640,7 @@ static unsigned int atomisp_subdev_users(struct atomisp_sub_device *asd) return asd->video_out_preview.users + asd->video_out_vf.users + asd->video_out_capture.users + - asd->video_out_video_capture.users + - asd->video_acc.users + - asd->video_in.users; + asd->video_out_video_capture.users; } unsigned int atomisp_dev_users(struct atomisp_device *isp) @@ -760,48 +657,18 @@ static int atomisp_open(struct file *file) { struct video_device *vdev = video_devdata(file); struct atomisp_device *isp = video_get_drvdata(vdev); - struct atomisp_video_pipe *pipe = NULL; - struct atomisp_acc_pipe *acc_pipe = NULL; - struct atomisp_sub_device *asd; - bool acc_node = false; + struct atomisp_video_pipe *pipe = atomisp_to_video_pipe(vdev); + struct atomisp_sub_device *asd = pipe->asd; int ret; dev_dbg(isp->dev, "open device %s\n", vdev->name); - /* - * Ensure that if we are still loading we block. Once the loading - * is over we can proceed. 
We can't blindly hold the lock until - * that occurs as if the load fails we'll deadlock the unload - */ - rt_mutex_lock(&isp->loading); - /* - * FIXME: revisit this with a better check once the code structure - * is cleaned up a bit more - */ ret = v4l2_fh_open(file); - if (ret) { - dev_err(isp->dev, - "%s: v4l2_fh_open() returned error %d\n", - __func__, ret); - rt_mutex_unlock(&isp->loading); + if (ret) return ret; - } - if (!isp->ready) { - rt_mutex_unlock(&isp->loading); - return -ENXIO; - } - rt_mutex_unlock(&isp->loading); - rt_mutex_lock(&isp->mutex); + mutex_lock(&isp->mutex); - acc_node = !strcmp(vdev->name, "ATOMISP ISP ACC"); - if (acc_node) { - acc_pipe = atomisp_to_acc_pipe(vdev); - asd = acc_pipe->asd; - } else { - pipe = atomisp_to_video_pipe(vdev); - asd = pipe->asd; - } asd->subdev.devnode = vdev; /* Deferred firmware loading case. */ if (isp->css_env.isp_css_fw.bytes == 0) { @@ -823,14 +690,6 @@ static int atomisp_open(struct file *file) isp->css_env.isp_css_fw.data = NULL; } - if (acc_node && acc_pipe->users) { - dev_dbg(isp->dev, "acc node already opened\n"); - rt_mutex_unlock(&isp->mutex); - return -EBUSY; - } else if (acc_node) { - goto dev_init; - } - if (!isp->input_cnt) { dev_err(isp->dev, "no camera attached\n"); ret = -EINVAL; @@ -842,7 +701,7 @@ static int atomisp_open(struct file *file) */ if (pipe->users) { dev_dbg(isp->dev, "video node already opened\n"); - rt_mutex_unlock(&isp->mutex); + mutex_unlock(&isp->mutex); return -EBUSY; } @@ -850,7 +709,6 @@ static int atomisp_open(struct file *file) if (ret) goto error; -dev_init: if (atomisp_dev_users(isp)) { dev_dbg(isp->dev, "skip init isp in open\n"); goto init_subdev; @@ -885,16 +743,11 @@ init_subdev: atomisp_subdev_init_struct(asd); done: - - if (acc_node) - acc_pipe->users++; - else - pipe->users++; - rt_mutex_unlock(&isp->mutex); + pipe->users++; + mutex_unlock(&isp->mutex); /* Ensure that a mode is set */ - if (!acc_node) - v4l2_ctrl_s_ctrl(asd->run_mode, pipe->default_run_mode); + v4l2_ctrl_s_ctrl(asd->run_mode, pipe->default_run_mode); return 0; @@ -902,7 +755,8 @@ css_error: atomisp_css_uninit(isp); pm_runtime_put(vdev->v4l2_dev->dev); error: - rt_mutex_unlock(&isp->mutex); + mutex_unlock(&isp->mutex); + v4l2_fh_release(file); return ret; } @@ -910,13 +764,12 @@ static int atomisp_release(struct file *file) { struct video_device *vdev = video_devdata(file); struct atomisp_device *isp = video_get_drvdata(vdev); - struct atomisp_video_pipe *pipe; - struct atomisp_acc_pipe *acc_pipe; - struct atomisp_sub_device *asd; - bool acc_node; + struct atomisp_video_pipe *pipe = atomisp_to_video_pipe(vdev); + struct atomisp_sub_device *asd = pipe->asd; struct v4l2_requestbuffers req; struct v4l2_subdev_fh fh; struct v4l2_rect clear_compose = {0}; + unsigned long flags; int ret = 0; v4l2_fh_init(&fh.vfh, vdev); @@ -925,23 +778,12 @@ static int atomisp_release(struct file *file) if (!isp) return -EBADF; - mutex_lock(&isp->streamoff_mutex); - rt_mutex_lock(&isp->mutex); + mutex_lock(&isp->mutex); dev_dbg(isp->dev, "release device %s\n", vdev->name); - acc_node = !strcmp(vdev->name, "ATOMISP ISP ACC"); - if (acc_node) { - acc_pipe = atomisp_to_acc_pipe(vdev); - asd = acc_pipe->asd; - } else { - pipe = atomisp_to_video_pipe(vdev); - asd = pipe->asd; - } + asd->subdev.devnode = vdev; - if (acc_node) { - acc_pipe->users--; - goto subdev_uninit; - } + pipe->users--; if (pipe->capq.streaming) @@ -950,27 +792,19 @@ static int atomisp_release(struct file *file) __func__); if (pipe->capq.streaming && - 
__atomisp_streamoff(file, NULL, V4L2_BUF_TYPE_VIDEO_CAPTURE)) { - dev_err(isp->dev, - "atomisp_streamoff failed on release, driver bug"); + atomisp_streamoff(file, NULL, V4L2_BUF_TYPE_VIDEO_CAPTURE)) { + dev_err(isp->dev, "atomisp_streamoff failed on release, driver bug"); goto done; } if (pipe->users) goto done; - if (__atomisp_reqbufs(file, NULL, &req)) { - dev_err(isp->dev, - "atomisp_reqbufs failed on release, driver bug"); + if (atomisp_reqbufs(file, NULL, &req)) { + dev_err(isp->dev, "atomisp_reqbufs failed on release, driver bug"); goto done; } - if (pipe->outq.bufs[0]) { - mutex_lock(&pipe->outq.vb_lock); - videobuf_queue_cancel(&pipe->outq); - mutex_unlock(&pipe->outq.vb_lock); - } - /* * A little trick here: * file injection input resolution is recorded in the sink pad, @@ -978,26 +812,17 @@ static int atomisp_release(struct file *file) * The sink pad setting can only be cleared when all device nodes * get released. */ - if (!isp->sw_contex.file_input && asd->fmt_auto->val) { + if (asd->fmt_auto->val) { struct v4l2_mbus_framefmt isp_sink_fmt = { 0 }; atomisp_subdev_set_ffmt(&asd->subdev, fh.state, V4L2_SUBDEV_FORMAT_ACTIVE, ATOMISP_SUBDEV_PAD_SINK, &isp_sink_fmt); } -subdev_uninit: + if (atomisp_subdev_users(asd)) goto done; - /* clear the sink pad for file input */ - if (isp->sw_contex.file_input && asd->fmt_auto->val) { - struct v4l2_mbus_framefmt isp_sink_fmt = { 0 }; - - atomisp_subdev_set_ffmt(&asd->subdev, fh.state, - V4L2_SUBDEV_FORMAT_ACTIVE, - ATOMISP_SUBDEV_PAD_SINK, &isp_sink_fmt); - } - atomisp_css_free_stat_buffers(asd); atomisp_free_internal_buffers(asd); ret = v4l2_subdev_call(isp->inputs[asd->input_curr].camera, @@ -1007,7 +832,9 @@ subdev_uninit: /* clear the asd field to show this camera is not used */ isp->inputs[asd->input_curr].asd = NULL; + spin_lock_irqsave(&isp->lock, flags); asd->streaming = ATOMISP_DEVICE_STREAMING_DISABLED; + spin_unlock_irqrestore(&isp->lock, flags); if (atomisp_dev_users(isp)) goto done; @@ -1029,15 +856,12 @@ subdev_uninit: dev_err(isp->dev, "Failed to power off device\n"); done: - if (!acc_node) { - atomisp_subdev_set_selection(&asd->subdev, fh.state, - V4L2_SUBDEV_FORMAT_ACTIVE, - atomisp_subdev_source_pad(vdev), - V4L2_SEL_TGT_COMPOSE, 0, - &clear_compose); - } - rt_mutex_unlock(&isp->mutex); - mutex_unlock(&isp->streamoff_mutex); + atomisp_subdev_set_selection(&asd->subdev, fh.state, + V4L2_SUBDEV_FORMAT_ACTIVE, + atomisp_subdev_source_pad(vdev), + V4L2_SEL_TGT_COMPOSE, 0, + &clear_compose); + mutex_unlock(&isp->mutex); return v4l2_fh_release(file); } @@ -1194,7 +1018,7 @@ static int atomisp_mmap(struct file *file, struct vm_area_struct *vma) if (!(vma->vm_flags & (VM_WRITE | VM_READ))) return -EACCES; - rt_mutex_lock(&isp->mutex); + mutex_lock(&isp->mutex); if (!(vma->vm_flags & VM_SHARED)) { /* Map private buffer. 
@@ -1205,7 +1029,7 @@ static int atomisp_mmap(struct file *file, struct vm_area_struct *vma) */ vma->vm_flags |= VM_SHARED; ret = hmm_mmap(vma, vma->vm_pgoff << PAGE_SHIFT); - rt_mutex_unlock(&isp->mutex); + mutex_unlock(&isp->mutex); return ret; } @@ -1248,7 +1072,7 @@ static int atomisp_mmap(struct file *file, struct vm_area_struct *vma) } raw_virt_addr->data_bytes = origin_size; vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP; - rt_mutex_unlock(&isp->mutex); + mutex_unlock(&isp->mutex); return 0; } @@ -1260,24 +1084,16 @@ static int atomisp_mmap(struct file *file, struct vm_area_struct *vma) ret = -EINVAL; goto error; } - rt_mutex_unlock(&isp->mutex); + mutex_unlock(&isp->mutex); return atomisp_videobuf_mmap_mapper(&pipe->capq, vma); error: - rt_mutex_unlock(&isp->mutex); + mutex_unlock(&isp->mutex); return ret; } -static int atomisp_file_mmap(struct file *file, struct vm_area_struct *vma) -{ - struct video_device *vdev = video_devdata(file); - struct atomisp_video_pipe *pipe = atomisp_to_video_pipe(vdev); - - return videobuf_mmap_mapper(&pipe->outq, vma); -} - static __poll_t atomisp_poll(struct file *file, struct poll_table_struct *pt) { @@ -1285,12 +1101,12 @@ static __poll_t atomisp_poll(struct file *file, struct atomisp_device *isp = video_get_drvdata(vdev); struct atomisp_video_pipe *pipe = atomisp_to_video_pipe(vdev); - rt_mutex_lock(&isp->mutex); + mutex_lock(&isp->mutex); if (pipe->capq.streaming != 1) { - rt_mutex_unlock(&isp->mutex); + mutex_unlock(&isp->mutex); return EPOLLERR; } - rt_mutex_unlock(&isp->mutex); + mutex_unlock(&isp->mutex); return videobuf_poll_stream(file, &pipe->capq, pt); } @@ -1310,15 +1126,3 @@ const struct v4l2_file_operations atomisp_fops = { #endif .poll = atomisp_poll, }; - -const struct v4l2_file_operations atomisp_file_fops = { - .owner = THIS_MODULE, - .open = atomisp_open, - .release = atomisp_release, - .mmap = atomisp_file_mmap, - .unlocked_ioctl = video_ioctl2, -#ifdef CONFIG_COMPAT - /* .compat_ioctl32 = atomisp_compat_ioctl32, */ -#endif - .poll = atomisp_poll, -}; diff --git a/drivers/staging/media/atomisp/pci/atomisp_gmin_platform.c b/drivers/staging/media/atomisp/pci/atomisp_gmin_platform.c index bf527b366ab3..3d41fab661cf 100644 --- a/drivers/staging/media/atomisp/pci/atomisp_gmin_platform.c +++ b/drivers/staging/media/atomisp/pci/atomisp_gmin_platform.c @@ -134,24 +134,6 @@ static DEFINE_MUTEX(vcm_lock); static struct gmin_subdev *find_gmin_subdev(struct v4l2_subdev *subdev); -/* - * Legacy/stub behavior copied from upstream platform_camera.c. The - * atomisp driver relies on these values being non-NULL in a few - * places, even though they are hard-coded in all current - * implementations. 
- */ -const struct atomisp_camera_caps *atomisp_get_default_camera_caps(void) -{ - static const struct atomisp_camera_caps caps = { - .sensor_num = 1, - .sensor = { - { .stream_num = 1, }, - }, - }; - return &caps; -} -EXPORT_SYMBOL_GPL(atomisp_get_default_camera_caps); - const struct atomisp_platform_data *atomisp_get_platform_data(void) { return &pdata; } @@ -1066,6 +1048,38 @@ static int gmin_flisclk_ctrl(struct v4l2_subdev *subdev, int on) return ret; } +static int camera_sensor_csi_alloc(struct v4l2_subdev *sd, u32 port, u32 lanes, + u32 format, u32 bayer_order) +{ + struct i2c_client *client = v4l2_get_subdevdata(sd); + struct camera_mipi_info *csi; + + csi = kzalloc(sizeof(*csi), GFP_KERNEL); + if (!csi) + return -ENOMEM; + + csi->port = port; + csi->num_lanes = lanes; + csi->input_format = format; + csi->raw_bayer_order = bayer_order; + v4l2_set_subdev_hostdata(sd, csi); + csi->metadata_format = ATOMISP_INPUT_FORMAT_EMBEDDED; + csi->metadata_effective_width = NULL; + dev_info(&client->dev, + "camera pdata: port: %d lanes: %d order: %8.8x\n", + port, lanes, bayer_order); + + return 0; +} + +static void camera_sensor_csi_free(struct v4l2_subdev *sd) +{ + struct camera_mipi_info *csi; + + csi = v4l2_get_subdev_hostdata(sd); + kfree(csi); +} + static int gmin_csi_cfg(struct v4l2_subdev *sd, int flag) { struct i2c_client *client = v4l2_get_subdevdata(sd); @@ -1074,8 +1088,11 @@ static int gmin_csi_cfg(struct v4l2_subdev *sd, int flag) if (!client || !gs) return -ENODEV; - return camera_sensor_csi(sd, gs->csi_port, gs->csi_lanes, - gs->csi_fmt, gs->csi_bayer, flag); + if (flag) + return camera_sensor_csi_alloc(sd, gs->csi_port, gs->csi_lanes, + gs->csi_fmt, gs->csi_bayer); + camera_sensor_csi_free(sd); + return 0; } static struct camera_vcm_control *gmin_get_vcm_ctrl(struct v4l2_subdev *subdev, @@ -1207,16 +1224,14 @@ static int gmin_get_config_dsm_var(struct device *dev, if (!strcmp(var, "CamClk")) return -EINVAL; - obj = acpi_evaluate_dsm(handle, &atomisp_dsm_guid, 0, 0, NULL); + /* Return on unexpected object type */ + obj = acpi_evaluate_dsm_typed(handle, &atomisp_dsm_guid, 0, 0, NULL, + ACPI_TYPE_PACKAGE); if (!obj) { dev_info_once(dev, "Didn't find ACPI _DSM table.\n"); return -EINVAL; } - /* Return on unexpected object type */ - if (obj->type != ACPI_TYPE_PACKAGE) - return -EINVAL; - #if 0 /* Just for debugging purposes */ for (i = 0; i < obj->package.count; i++) { union acpi_object *cur = &obj->package.elements[i]; @@ -1360,35 +1375,6 @@ int gmin_get_var_int(struct device *dev, bool is_gmin, const char *var, int def) } EXPORT_SYMBOL_GPL(gmin_get_var_int); -int camera_sensor_csi(struct v4l2_subdev *sd, u32 port, - u32 lanes, u32 format, u32 bayer_order, int flag) -{ - struct i2c_client *client = v4l2_get_subdevdata(sd); - struct camera_mipi_info *csi = NULL; - - if (flag) { - csi = kzalloc(sizeof(*csi), GFP_KERNEL); - if (!csi) - return -ENOMEM; - csi->port = port; - csi->num_lanes = lanes; - csi->input_format = format; - csi->raw_bayer_order = bayer_order; - v4l2_set_subdev_hostdata(sd, (void *)csi); - csi->metadata_format = ATOMISP_INPUT_FORMAT_EMBEDDED; - csi->metadata_effective_width = NULL; - dev_info(&client->dev, - "camera pdata: port: %d lanes: %d order: %8.8x\n", - port, lanes, bayer_order); - } else { - csi = v4l2_get_subdev_hostdata(sd); - kfree(csi); - } - - return 0; -} -EXPORT_SYMBOL_GPL(camera_sensor_csi); - /* PCI quirk: The BYT ISP advertises PCI runtime PM but it doesn't * work. Disable so the kernel framework doesn't hang the device * trying.
The driver itself does direct calls to the PUNIT to manage diff --git a/drivers/staging/media/atomisp/pci/atomisp_internal.h b/drivers/staging/media/atomisp/pci/atomisp_internal.h index f71ab1ee6e19..d9d158cdf09e 100644 --- a/drivers/staging/media/atomisp/pci/atomisp_internal.h +++ b/drivers/staging/media/atomisp/pci/atomisp_internal.h @@ -34,7 +34,6 @@ #include "sh_css_legacy.h" #include "atomisp_csi2.h" -#include "atomisp_file.h" #include "atomisp_subdev.h" #include "atomisp_tpg.h" #include "atomisp_compat.h" @@ -86,13 +85,12 @@ #define ATOM_ISP_POWER_DOWN 0 #define ATOM_ISP_POWER_UP 1 -#define ATOM_ISP_MAX_INPUTS 4 +#define ATOM_ISP_MAX_INPUTS 3 #define ATOMISP_SC_TYPE_SIZE 2 #define ATOMISP_ISP_TIMEOUT_DURATION (2 * HZ) #define ATOMISP_EXT_ISP_TIMEOUT_DURATION (6 * HZ) -#define ATOMISP_ISP_FILE_TIMEOUT_DURATION (60 * HZ) #define ATOMISP_WDT_KEEP_CURRENT_DELAY 0 #define ATOMISP_ISP_MAX_TIMEOUT_COUNT 2 #define ATOMISP_CSS_STOP_TIMEOUT_US 200000 @@ -107,9 +105,6 @@ #define ATOMISP_DELAYED_INIT_QUEUED 1 #define ATOMISP_DELAYED_INIT_DONE 2 -#define ATOMISP_CALC_CSS_PREV_OVERLAP(lines) \ - ((lines) * 38 / 100 & 0xfffffe) - /* * Define how fast CPU should be able to serve ISP interrupts. * The bigger the value, the higher risk that the ISP is not @@ -132,9 +127,7 @@ * Moorefield/Baytrail platform. */ #define ATOMISP_SOC_CAMERA(asd) \ - (asd->isp->inputs[asd->input_curr].type == SOC_CAMERA \ - && asd->isp->inputs[asd->input_curr].camera_caps-> \ - sensor[asd->sensor_curr].stream_num == 1) + (asd->isp->inputs[asd->input_curr].type == SOC_CAMERA) #define ATOMISP_USE_YUVPP(asd) \ (ATOMISP_SOC_CAMERA(asd) && ATOMISP_CSS_SUPPORT_YUVPP && \ @@ -167,7 +160,6 @@ struct atomisp_input_subdev { */ struct atomisp_sub_device *asd; - const struct atomisp_camera_caps *camera_caps; int sensor_index; }; @@ -203,7 +195,6 @@ struct atomisp_regs { }; struct atomisp_sw_contex { - bool file_input; int power_state; int running_freq; }; @@ -241,24 +232,10 @@ struct atomisp_device { struct atomisp_mipi_csi2_device csi2_port[ATOMISP_CAMERA_NR_PORTS]; struct atomisp_tpg_device tpg; - struct atomisp_file_device file_dev; /* Purpose of mutex is to protect and serialize use of isp data * structures and css API calls. */ - struct rt_mutex mutex; - /* - * This mutex ensures that we don't allow an open to succeed while - * the initialization process is incomplete - */ - struct rt_mutex loading; - /* Set once the ISP is ready to allow opens */ - bool ready; - /* - * Serialise streamoff: mutex is dropped during streamoff to - * cancel the watchdog queue. MUST be acquired BEFORE - * "mutex". 
- */ - struct mutex streamoff_mutex; + struct mutex mutex; unsigned int input_cnt; struct atomisp_input_subdev inputs[ATOM_ISP_MAX_INPUTS]; @@ -272,15 +249,9 @@ struct atomisp_device { /* isp timeout status flag */ bool isp_timeout; bool isp_fatal_error; - struct workqueue_struct *wdt_work_queue; - struct work_struct wdt_work; - - /* ISP2400 */ - atomic_t wdt_count; - - atomic_t wdt_work_queued; + struct work_struct assert_recovery_work; - spinlock_t lock; /* Just for streaming below */ + spinlock_t lock; /* Protects asd[i].streaming */ bool need_gfx_throttle; @@ -296,20 +267,4 @@ struct atomisp_device { extern struct device *atomisp_dev; -#define atomisp_is_wdt_running(a) timer_pending(&(a)->wdt) - -/* ISP2401 */ -void atomisp_wdt_refresh_pipe(struct atomisp_video_pipe *pipe, - unsigned int delay); -void atomisp_wdt_refresh(struct atomisp_sub_device *asd, unsigned int delay); - -/* ISP2400 */ -void atomisp_wdt_start(struct atomisp_sub_device *asd); - -/* ISP2401 */ -void atomisp_wdt_start_pipe(struct atomisp_video_pipe *pipe); -void atomisp_wdt_stop_pipe(struct atomisp_video_pipe *pipe, bool sync); - -void atomisp_wdt_stop(struct atomisp_sub_device *asd, bool sync); - #endif /* __ATOMISP_INTERNAL_H__ */ diff --git a/drivers/staging/media/atomisp/pci/atomisp_ioctl.c b/drivers/staging/media/atomisp/pci/atomisp_ioctl.c index 459645c2e2a7..0ddb0ed42dd9 100644 --- a/drivers/staging/media/atomisp/pci/atomisp_ioctl.c +++ b/drivers/staging/media/atomisp/pci/atomisp_ioctl.c @@ -535,6 +535,32 @@ atomisp_get_format_bridge_from_mbus(u32 mbus_code) return NULL; } +int atomisp_pipe_check(struct atomisp_video_pipe *pipe, bool settings_change) +{ + lockdep_assert_held(&pipe->isp->mutex); + + if (pipe->isp->isp_fatal_error) + return -EIO; + + switch (pipe->asd->streaming) { + case ATOMISP_DEVICE_STREAMING_DISABLED: + break; + case ATOMISP_DEVICE_STREAMING_ENABLED: + if (settings_change) { + dev_err(pipe->isp->dev, "Set fmt/input IOCTL while streaming\n"); + return -EBUSY; + } + break; + case ATOMISP_DEVICE_STREAMING_STOPPING: + dev_err(pipe->isp->dev, "IOCTL issued while stopping\n"); + return -EBUSY; + default: + return -EINVAL; + } + + return 0; +} + /* * v4l2 ioctls * return ISP capabilities @@ -609,8 +635,7 @@ atomisp_subdev_streaming_count(struct atomisp_sub_device *asd) return asd->video_out_preview.capq.streaming + asd->video_out_capture.capq.streaming + asd->video_out_video_capture.capq.streaming - + asd->video_out_vf.capq.streaming - + asd->video_in.capq.streaming; + + asd->video_out_vf.capq.streaming; } unsigned int atomisp_streaming_count(struct atomisp_device *isp) @@ -630,19 +655,9 @@ unsigned int atomisp_streaming_count(struct atomisp_device *isp) static int atomisp_g_input(struct file *file, void *fh, unsigned int *input) { struct video_device *vdev = video_devdata(file); - struct atomisp_device *isp = video_get_drvdata(vdev); struct atomisp_sub_device *asd = atomisp_to_video_pipe(vdev)->asd; - if (!asd) { - dev_err(isp->dev, "%s(): asd is NULL, device is %s\n", - __func__, vdev->name); - return -EINVAL; - } - - rt_mutex_lock(&isp->mutex); *input = asd->input_curr; - rt_mutex_unlock(&isp->mutex); - return 0; } @@ -653,22 +668,19 @@ static int atomisp_s_input(struct file *file, void *fh, unsigned int input) { struct video_device *vdev = video_devdata(file); struct atomisp_device *isp = video_get_drvdata(vdev); - struct atomisp_sub_device *asd = atomisp_to_video_pipe(vdev)->asd; + struct atomisp_video_pipe *pipe = atomisp_to_video_pipe(vdev); + struct atomisp_sub_device *asd = pipe->asd; 
struct v4l2_subdev *camera = NULL; struct v4l2_subdev *motor; int ret; - if (!asd) { - dev_err(isp->dev, "%s(): asd is NULL, device is %s\n", - __func__, vdev->name); - return -EINVAL; - } + ret = atomisp_pipe_check(pipe, true); + if (ret) + return ret; - rt_mutex_lock(&isp->mutex); if (input >= ATOM_ISP_MAX_INPUTS || input >= isp->input_cnt) { dev_dbg(isp->dev, "input_cnt: %d\n", isp->input_cnt); - ret = -EINVAL; - goto error; + return -EINVAL; } /* @@ -680,22 +692,13 @@ static int atomisp_s_input(struct file *file, void *fh, unsigned int input) dev_err(isp->dev, "%s, camera is already used by stream: %d\n", __func__, isp->inputs[input].asd->index); - ret = -EBUSY; - goto error; + return -EBUSY; } camera = isp->inputs[input].camera; if (!camera) { dev_err(isp->dev, "%s, no camera\n", __func__); - ret = -EINVAL; - goto error; - } - - if (atomisp_subdev_streaming_count(asd)) { - dev_err(isp->dev, - "ISP is still streaming, stop first\n"); - ret = -EINVAL; - goto error; + return -EINVAL; } /* power off the current owned sensor, as it is not used this time */ @@ -714,7 +717,7 @@ static int atomisp_s_input(struct file *file, void *fh, unsigned int input) ret = v4l2_subdev_call(isp->inputs[input].camera, core, s_power, 1); if (ret) { dev_err(isp->dev, "Failed to power-on sensor\n"); - goto error; + return ret; } /* * Some sensor driver resets the run mode during power-on, thus force @@ -727,7 +730,7 @@ static int atomisp_s_input(struct file *file, void *fh, unsigned int input) 0, isp->inputs[input].sensor_index, 0); if (ret && (ret != -ENOIOCTLCMD)) { dev_err(isp->dev, "Failed to select sensor\n"); - goto error; + return ret; } if (!IS_ISP2401) { @@ -738,20 +741,14 @@ static int atomisp_s_input(struct file *file, void *fh, unsigned int input) ret = v4l2_subdev_call(motor, core, s_power, 1); } - if (!isp->sw_contex.file_input && motor) + if (motor) ret = v4l2_subdev_call(motor, core, init, 1); asd->input_curr = input; /* mark this camera is used by the current stream */ isp->inputs[input].asd = asd; - rt_mutex_unlock(&isp->mutex); return 0; - -error: - rt_mutex_unlock(&isp->mutex); - - return ret; } static int atomisp_enum_framesizes(struct file *file, void *priv, @@ -819,12 +816,6 @@ static int atomisp_enum_fmt_cap(struct file *file, void *fh, unsigned int i, fi = 0; int rval; - if (!asd) { - dev_err(isp->dev, "%s(): asd is NULL, device is %s\n", - __func__, vdev->name); - return -EINVAL; - } - camera = isp->inputs[asd->input_curr].camera; if(!camera) { dev_err(isp->dev, "%s(): camera is NULL, device is %s\n", @@ -832,15 +823,12 @@ static int atomisp_enum_fmt_cap(struct file *file, void *fh, return -EINVAL; } - rt_mutex_lock(&isp->mutex); - rval = v4l2_subdev_call(camera, pad, enum_mbus_code, NULL, &code); if (rval == -ENOIOCTLCMD) { dev_warn(isp->dev, "enum_mbus_code pad op not supported by %s. 
Please fix your sensor driver!\n", camera->name); } - rt_mutex_unlock(&isp->mutex); if (rval) return rval; @@ -872,20 +860,6 @@ static int atomisp_enum_fmt_cap(struct file *file, void *fh, return -EINVAL; } -static int atomisp_g_fmt_file(struct file *file, void *fh, - struct v4l2_format *f) -{ - struct video_device *vdev = video_devdata(file); - struct atomisp_device *isp = video_get_drvdata(vdev); - struct atomisp_video_pipe *pipe = atomisp_to_video_pipe(vdev); - - rt_mutex_lock(&isp->mutex); - f->fmt.pix = pipe->pix; - rt_mutex_unlock(&isp->mutex); - - return 0; -} - static int atomisp_adjust_fmt(struct v4l2_format *f) { const struct atomisp_format_bridge *format_bridge; @@ -957,13 +931,16 @@ static int atomisp_try_fmt_cap(struct file *file, void *fh, struct v4l2_format *f) { struct video_device *vdev = video_devdata(file); - struct atomisp_device *isp = video_get_drvdata(vdev); int ret; - rt_mutex_lock(&isp->mutex); - ret = atomisp_try_fmt(vdev, &f->fmt.pix, NULL); - rt_mutex_unlock(&isp->mutex); + /* + * atomisp_try_fmt() gived results with padding included, note + * (this gets removed again by the atomisp_adjust_fmt() call below. + */ + f->fmt.pix.width += pad_w; + f->fmt.pix.height += pad_h; + ret = atomisp_try_fmt(vdev, &f->fmt.pix, NULL); if (ret) return ret; @@ -974,12 +951,9 @@ static int atomisp_g_fmt_cap(struct file *file, void *fh, struct v4l2_format *f) { struct video_device *vdev = video_devdata(file); - struct atomisp_device *isp = video_get_drvdata(vdev); struct atomisp_video_pipe *pipe; - rt_mutex_lock(&isp->mutex); pipe = atomisp_to_video_pipe(vdev); - rt_mutex_unlock(&isp->mutex); f->fmt.pix = pipe->pix; @@ -994,37 +968,6 @@ static int atomisp_g_fmt_cap(struct file *file, void *fh, return atomisp_try_fmt_cap(file, fh, f); } -static int atomisp_s_fmt_cap(struct file *file, void *fh, - struct v4l2_format *f) -{ - struct video_device *vdev = video_devdata(file); - struct atomisp_device *isp = video_get_drvdata(vdev); - int ret; - - rt_mutex_lock(&isp->mutex); - if (isp->isp_fatal_error) { - ret = -EIO; - rt_mutex_unlock(&isp->mutex); - return ret; - } - ret = atomisp_set_fmt(vdev, f); - rt_mutex_unlock(&isp->mutex); - return ret; -} - -static int atomisp_s_fmt_file(struct file *file, void *fh, - struct v4l2_format *f) -{ - struct video_device *vdev = video_devdata(file); - struct atomisp_device *isp = video_get_drvdata(vdev); - int ret; - - rt_mutex_lock(&isp->mutex); - ret = atomisp_set_fmt_file(vdev, f); - rt_mutex_unlock(&isp->mutex); - return ret; -} - /* * Free videobuffer buffer priv data */ @@ -1160,8 +1103,7 @@ error: /* * Initiate Memory Mapping or User Pointer I/O */ -int __atomisp_reqbufs(struct file *file, void *fh, - struct v4l2_requestbuffers *req) +int atomisp_reqbufs(struct file *file, void *fh, struct v4l2_requestbuffers *req) { struct video_device *vdev = video_devdata(file); struct atomisp_video_pipe *pipe = atomisp_to_video_pipe(vdev); @@ -1170,16 +1112,8 @@ int __atomisp_reqbufs(struct file *file, void *fh, struct ia_css_frame *frame; struct videobuf_vmalloc_memory *vm_mem; u16 source_pad = atomisp_subdev_source_pad(vdev); - u16 stream_id; int ret = 0, i = 0; - if (!asd) { - dev_err(pipe->isp->dev, "%s(): asd is NULL, device is %s\n", - __func__, vdev->name); - return -EINVAL; - } - stream_id = atomisp_source_pad_to_stream_id(asd, source_pad); - if (req->count == 0) { mutex_lock(&pipe->capq.vb_lock); if (!list_empty(&pipe->capq.stream)) @@ -1200,7 +1134,7 @@ int __atomisp_reqbufs(struct file *file, void *fh, if (ret) return ret; - 
atomisp_alloc_css_stat_bufs(asd, stream_id); + atomisp_alloc_css_stat_bufs(asd, ATOMISP_INPUT_STREAM_GENERAL); /* * for user pointer type, buffers are not really allocated here, @@ -1238,36 +1172,6 @@ error: return -ENOMEM; } -int atomisp_reqbufs(struct file *file, void *fh, - struct v4l2_requestbuffers *req) -{ - struct video_device *vdev = video_devdata(file); - struct atomisp_device *isp = video_get_drvdata(vdev); - int ret; - - rt_mutex_lock(&isp->mutex); - ret = __atomisp_reqbufs(file, fh, req); - rt_mutex_unlock(&isp->mutex); - - return ret; -} - -static int atomisp_reqbufs_file(struct file *file, void *fh, - struct v4l2_requestbuffers *req) -{ - struct video_device *vdev = video_devdata(file); - struct atomisp_video_pipe *pipe = atomisp_to_video_pipe(vdev); - - if (req->count == 0) { - mutex_lock(&pipe->outq.vb_lock); - atomisp_videobuf_free_queue(&pipe->outq); - mutex_unlock(&pipe->outq.vb_lock); - return 0; - } - - return videobuf_reqbufs(&pipe->outq, req); -} - /* application query the status of a buffer */ static int atomisp_querybuf(struct file *file, void *fh, struct v4l2_buffer *buf) @@ -1278,15 +1182,6 @@ static int atomisp_querybuf(struct file *file, void *fh, return videobuf_querybuf(&pipe->capq, buf); } -static int atomisp_querybuf_file(struct file *file, void *fh, - struct v4l2_buffer *buf) -{ - struct video_device *vdev = video_devdata(file); - struct atomisp_video_pipe *pipe = atomisp_to_video_pipe(vdev); - - return videobuf_querybuf(&pipe->outq, buf); -} - /* * Applications call the VIDIOC_QBUF ioctl to enqueue an empty (capturing) or * filled (output) buffer in the drivers incoming queue. @@ -1305,32 +1200,16 @@ static int atomisp_qbuf(struct file *file, void *fh, struct v4l2_buffer *buf) struct ia_css_frame *handle = NULL; u32 length; u32 pgnr; - int ret = 0; - - if (!asd) { - dev_err(isp->dev, "%s(): asd is NULL, device is %s\n", - __func__, vdev->name); - return -EINVAL; - } - - rt_mutex_lock(&isp->mutex); - if (isp->isp_fatal_error) { - ret = -EIO; - goto error; - } + int ret; - if (asd->streaming == ATOMISP_DEVICE_STREAMING_STOPPING) { - dev_err(isp->dev, "%s: reject, as ISP at stopping.\n", - __func__); - ret = -EIO; - goto error; - } + ret = atomisp_pipe_check(pipe, false); + if (ret) + return ret; if (!buf || buf->index >= VIDEO_MAX_FRAME || !pipe->capq.bufs[buf->index]) { dev_err(isp->dev, "Invalid index for qbuf.\n"); - ret = -EINVAL; - goto error; + return -EINVAL; } /* @@ -1338,12 +1217,15 @@ static int atomisp_qbuf(struct file *file, void *fh, struct v4l2_buffer *buf) * address and reprograme out page table properly */ if (buf->memory == V4L2_MEMORY_USERPTR) { + if (offset_in_page(buf->m.userptr)) { + dev_err(isp->dev, "Error userptr is not page aligned.\n"); + return -EINVAL; + } + vb = pipe->capq.bufs[buf->index]; vm_mem = vb->priv; - if (!vm_mem) { - ret = -EINVAL; - goto error; - } + if (!vm_mem) + return -EINVAL; length = vb->bsize; pgnr = (length + (PAGE_SIZE - 1)) >> PAGE_SHIFT; @@ -1352,17 +1234,15 @@ static int atomisp_qbuf(struct file *file, void *fh, struct v4l2_buffer *buf) goto done; if (atomisp_get_css_frame_info(asd, - atomisp_subdev_source_pad(vdev), &frame_info)) { - ret = -EIO; - goto error; - } + atomisp_subdev_source_pad(vdev), &frame_info)) + return -EIO; ret = ia_css_frame_map(&handle, &frame_info, (void __user *)buf->m.userptr, pgnr); if (ret) { dev_err(isp->dev, "Failed to map user buffer\n"); - goto error; + return ret; } if (vm_mem->vaddr) { @@ -1406,12 +1286,11 @@ done: pipe->frame_params[buf->index] = NULL; - 
rt_mutex_unlock(&isp->mutex); - + mutex_unlock(&isp->mutex); ret = videobuf_qbuf(&pipe->capq, buf); - rt_mutex_lock(&isp->mutex); + mutex_lock(&isp->mutex); if (ret) - goto error; + return ret; /* TODO: do this better, not best way to queue to css */ if (asd->streaming == ATOMISP_DEVICE_STREAMING_ENABLED) { @@ -1419,15 +1298,6 @@ done: atomisp_handle_parameter_and_buffer(pipe); } else { atomisp_qbuffers_to_css(asd); - - if (!IS_ISP2401) { - if (!atomisp_is_wdt_running(asd) && atomisp_buffers_queued(asd)) - atomisp_wdt_start(asd); - } else { - if (!atomisp_is_wdt_running(pipe) && - atomisp_buffers_queued_pipe(pipe)) - atomisp_wdt_start_pipe(pipe); - } } } @@ -1449,58 +1319,11 @@ done: asd->pending_capture_request++; dev_dbg(isp->dev, "Add one pending capture request.\n"); } - rt_mutex_unlock(&isp->mutex); dev_dbg(isp->dev, "qbuf buffer %d (%s) for asd%d\n", buf->index, vdev->name, asd->index); - return ret; - -error: - rt_mutex_unlock(&isp->mutex); - return ret; -} - -static int atomisp_qbuf_file(struct file *file, void *fh, - struct v4l2_buffer *buf) -{ - struct video_device *vdev = video_devdata(file); - struct atomisp_device *isp = video_get_drvdata(vdev); - struct atomisp_video_pipe *pipe = atomisp_to_video_pipe(vdev); - int ret; - - rt_mutex_lock(&isp->mutex); - if (isp->isp_fatal_error) { - ret = -EIO; - goto error; - } - - if (!buf || buf->index >= VIDEO_MAX_FRAME || - !pipe->outq.bufs[buf->index]) { - dev_err(isp->dev, "Invalid index for qbuf.\n"); - ret = -EINVAL; - goto error; - } - - if (buf->memory != V4L2_MEMORY_MMAP) { - dev_err(isp->dev, "Unsupported memory method\n"); - ret = -EINVAL; - goto error; - } - - if (buf->type != V4L2_BUF_TYPE_VIDEO_OUTPUT) { - dev_err(isp->dev, "Unsupported buffer type\n"); - ret = -EINVAL; - goto error; - } - rt_mutex_unlock(&isp->mutex); - - return videobuf_qbuf(&pipe->outq, buf); - -error: - rt_mutex_unlock(&isp->mutex); - - return ret; + return 0; } static int __get_frame_exp_id(struct atomisp_video_pipe *pipe, @@ -1529,37 +1352,21 @@ static int atomisp_dqbuf(struct file *file, void *fh, struct v4l2_buffer *buf) struct atomisp_video_pipe *pipe = atomisp_to_video_pipe(vdev); struct atomisp_sub_device *asd = pipe->asd; struct atomisp_device *isp = video_get_drvdata(vdev); - int ret = 0; - - if (!asd) { - dev_err(isp->dev, "%s(): asd is NULL, device is %s\n", - __func__, vdev->name); - return -EINVAL; - } - - rt_mutex_lock(&isp->mutex); - - if (isp->isp_fatal_error) { - rt_mutex_unlock(&isp->mutex); - return -EIO; - } - - if (asd->streaming == ATOMISP_DEVICE_STREAMING_STOPPING) { - rt_mutex_unlock(&isp->mutex); - dev_err(isp->dev, "%s: reject, as ISP at stopping.\n", - __func__); - return -EIO; - } + int ret; - rt_mutex_unlock(&isp->mutex); + ret = atomisp_pipe_check(pipe, false); + if (ret) + return ret; + mutex_unlock(&isp->mutex); ret = videobuf_dqbuf(&pipe->capq, buf, file->f_flags & O_NONBLOCK); + mutex_lock(&isp->mutex); if (ret) { if (ret != -EAGAIN) dev_dbg(isp->dev, "<%s: %d\n", __func__, ret); return ret; } - rt_mutex_lock(&isp->mutex); + buf->bytesused = pipe->pix.sizeimage; buf->reserved = asd->frame_status[buf->index]; @@ -1573,7 +1380,6 @@ static int atomisp_dqbuf(struct file *file, void *fh, struct v4l2_buffer *buf) if (!(buf->flags & V4L2_BUF_FLAG_ERROR)) buf->reserved |= __get_frame_exp_id(pipe, buf) << 16; buf->reserved2 = pipe->frame_config_id[buf->index]; - rt_mutex_unlock(&isp->mutex); dev_dbg(isp->dev, "dqbuf buffer %d (%s) for asd%d with exp_id %d, isp_config_id %d\n", @@ -1622,16 +1428,6 @@ enum ia_css_pipe_id 
atomisp_get_css_pipe_id(struct atomisp_sub_device *asd) static unsigned int atomisp_sensor_start_stream(struct atomisp_sub_device *asd) { - struct atomisp_device *isp = asd->isp; - - if (isp->inputs[asd->input_curr].camera_caps-> - sensor[asd->sensor_curr].stream_num > 1) { - if (asd->high_speed_mode) - return 1; - else - return 2; - } - if (asd->vfpp->val != ATOMISP_VFPP_ENABLE || asd->copy_mode) return 1; @@ -1650,31 +1446,15 @@ static unsigned int atomisp_sensor_start_stream(struct atomisp_sub_device *asd) int atomisp_stream_on_master_slave_sensor(struct atomisp_device *isp, bool isp_timeout) { - unsigned int master = -1, slave = -1, delay_slave = 0; - int i, ret; - - /* - * ISP only support 2 streams now so ignore multiple master/slave - * case to reduce the delay between 2 stream_on calls. - */ - for (i = 0; i < isp->num_of_streams; i++) { - int sensor_index = isp->asd[i].input_curr; - - if (isp->inputs[sensor_index].camera_caps-> - sensor[isp->asd[i].sensor_curr].is_slave) - slave = sensor_index; - else - master = sensor_index; - } + unsigned int master, slave, delay_slave = 0; + int ret; - if (master == -1 || slave == -1) { - master = ATOMISP_DEPTH_DEFAULT_MASTER_SENSOR; - slave = ATOMISP_DEPTH_DEFAULT_SLAVE_SENSOR; - dev_warn(isp->dev, - "depth mode use default master=%s.slave=%s.\n", - isp->inputs[master].camera->name, - isp->inputs[slave].camera->name); - } + master = ATOMISP_DEPTH_DEFAULT_MASTER_SENSOR; + slave = ATOMISP_DEPTH_DEFAULT_SLAVE_SENSOR; + dev_warn(isp->dev, + "depth mode use default master=%s.slave=%s.\n", + isp->inputs[master].camera->name, + isp->inputs[slave].camera->name); ret = v4l2_subdev_call(isp->inputs[master].camera, core, ioctl, ATOMISP_IOC_G_DEPTH_SYNC_COMP, @@ -1708,51 +1488,6 @@ int atomisp_stream_on_master_slave_sensor(struct atomisp_device *isp, return 0; } -/* FIXME! ISP2400 */ -static void __wdt_on_master_slave_sensor(struct atomisp_device *isp, - unsigned int wdt_duration) -{ - if (atomisp_buffers_queued(&isp->asd[0])) - atomisp_wdt_refresh(&isp->asd[0], wdt_duration); - if (atomisp_buffers_queued(&isp->asd[1])) - atomisp_wdt_refresh(&isp->asd[1], wdt_duration); -} - -/* FIXME! ISP2401 */ -static void __wdt_on_master_slave_sensor_pipe(struct atomisp_video_pipe *pipe, - unsigned int wdt_duration, - bool enable) -{ - static struct atomisp_video_pipe *pipe0; - - if (enable) { - if (atomisp_buffers_queued_pipe(pipe0)) - atomisp_wdt_refresh_pipe(pipe0, wdt_duration); - if (atomisp_buffers_queued_pipe(pipe)) - atomisp_wdt_refresh_pipe(pipe, wdt_duration); - } else { - pipe0 = pipe; - } -} - -static void atomisp_pause_buffer_event(struct atomisp_device *isp) -{ - struct v4l2_event event = {0}; - int i; - - event.type = V4L2_EVENT_ATOMISP_PAUSE_BUFFER; - - for (i = 0; i < isp->num_of_streams; i++) { - int sensor_index = isp->asd[i].input_curr; - - if (isp->inputs[sensor_index].camera_caps-> - sensor[isp->asd[i].sensor_curr].is_slave) { - v4l2_event_queue(isp->asd[i].subdev.devnode, &event); - break; - } - } -} - /* Input system HW workaround */ /* Input system address translation corrupts burst during */ /* invalidate. 
SW workaround for this is to set burst length */ @@ -1784,15 +1519,8 @@ static int atomisp_streamon(struct file *file, void *fh, struct pci_dev *pdev = to_pci_dev(isp->dev); enum ia_css_pipe_id css_pipe_id; unsigned int sensor_start_stream; - unsigned int wdt_duration = ATOMISP_ISP_TIMEOUT_DURATION; - int ret = 0; unsigned long irqflags; - - if (!asd) { - dev_err(isp->dev, "%s(): asd is NULL, device is %s\n", - __func__, vdev->name); - return -EINVAL; - } + int ret; dev_dbg(isp->dev, "Start stream on pad %d for asd%d\n", atomisp_subdev_source_pad(vdev), asd->index); @@ -1802,19 +1530,12 @@ static int atomisp_streamon(struct file *file, void *fh, return -EINVAL; } - rt_mutex_lock(&isp->mutex); - if (isp->isp_fatal_error) { - ret = -EIO; - goto out; - } - - if (asd->streaming == ATOMISP_DEVICE_STREAMING_STOPPING) { - ret = -EBUSY; - goto out; - } + ret = atomisp_pipe_check(pipe, false); + if (ret) + return ret; if (pipe->capq.streaming) - goto out; + return 0; /* Input system HW workaround */ atomisp_dma_burst_len_cfg(asd); @@ -1829,20 +1550,18 @@ static int atomisp_streamon(struct file *file, void *fh, if (list_empty(&pipe->capq.stream)) { spin_unlock_irqrestore(&pipe->irq_lock, irqflags); dev_dbg(isp->dev, "no buffer in the queue\n"); - ret = -EINVAL; - goto out; + return -EINVAL; } spin_unlock_irqrestore(&pipe->irq_lock, irqflags); ret = videobuf_streamon(&pipe->capq); if (ret) - goto out; + return ret; /* Reset pending capture request count. */ asd->pending_capture_request = 0; - if ((atomisp_subdev_streaming_count(asd) > sensor_start_stream) && - (!isp->inputs[asd->input_curr].camera_caps->multi_stream_ctrl)) { + if (atomisp_subdev_streaming_count(asd) > sensor_start_stream) { /* trigger still capture */ if (asd->continuous_mode->val && atomisp_subdev_source_pad(vdev) @@ -1856,11 +1575,11 @@ static int atomisp_streamon(struct file *file, void *fh, if (asd->delayed_init == ATOMISP_DELAYED_INIT_QUEUED) { flush_work(&asd->delayed_init_work); - rt_mutex_unlock(&isp->mutex); - if (wait_for_completion_interruptible( - &asd->init_done) != 0) + mutex_unlock(&isp->mutex); + ret = wait_for_completion_interruptible(&asd->init_done); + mutex_lock(&isp->mutex); + if (ret != 0) return -ERESTARTSYS; - rt_mutex_lock(&isp->mutex); } /* handle per_frame_setting parameter and buffers */ @@ -1882,16 +1601,12 @@ static int atomisp_streamon(struct file *file, void *fh, asd->params.offline_parm.num_captures, asd->params.offline_parm.skip_frames, asd->params.offline_parm.offset); - if (ret) { - ret = -EINVAL; - goto out; - } - if (asd->depth_mode->val) - atomisp_pause_buffer_event(isp); + if (ret) + return -EINVAL; } } atomisp_qbuffers_to_css(asd); - goto out; + return 0; } if (asd->streaming == ATOMISP_DEVICE_STREAMING_ENABLED) { @@ -1917,14 +1632,14 @@ static int atomisp_streamon(struct file *file, void *fh, ret = atomisp_css_start(asd, css_pipe_id, false); if (ret) - goto out; + return ret; + spin_lock_irqsave(&isp->lock, irqflags); asd->streaming = ATOMISP_DEVICE_STREAMING_ENABLED; + spin_unlock_irqrestore(&isp->lock, irqflags); atomic_set(&asd->sof_count, -1); atomic_set(&asd->sequence, -1); atomic_set(&asd->sequence_temp, -1); - if (isp->sw_contex.file_input) - wdt_duration = ATOMISP_ISP_FILE_TIMEOUT_DURATION; asd->params.dis_proj_data_valid = false; asd->latest_preview_exp_id = 0; @@ -1938,7 +1653,7 @@ static int atomisp_streamon(struct file *file, void *fh, /* Only start sensor when the last streaming instance started */ if (atomisp_subdev_streaming_count(asd) < sensor_start_stream) - goto out; + 
return 0; start_sensor: if (isp->flash) { @@ -1947,26 +1662,21 @@ start_sensor: atomisp_setup_flash(asd); } - if (!isp->sw_contex.file_input) { - atomisp_css_irq_enable(isp, IA_CSS_IRQ_INFO_CSS_RECEIVER_SOF, - atomisp_css_valid_sof(isp)); - atomisp_csi2_configure(asd); - /* - * set freq to max when streaming count > 1 which indicate - * dual camera would run - */ - if (atomisp_streaming_count(isp) > 1) { - if (atomisp_freq_scaling(isp, - ATOMISP_DFS_MODE_MAX, false) < 0) - dev_dbg(isp->dev, "DFS max mode failed!\n"); - } else { - if (atomisp_freq_scaling(isp, - ATOMISP_DFS_MODE_AUTO, false) < 0) - dev_dbg(isp->dev, "DFS auto mode failed!\n"); - } - } else { - if (atomisp_freq_scaling(isp, ATOMISP_DFS_MODE_MAX, false) < 0) + atomisp_css_irq_enable(isp, IA_CSS_IRQ_INFO_CSS_RECEIVER_SOF, + atomisp_css_valid_sof(isp)); + atomisp_csi2_configure(asd); + /* + * set freq to max when streaming count > 1 which indicate + * dual camera would run + */ + if (atomisp_streaming_count(isp) > 1) { + if (atomisp_freq_scaling(isp, + ATOMISP_DFS_MODE_MAX, false) < 0) dev_dbg(isp->dev, "DFS max mode failed!\n"); + } else { + if (atomisp_freq_scaling(isp, + ATOMISP_DFS_MODE_AUTO, false) < 0) + dev_dbg(isp->dev, "DFS auto mode failed!\n"); } if (asd->depth_mode->val && atomisp_streaming_count(isp) == @@ -1974,17 +1684,11 @@ start_sensor: ret = atomisp_stream_on_master_slave_sensor(isp, false); if (ret) { dev_err(isp->dev, "master slave sensor stream on failed!\n"); - goto out; + return ret; } - if (!IS_ISP2401) - __wdt_on_master_slave_sensor(isp, wdt_duration); - else - __wdt_on_master_slave_sensor_pipe(pipe, wdt_duration, true); goto start_delay_wq; } else if (asd->depth_mode->val && (atomisp_streaming_count(isp) < ATOMISP_DEPTH_SENSOR_STREAMON_COUNT)) { - if (IS_ISP2401) - __wdt_on_master_slave_sensor_pipe(pipe, wdt_duration, false); goto start_delay_wq; } @@ -1999,41 +1703,29 @@ start_sensor: ret = v4l2_subdev_call(isp->inputs[asd->input_curr].camera, video, s_stream, 1); if (ret) { + spin_lock_irqsave(&isp->lock, irqflags); asd->streaming = ATOMISP_DEVICE_STREAMING_DISABLED; - ret = -EINVAL; - goto out; - } - - if (!IS_ISP2401) { - if (atomisp_buffers_queued(asd)) - atomisp_wdt_refresh(asd, wdt_duration); - } else { - if (atomisp_buffers_queued_pipe(pipe)) - atomisp_wdt_refresh_pipe(pipe, wdt_duration); + spin_unlock_irqrestore(&isp->lock, irqflags); + return -EINVAL; } start_delay_wq: if (asd->continuous_mode->val) { - struct v4l2_mbus_framefmt *sink; - - sink = atomisp_subdev_get_ffmt(&asd->subdev, NULL, - V4L2_SUBDEV_FORMAT_ACTIVE, - ATOMISP_SUBDEV_PAD_SINK); + atomisp_subdev_get_ffmt(&asd->subdev, NULL, + V4L2_SUBDEV_FORMAT_ACTIVE, + ATOMISP_SUBDEV_PAD_SINK); reinit_completion(&asd->init_done); asd->delayed_init = ATOMISP_DELAYED_INIT_QUEUED; queue_work(asd->delayed_init_workq, &asd->delayed_init_work); - atomisp_css_set_cont_prev_start_time(isp, - ATOMISP_CALC_CSS_PREV_OVERLAP(sink->height)); } else { asd->delayed_init = ATOMISP_DELAYED_INIT_NOT_QUEUED; } -out: - rt_mutex_unlock(&isp->mutex); - return ret; + + return 0; } -int __atomisp_streamoff(struct file *file, void *fh, enum v4l2_buf_type type) +int atomisp_streamoff(struct file *file, void *fh, enum v4l2_buf_type type) { struct video_device *vdev = video_devdata(file); struct atomisp_device *isp = video_get_drvdata(vdev); @@ -2050,17 +1742,10 @@ int __atomisp_streamoff(struct file *file, void *fh, enum v4l2_buf_type type) unsigned long flags; bool first_streamoff = false; - if (!asd) { - dev_err(isp->dev, "%s(): asd is NULL, device is %s\n", - 
__func__, vdev->name); - return -EINVAL; - } - dev_dbg(isp->dev, "Stop stream on pad %d for asd%d\n", atomisp_subdev_source_pad(vdev), asd->index); lockdep_assert_held(&isp->mutex); - lockdep_assert_held(&isp->streamoff_mutex); if (type != V4L2_BUF_TYPE_VIDEO_CAPTURE) { dev_dbg(isp->dev, "unsupported v4l2 buf type\n"); @@ -2071,17 +1756,10 @@ int __atomisp_streamoff(struct file *file, void *fh, enum v4l2_buf_type type) * do only videobuf_streamoff for capture & vf pipes in * case of continuous capture */ - if ((asd->continuous_mode->val || - isp->inputs[asd->input_curr].camera_caps->multi_stream_ctrl) && - atomisp_subdev_source_pad(vdev) != - ATOMISP_SUBDEV_PAD_SOURCE_PREVIEW && - atomisp_subdev_source_pad(vdev) != - ATOMISP_SUBDEV_PAD_SOURCE_VIDEO) { - if (isp->inputs[asd->input_curr].camera_caps->multi_stream_ctrl) { - v4l2_subdev_call(isp->inputs[asd->input_curr].camera, - video, s_stream, 0); - } else if (atomisp_subdev_source_pad(vdev) - == ATOMISP_SUBDEV_PAD_SOURCE_CAPTURE) { + if (asd->continuous_mode->val && + atomisp_subdev_source_pad(vdev) != ATOMISP_SUBDEV_PAD_SOURCE_PREVIEW && + atomisp_subdev_source_pad(vdev) != ATOMISP_SUBDEV_PAD_SOURCE_VIDEO) { + if (atomisp_subdev_source_pad(vdev) == ATOMISP_SUBDEV_PAD_SOURCE_CAPTURE) { /* stop continuous still capture if needed */ if (asd->params.offline_parm.num_captures == -1) atomisp_css_offline_capture_configure(asd, @@ -2118,32 +1796,14 @@ int __atomisp_streamoff(struct file *file, void *fh, enum v4l2_buf_type type) if (!pipe->capq.streaming) return 0; - spin_lock_irqsave(&isp->lock, flags); - if (asd->streaming == ATOMISP_DEVICE_STREAMING_ENABLED) { - asd->streaming = ATOMISP_DEVICE_STREAMING_STOPPING; + if (asd->streaming == ATOMISP_DEVICE_STREAMING_ENABLED) first_streamoff = true; - } - spin_unlock_irqrestore(&isp->lock, flags); - - if (first_streamoff) { - /* if other streams are running, should not disable watch dog */ - rt_mutex_unlock(&isp->mutex); - atomisp_wdt_stop(asd, true); - - /* - * must stop sending pixels into GP_FIFO before stop - * the pipeline. 
- */ - if (isp->sw_contex.file_input) - v4l2_subdev_call(isp->inputs[asd->input_curr].camera, - video, s_stream, 0); - - rt_mutex_lock(&isp->mutex); - } spin_lock_irqsave(&isp->lock, flags); if (atomisp_subdev_streaming_count(asd) == 1) asd->streaming = ATOMISP_DEVICE_STREAMING_DISABLED; + else + asd->streaming = ATOMISP_DEVICE_STREAMING_STOPPING; spin_unlock_irqrestore(&isp->lock, flags); if (!first_streamoff) { @@ -2154,19 +1814,16 @@ int __atomisp_streamoff(struct file *file, void *fh, enum v4l2_buf_type type) } atomisp_clear_css_buffer_counters(asd); - - if (!isp->sw_contex.file_input) - atomisp_css_irq_enable(isp, IA_CSS_IRQ_INFO_CSS_RECEIVER_SOF, - false); + atomisp_css_irq_enable(isp, IA_CSS_IRQ_INFO_CSS_RECEIVER_SOF, false); if (asd->delayed_init == ATOMISP_DELAYED_INIT_QUEUED) { cancel_work_sync(&asd->delayed_init_work); asd->delayed_init = ATOMISP_DELAYED_INIT_NOT_QUEUED; } - if (first_streamoff) { - css_pipe_id = atomisp_get_css_pipe_id(asd); - atomisp_css_stop(asd, css_pipe_id, false); - } + + css_pipe_id = atomisp_get_css_pipe_id(asd); + atomisp_css_stop(asd, css_pipe_id, false); + /* cancel work queue*/ if (asd->video_out_capture.users) { capture_pipe = &asd->video_out_capture; @@ -2210,9 +1867,8 @@ stopsensor: != atomisp_sensor_start_stream(asd)) return 0; - if (!isp->sw_contex.file_input) - ret = v4l2_subdev_call(isp->inputs[asd->input_curr].camera, - video, s_stream, 0); + ret = v4l2_subdev_call(isp->inputs[asd->input_curr].camera, + video, s_stream, 0); if (isp->flash) { asd->params.num_flash_frames = 0; @@ -2284,22 +1940,6 @@ stopsensor: return ret; } -static int atomisp_streamoff(struct file *file, void *fh, - enum v4l2_buf_type type) -{ - struct video_device *vdev = video_devdata(file); - struct atomisp_device *isp = video_get_drvdata(vdev); - int rval; - - mutex_lock(&isp->streamoff_mutex); - rt_mutex_lock(&isp->mutex); - rval = __atomisp_streamoff(file, fh, type); - rt_mutex_unlock(&isp->mutex); - mutex_unlock(&isp->streamoff_mutex); - - return rval; -} - /* * To get the current value of a control. 
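/*
 * Sketch of why the rt_mutex_lock()/rt_mutex_unlock() pairs vanish from the
 * handlers in this hunk (assumption drawn from the atomisp_video_init()
 * change further down): the driver now points video_device->lock at
 * isp->mutex, so the V4L2 core takes that mutex around every file operation
 * and ioctl before the handler runs.
 */
video->vdev.fops = &atomisp_fops;
video->vdev.ioctl_ops = &atomisp_ioctl_ops;
video->vdev.lock = &video->isp->mutex;	/* core serializes fops/ioctls */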
* applications initialize the id field of a struct v4l2_control and @@ -2313,12 +1953,6 @@ static int atomisp_g_ctrl(struct file *file, void *fh, struct atomisp_device *isp = video_get_drvdata(vdev); int i, ret = -EINVAL; - if (!asd) { - dev_err(isp->dev, "%s(): asd is NULL, device is %s\n", - __func__, vdev->name); - return -EINVAL; - } - for (i = 0; i < ctrls_num; i++) { if (ci_v4l2_controls[i].id == control->id) { ret = 0; @@ -2329,8 +1963,6 @@ static int atomisp_g_ctrl(struct file *file, void *fh, if (ret) return ret; - rt_mutex_lock(&isp->mutex); - switch (control->id) { case V4L2_CID_IRIS_ABSOLUTE: case V4L2_CID_EXPOSURE_ABSOLUTE: @@ -2352,7 +1984,6 @@ static int atomisp_g_ctrl(struct file *file, void *fh, case V4L2_CID_TEST_PATTERN_COLOR_GR: case V4L2_CID_TEST_PATTERN_COLOR_GB: case V4L2_CID_TEST_PATTERN_COLOR_B: - rt_mutex_unlock(&isp->mutex); return v4l2_g_ctrl(isp->inputs[asd->input_curr].camera-> ctrl_handler, control); case V4L2_CID_COLORFX: @@ -2381,7 +2012,6 @@ static int atomisp_g_ctrl(struct file *file, void *fh, break; } - rt_mutex_unlock(&isp->mutex); return ret; } @@ -2398,12 +2028,6 @@ static int atomisp_s_ctrl(struct file *file, void *fh, struct atomisp_device *isp = video_get_drvdata(vdev); int i, ret = -EINVAL; - if (!asd) { - dev_err(isp->dev, "%s(): asd is NULL, device is %s\n", - __func__, vdev->name); - return -EINVAL; - } - for (i = 0; i < ctrls_num; i++) { if (ci_v4l2_controls[i].id == control->id) { ret = 0; @@ -2414,7 +2038,6 @@ static int atomisp_s_ctrl(struct file *file, void *fh, if (ret) return ret; - rt_mutex_lock(&isp->mutex); switch (control->id) { case V4L2_CID_AUTO_N_PRESET_WHITE_BALANCE: case V4L2_CID_EXPOSURE: @@ -2435,7 +2058,6 @@ static int atomisp_s_ctrl(struct file *file, void *fh, case V4L2_CID_TEST_PATTERN_COLOR_GR: case V4L2_CID_TEST_PATTERN_COLOR_GB: case V4L2_CID_TEST_PATTERN_COLOR_B: - rt_mutex_unlock(&isp->mutex); return v4l2_s_ctrl(NULL, isp->inputs[asd->input_curr].camera-> ctrl_handler, control); @@ -2467,7 +2089,6 @@ static int atomisp_s_ctrl(struct file *file, void *fh, ret = -EINVAL; break; } - rt_mutex_unlock(&isp->mutex); return ret; } @@ -2485,12 +2106,6 @@ static int atomisp_queryctl(struct file *file, void *fh, struct atomisp_sub_device *asd = atomisp_to_video_pipe(vdev)->asd; struct atomisp_device *isp = video_get_drvdata(vdev); - if (!asd) { - dev_err(isp->dev, "%s(): asd is NULL, device is %s\n", - __func__, vdev->name); - return -EINVAL; - } - switch (qc->id) { case V4L2_CID_FOCUS_ABSOLUTE: case V4L2_CID_FOCUS_RELATIVE: @@ -2536,12 +2151,6 @@ static int atomisp_camera_g_ext_ctrls(struct file *file, void *fh, int i; int ret = 0; - if (!asd) { - dev_err(isp->dev, "%s(): asd is NULL, device is %s\n", - __func__, vdev->name); - return -EINVAL; - } - if (!IS_ISP2401) motor = isp->inputs[asd->input_curr].motor; else @@ -2592,9 +2201,7 @@ static int atomisp_camera_g_ext_ctrls(struct file *file, void *fh, &ctrl); break; case V4L2_CID_ZOOM_ABSOLUTE: - rt_mutex_lock(&isp->mutex); ret = atomisp_digital_zoom(asd, 0, &ctrl.value); - rt_mutex_unlock(&isp->mutex); break; case V4L2_CID_G_SKIP_FRAMES: ret = v4l2_subdev_call( @@ -2653,12 +2260,6 @@ static int atomisp_camera_s_ext_ctrls(struct file *file, void *fh, int i; int ret = 0; - if (!asd) { - dev_err(isp->dev, "%s(): asd is NULL, device is %s\n", - __func__, vdev->name); - return -EINVAL; - } - if (!IS_ISP2401) motor = isp->inputs[asd->input_curr].motor; else @@ -2707,7 +2308,6 @@ static int atomisp_camera_s_ext_ctrls(struct file *file, void *fh, case V4L2_CID_FLASH_STROBE: case 
V4L2_CID_FLASH_MODE: case V4L2_CID_FLASH_STATUS_REGISTER: - rt_mutex_lock(&isp->mutex); if (isp->flash) { ret = v4l2_s_ctrl(NULL, isp->flash->ctrl_handler, @@ -2722,12 +2322,9 @@ static int atomisp_camera_s_ext_ctrls(struct file *file, void *fh, asd->params.num_flash_frames = 0; } } - rt_mutex_unlock(&isp->mutex); break; case V4L2_CID_ZOOM_ABSOLUTE: - rt_mutex_lock(&isp->mutex); ret = atomisp_digital_zoom(asd, 1, &ctrl.value); - rt_mutex_unlock(&isp->mutex); break; default: ctr = v4l2_ctrl_find(&asd->ctrl_handler, ctrl.id); @@ -2784,20 +2381,12 @@ static int atomisp_g_parm(struct file *file, void *fh, struct atomisp_sub_device *asd = atomisp_to_video_pipe(vdev)->asd; struct atomisp_device *isp = video_get_drvdata(vdev); - if (!asd) { - dev_err(isp->dev, "%s(): asd is NULL, device is %s\n", - __func__, vdev->name); - return -EINVAL; - } - if (parm->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) { dev_err(isp->dev, "unsupported v4l2 buf type\n"); return -EINVAL; } - rt_mutex_lock(&isp->mutex); parm->parm.capture.capturemode = asd->run_mode->val; - rt_mutex_unlock(&isp->mutex); return 0; } @@ -2812,19 +2401,11 @@ static int atomisp_s_parm(struct file *file, void *fh, int rval; int fps; - if (!asd) { - dev_err(isp->dev, "%s(): asd is NULL, device is %s\n", - __func__, vdev->name); - return -EINVAL; - } - if (parm->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) { dev_err(isp->dev, "unsupported v4l2 buf type\n"); return -EINVAL; } - rt_mutex_lock(&isp->mutex); - asd->high_speed_mode = false; switch (parm->parm.capture.capturemode) { case CI_MODE_NONE: { @@ -2843,7 +2424,7 @@ static int atomisp_s_parm(struct file *file, void *fh, asd->high_speed_mode = true; } - goto out; + return rval == -ENOIOCTLCMD ? 0 : rval; } case CI_MODE_VIDEO: mode = ATOMISP_RUN_MODE_VIDEO; @@ -2858,76 +2439,29 @@ static int atomisp_s_parm(struct file *file, void *fh, mode = ATOMISP_RUN_MODE_PREVIEW; break; default: - rval = -EINVAL; - goto out; + return -EINVAL; } rval = v4l2_ctrl_s_ctrl(asd->run_mode, mode); -out: - rt_mutex_unlock(&isp->mutex); - return rval == -ENOIOCTLCMD ? 
0 : rval; } -static int atomisp_s_parm_file(struct file *file, void *fh, - struct v4l2_streamparm *parm) -{ - struct video_device *vdev = video_devdata(file); - struct atomisp_device *isp = video_get_drvdata(vdev); - - if (parm->type != V4L2_BUF_TYPE_VIDEO_OUTPUT) { - dev_err(isp->dev, "unsupported v4l2 buf type for output\n"); - return -EINVAL; - } - - rt_mutex_lock(&isp->mutex); - isp->sw_contex.file_input = true; - rt_mutex_unlock(&isp->mutex); - - return 0; -} - static long atomisp_vidioc_default(struct file *file, void *fh, bool valid_prio, unsigned int cmd, void *arg) { struct video_device *vdev = video_devdata(file); struct atomisp_device *isp = video_get_drvdata(vdev); - struct atomisp_sub_device *asd; + struct atomisp_sub_device *asd = atomisp_to_video_pipe(vdev)->asd; struct v4l2_subdev *motor; - bool acc_node; int err; - acc_node = !strcmp(vdev->name, "ATOMISP ISP ACC"); - if (acc_node) - asd = atomisp_to_acc_pipe(vdev)->asd; - else - asd = atomisp_to_video_pipe(vdev)->asd; - if (!IS_ISP2401) motor = isp->inputs[asd->input_curr].motor; else motor = isp->motor; switch (cmd) { - case ATOMISP_IOC_G_MOTOR_PRIV_INT_DATA: - case ATOMISP_IOC_S_EXPOSURE: - case ATOMISP_IOC_G_SENSOR_CALIBRATION_GROUP: - case ATOMISP_IOC_G_SENSOR_PRIV_INT_DATA: - case ATOMISP_IOC_EXT_ISP_CTRL: - case ATOMISP_IOC_G_SENSOR_AE_BRACKETING_INFO: - case ATOMISP_IOC_S_SENSOR_AE_BRACKETING_MODE: - case ATOMISP_IOC_G_SENSOR_AE_BRACKETING_MODE: - case ATOMISP_IOC_S_SENSOR_AE_BRACKETING_LUT: - case ATOMISP_IOC_S_SENSOR_EE_CONFIG: - case ATOMISP_IOC_G_UPDATE_EXPOSURE: - /* we do not need take isp->mutex for these IOCTLs */ - break; - default: - rt_mutex_lock(&isp->mutex); - break; - } - switch (cmd) { case ATOMISP_IOC_S_SENSOR_RUNMODE: if (IS_ISP2401) err = atomisp_set_sensor_runmode(asd, arg); @@ -3173,22 +2707,6 @@ static long atomisp_vidioc_default(struct file *file, void *fh, break; } - switch (cmd) { - case ATOMISP_IOC_G_MOTOR_PRIV_INT_DATA: - case ATOMISP_IOC_S_EXPOSURE: - case ATOMISP_IOC_G_SENSOR_CALIBRATION_GROUP: - case ATOMISP_IOC_G_SENSOR_PRIV_INT_DATA: - case ATOMISP_IOC_EXT_ISP_CTRL: - case ATOMISP_IOC_G_SENSOR_AE_BRACKETING_INFO: - case ATOMISP_IOC_S_SENSOR_AE_BRACKETING_MODE: - case ATOMISP_IOC_G_SENSOR_AE_BRACKETING_MODE: - case ATOMISP_IOC_S_SENSOR_AE_BRACKETING_LUT: - case ATOMISP_IOC_G_UPDATE_EXPOSURE: - break; - default: - rt_mutex_unlock(&isp->mutex); - break; - } return err; } @@ -3207,7 +2725,7 @@ const struct v4l2_ioctl_ops atomisp_ioctl_ops = { .vidioc_enum_fmt_vid_cap = atomisp_enum_fmt_cap, .vidioc_try_fmt_vid_cap = atomisp_try_fmt_cap, .vidioc_g_fmt_vid_cap = atomisp_g_fmt_cap, - .vidioc_s_fmt_vid_cap = atomisp_s_fmt_cap, + .vidioc_s_fmt_vid_cap = atomisp_set_fmt, .vidioc_reqbufs = atomisp_reqbufs, .vidioc_querybuf = atomisp_querybuf, .vidioc_qbuf = atomisp_qbuf, @@ -3218,13 +2736,3 @@ const struct v4l2_ioctl_ops atomisp_ioctl_ops = { .vidioc_s_parm = atomisp_s_parm, .vidioc_g_parm = atomisp_g_parm, }; - -const struct v4l2_ioctl_ops atomisp_file_ioctl_ops = { - .vidioc_querycap = atomisp_querycap, - .vidioc_g_fmt_vid_out = atomisp_g_fmt_file, - .vidioc_s_fmt_vid_out = atomisp_s_fmt_file, - .vidioc_s_parm = atomisp_s_parm_file, - .vidioc_reqbufs = atomisp_reqbufs_file, - .vidioc_querybuf = atomisp_querybuf_file, - .vidioc_qbuf = atomisp_qbuf_file, -}; diff --git a/drivers/staging/media/atomisp/pci/atomisp_ioctl.h b/drivers/staging/media/atomisp/pci/atomisp_ioctl.h index d85e0d697a4e..c660f631d371 100644 --- a/drivers/staging/media/atomisp/pci/atomisp_ioctl.h +++ 
b/drivers/staging/media/atomisp/pci/atomisp_ioctl.h @@ -34,27 +34,21 @@ atomisp_format_bridge *atomisp_get_format_bridge(unsigned int pixelformat); const struct atomisp_format_bridge *atomisp_get_format_bridge_from_mbus(u32 mbus_code); +int atomisp_pipe_check(struct atomisp_video_pipe *pipe, bool streaming_ok); + int atomisp_alloc_css_stat_bufs(struct atomisp_sub_device *asd, uint16_t stream_id); -int __atomisp_streamoff(struct file *file, void *fh, enum v4l2_buf_type type); -int __atomisp_reqbufs(struct file *file, void *fh, - struct v4l2_requestbuffers *req); - -int atomisp_reqbufs(struct file *file, void *fh, - struct v4l2_requestbuffers *req); +int atomisp_streamoff(struct file *file, void *fh, enum v4l2_buf_type type); +int atomisp_reqbufs(struct file *file, void *fh, struct v4l2_requestbuffers *req); enum ia_css_pipe_id atomisp_get_css_pipe_id(struct atomisp_sub_device *asd); void atomisp_videobuf_free_buf(struct videobuf_buffer *vb); -extern const struct v4l2_file_operations atomisp_file_fops; - extern const struct v4l2_ioctl_ops atomisp_ioctl_ops; -extern const struct v4l2_ioctl_ops atomisp_file_ioctl_ops; - unsigned int atomisp_streaming_count(struct atomisp_device *isp); /* compat_ioctl for 32bit userland app and 64bit kernel */ diff --git a/drivers/staging/media/atomisp/pci/atomisp_subdev.c b/drivers/staging/media/atomisp/pci/atomisp_subdev.c index 394fe6959033..847dfee6ad78 100644 --- a/drivers/staging/media/atomisp/pci/atomisp_subdev.c +++ b/drivers/staging/media/atomisp/pci/atomisp_subdev.c @@ -373,16 +373,12 @@ int atomisp_subdev_set_selection(struct v4l2_subdev *sd, struct atomisp_sub_device *isp_sd = v4l2_get_subdevdata(sd); struct atomisp_device *isp = isp_sd->isp; struct v4l2_mbus_framefmt *ffmt[ATOMISP_SUBDEV_PADS_NUM]; - u16 vdev_pad = atomisp_subdev_source_pad(sd->devnode); struct v4l2_rect *crop[ATOMISP_SUBDEV_PADS_NUM], *comp[ATOMISP_SUBDEV_PADS_NUM]; - enum atomisp_input_stream_id stream_id; unsigned int i; unsigned int padding_w = pad_w; unsigned int padding_h = pad_h; - stream_id = atomisp_source_pad_to_stream_id(isp_sd, vdev_pad); - isp_get_fmt_rect(sd, sd_state, which, ffmt, crop, comp); dev_dbg(isp->dev, @@ -478,9 +474,10 @@ int atomisp_subdev_set_selection(struct v4l2_subdev *sd, dvs_w = dvs_h = 0; } atomisp_css_video_set_dis_envelope(isp_sd, dvs_w, dvs_h); - atomisp_css_input_set_effective_resolution(isp_sd, stream_id, - crop[pad]->width, crop[pad]->height); - + atomisp_css_input_set_effective_resolution(isp_sd, + ATOMISP_INPUT_STREAM_GENERAL, + crop[pad]->width, + crop[pad]->height); break; } case ATOMISP_SUBDEV_PAD_SOURCE_CAPTURE: @@ -523,14 +520,14 @@ int atomisp_subdev_set_selection(struct v4l2_subdev *sd, if (r->width * crop[ATOMISP_SUBDEV_PAD_SINK]->height < crop[ATOMISP_SUBDEV_PAD_SINK]->width * r->height) atomisp_css_input_set_effective_resolution(isp_sd, - stream_id, + ATOMISP_INPUT_STREAM_GENERAL, rounddown(crop[ATOMISP_SUBDEV_PAD_SINK]-> height * r->width / r->height, ATOM_ISP_STEP_WIDTH), crop[ATOMISP_SUBDEV_PAD_SINK]->height); else atomisp_css_input_set_effective_resolution(isp_sd, - stream_id, + ATOMISP_INPUT_STREAM_GENERAL, crop[ATOMISP_SUBDEV_PAD_SINK]->width, rounddown(crop[ATOMISP_SUBDEV_PAD_SINK]-> width * r->height / r->width, @@ -620,16 +617,12 @@ void atomisp_subdev_set_ffmt(struct v4l2_subdev *sd, struct atomisp_device *isp = isp_sd->isp; struct v4l2_mbus_framefmt *__ffmt = atomisp_subdev_get_ffmt(sd, sd_state, which, pad); - u16 vdev_pad = atomisp_subdev_source_pad(sd->devnode); - enum atomisp_input_stream_id stream_id; 
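/*
 * Illustrative summary, not part of the patch: with multi-stream sensor
 * support removed, the computed stream_id in these hunks collapses to the
 * single remaining stream, so every CSS input call now passes the constant
 * directly, e.g.:
 */
atomisp_css_input_set_resolution(isp_sd, ATOMISP_INPUT_STREAM_GENERAL, ffmt);
atomisp_css_input_set_format(isp_sd, ATOMISP_INPUT_STREAM_GENERAL,
			     fc->atomisp_in_fmt);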
dev_dbg(isp->dev, "ffmt: pad %s w %d h %d code 0x%8.8x which %s\n", atomisp_pad_str(pad), ffmt->width, ffmt->height, ffmt->code, which == V4L2_SUBDEV_FORMAT_TRY ? "V4L2_SUBDEV_FORMAT_TRY" : "V4L2_SUBDEV_FORMAT_ACTIVE"); - stream_id = atomisp_source_pad_to_stream_id(isp_sd, vdev_pad); - switch (pad) { case ATOMISP_SUBDEV_PAD_SINK: { const struct atomisp_in_fmt_conv *fc = @@ -649,15 +642,15 @@ void atomisp_subdev_set_ffmt(struct v4l2_subdev *sd, if (which == V4L2_SUBDEV_FORMAT_ACTIVE) { atomisp_css_input_set_resolution(isp_sd, - stream_id, ffmt); + ATOMISP_INPUT_STREAM_GENERAL, ffmt); atomisp_css_input_set_binning_factor(isp_sd, - stream_id, + ATOMISP_INPUT_STREAM_GENERAL, atomisp_get_sensor_bin_factor(isp_sd)); - atomisp_css_input_set_bayer_order(isp_sd, stream_id, + atomisp_css_input_set_bayer_order(isp_sd, ATOMISP_INPUT_STREAM_GENERAL, fc->bayer_order); - atomisp_css_input_set_format(isp_sd, stream_id, + atomisp_css_input_set_format(isp_sd, ATOMISP_INPUT_STREAM_GENERAL, fc->atomisp_in_fmt); - atomisp_css_set_default_isys_config(isp_sd, stream_id, + atomisp_css_set_default_isys_config(isp_sd, ATOMISP_INPUT_STREAM_GENERAL, ffmt); } @@ -874,12 +867,18 @@ static int s_ctrl(struct v4l2_ctrl *ctrl) { struct atomisp_sub_device *asd = container_of( ctrl->handler, struct atomisp_sub_device, ctrl_handler); + unsigned int streaming; + unsigned long flags; switch (ctrl->id) { case V4L2_CID_RUN_MODE: return __atomisp_update_run_mode(asd); case V4L2_CID_DEPTH_MODE: - if (asd->streaming != ATOMISP_DEVICE_STREAMING_DISABLED) { + /* Use spinlock instead of mutex to avoid possible locking issues */ + spin_lock_irqsave(&asd->isp->lock, flags); + streaming = asd->streaming; + spin_unlock_irqrestore(&asd->isp->lock, flags); + if (streaming != ATOMISP_DEVICE_STREAMING_DISABLED) { dev_err(asd->isp->dev, "ISP is streaming, it is not supported to change the depth mode\n"); return -EINVAL; @@ -1066,7 +1065,6 @@ static void atomisp_init_subdev_pipe(struct atomisp_sub_device *asd, pipe->isp = asd->isp; spin_lock_init(&pipe->irq_lock); INIT_LIST_HEAD(&pipe->activeq); - INIT_LIST_HEAD(&pipe->activeq_out); INIT_LIST_HEAD(&pipe->buffers_waiting_for_param); INIT_LIST_HEAD(&pipe->per_frame_params); memset(pipe->frame_request_config_id, @@ -1076,13 +1074,6 @@ static void atomisp_init_subdev_pipe(struct atomisp_sub_device *asd, sizeof(struct atomisp_css_params_with_list *)); } -static void atomisp_init_acc_pipe(struct atomisp_sub_device *asd, - struct atomisp_acc_pipe *pipe) -{ - pipe->asd = asd; - pipe->isp = asd->isp; -} - /* * isp_subdev_init_entities - Initialize V4L2 subdev and media entity * @asd: ISP CCDC module @@ -1126,9 +1117,6 @@ static int isp_subdev_init_entities(struct atomisp_sub_device *asd) if (ret < 0) return ret; - atomisp_init_subdev_pipe(asd, &asd->video_in, - V4L2_BUF_TYPE_VIDEO_OUTPUT); - atomisp_init_subdev_pipe(asd, &asd->video_out_preview, V4L2_BUF_TYPE_VIDEO_CAPTURE); @@ -1141,13 +1129,6 @@ static int isp_subdev_init_entities(struct atomisp_sub_device *asd) atomisp_init_subdev_pipe(asd, &asd->video_out_video_capture, V4L2_BUF_TYPE_VIDEO_CAPTURE); - atomisp_init_acc_pipe(asd, &asd->video_acc); - - ret = atomisp_video_init(&asd->video_in, "MEMORY", - ATOMISP_RUN_MODE_SDV); - if (ret < 0) - return ret; - ret = atomisp_video_init(&asd->video_out_capture, "CAPTURE", ATOMISP_RUN_MODE_STILL_CAPTURE); if (ret < 0) @@ -1168,8 +1149,6 @@ static int isp_subdev_init_entities(struct atomisp_sub_device *asd) if (ret < 0) return ret; - atomisp_acc_init(&asd->video_acc, "ACC"); - ret = 
v4l2_ctrl_handler_init(&asd->ctrl_handler, 1); if (ret) return ret; @@ -1226,7 +1205,11 @@ int atomisp_create_pads_links(struct atomisp_device *isp) return ret; } } - for (i = 0; i < isp->input_cnt - 2; i++) { + for (i = 0; i < isp->input_cnt; i++) { + /* Don't create links for the test-pattern-generator */ + if (isp->inputs[i].type == TEST_PATTERN) + continue; + ret = media_create_pad_link(&isp->inputs[i].camera->entity, 0, &isp->csi2_port[isp->inputs[i]. port].subdev.entity, @@ -1262,17 +1245,6 @@ int atomisp_create_pads_links(struct atomisp_device *isp) entity, 0, 0); if (ret < 0) return ret; - /* - * file input only supported on subdev0 - * so do not create pad link for subdevs other then subdev0 - */ - if (asd->index) - return 0; - ret = media_create_pad_link(&asd->video_in.vdev.entity, - 0, &asd->subdev.entity, - ATOMISP_SUBDEV_PAD_SINK, 0); - if (ret < 0) - return ret; } return 0; } @@ -1302,87 +1274,55 @@ void atomisp_subdev_unregister_entities(struct atomisp_sub_device *asd) { atomisp_subdev_cleanup_entities(asd); v4l2_device_unregister_subdev(&asd->subdev); - atomisp_video_unregister(&asd->video_in); atomisp_video_unregister(&asd->video_out_preview); atomisp_video_unregister(&asd->video_out_vf); atomisp_video_unregister(&asd->video_out_capture); atomisp_video_unregister(&asd->video_out_video_capture); - atomisp_acc_unregister(&asd->video_acc); } -int atomisp_subdev_register_entities(struct atomisp_sub_device *asd, - struct v4l2_device *vdev) +int atomisp_subdev_register_subdev(struct atomisp_sub_device *asd, + struct v4l2_device *vdev) +{ + return v4l2_device_register_subdev(vdev, &asd->subdev); +} + +int atomisp_subdev_register_video_nodes(struct atomisp_sub_device *asd, + struct v4l2_device *vdev) { int ret; - u32 device_caps; /* * FIXME: check if all device caps are properly initialized. - * Should any of those use V4L2_CAP_META_OUTPUT? Probably yes. + * Should any of those use V4L2_CAP_META_CAPTURE? Probably yes. */ - device_caps = V4L2_CAP_VIDEO_CAPTURE | - V4L2_CAP_STREAMING; - - /* Register the subdev and video node. 
*/ - - ret = v4l2_device_register_subdev(vdev, &asd->subdev); - if (ret < 0) - goto error; - asd->video_out_preview.vdev.v4l2_dev = vdev; - asd->video_out_preview.vdev.device_caps = device_caps | - V4L2_CAP_VIDEO_OUTPUT; + asd->video_out_preview.vdev.device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING; ret = video_register_device(&asd->video_out_preview.vdev, VFL_TYPE_VIDEO, -1); if (ret < 0) goto error; asd->video_out_capture.vdev.v4l2_dev = vdev; - asd->video_out_capture.vdev.device_caps = device_caps | - V4L2_CAP_VIDEO_OUTPUT; + asd->video_out_capture.vdev.device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING; ret = video_register_device(&asd->video_out_capture.vdev, VFL_TYPE_VIDEO, -1); if (ret < 0) goto error; asd->video_out_vf.vdev.v4l2_dev = vdev; - asd->video_out_vf.vdev.device_caps = device_caps | - V4L2_CAP_VIDEO_OUTPUT; + asd->video_out_vf.vdev.device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING; ret = video_register_device(&asd->video_out_vf.vdev, VFL_TYPE_VIDEO, -1); if (ret < 0) goto error; asd->video_out_video_capture.vdev.v4l2_dev = vdev; - asd->video_out_video_capture.vdev.device_caps = device_caps | - V4L2_CAP_VIDEO_OUTPUT; + asd->video_out_video_capture.vdev.device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING; ret = video_register_device(&asd->video_out_video_capture.vdev, VFL_TYPE_VIDEO, -1); if (ret < 0) goto error; - asd->video_acc.vdev.v4l2_dev = vdev; - asd->video_acc.vdev.device_caps = device_caps | - V4L2_CAP_VIDEO_OUTPUT; - ret = video_register_device(&asd->video_acc.vdev, - VFL_TYPE_VIDEO, -1); - if (ret < 0) - goto error; - - /* - * file input only supported on subdev0 - * so do not create video node for subdevs other then subdev0 - */ - if (asd->index) - return 0; - - asd->video_in.vdev.v4l2_dev = vdev; - asd->video_in.vdev.device_caps = device_caps | - V4L2_CAP_VIDEO_CAPTURE; - ret = video_register_device(&asd->video_in.vdev, - VFL_TYPE_VIDEO, -1); - if (ret < 0) - goto error; return 0; @@ -1415,7 +1355,6 @@ int atomisp_subdev_init(struct atomisp_device *isp) return -ENOMEM; for (i = 0; i < isp->num_of_streams; i++) { asd = &isp->asd[i]; - spin_lock_init(&asd->lock); asd->isp = isp; isp_subdev_init_params(asd); asd->index = i; diff --git a/drivers/staging/media/atomisp/pci/atomisp_subdev.h b/drivers/staging/media/atomisp/pci/atomisp_subdev.h index 798a93793a9a..a1f4da35235d 100644 --- a/drivers/staging/media/atomisp/pci/atomisp_subdev.h +++ b/drivers/staging/media/atomisp/pci/atomisp_subdev.h @@ -70,9 +70,7 @@ struct atomisp_video_pipe { enum v4l2_buf_type type; struct media_pad pad; struct videobuf_queue capq; - struct videobuf_queue outq; struct list_head activeq; - struct list_head activeq_out; /* * the buffers waiting for per-frame parameters, this is only valid * in per-frame setting mode. @@ -86,9 +84,10 @@ struct atomisp_video_pipe { unsigned int buffers_in_css; - /* irq_lock is used to protect video buffer state change operations and - * also to make activeq, activeq_out, capq and outq list - * operations atomic. */ + /* + * irq_lock is used to protect video buffer state change operations and + * also to make activeq and capq operations atomic. 
+ */ spinlock_t irq_lock; unsigned int users; @@ -109,23 +108,6 @@ struct atomisp_video_pipe { */ unsigned int frame_request_config_id[VIDEO_MAX_FRAME]; struct atomisp_css_params_with_list *frame_params[VIDEO_MAX_FRAME]; - - /* - * move wdt from asd struct to create wdt for each pipe - */ - /* ISP2401 */ - struct timer_list wdt; - unsigned int wdt_duration; /* in jiffies */ - unsigned long wdt_expires; - atomic_t wdt_count; -}; - -struct atomisp_acc_pipe { - struct video_device vdev; - unsigned int users; - bool running; - struct atomisp_sub_device *asd; - struct atomisp_device *isp; }; struct atomisp_pad_format { @@ -267,28 +249,6 @@ struct atomisp_css_params_with_list { struct list_head list; }; -struct atomisp_acc_fw { - struct ia_css_fw_info *fw; - unsigned int handle; - unsigned int flags; - unsigned int type; - struct { - size_t length; - unsigned long css_ptr; - } args[ATOMISP_ACC_NR_MEMORY]; - struct list_head list; -}; - -struct atomisp_map { - ia_css_ptr ptr; - size_t length; - struct list_head list; - /* FIXME: should keep book which maps are currently used - * by binaries and not allow releasing those - * which are in use. Implement by reference counting. - */ -}; - struct atomisp_sub_device { struct v4l2_subdev subdev; struct media_pad pads[ATOMISP_SUBDEV_PADS_NUM]; @@ -297,15 +257,12 @@ struct atomisp_sub_device { enum atomisp_subdev_input_entity input; unsigned int output; - struct atomisp_video_pipe video_in; struct atomisp_video_pipe video_out_capture; /* capture output */ struct atomisp_video_pipe video_out_vf; /* viewfinder output */ struct atomisp_video_pipe video_out_preview; /* preview output */ - struct atomisp_acc_pipe video_acc; /* video pipe main output */ struct atomisp_video_pipe video_out_video_capture; /* struct isp_subdev_params params; */ - spinlock_t lock; struct atomisp_device *isp; struct v4l2_ctrl_handler ctrl_handler; struct v4l2_ctrl *fmt_auto; @@ -356,15 +313,16 @@ struct atomisp_sub_device { /* This field specifies which camera (v4l2 input) is selected. */ int input_curr; - /* This field specifies which sensor is being selected when there - are multiple sensors connected to the same MIPI port. */ - int sensor_curr; atomic_t sof_count; atomic_t sequence; /* Sequence value that is assigned to buffer. */ atomic_t sequence_temp; - unsigned int streaming; /* Hold both mutex and lock to change this */ + /* + * Writers of streaming must hold both isp->mutex and isp->lock. + * Readers of streaming need to hold only one of the two locks. 
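/*
 * Sketch of the locking rule documented above, mirroring the streamon and
 * s_ctrl hunks earlier in this patch (locals shown only for illustration):
 */
unsigned long flags;
unsigned int streaming;

/* writer: isp->mutex already held, take isp->lock around the store */
spin_lock_irqsave(&isp->lock, flags);
asd->streaming = ATOMISP_DEVICE_STREAMING_ENABLED;
spin_unlock_irqrestore(&isp->lock, flags);

/* reader that cannot take the mutex, e.g. a V4L2 control handler */
spin_lock_irqsave(&asd->isp->lock, flags);
streaming = asd->streaming;
spin_unlock_irqrestore(&asd->isp->lock, flags);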
+ */ + unsigned int streaming; bool stream_prepared; /* whether css stream is created */ /* subdev index: will be used to show which subdev is holding the @@ -390,11 +348,6 @@ struct atomisp_sub_device { int raw_buffer_locked_count; spinlock_t raw_buffer_bitmap_lock; - /* ISP 2400 */ - struct timer_list wdt; - unsigned int wdt_duration; /* in jiffies */ - unsigned long wdt_expires; - /* ISP2401 */ bool re_trigger_capture; @@ -450,8 +403,10 @@ int atomisp_update_run_mode(struct atomisp_sub_device *asd); void atomisp_subdev_cleanup_pending_events(struct atomisp_sub_device *asd); void atomisp_subdev_unregister_entities(struct atomisp_sub_device *asd); -int atomisp_subdev_register_entities(struct atomisp_sub_device *asd, - struct v4l2_device *vdev); +int atomisp_subdev_register_subdev(struct atomisp_sub_device *asd, + struct v4l2_device *vdev); +int atomisp_subdev_register_video_nodes(struct atomisp_sub_device *asd, + struct v4l2_device *vdev); int atomisp_subdev_init(struct atomisp_device *isp); void atomisp_subdev_cleanup(struct atomisp_device *isp); int atomisp_create_pads_links(struct atomisp_device *isp); diff --git a/drivers/staging/media/atomisp/pci/atomisp_v4l2.c b/drivers/staging/media/atomisp/pci/atomisp_v4l2.c index 643ba981601b..d5bb9906ca6f 100644 --- a/drivers/staging/media/atomisp/pci/atomisp_v4l2.c +++ b/drivers/staging/media/atomisp/pci/atomisp_v4l2.c @@ -34,7 +34,6 @@ #include "atomisp_cmd.h" #include "atomisp_common.h" #include "atomisp_fops.h" -#include "atomisp_file.h" #include "atomisp_ioctl.h" #include "atomisp_internal.h" #include "atomisp-regs.h" @@ -442,12 +441,7 @@ int atomisp_video_init(struct atomisp_video_pipe *video, const char *name, video->pad.flags = MEDIA_PAD_FL_SINK; video->vdev.fops = &atomisp_fops; video->vdev.ioctl_ops = &atomisp_ioctl_ops; - break; - case V4L2_BUF_TYPE_VIDEO_OUTPUT: - direction = "input"; - video->pad.flags = MEDIA_PAD_FL_SOURCE; - video->vdev.fops = &atomisp_file_fops; - video->vdev.ioctl_ops = &atomisp_file_ioctl_ops; + video->vdev.lock = &video->isp->mutex; break; default: return -EINVAL; @@ -467,18 +461,6 @@ int atomisp_video_init(struct atomisp_video_pipe *video, const char *name, return 0; } -void atomisp_acc_init(struct atomisp_acc_pipe *video, const char *name) -{ - video->vdev.fops = &atomisp_fops; - video->vdev.ioctl_ops = &atomisp_ioctl_ops; - - /* Initialize the video device. */ - snprintf(video->vdev.name, sizeof(video->vdev.name), - "ATOMISP ISP %s", name); - video->vdev.release = video_device_release_empty; - video_set_drvdata(&video->vdev, video->isp); -} - void atomisp_video_unregister(struct atomisp_video_pipe *video) { if (video_is_registered(&video->vdev)) { @@ -487,12 +469,6 @@ void atomisp_video_unregister(struct atomisp_video_pipe *video) } } -void atomisp_acc_unregister(struct atomisp_acc_pipe *video) -{ - if (video_is_registered(&video->vdev)) - video_unregister_device(&video->vdev); -} - static int atomisp_save_iunit_reg(struct atomisp_device *isp) { struct pci_dev *pdev = to_pci_dev(isp->dev); @@ -1031,7 +1007,6 @@ static int atomisp_subdev_probe(struct atomisp_device *isp) &subdevs->v4l2_subdev.board_info; struct i2c_adapter *adapter = i2c_get_adapter(subdevs->v4l2_subdev.i2c_adapter_id); - int sensor_num, i; dev_info(isp->dev, "Probing Subdev %s\n", board_info->type); @@ -1090,22 +1065,7 @@ static int atomisp_subdev_probe(struct atomisp_device *isp) * pixel_format. 
*/ isp->inputs[isp->input_cnt].frame_size.pixel_format = 0; - isp->inputs[isp->input_cnt].camera_caps = - atomisp_get_default_camera_caps(); - sensor_num = isp->inputs[isp->input_cnt] - .camera_caps->sensor_num; isp->input_cnt++; - for (i = 1; i < sensor_num; i++) { - if (isp->input_cnt >= ATOM_ISP_MAX_INPUTS) { - dev_warn(isp->dev, - "atomisp inputs out of range\n"); - break; - } - isp->inputs[isp->input_cnt] = - isp->inputs[isp->input_cnt - 1]; - isp->inputs[isp->input_cnt].sensor_index = i; - isp->input_cnt++; - } break; case CAMERA_MOTOR: if (isp->motor) { @@ -1158,7 +1118,6 @@ static void atomisp_unregister_entities(struct atomisp_device *isp) for (i = 0; i < isp->num_of_streams; i++) atomisp_subdev_unregister_entities(&isp->asd[i]); atomisp_tpg_unregister_entities(&isp->tpg); - atomisp_file_input_unregister_entities(&isp->file_dev); for (i = 0; i < ATOMISP_CAMERA_NR_PORTS; i++) atomisp_mipi_csi2_unregister_entities(&isp->csi2_port[i]); @@ -1210,13 +1169,6 @@ static int atomisp_register_entities(struct atomisp_device *isp) goto csi_and_subdev_probe_failed; } - ret = - atomisp_file_input_register_entities(&isp->file_dev, &isp->v4l2_dev); - if (ret < 0) { - dev_err(isp->dev, "atomisp_file_input_register_entities\n"); - goto file_input_register_failed; - } - ret = atomisp_tpg_register_entities(&isp->tpg, &isp->v4l2_dev); if (ret < 0) { dev_err(isp->dev, "atomisp_tpg_register_entities\n"); @@ -1226,10 +1178,9 @@ static int atomisp_register_entities(struct atomisp_device *isp) for (i = 0; i < isp->num_of_streams; i++) { struct atomisp_sub_device *asd = &isp->asd[i]; - ret = atomisp_subdev_register_entities(asd, &isp->v4l2_dev); + ret = atomisp_subdev_register_subdev(asd, &isp->v4l2_dev); if (ret < 0) { - dev_err(isp->dev, - "atomisp_subdev_register_entities fail\n"); + dev_err(isp->dev, "atomisp_subdev_register_subdev fail\n"); for (; i > 0; i--) atomisp_subdev_unregister_entities( &isp->asd[i - 1]); @@ -1267,31 +1218,17 @@ static int atomisp_register_entities(struct atomisp_device *isp) } } - dev_dbg(isp->dev, - "FILE_INPUT enable, camera_cnt: %d\n", isp->input_cnt); - isp->inputs[isp->input_cnt].type = FILE_INPUT; - isp->inputs[isp->input_cnt].port = -1; - isp->inputs[isp->input_cnt].camera_caps = - atomisp_get_default_camera_caps(); - isp->inputs[isp->input_cnt++].camera = &isp->file_dev.sd; - if (isp->input_cnt < ATOM_ISP_MAX_INPUTS) { dev_dbg(isp->dev, "TPG detected, camera_cnt: %d\n", isp->input_cnt); isp->inputs[isp->input_cnt].type = TEST_PATTERN; isp->inputs[isp->input_cnt].port = -1; - isp->inputs[isp->input_cnt].camera_caps = - atomisp_get_default_camera_caps(); isp->inputs[isp->input_cnt++].camera = &isp->tpg.sd; } else { dev_warn(isp->dev, "too many atomisp inputs, TPG ignored.\n"); } - ret = v4l2_device_register_subdev_nodes(&isp->v4l2_dev); - if (ret < 0) - goto link_failed; - - return media_device_register(&isp->media_dev); + return 0; link_failed: for (i = 0; i < isp->num_of_streams; i++) @@ -1304,8 +1241,6 @@ wq_alloc_failed: subdev_register_failed: atomisp_tpg_unregister_entities(&isp->tpg); tpg_register_failed: - atomisp_file_input_unregister_entities(&isp->file_dev); -file_input_register_failed: for (i = 0; i < ATOMISP_CAMERA_NR_PORTS; i++) atomisp_mipi_csi2_unregister_entities(&isp->csi2_port[i]); csi_and_subdev_probe_failed: @@ -1316,6 +1251,27 @@ v4l2_device_failed: return ret; } +static int atomisp_register_device_nodes(struct atomisp_device *isp) +{ + int i, err; + + for (i = 0; i < isp->num_of_streams; i++) { + err = 
atomisp_subdev_register_video_nodes(&isp->asd[i], &isp->v4l2_dev); + if (err) + return err; + } + + err = atomisp_create_pads_links(isp); + if (err) + return err; + + err = v4l2_device_register_subdev_nodes(&isp->v4l2_dev); + if (err) + return err; + + return media_device_register(&isp->media_dev); +} + static int atomisp_initialize_modules(struct atomisp_device *isp) { int ret; @@ -1326,13 +1282,6 @@ static int atomisp_initialize_modules(struct atomisp_device *isp) goto error_mipi_csi2; } - ret = atomisp_file_input_init(isp); - if (ret < 0) { - dev_err(isp->dev, - "file input device initialization failed\n"); - goto error_file_input; - } - ret = atomisp_tpg_init(isp); if (ret < 0) { dev_err(isp->dev, "tpg initialization failed\n"); @@ -1350,8 +1299,6 @@ static int atomisp_initialize_modules(struct atomisp_device *isp) error_isp_subdev: error_tpg: atomisp_tpg_cleanup(isp); -error_file_input: - atomisp_file_input_cleanup(isp); error_mipi_csi2: atomisp_mipi_csi2_cleanup(isp); return ret; @@ -1360,7 +1307,6 @@ error_mipi_csi2: static void atomisp_uninitialize_modules(struct atomisp_device *isp) { atomisp_tpg_cleanup(isp); - atomisp_file_input_cleanup(isp); atomisp_mipi_csi2_cleanup(isp); } @@ -1470,39 +1416,6 @@ static bool is_valid_device(struct pci_dev *pdev, const struct pci_device_id *id return true; } -static int init_atomisp_wdts(struct atomisp_device *isp) -{ - int i, err; - - atomic_set(&isp->wdt_work_queued, 0); - isp->wdt_work_queue = alloc_workqueue(isp->v4l2_dev.name, 0, 1); - if (!isp->wdt_work_queue) { - dev_err(isp->dev, "Failed to initialize wdt work queue\n"); - err = -ENOMEM; - goto alloc_fail; - } - INIT_WORK(&isp->wdt_work, atomisp_wdt_work); - - for (i = 0; i < isp->num_of_streams; i++) { - struct atomisp_sub_device *asd = &isp->asd[i]; - - if (!IS_ISP2401) { - timer_setup(&asd->wdt, atomisp_wdt, 0); - } else { - timer_setup(&asd->video_out_capture.wdt, - atomisp_wdt, 0); - timer_setup(&asd->video_out_preview.wdt, - atomisp_wdt, 0); - timer_setup(&asd->video_out_vf.wdt, atomisp_wdt, 0); - timer_setup(&asd->video_out_video_capture.wdt, - atomisp_wdt, 0); - } - } - return 0; -alloc_fail: - return err; -} - #define ATOM_ISP_PCI_BAR 0 static int atomisp_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) @@ -1551,9 +1464,7 @@ static int atomisp_pci_probe(struct pci_dev *pdev, const struct pci_device_id *i dev_dbg(&pdev->dev, "atomisp mmio base: %p\n", isp->base); - rt_mutex_init(&isp->mutex); - rt_mutex_init(&isp->loading); - mutex_init(&isp->streamoff_mutex); + mutex_init(&isp->mutex); spin_lock_init(&isp->lock); /* This is not a true PCI device on SoC, so the delay is not needed. */ @@ -1725,8 +1636,6 @@ static int atomisp_pci_probe(struct pci_dev *pdev, const struct pci_device_id *i pci_write_config_dword(pdev, MRFLD_PCI_CSI_AFE_TRIM_CONTROL, csi_afe_trim); } - rt_mutex_lock(&isp->loading); - err = atomisp_initialize_modules(isp); if (err < 0) { dev_err(&pdev->dev, "atomisp_initialize_modules (%d)\n", err); @@ -1738,13 +1647,8 @@ static int atomisp_pci_probe(struct pci_dev *pdev, const struct pci_device_id *i dev_err(&pdev->dev, "atomisp_register_entities failed (%d)\n", err); goto register_entities_fail; } - err = atomisp_create_pads_links(isp); - if (err < 0) - goto register_entities_fail; - /* init atomisp wdts */ - err = init_atomisp_wdts(isp); - if (err != 0) - goto wdt_work_queue_fail; + + INIT_WORK(&isp->assert_recovery_work, atomisp_assert_recovery_work); /* save the iunit context only once after all the values are init'ed. 
*/ atomisp_save_iunit_reg(isp); @@ -1777,8 +1681,10 @@ static int atomisp_pci_probe(struct pci_dev *pdev, const struct pci_device_id *i release_firmware(isp->firmware); isp->firmware = NULL; isp->css_env.isp_css_fw.data = NULL; - isp->ready = true; - rt_mutex_unlock(&isp->loading); + + err = atomisp_register_device_nodes(isp); + if (err) + goto css_init_fail; atomisp_drvfs_init(isp); @@ -1789,13 +1695,10 @@ css_init_fail: request_irq_fail: hmm_cleanup(); pm_runtime_get_noresume(&pdev->dev); - destroy_workqueue(isp->wdt_work_queue); -wdt_work_queue_fail: atomisp_unregister_entities(isp); register_entities_fail: atomisp_uninitialize_modules(isp); initialize_modules_fail: - rt_mutex_unlock(&isp->loading); cpu_latency_qos_remove_request(&isp->pm_qos); atomisp_msi_irq_uninit(isp); pci_free_irq_vectors(pdev); @@ -1851,9 +1754,6 @@ static void atomisp_pci_remove(struct pci_dev *pdev) atomisp_msi_irq_uninit(isp); atomisp_unregister_entities(isp); - destroy_workqueue(isp->wdt_work_queue); - atomisp_file_input_cleanup(isp); - release_firmware(isp->firmware); } diff --git a/drivers/staging/media/atomisp/pci/atomisp_v4l2.h b/drivers/staging/media/atomisp/pci/atomisp_v4l2.h index 72611b8286a4..ccf1c0ac17b2 100644 --- a/drivers/staging/media/atomisp/pci/atomisp_v4l2.h +++ b/drivers/staging/media/atomisp/pci/atomisp_v4l2.h @@ -22,16 +22,13 @@ #define __ATOMISP_V4L2_H__ struct atomisp_video_pipe; -struct atomisp_acc_pipe; struct v4l2_device; struct atomisp_device; struct firmware; int atomisp_video_init(struct atomisp_video_pipe *video, const char *name, unsigned int run_mode); -void atomisp_acc_init(struct atomisp_acc_pipe *video, const char *name); void atomisp_video_unregister(struct atomisp_video_pipe *video); -void atomisp_acc_unregister(struct atomisp_acc_pipe *video); const struct firmware *atomisp_load_firmware(struct atomisp_device *isp); int atomisp_csi_lane_config(struct atomisp_device *isp); diff --git a/drivers/staging/media/atomisp/pci/hmm/hmm_bo.c b/drivers/staging/media/atomisp/pci/hmm/hmm_bo.c index f50494123f03..a5fd6d38d3c4 100644 --- a/drivers/staging/media/atomisp/pci/hmm/hmm_bo.c +++ b/drivers/staging/media/atomisp/pci/hmm/hmm_bo.c @@ -44,16 +44,6 @@ #include "hmm/hmm_common.h" #include "hmm/hmm_bo.h" -static unsigned int order_to_nr(unsigned int order) -{ - return 1U << order; -} - -static unsigned int nr_to_order_bottom(unsigned int nr) -{ - return fls(nr) - 1; -} - static int __bo_init(struct hmm_bo_device *bdev, struct hmm_buffer_object *bo, unsigned int pgnr) { @@ -625,136 +615,40 @@ found: return bo; } -static void free_private_bo_pages(struct hmm_buffer_object *bo, - int free_pgnr) +static void free_pages_bulk_array(unsigned long nr_pages, struct page **page_array) { - int i, ret; + unsigned long i; - for (i = 0; i < free_pgnr; i++) { - ret = set_pages_wb(bo->pages[i], 1); - if (ret) - dev_err(atomisp_dev, - "set page to WB err ...ret = %d\n", - ret); - /* - W/A: set_pages_wb seldom return value = -EFAULT - indicate that address of page is not in valid - range(0xffff880000000000~0xffffc7ffffffffff) - then, _free_pages would panic; Do not know why page - address be valid,it maybe memory corruption by lowmemory - */ - if (!ret) { - __free_pages(bo->pages[i], 0); - } - } + for (i = 0; i < nr_pages; i++) + __free_pages(page_array[i], 0); +} + +static void free_private_bo_pages(struct hmm_buffer_object *bo) +{ + set_pages_array_wb(bo->pages, bo->pgnr); + free_pages_bulk_array(bo->pgnr, bo->pages); } /*Allocate pages which will be used only by ISP*/ static int 
alloc_private_pages(struct hmm_buffer_object *bo) { + const gfp_t gfp = __GFP_NOWARN | __GFP_RECLAIM | __GFP_FS; int ret; - unsigned int pgnr, order, blk_pgnr, alloc_pgnr; - struct page *pages; - gfp_t gfp = GFP_NOWAIT | __GFP_NOWARN; /* REVISIT: need __GFP_FS too? */ - int i, j; - int failure_number = 0; - bool reduce_order = false; - bool lack_mem = true; - - pgnr = bo->pgnr; - - i = 0; - alloc_pgnr = 0; - - while (pgnr) { - order = nr_to_order_bottom(pgnr); - /* - * if be short of memory, we will set order to 0 - * everytime. - */ - if (lack_mem) - order = HMM_MIN_ORDER; - else if (order > HMM_MAX_ORDER) - order = HMM_MAX_ORDER; -retry: - /* - * When order > HMM_MIN_ORDER, for performance reasons we don't - * want alloc_pages() to sleep. In case it fails and fallbacks - * to HMM_MIN_ORDER or in case the requested order is originally - * the minimum value, we can allow alloc_pages() to sleep for - * robustness purpose. - * - * REVISIT: why __GFP_FS is necessary? - */ - if (order == HMM_MIN_ORDER) { - gfp &= ~GFP_NOWAIT; - gfp |= __GFP_RECLAIM | __GFP_FS; - } - - pages = alloc_pages(gfp, order); - if (unlikely(!pages)) { - /* - * in low memory case, if allocation page fails, - * we turn to try if order=0 allocation could - * succeed. if order=0 fails too, that means there is - * no memory left. - */ - if (order == HMM_MIN_ORDER) { - dev_err(atomisp_dev, - "%s: cannot allocate pages\n", - __func__); - goto cleanup; - } - order = HMM_MIN_ORDER; - failure_number++; - reduce_order = true; - /* - * if fail two times continuously, we think be short - * of memory now. - */ - if (failure_number == 2) { - lack_mem = true; - failure_number = 0; - } - goto retry; - } else { - blk_pgnr = order_to_nr(order); - - /* - * set memory to uncacheable -- UC_MINUS - */ - ret = set_pages_uc(pages, blk_pgnr); - if (ret) { - dev_err(atomisp_dev, - "set page uncacheablefailed.\n"); - - __free_pages(pages, order); - goto cleanup; - } - - for (j = 0; j < blk_pgnr; j++, i++) { - bo->pages[i] = pages + j; - } - - pgnr -= blk_pgnr; + ret = alloc_pages_bulk_array(gfp, bo->pgnr, bo->pages); + if (ret != bo->pgnr) { + free_pages_bulk_array(ret, bo->pages); + return -ENOMEM; + } - /* - * if order is not reduced this time, clear - * failure_number. 
- */ - if (reduce_order) - reduce_order = false; - else - failure_number = 0; - } + ret = set_pages_array_uc(bo->pages, bo->pgnr); + if (ret) { + dev_err(atomisp_dev, "set pages uncacheable failed.\n"); + free_pages_bulk_array(bo->pgnr, bo->pages); + return ret; } return 0; -cleanup: - alloc_pgnr = i; - free_private_bo_pages(bo, alloc_pgnr); - return -ENOMEM; } static void free_user_pages(struct hmm_buffer_object *bo, @@ -762,12 +656,8 @@ static void free_user_pages(struct hmm_buffer_object *bo, { int i; - if (bo->mem_type == HMM_BO_MEM_TYPE_PFN) { - unpin_user_pages(bo->pages, page_nr); - } else { - for (i = 0; i < page_nr; i++) - put_page(bo->pages[i]); - } + for (i = 0; i < page_nr; i++) + put_page(bo->pages[i]); } /* @@ -777,43 +667,13 @@ static int alloc_user_pages(struct hmm_buffer_object *bo, const void __user *userptr) { int page_nr; - struct vm_area_struct *vma; - - mutex_unlock(&bo->mutex); - mmap_read_lock(current->mm); - vma = find_vma(current->mm, (unsigned long)userptr); - mmap_read_unlock(current->mm); - if (!vma) { - dev_err(atomisp_dev, "find_vma failed\n"); - mutex_lock(&bo->mutex); - return -EFAULT; - } - mutex_lock(&bo->mutex); - /* - * Handle frame buffer allocated in other kerenl space driver - * and map to user space - */ userptr = untagged_addr(userptr); - if (vma->vm_flags & (VM_IO | VM_PFNMAP)) { - page_nr = pin_user_pages((unsigned long)userptr, bo->pgnr, - FOLL_LONGTERM | FOLL_WRITE, - bo->pages, NULL); - bo->mem_type = HMM_BO_MEM_TYPE_PFN; - } else { - /*Handle frame buffer allocated in user space*/ - mutex_unlock(&bo->mutex); - page_nr = get_user_pages_fast((unsigned long)userptr, - (int)(bo->pgnr), 1, bo->pages); - mutex_lock(&bo->mutex); - bo->mem_type = HMM_BO_MEM_TYPE_USER; - } - - dev_dbg(atomisp_dev, "%s: %d %s pages were allocated as 0x%08x\n", - __func__, - bo->pgnr, - bo->mem_type == HMM_BO_MEM_TYPE_USER ? 
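The rewritten alloc_private_pages() above replaces the old order-based retry loop with the page bulk allocator. Below is a minimal sketch of that pattern, not the driver's code: isp_alloc_pages()/isp_free_pages() are hypothetical helpers, and the caller is assumed to supply a page array sized for nr entries.

#include <linux/gfp.h>
#include <linux/mm.h>
#include <asm/set_memory.h>	/* x86 set_pages_array_uc()/_wb() */

/* Allocate nr order-0 pages in one call and mark them uncacheable. */
static int isp_alloc_pages(struct page **pages, unsigned int nr)
{
	const gfp_t gfp = __GFP_NOWARN | __GFP_RECLAIM | __GFP_FS;
	unsigned long got, i;
	int ret;

	/* Returns how many entries of pages[] were actually filled. */
	got = alloc_pages_bulk_array(gfp, nr, pages);
	if (got != nr) {
		for (i = 0; i < got; i++)
			__free_pages(pages[i], 0);
		return -ENOMEM;
	}

	ret = set_pages_array_uc(pages, nr);
	if (ret) {
		for (i = 0; i < nr; i++)
			__free_pages(pages[i], 0);
		return ret;
	}

	return 0;
}

static void isp_free_pages(struct page **pages, unsigned int nr)
{
	unsigned int i;

	set_pages_array_wb(pages, nr);	/* undo the UC mapping first */
	for (i = 0; i < nr; i++)
		__free_pages(pages[i], 0);
}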
"user" : "pfn", page_nr); + /* Handle frame buffer allocated in user space */ + mutex_unlock(&bo->mutex); + page_nr = get_user_pages_fast((unsigned long)userptr, bo->pgnr, 1, bo->pages); + mutex_lock(&bo->mutex); /* can be written by caller, not forced */ if (page_nr != bo->pgnr) { @@ -854,7 +714,7 @@ int hmm_bo_alloc_pages(struct hmm_buffer_object *bo, mutex_lock(&bo->mutex); check_bo_status_no_goto(bo, HMM_BO_PAGE_ALLOCED, status_err); - bo->pages = kmalloc_array(bo->pgnr, sizeof(struct page *), GFP_KERNEL); + bo->pages = kcalloc(bo->pgnr, sizeof(struct page *), GFP_KERNEL); if (unlikely(!bo->pages)) { ret = -ENOMEM; goto alloc_err; @@ -910,7 +770,7 @@ void hmm_bo_free_pages(struct hmm_buffer_object *bo) bo->status &= (~HMM_BO_PAGE_ALLOCED); if (bo->type == HMM_BO_PRIVATE) - free_private_bo_pages(bo, bo->pgnr); + free_private_bo_pages(bo); else if (bo->type == HMM_BO_USER) free_user_pages(bo, bo->pgnr); else diff --git a/drivers/staging/media/atomisp/pci/sh_css_params.c b/drivers/staging/media/atomisp/pci/sh_css_params.c index 0e7c38b2bfe3..67915d76a87f 100644 --- a/drivers/staging/media/atomisp/pci/sh_css_params.c +++ b/drivers/staging/media/atomisp/pci/sh_css_params.c @@ -950,8 +950,8 @@ sh_css_set_black_frame(struct ia_css_stream *stream, params->fpn_config.data = NULL; } if (!params->fpn_config.data) { - params->fpn_config.data = kvmalloc(height * width * - sizeof(short), GFP_KERNEL); + params->fpn_config.data = kvmalloc(array3_size(height, width, sizeof(short)), + GFP_KERNEL); if (!params->fpn_config.data) { IA_CSS_ERROR("out of memory"); IA_CSS_LEAVE_ERR_PRIVATE(-ENOMEM); diff --git a/drivers/staging/media/imx/imx-media-utils.c b/drivers/staging/media/imx/imx-media-utils.c index 294c808b2ebe..3e7462112649 100644 --- a/drivers/staging/media/imx/imx-media-utils.c +++ b/drivers/staging/media/imx/imx-media-utils.c @@ -863,16 +863,16 @@ int imx_media_pipeline_set_stream(struct imx_media_dev *imxmd, mutex_lock(&imxmd->md.graph_mutex); if (on) { - ret = __media_pipeline_start(entity, &imxmd->pipe); + ret = __media_pipeline_start(entity->pads, &imxmd->pipe); if (ret) goto out; ret = v4l2_subdev_call(sd, video, s_stream, 1); if (ret) - __media_pipeline_stop(entity); + __media_pipeline_stop(entity->pads); } else { v4l2_subdev_call(sd, video, s_stream, 0); - if (entity->pipe) - __media_pipeline_stop(entity); + if (media_pad_pipeline(entity->pads)) + __media_pipeline_stop(entity->pads); } out: diff --git a/drivers/staging/media/imx/imx7-media-csi.c b/drivers/staging/media/imx/imx7-media-csi.c index cbc66ef0eda8..e5b550ccfa22 100644 --- a/drivers/staging/media/imx/imx7-media-csi.c +++ b/drivers/staging/media/imx/imx7-media-csi.c @@ -1360,7 +1360,7 @@ static int imx7_csi_video_start_streaming(struct vb2_queue *vq, mutex_lock(&csi->mdev.graph_mutex); - ret = __media_pipeline_start(&csi->sd.entity, &csi->pipe); + ret = __video_device_pipeline_start(csi->vdev, &csi->pipe); if (ret) goto err_unlock; @@ -1373,7 +1373,7 @@ static int imx7_csi_video_start_streaming(struct vb2_queue *vq, return 0; err_stop: - __media_pipeline_stop(&csi->sd.entity); + __video_device_pipeline_stop(csi->vdev); err_unlock: mutex_unlock(&csi->mdev.graph_mutex); dev_err(csi->dev, "pipeline start failed with %d\n", ret); @@ -1396,7 +1396,7 @@ static void imx7_csi_video_stop_streaming(struct vb2_queue *vq) mutex_lock(&csi->mdev.graph_mutex); v4l2_subdev_call(&csi->sd, video, s_stream, 0); - __media_pipeline_stop(&csi->sd.entity); + __video_device_pipeline_stop(csi->vdev); mutex_unlock(&csi->mdev.graph_mutex); /* release all 
active buffers */ diff --git a/drivers/staging/media/ipu3/include/uapi/intel-ipu3.h b/drivers/staging/media/ipu3/include/uapi/intel-ipu3.h index dbdd015ce220..caa358e0bae4 100644 --- a/drivers/staging/media/ipu3/include/uapi/intel-ipu3.h +++ b/drivers/staging/media/ipu3/include/uapi/intel-ipu3.h @@ -626,8 +626,11 @@ struct ipu3_uapi_stats_3a { * @b: white balance gain for B channel. * @gb: white balance gain for Gb channel. * - * Precision u3.13, range [0, 8). White balance correction is done by applying - * a multiplicative gain to each color channels prior to BNR. + * For BNR parameters WB gain factor for the three channels [Ggr, Ggb, Gb, Gr]. + * Their precision is U3.13 and the range is (0, 8) and the actual gain is + * Gx + 1, it is typically Gx = 1. + * + * Pout = {Pin * (1 + Gx)}. */ struct ipu3_uapi_bnr_static_config_wb_gains_config { __u16 gr; diff --git a/drivers/staging/media/ipu3/ipu3-v4l2.c b/drivers/staging/media/ipu3/ipu3-v4l2.c index d1c539cefba8..ce13e746c15f 100644 --- a/drivers/staging/media/ipu3/ipu3-v4l2.c +++ b/drivers/staging/media/ipu3/ipu3-v4l2.c @@ -192,33 +192,30 @@ static int imgu_subdev_get_selection(struct v4l2_subdev *sd, struct v4l2_subdev_state *sd_state, struct v4l2_subdev_selection *sel) { - struct v4l2_rect *try_sel, *r; - struct imgu_v4l2_subdev *imgu_sd = container_of(sd, - struct imgu_v4l2_subdev, - subdev); + struct imgu_v4l2_subdev *imgu_sd = + container_of(sd, struct imgu_v4l2_subdev, subdev); if (sel->pad != IMGU_NODE_IN) return -EINVAL; switch (sel->target) { case V4L2_SEL_TGT_CROP: - try_sel = v4l2_subdev_get_try_crop(sd, sd_state, sel->pad); - r = &imgu_sd->rect.eff; - break; + if (sel->which == V4L2_SUBDEV_FORMAT_TRY) + sel->r = *v4l2_subdev_get_try_crop(sd, sd_state, + sel->pad); + else + sel->r = imgu_sd->rect.eff; + return 0; case V4L2_SEL_TGT_COMPOSE: - try_sel = v4l2_subdev_get_try_compose(sd, sd_state, sel->pad); - r = &imgu_sd->rect.bds; - break; + if (sel->which == V4L2_SUBDEV_FORMAT_TRY) + sel->r = *v4l2_subdev_get_try_compose(sd, sd_state, + sel->pad); + else + sel->r = imgu_sd->rect.bds; + return 0; default: return -EINVAL; } - - if (sel->which == V4L2_SUBDEV_FORMAT_TRY) - sel->r = *try_sel; - else - sel->r = *r; - - return 0; } static int imgu_subdev_set_selection(struct v4l2_subdev *sd, @@ -486,7 +483,7 @@ static int imgu_vb2_start_streaming(struct vb2_queue *vq, unsigned int count) pipe = node->pipe; imgu_pipe = &imgu->imgu_pipe[pipe]; atomic_set(&node->sequence, 0); - r = media_pipeline_start(&node->vdev.entity, &imgu_pipe->pipeline); + r = video_device_pipeline_start(&node->vdev, &imgu_pipe->pipeline); if (r < 0) goto fail_return_bufs; @@ -511,7 +508,7 @@ static int imgu_vb2_start_streaming(struct vb2_queue *vq, unsigned int count) return 0; fail_stop_pipeline: - media_pipeline_stop(&node->vdev.entity); + video_device_pipeline_stop(&node->vdev); fail_return_bufs: imgu_return_all_buffers(imgu, node, VB2_BUF_STATE_QUEUED); @@ -551,7 +548,7 @@ static void imgu_vb2_stop_streaming(struct vb2_queue *vq) imgu_return_all_buffers(imgu, node, VB2_BUF_STATE_ERROR); mutex_unlock(&imgu->streaming_lock); - media_pipeline_stop(&node->vdev.entity); + video_device_pipeline_stop(&node->vdev); } /******************** v4l2_ioctl_ops ********************/ diff --git a/drivers/staging/media/meson/vdec/vdec.c b/drivers/staging/media/meson/vdec/vdec.c index 8549d95be0f2..52f224d8def1 100644 --- a/drivers/staging/media/meson/vdec/vdec.c +++ b/drivers/staging/media/meson/vdec/vdec.c @@ -1102,6 +1102,7 @@ static int vdec_probe(struct platform_device 
*pdev) err_vdev_release: video_device_release(vdev); + v4l2_device_unregister(&core->v4l2_dev); return ret; } @@ -1110,6 +1111,7 @@ static int vdec_remove(struct platform_device *pdev) struct amvdec_core *core = platform_get_drvdata(pdev); video_unregister_device(core->vdev_dec); + v4l2_device_unregister(&core->v4l2_dev); return 0; } diff --git a/drivers/staging/media/omap4iss/iss.c b/drivers/staging/media/omap4iss/iss.c index 28aacda0f5a7..fa2a36d829d3 100644 --- a/drivers/staging/media/omap4iss/iss.c +++ b/drivers/staging/media/omap4iss/iss.c @@ -548,10 +548,8 @@ static int iss_pipeline_is_last(struct media_entity *me) struct iss_pipeline *pipe; struct media_pad *pad; - if (!me->pipe) - return 0; pipe = to_iss_pipeline(me); - if (pipe->stream_state == ISS_PIPELINE_STREAM_STOPPED) + if (!pipe || pipe->stream_state == ISS_PIPELINE_STREAM_STOPPED) return 0; pad = media_pad_remote_pad_first(&pipe->output->pad); return pad->entity == me; diff --git a/drivers/staging/media/omap4iss/iss_video.c b/drivers/staging/media/omap4iss/iss_video.c index 842509dcfedf..60f3d84be828 100644 --- a/drivers/staging/media/omap4iss/iss_video.c +++ b/drivers/staging/media/omap4iss/iss_video.c @@ -870,8 +870,7 @@ iss_video_streamon(struct file *file, void *fh, enum v4l2_buf_type type) * Start streaming on the pipeline. No link touching an entity in the * pipeline can be activated or deactivated once streaming is started. */ - pipe = entity->pipe - ? to_iss_pipeline(entity) : &video->pipe; + pipe = to_iss_pipeline(&video->video.entity) ? : &video->pipe; pipe->external = NULL; pipe->external_rate = 0; pipe->external_bpp = 0; @@ -887,7 +886,7 @@ iss_video_streamon(struct file *file, void *fh, enum v4l2_buf_type type) if (video->iss->pdata->set_constraints) video->iss->pdata->set_constraints(video->iss, true); - ret = media_pipeline_start(entity, &pipe->pipe); + ret = video_device_pipeline_start(&video->video, &pipe->pipe); if (ret < 0) goto err_media_pipeline_start; @@ -978,7 +977,7 @@ iss_video_streamon(struct file *file, void *fh, enum v4l2_buf_type type) err_omap4iss_set_stream: vb2_streamoff(&vfh->queue, type); err_iss_video_check_format: - media_pipeline_stop(&video->video.entity); + video_device_pipeline_stop(&video->video); err_media_pipeline_start: if (video->iss->pdata->set_constraints) video->iss->pdata->set_constraints(video->iss, false); @@ -1032,7 +1031,7 @@ iss_video_streamoff(struct file *file, void *fh, enum v4l2_buf_type type) if (video->iss->pdata->set_constraints) video->iss->pdata->set_constraints(video->iss, false); - media_pipeline_stop(&video->video.entity); + video_device_pipeline_stop(&video->video); done: mutex_unlock(&video->stream_lock); diff --git a/drivers/staging/media/omap4iss/iss_video.h b/drivers/staging/media/omap4iss/iss_video.h index 526281bf0051..ca2d5edb6261 100644 --- a/drivers/staging/media/omap4iss/iss_video.h +++ b/drivers/staging/media/omap4iss/iss_video.h @@ -90,8 +90,15 @@ struct iss_pipeline { int external_bpp; }; -#define to_iss_pipeline(__e) \ - container_of((__e)->pipe, struct iss_pipeline, pipe) +static inline struct iss_pipeline *to_iss_pipeline(struct media_entity *entity) +{ + struct media_pipeline *pipe = media_entity_pipeline(entity); + + if (!pipe) + return NULL; + + return container_of(pipe, struct iss_pipeline, pipe); +} static inline int iss_pipeline_ready(struct iss_pipeline *pipe) { diff --git a/drivers/staging/media/sunxi/cedrus/Kconfig b/drivers/staging/media/sunxi/cedrus/Kconfig index 21c13f9b6e33..621944f9907a 100644 --- 
a/drivers/staging/media/sunxi/cedrus/Kconfig +++ b/drivers/staging/media/sunxi/cedrus/Kconfig @@ -2,6 +2,7 @@ config VIDEO_SUNXI_CEDRUS tristate "Allwinner Cedrus VPU driver" depends on VIDEO_DEV + depends on RESET_CONTROLLER depends on HAS_DMA depends on OF select MEDIA_CONTROLLER diff --git a/drivers/staging/media/tegra-video/tegra210.c b/drivers/staging/media/tegra-video/tegra210.c index f10a041e3e6c..d58370a84737 100644 --- a/drivers/staging/media/tegra-video/tegra210.c +++ b/drivers/staging/media/tegra-video/tegra210.c @@ -547,7 +547,7 @@ static int tegra210_vi_start_streaming(struct vb2_queue *vq, u32 count) VI_INCR_SYNCPT_NO_STALL); /* start the pipeline */ - ret = media_pipeline_start(&chan->video.entity, pipe); + ret = video_device_pipeline_start(&chan->video, pipe); if (ret < 0) goto error_pipeline_start; @@ -595,7 +595,7 @@ error_kthread_done: error_kthread_start: tegra_channel_set_stream(chan, false); error_set_stream: - media_pipeline_stop(&chan->video.entity); + video_device_pipeline_stop(&chan->video); error_pipeline_start: tegra_channel_release_buffers(chan, VB2_BUF_STATE_QUEUED); return ret; @@ -617,7 +617,7 @@ static void tegra210_vi_stop_streaming(struct vb2_queue *vq) tegra_channel_release_buffers(chan, VB2_BUF_STATE_ERROR); tegra_channel_set_stream(chan, false); - media_pipeline_stop(&chan->video.entity); + video_device_pipeline_stop(&chan->video); } /* diff --git a/drivers/staging/rtl8192e/rtllib_softmac_wx.c b/drivers/staging/rtl8192e/rtllib_softmac_wx.c index f9589c5b62ba..1e5ad3b476ef 100644 --- a/drivers/staging/rtl8192e/rtllib_softmac_wx.c +++ b/drivers/staging/rtl8192e/rtllib_softmac_wx.c @@ -439,7 +439,7 @@ int rtllib_wx_set_essid(struct rtllib_device *ieee, union iwreq_data *wrqu, char *extra) { - int ret = 0, len, i; + int ret = 0, len; short proto_started; unsigned long flags; @@ -455,13 +455,6 @@ int rtllib_wx_set_essid(struct rtllib_device *ieee, goto out; } - for (i = 0; i < len; i++) { - if (extra[i] < 0) { - ret = -1; - goto out; - } - } - if (proto_started) rtllib_stop_protocol(ieee, true); diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c index 4407b56aa6d1..139031ccb700 100644 --- a/drivers/target/loopback/tcm_loop.c +++ b/drivers/target/loopback/tcm_loop.c @@ -397,6 +397,7 @@ static int tcm_loop_setup_hba_bus(struct tcm_loop_hba *tl_hba, int tcm_loop_host ret = device_register(&tl_hba->dev); if (ret) { pr_err("device_register() failed for tl_hba->dev: %d\n", ret); + put_device(&tl_hba->dev); return -ENODEV; } @@ -1073,7 +1074,7 @@ check_len: */ ret = tcm_loop_setup_hba_bus(tl_hba, tcm_loop_hba_no_cnt); if (ret) - goto out; + return ERR_PTR(ret); sh = tl_hba->sh; tcm_loop_hba_no_cnt++; diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c index b7f16ee8aa0e..cb4f7cc02f8f 100644 --- a/drivers/target/target_core_device.c +++ b/drivers/target/target_core_device.c @@ -284,6 +284,25 @@ void target_pr_kref_release(struct kref *kref) complete(&deve->pr_comp); } +/* + * Establish UA condition on SCSI device - all LUNs + */ +void target_dev_ua_allocate(struct se_device *dev, u8 asc, u8 ascq) +{ + struct se_dev_entry *se_deve; + struct se_lun *lun; + + spin_lock(&dev->se_port_lock); + list_for_each_entry(lun, &dev->dev_sep_list, lun_dev_link) { + + spin_lock(&lun->lun_deve_lock); + list_for_each_entry(se_deve, &lun->lun_deve_list, lun_link) + core_scsi3_ua_allocate(se_deve, asc, ascq); + spin_unlock(&lun->lun_deve_lock); + } + spin_unlock(&dev->se_port_lock); +} + static void 
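The ipu3, omap4iss and tegra-video hunks above all move from media_pipeline_start(&vdev.entity, ...) to the new video-device wrappers, which resolve the entity and its pads from the video node itself. A minimal sketch of the helpers inside vb2 streaming callbacks; struct my_dev, my_start_streaming() and my_stop_streaming() are hypothetical:

#include <media/media-entity.h>
#include <media/v4l2-dev.h>
#include <media/videobuf2-v4l2.h>

struct my_dev {
	struct video_device vdev;
	struct media_pipeline pipe;
};

/* Claim the pipeline for the video node before streaming starts. */
static int my_start_streaming(struct vb2_queue *vq, unsigned int count)
{
	struct my_dev *dev = vb2_get_drv_priv(vq);

	return video_device_pipeline_start(&dev->vdev, &dev->pipe);
}

static void my_stop_streaming(struct vb2_queue *vq)
{
	struct my_dev *dev = vb2_get_drv_priv(vq);

	video_device_pipeline_stop(&dev->vdev);
}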
target_luns_data_has_changed(struct se_node_acl *nacl, struct se_dev_entry *new, bool skip_new) diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c index 8351c974cee3..d9266cf558dc 100644 --- a/drivers/target/target_core_iblock.c +++ b/drivers/target/target_core_iblock.c @@ -230,14 +230,12 @@ static void iblock_unplug_device(struct se_dev_plug *se_plug) clear_bit(IBD_PLUGF_PLUGGED, &ib_dev_plug->flags); } -static unsigned long long iblock_emulate_read_cap_with_block_size( - struct se_device *dev, - struct block_device *bd, - struct request_queue *q) +static sector_t iblock_get_blocks(struct se_device *dev) { - u32 block_size = bdev_logical_block_size(bd); + struct iblock_dev *ib_dev = IBLOCK_DEV(dev); + u32 block_size = bdev_logical_block_size(ib_dev->ibd_bd); unsigned long long blocks_long = - div_u64(bdev_nr_bytes(bd), block_size) - 1; + div_u64(bdev_nr_bytes(ib_dev->ibd_bd), block_size) - 1; if (block_size == dev->dev_attrib.block_size) return blocks_long; @@ -829,15 +827,6 @@ fail: return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; } -static sector_t iblock_get_blocks(struct se_device *dev) -{ - struct iblock_dev *ib_dev = IBLOCK_DEV(dev); - struct block_device *bd = ib_dev->ibd_bd; - struct request_queue *q = bdev_get_queue(bd); - - return iblock_emulate_read_cap_with_block_size(dev, bd, q); -} - static sector_t iblock_get_alignment_offset_lbas(struct se_device *dev) { struct iblock_dev *ib_dev = IBLOCK_DEV(dev); diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h index 30fcf69e1a1d..38a6d08f75b3 100644 --- a/drivers/target/target_core_internal.h +++ b/drivers/target/target_core_internal.h @@ -89,6 +89,7 @@ int target_configure_device(struct se_device *dev); void target_free_device(struct se_device *); int target_for_each_device(int (*fn)(struct se_device *dev, void *data), void *data); +void target_dev_ua_allocate(struct se_device *dev, u8 asc, u8 ascq); /* target_core_configfs.c */ extern struct configfs_item_operations target_core_dev_item_ops; diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c index a1d67554709f..1493b1d01194 100644 --- a/drivers/target/target_core_pr.c +++ b/drivers/target/target_core_pr.c @@ -2956,13 +2956,28 @@ core_scsi3_pro_preempt(struct se_cmd *cmd, int type, int scope, u64 res_key, __core_scsi3_complete_pro_preempt(dev, pr_reg_n, (preempt_type == PREEMPT_AND_ABORT) ? &preempt_and_abort_list : NULL, type, scope, preempt_type); - - if (preempt_type == PREEMPT_AND_ABORT) - core_scsi3_release_preempt_and_abort( - &preempt_and_abort_list, pr_reg_n); } + spin_unlock(&dev->dev_reservation_lock); + /* + * SPC-4 5.12.11.2.6 Preempting and aborting + * The actions described in this subclause shall be performed + * for all I_T nexuses that are registered with the non-zero + * SERVICE ACTION RESERVATION KEY value, without regard for + * whether the preempted I_T nexuses hold the persistent + * reservation. If the SERVICE ACTION RESERVATION KEY field is + * set to zero and an all registrants persistent reservation is + * present, the device server shall abort all commands for all + * registered I_T nexuses. 
+ */ + if (preempt_type == PREEMPT_AND_ABORT) { + core_tmr_lun_reset(dev, NULL, &preempt_and_abort_list, + cmd); + core_scsi3_release_preempt_and_abort( + &preempt_and_abort_list, pr_reg_n); + } + if (pr_tmpl->pr_aptpl_active) core_scsi3_update_and_write_aptpl(cmd->se_dev, true); @@ -3022,7 +3037,7 @@ core_scsi3_pro_preempt(struct se_cmd *cmd, int type, int scope, u64 res_key, if (calling_it_nexus) continue; - if (pr_reg->pr_res_key != sa_res_key) + if (sa_res_key && pr_reg->pr_res_key != sa_res_key) continue; pr_reg_nacl = pr_reg->pr_reg_nacl; @@ -3425,8 +3440,6 @@ after_iport_check: * transport protocols where port names are not required; * d) Register the reservation key specified in the SERVICE ACTION * RESERVATION KEY field; - * e) Retain the reservation key specified in the SERVICE ACTION - * RESERVATION KEY field and associated information; * * Also, It is not an error for a REGISTER AND MOVE service action to * register an I_T nexus that is already registered with the same @@ -3448,6 +3461,12 @@ after_iport_check: dest_pr_reg = __core_scsi3_locate_pr_reg(dev, dest_node_acl, iport_ptr); new_reg = 1; + } else { + /* + * e) Retain the reservation key specified in the SERVICE ACTION + * RESERVATION KEY field and associated information; + */ + dest_pr_reg->pr_res_key = sa_res_key; } /* * f) Release the persistent reservation for the persistent reservation diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index 7838dc20f713..5926316252eb 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -3531,8 +3531,7 @@ static void target_tmr_work(struct work_struct *work) tmr->response = (!ret) ? TMR_FUNCTION_COMPLETE : TMR_FUNCTION_REJECTED; if (tmr->response == TMR_FUNCTION_COMPLETE) { - target_ua_allocate_lun(cmd->se_sess->se_node_acl, - cmd->orig_fe_lun, 0x29, + target_dev_ua_allocate(dev, 0x29, ASCQ_29H_BUS_DEVICE_RESET_FUNCTION_OCCURRED); } break; diff --git a/drivers/thermal/intel/intel_powerclamp.c b/drivers/thermal/intel/intel_powerclamp.c index 2a5570b9799a..b80e25ec1261 100644 --- a/drivers/thermal/intel/intel_powerclamp.c +++ b/drivers/thermal/intel/intel_powerclamp.c @@ -516,11 +516,7 @@ static int start_power_clamp(void) cpus_read_lock(); /* prefer BSP */ - control_cpu = 0; - if (!cpu_online(control_cpu)) { - control_cpu = get_cpu(); - put_cpu(); - } + control_cpu = cpumask_first(cpu_online_mask); clamping = true; schedule_delayed_work(&poll_pkg_cstate_work, 0); diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c index 5e516f5cac5a..b6e0cc4571ea 100644 --- a/drivers/tty/n_gsm.c +++ b/drivers/tty/n_gsm.c @@ -264,7 +264,7 @@ struct gsm_mux { bool constipated; /* Asked by remote to shut up */ bool has_devices; /* Devices were registered */ - struct mutex tx_mutex; + spinlock_t tx_lock; unsigned int tx_bytes; /* TX data outstanding */ #define TX_THRESH_HI 8192 #define TX_THRESH_LO 2048 @@ -272,7 +272,7 @@ struct gsm_mux { struct list_head tx_data_list; /* Pending data packets */ /* Control messages */ - struct delayed_work kick_timeout; /* Kick TX queuing on timeout */ + struct timer_list kick_timer; /* Kick TX queuing on timeout */ struct timer_list t2_timer; /* Retransmit timer for commands */ int cretries; /* Command retry counter */ struct gsm_control *pending_cmd;/* Our current pending command */ @@ -700,6 +700,7 @@ static int gsm_send(struct gsm_mux *gsm, int addr, int cr, int control) struct gsm_msg *msg; u8 *dp; int ocr; + unsigned long flags; msg = gsm_data_alloc(gsm, addr, 0, control); 
if (!msg) @@ -721,10 +722,10 @@ static int gsm_send(struct gsm_mux *gsm, int addr, int cr, int control) gsm_print_packet("Q->", addr, cr, control, NULL, 0); - mutex_lock(&gsm->tx_mutex); + spin_lock_irqsave(&gsm->tx_lock, flags); list_add_tail(&msg->list, &gsm->tx_ctrl_list); gsm->tx_bytes += msg->len; - mutex_unlock(&gsm->tx_mutex); + spin_unlock_irqrestore(&gsm->tx_lock, flags); gsmld_write_trigger(gsm); return 0; @@ -749,7 +750,7 @@ static void gsm_dlci_clear_queues(struct gsm_mux *gsm, struct gsm_dlci *dlci) spin_unlock_irqrestore(&dlci->lock, flags); /* Clear data packets in MUX write queue */ - mutex_lock(&gsm->tx_mutex); + spin_lock_irqsave(&gsm->tx_lock, flags); list_for_each_entry_safe(msg, nmsg, &gsm->tx_data_list, list) { if (msg->addr != addr) continue; @@ -757,7 +758,7 @@ static void gsm_dlci_clear_queues(struct gsm_mux *gsm, struct gsm_dlci *dlci) list_del(&msg->list); kfree(msg); } - mutex_unlock(&gsm->tx_mutex); + spin_unlock_irqrestore(&gsm->tx_lock, flags); } /** @@ -1028,7 +1029,7 @@ static void __gsm_data_queue(struct gsm_dlci *dlci, struct gsm_msg *msg) gsm->tx_bytes += msg->len; gsmld_write_trigger(gsm); - schedule_delayed_work(&gsm->kick_timeout, 10 * gsm->t1 * HZ / 100); + mod_timer(&gsm->kick_timer, jiffies + 10 * gsm->t1 * HZ / 100); } /** @@ -1043,9 +1044,10 @@ static void __gsm_data_queue(struct gsm_dlci *dlci, struct gsm_msg *msg) static void gsm_data_queue(struct gsm_dlci *dlci, struct gsm_msg *msg) { - mutex_lock(&dlci->gsm->tx_mutex); + unsigned long flags; + spin_lock_irqsave(&dlci->gsm->tx_lock, flags); __gsm_data_queue(dlci, msg); - mutex_unlock(&dlci->gsm->tx_mutex); + spin_unlock_irqrestore(&dlci->gsm->tx_lock, flags); } /** @@ -1057,7 +1059,7 @@ static void gsm_data_queue(struct gsm_dlci *dlci, struct gsm_msg *msg) * is data. Keep to the MRU of the mux. This path handles the usual tty * interface which is a byte stream with optional modem data. * - * Caller must hold the tx_mutex of the mux. + * Caller must hold the tx_lock of the mux. */ static int gsm_dlci_data_output(struct gsm_mux *gsm, struct gsm_dlci *dlci) @@ -1117,7 +1119,7 @@ static int gsm_dlci_data_output(struct gsm_mux *gsm, struct gsm_dlci *dlci) * is data. Keep to the MRU of the mux. This path handles framed data * queued as skbuffs to the DLCI. * - * Caller must hold the tx_mutex of the mux. + * Caller must hold the tx_lock of the mux. */ static int gsm_dlci_data_output_framed(struct gsm_mux *gsm, @@ -1133,7 +1135,7 @@ static int gsm_dlci_data_output_framed(struct gsm_mux *gsm, if (dlci->adaption == 4) overhead = 1; - /* dlci->skb is locked by tx_mutex */ + /* dlci->skb is locked by tx_lock */ if (dlci->skb == NULL) { dlci->skb = skb_dequeue_tail(&dlci->skb_list); if (dlci->skb == NULL) @@ -1187,7 +1189,7 @@ static int gsm_dlci_data_output_framed(struct gsm_mux *gsm, * Push an empty frame in to the transmit queue to update the modem status * bits and to transmit an optional break. * - * Caller must hold the tx_mutex of the mux. + * Caller must hold the tx_lock of the mux. 
*/ static int gsm_dlci_modem_output(struct gsm_mux *gsm, struct gsm_dlci *dlci, @@ -1301,12 +1303,13 @@ static int gsm_dlci_data_sweep(struct gsm_mux *gsm) static void gsm_dlci_data_kick(struct gsm_dlci *dlci) { + unsigned long flags; int sweep; if (dlci->constipated) return; - mutex_lock(&dlci->gsm->tx_mutex); + spin_lock_irqsave(&dlci->gsm->tx_lock, flags); /* If we have nothing running then we need to fire up */ sweep = (dlci->gsm->tx_bytes < TX_THRESH_LO); if (dlci->gsm->tx_bytes == 0) { @@ -1317,7 +1320,7 @@ static void gsm_dlci_data_kick(struct gsm_dlci *dlci) } if (sweep) gsm_dlci_data_sweep(dlci->gsm); - mutex_unlock(&dlci->gsm->tx_mutex); + spin_unlock_irqrestore(&dlci->gsm->tx_lock, flags); } /* @@ -1708,7 +1711,7 @@ static struct gsm_control *gsm_control_send(struct gsm_mux *gsm, unsigned int command, u8 *data, int clen) { struct gsm_control *ctrl = kzalloc(sizeof(struct gsm_control), - GFP_KERNEL); + GFP_ATOMIC); unsigned long flags; if (ctrl == NULL) return NULL; @@ -2019,23 +2022,24 @@ static void gsm_dlci_command(struct gsm_dlci *dlci, const u8 *data, int len) } /** - * gsm_kick_timeout - transmit if possible - * @work: work contained in our gsm object + * gsm_kick_timer - transmit if possible + * @t: timer contained in our gsm object * * Transmit data from DLCIs if the queue is empty. We can't rely on * a tty wakeup except when we filled the pipe so we need to fire off * new data ourselves in other cases. */ -static void gsm_kick_timeout(struct work_struct *work) +static void gsm_kick_timer(struct timer_list *t) { - struct gsm_mux *gsm = container_of(work, struct gsm_mux, kick_timeout.work); + struct gsm_mux *gsm = from_timer(gsm, t, kick_timer); + unsigned long flags; int sent = 0; - mutex_lock(&gsm->tx_mutex); + spin_lock_irqsave(&gsm->tx_lock, flags); /* If we have nothing running then we need to fire up */ if (gsm->tx_bytes < TX_THRESH_LO) sent = gsm_dlci_data_sweep(gsm); - mutex_unlock(&gsm->tx_mutex); + spin_unlock_irqrestore(&gsm->tx_lock, flags); if (sent && debug & DBG_DATA) pr_info("%s TX queue stalled\n", __func__); @@ -2492,7 +2496,7 @@ static void gsm_cleanup_mux(struct gsm_mux *gsm, bool disc) } /* Finish outstanding timers, making sure they are done */ - cancel_delayed_work_sync(&gsm->kick_timeout); + del_timer_sync(&gsm->kick_timer); del_timer_sync(&gsm->t2_timer); /* Finish writing to ldisc */ @@ -2565,7 +2569,6 @@ static void gsm_free_mux(struct gsm_mux *gsm) break; } } - mutex_destroy(&gsm->tx_mutex); mutex_destroy(&gsm->mutex); kfree(gsm->txframe); kfree(gsm->buf); @@ -2637,15 +2640,15 @@ static struct gsm_mux *gsm_alloc_mux(void) } spin_lock_init(&gsm->lock); mutex_init(&gsm->mutex); - mutex_init(&gsm->tx_mutex); kref_init(&gsm->ref); INIT_LIST_HEAD(&gsm->tx_ctrl_list); INIT_LIST_HEAD(&gsm->tx_data_list); - INIT_DELAYED_WORK(&gsm->kick_timeout, gsm_kick_timeout); + timer_setup(&gsm->kick_timer, gsm_kick_timer, 0); timer_setup(&gsm->t2_timer, gsm_control_retransmit, 0); INIT_WORK(&gsm->tx_work, gsmld_write_task); init_waitqueue_head(&gsm->event); spin_lock_init(&gsm->control_lock); + spin_lock_init(&gsm->tx_lock); gsm->t1 = T1; gsm->t2 = T2; @@ -2670,7 +2673,6 @@ static struct gsm_mux *gsm_alloc_mux(void) } spin_unlock(&gsm_mux_lock); if (i == MAX_MUX) { - mutex_destroy(&gsm->tx_mutex); mutex_destroy(&gsm->mutex); kfree(gsm->txframe); kfree(gsm->buf); @@ -2826,16 +2828,17 @@ static void gsmld_write_trigger(struct gsm_mux *gsm) static void gsmld_write_task(struct work_struct *work) { struct gsm_mux *gsm = container_of(work, struct gsm_mux, tx_work); + 
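The n_gsm.c hunks above convert the TX path back from a mutex plus delayed work to a spinlock and a plain timer, so queuing can be kicked from atomic context. A minimal sketch of that timer-plus-spinlock pattern; struct my_mux and its helpers are hypothetical:

#include <linux/jiffies.h>
#include <linux/spinlock.h>
#include <linux/timer.h>

struct my_mux {
	spinlock_t tx_lock;		/* protects tx_bytes and TX queues */
	unsigned int tx_bytes;
	struct timer_list kick_timer;
};

/* Timer callbacks run in softirq context, hence the irqsave locking. */
static void my_kick_timer(struct timer_list *t)
{
	struct my_mux *mux = from_timer(mux, t, kick_timer);
	unsigned long flags;

	spin_lock_irqsave(&mux->tx_lock, flags);
	if (mux->tx_bytes == 0) {
		/* nothing in flight: restart transmission here */
	}
	spin_unlock_irqrestore(&mux->tx_lock, flags);
}

static void my_mux_init(struct my_mux *mux)
{
	spin_lock_init(&mux->tx_lock);
	timer_setup(&mux->kick_timer, my_kick_timer, 0);
}

static void my_mux_kick_later(struct my_mux *mux)
{
	/* re-arm the kick timer roughly 100 ms from now */
	mod_timer(&mux->kick_timer, jiffies + msecs_to_jiffies(100));
}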
unsigned long flags; int i, ret; /* All outstanding control channel and control messages and one data * frame is sent. */ ret = -ENODEV; - mutex_lock(&gsm->tx_mutex); + spin_lock_irqsave(&gsm->tx_lock, flags); if (gsm->tty) ret = gsm_data_kick(gsm); - mutex_unlock(&gsm->tx_mutex); + spin_unlock_irqrestore(&gsm->tx_lock, flags); if (ret >= 0) for (i = 0; i < NUM_DLCI; i++) @@ -3042,6 +3045,7 @@ static ssize_t gsmld_write(struct tty_struct *tty, struct file *file, const unsigned char *buf, size_t nr) { struct gsm_mux *gsm = tty->disc_data; + unsigned long flags; int space; int ret; @@ -3049,13 +3053,13 @@ static ssize_t gsmld_write(struct tty_struct *tty, struct file *file, return -ENODEV; ret = -ENOBUFS; - mutex_lock(&gsm->tx_mutex); + spin_lock_irqsave(&gsm->tx_lock, flags); space = tty_write_room(tty); if (space >= nr) ret = tty->ops->write(tty, buf, nr); else set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags); - mutex_unlock(&gsm->tx_mutex); + spin_unlock_irqrestore(&gsm->tx_lock, flags); return ret; } @@ -3352,13 +3356,14 @@ static struct tty_ldisc_ops tty_ldisc_packet = { static void gsm_modem_upd_via_data(struct gsm_dlci *dlci, u8 brk) { struct gsm_mux *gsm = dlci->gsm; + unsigned long flags; if (dlci->state != DLCI_OPEN || dlci->adaption != 2) return; - mutex_lock(&gsm->tx_mutex); + spin_lock_irqsave(&gsm->tx_lock, flags); gsm_dlci_modem_output(gsm, dlci, brk); - mutex_unlock(&gsm->tx_mutex); + spin_unlock_irqrestore(&gsm->tx_lock, flags); } /** diff --git a/drivers/tty/serial/8250/8250_lpss.c b/drivers/tty/serial/8250/8250_lpss.c index 44cc755b1a29..0e43bdfb7459 100644 --- a/drivers/tty/serial/8250/8250_lpss.c +++ b/drivers/tty/serial/8250/8250_lpss.c @@ -174,6 +174,8 @@ static int ehl_serial_setup(struct lpss8250 *lpss, struct uart_port *port) */ up->dma = dma; + lpss->dma_maxburst = 16; + port->set_termios = dw8250_do_set_termios; return 0; @@ -277,8 +279,13 @@ static int lpss8250_dma_setup(struct lpss8250 *lpss, struct uart_8250_port *port struct dw_dma_slave *rx_param, *tx_param; struct device *dev = port->port.dev; - if (!lpss->dma_param.dma_dev) + if (!lpss->dma_param.dma_dev) { + dma = port->dma; + if (dma) + goto out_configuration_only; + return 0; + } rx_param = devm_kzalloc(dev, sizeof(*rx_param), GFP_KERNEL); if (!rx_param) @@ -289,16 +296,18 @@ static int lpss8250_dma_setup(struct lpss8250 *lpss, struct uart_8250_port *port return -ENOMEM; *rx_param = lpss->dma_param; - dma->rxconf.src_maxburst = lpss->dma_maxburst; - *tx_param = lpss->dma_param; - dma->txconf.dst_maxburst = lpss->dma_maxburst; dma->fn = lpss8250_dma_filter; dma->rx_param = rx_param; dma->tx_param = tx_param; port->dma = dma; + +out_configuration_only: + dma->rxconf.src_maxburst = lpss->dma_maxburst; + dma->txconf.dst_maxburst = lpss->dma_maxburst; + return 0; } diff --git a/drivers/tty/serial/8250/8250_omap.c b/drivers/tty/serial/8250/8250_omap.c index 41b8c6b27136..3f33014022f0 100644 --- a/drivers/tty/serial/8250/8250_omap.c +++ b/drivers/tty/serial/8250/8250_omap.c @@ -157,7 +157,11 @@ static u32 uart_read(struct uart_8250_port *up, u32 reg) return readl(up->port.membase + (reg << up->port.regshift)); } -static void omap8250_set_mctrl(struct uart_port *port, unsigned int mctrl) +/* + * Called on runtime PM resume path from omap8250_restore_regs(), and + * omap8250_set_mctrl(). 
+ */ +static void __omap8250_set_mctrl(struct uart_port *port, unsigned int mctrl) { struct uart_8250_port *up = up_to_u8250p(port); struct omap8250_priv *priv = up->port.private_data; @@ -181,6 +185,20 @@ static void omap8250_set_mctrl(struct uart_port *port, unsigned int mctrl) } } +static void omap8250_set_mctrl(struct uart_port *port, unsigned int mctrl) +{ + int err; + + err = pm_runtime_resume_and_get(port->dev); + if (err) + return; + + __omap8250_set_mctrl(port, mctrl); + + pm_runtime_mark_last_busy(port->dev); + pm_runtime_put_autosuspend(port->dev); +} + /* * Work Around for Errata i202 (2430, 3430, 3630, 4430 and 4460) * The access to uart register after MDR1 Access @@ -193,27 +211,10 @@ static void omap8250_set_mctrl(struct uart_port *port, unsigned int mctrl) static void omap_8250_mdr1_errataset(struct uart_8250_port *up, struct omap8250_priv *priv) { - u8 timeout = 255; - serial_out(up, UART_OMAP_MDR1, priv->mdr1); udelay(2); serial_out(up, UART_FCR, up->fcr | UART_FCR_CLEAR_XMIT | UART_FCR_CLEAR_RCVR); - /* - * Wait for FIFO to empty: when empty, RX_FIFO_E bit is 0 and - * TX_FIFO_E bit is 1. - */ - while (UART_LSR_THRE != (serial_in(up, UART_LSR) & - (UART_LSR_THRE | UART_LSR_DR))) { - timeout--; - if (!timeout) { - /* Should *never* happen. we warn and carry on */ - dev_crit(up->port.dev, "Errata i202: timedout %x\n", - serial_in(up, UART_LSR)); - break; - } - udelay(1); - } } static void omap_8250_get_divisor(struct uart_port *port, unsigned int baud, @@ -292,6 +293,7 @@ static void omap8250_restore_regs(struct uart_8250_port *up) { struct omap8250_priv *priv = up->port.private_data; struct uart_8250_dma *dma = up->dma; + u8 mcr = serial8250_in_MCR(up); if (dma && dma->tx_running) { /* @@ -308,7 +310,7 @@ static void omap8250_restore_regs(struct uart_8250_port *up) serial_out(up, UART_EFR, UART_EFR_ECB); serial_out(up, UART_LCR, UART_LCR_CONF_MODE_A); - serial8250_out_MCR(up, UART_MCR_TCRTLR); + serial8250_out_MCR(up, mcr | UART_MCR_TCRTLR); serial_out(up, UART_FCR, up->fcr); omap8250_update_scr(up, priv); @@ -324,7 +326,8 @@ static void omap8250_restore_regs(struct uart_8250_port *up) serial_out(up, UART_LCR, 0); /* drop TCR + TLR access, we setup XON/XOFF later */ - serial8250_out_MCR(up, up->mcr); + serial8250_out_MCR(up, mcr); + serial_out(up, UART_IER, up->ier); serial_out(up, UART_LCR, UART_LCR_CONF_MODE_B); @@ -341,7 +344,7 @@ static void omap8250_restore_regs(struct uart_8250_port *up) omap8250_update_mdr1(up, priv); - up->port.ops->set_mctrl(&up->port, up->port.mctrl); + __omap8250_set_mctrl(&up->port, up->port.mctrl); if (up->port.rs485.flags & SER_RS485_ENABLED) serial8250_em485_stop_tx(up); @@ -669,7 +672,6 @@ static int omap_8250_startup(struct uart_port *port) pm_runtime_get_sync(port->dev); - up->mcr = 0; serial_out(up, UART_FCR, UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT); serial_out(up, UART_LCR, UART_LCR_WLEN8); @@ -1458,9 +1460,15 @@ err: static int omap8250_remove(struct platform_device *pdev) { struct omap8250_priv *priv = platform_get_drvdata(pdev); + int err; + + err = pm_runtime_resume_and_get(&pdev->dev); + if (err) + return err; pm_runtime_dont_use_autosuspend(&pdev->dev); pm_runtime_put_sync(&pdev->dev); + flush_work(&priv->qos_work); pm_runtime_disable(&pdev->dev); serial8250_unregister_port(priv->line); cpu_latency_qos_remove_request(&priv->pm_qos_request); diff --git a/drivers/tty/serial/8250/8250_gsc.c b/drivers/tty/serial/8250/8250_parisc.c index 948d0a1c6ae8..948d0a1c6ae8 100644 --- a/drivers/tty/serial/8250/8250_gsc.c +++ 
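The 8250_omap hunks above split set_mctrl into a raw helper and a wrapper that resumes the device first, since the callback can be invoked while the port is runtime-suspended. A minimal sketch of that wrapper pattern; my_set_mctrl() is hypothetical:

#include <linux/pm_runtime.h>
#include <linux/serial_core.h>

static void my_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
	/* Only touch registers if the device could actually be resumed. */
	if (pm_runtime_resume_and_get(port->dev))
		return;

	/* ... write the modem-control registers here ... */

	pm_runtime_mark_last_busy(port->dev);
	pm_runtime_put_autosuspend(port->dev);
}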
b/drivers/tty/serial/8250/8250_parisc.c diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c index fe8662cd9402..388172289627 100644 --- a/drivers/tty/serial/8250/8250_port.c +++ b/drivers/tty/serial/8250/8250_port.c @@ -1897,10 +1897,13 @@ EXPORT_SYMBOL_GPL(serial8250_modem_status); static bool handle_rx_dma(struct uart_8250_port *up, unsigned int iir) { switch (iir & 0x3f) { - case UART_IIR_RX_TIMEOUT: - serial8250_rx_dma_flush(up); + case UART_IIR_RDI: + if (!up->dma->rx_running) + break; fallthrough; case UART_IIR_RLSI: + case UART_IIR_RX_TIMEOUT: + serial8250_rx_dma_flush(up); return true; } return up->dma->rx_dma(up); diff --git a/drivers/tty/serial/8250/Kconfig b/drivers/tty/serial/8250/Kconfig index d0b49e15fbf5..b0f62345bc84 100644 --- a/drivers/tty/serial/8250/Kconfig +++ b/drivers/tty/serial/8250/Kconfig @@ -116,9 +116,9 @@ config SERIAL_8250_CONSOLE If unsure, say N. -config SERIAL_8250_GSC +config SERIAL_8250_PARISC tristate - depends on SERIAL_8250 && GSC + depends on SERIAL_8250 && PARISC default SERIAL_8250 config SERIAL_8250_DMA diff --git a/drivers/tty/serial/8250/Makefile b/drivers/tty/serial/8250/Makefile index bee908f99ea0..1615bfdde2a0 100644 --- a/drivers/tty/serial/8250/Makefile +++ b/drivers/tty/serial/8250/Makefile @@ -12,7 +12,7 @@ obj-$(CONFIG_SERIAL_8250) += 8250.o 8250_base.o 8250_base-$(CONFIG_SERIAL_8250_DMA) += 8250_dma.o 8250_base-$(CONFIG_SERIAL_8250_DWLIB) += 8250_dwlib.o 8250_base-$(CONFIG_SERIAL_8250_FINTEK) += 8250_fintek.o -obj-$(CONFIG_SERIAL_8250_GSC) += 8250_gsc.o +obj-$(CONFIG_SERIAL_8250_PARISC) += 8250_parisc.o obj-$(CONFIG_SERIAL_8250_PCI) += 8250_pci.o obj-$(CONFIG_SERIAL_8250_EXAR) += 8250_exar.o obj-$(CONFIG_SERIAL_8250_HP300) += 8250_hp300.o diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c index 67fa113f77d4..888e01fbd9c5 100644 --- a/drivers/tty/serial/fsl_lpuart.c +++ b/drivers/tty/serial/fsl_lpuart.c @@ -12,6 +12,7 @@ #include <linux/dmaengine.h> #include <linux/dmapool.h> #include <linux/io.h> +#include <linux/iopoll.h> #include <linux/irq.h> #include <linux/module.h> #include <linux/of.h> @@ -404,33 +405,6 @@ static unsigned int lpuart_get_baud_clk_rate(struct lpuart_port *sport) #define lpuart_enable_clks(x) __lpuart_enable_clks(x, true) #define lpuart_disable_clks(x) __lpuart_enable_clks(x, false) -static int lpuart_global_reset(struct lpuart_port *sport) -{ - struct uart_port *port = &sport->port; - void __iomem *global_addr; - int ret; - - if (uart_console(port)) - return 0; - - ret = clk_prepare_enable(sport->ipg_clk); - if (ret) { - dev_err(sport->port.dev, "failed to enable uart ipg clk: %d\n", ret); - return ret; - } - - if (is_imx7ulp_lpuart(sport) || is_imx8qxp_lpuart(sport)) { - global_addr = port->membase + UART_GLOBAL - IMX_REG_OFF; - writel(UART_GLOBAL_RST, global_addr); - usleep_range(GLOBAL_RST_MIN_US, GLOBAL_RST_MAX_US); - writel(0, global_addr); - usleep_range(GLOBAL_RST_MIN_US, GLOBAL_RST_MAX_US); - } - - clk_disable_unprepare(sport->ipg_clk); - return 0; -} - static void lpuart_stop_tx(struct uart_port *port) { unsigned char temp; @@ -2636,6 +2610,54 @@ static const struct serial_rs485 lpuart_rs485_supported = { /* delay_rts_* and RX_DURING_TX are not supported */ }; +static int lpuart_global_reset(struct lpuart_port *sport) +{ + struct uart_port *port = &sport->port; + void __iomem *global_addr; + unsigned long ctrl, bd; + unsigned int val = 0; + int ret; + + ret = clk_prepare_enable(sport->ipg_clk); + if (ret) { + dev_err(sport->port.dev, "failed to 
enable uart ipg clk: %d\n", ret); + return ret; + } + + if (is_imx7ulp_lpuart(sport) || is_imx8qxp_lpuart(sport)) { + /* + * If the transmitter is used by earlycon, wait for transmit engine to + * complete and then reset. + */ + ctrl = lpuart32_read(port, UARTCTRL); + if (ctrl & UARTCTRL_TE) { + bd = lpuart32_read(&sport->port, UARTBAUD); + if (read_poll_timeout(lpuart32_tx_empty, val, val, 1, 100000, false, + port)) { + dev_warn(sport->port.dev, + "timeout waiting for transmit engine to complete\n"); + clk_disable_unprepare(sport->ipg_clk); + return 0; + } + } + + global_addr = port->membase + UART_GLOBAL - IMX_REG_OFF; + writel(UART_GLOBAL_RST, global_addr); + usleep_range(GLOBAL_RST_MIN_US, GLOBAL_RST_MAX_US); + writel(0, global_addr); + usleep_range(GLOBAL_RST_MIN_US, GLOBAL_RST_MAX_US); + + /* Recover the transmitter for earlycon. */ + if (ctrl & UARTCTRL_TE) { + lpuart32_write(port, bd, UARTBAUD); + lpuart32_write(port, ctrl, UARTCTRL); + } + } + + clk_disable_unprepare(sport->ipg_clk); + return 0; +} + static int lpuart_probe(struct platform_device *pdev) { const struct lpuart_soc_data *sdata = of_device_get_match_data(&pdev->dev); diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c index 05b432dc7a85..aadda66405b4 100644 --- a/drivers/tty/serial/imx.c +++ b/drivers/tty/serial/imx.c @@ -2594,6 +2594,7 @@ static const struct dev_pm_ops imx_uart_pm_ops = { .suspend_noirq = imx_uart_suspend_noirq, .resume_noirq = imx_uart_resume_noirq, .freeze_noirq = imx_uart_suspend_noirq, + .thaw_noirq = imx_uart_resume_noirq, .restore_noirq = imx_uart_resume_noirq, .suspend = imx_uart_suspend, .resume = imx_uart_resume, diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c index 7256e6c43ca6..b1f59a5fe632 100644 --- a/drivers/ufs/core/ufshcd.c +++ b/drivers/ufs/core/ufshcd.c @@ -772,7 +772,7 @@ static inline void ufshcd_utrl_clear(struct ufs_hba *hba, u32 mask) } /** - * ufshcd_utmrl_clear - Clear a bit in UTRMLCLR register + * ufshcd_utmrl_clear - Clear a bit in UTMRLCLR register * @hba: per adapter instance * @pos: position of the bit to be cleared */ @@ -3098,7 +3098,7 @@ static int ufshcd_query_flag_retry(struct ufs_hba *hba, if (ret) dev_err(hba->dev, - "%s: query attribute, opcode %d, idn %d, failed with error %d after %d retries\n", + "%s: query flag, opcode %d, idn %d, failed with error %d after %d retries\n", __func__, opcode, idn, ret, retries); return ret; } diff --git a/drivers/ufs/core/ufshpb.c b/drivers/ufs/core/ufshpb.c index 3d69a81c5b17..b7f412d0f301 100644 --- a/drivers/ufs/core/ufshpb.c +++ b/drivers/ufs/core/ufshpb.c @@ -383,7 +383,7 @@ int ufshpb_prep(struct ufs_hba *hba, struct ufshcd_lrb *lrbp) rgn = hpb->rgn_tbl + rgn_idx; srgn = rgn->srgn_tbl + srgn_idx; - /* If command type is WRITE or DISCARD, set bitmap as drity */ + /* If command type is WRITE or DISCARD, set bitmap as dirty */ if (ufshpb_is_write_or_discard(cmd)) { ufshpb_iterate_rgn(hpb, rgn_idx, srgn_idx, srgn_offset, transfer_len, true); @@ -616,7 +616,7 @@ static void ufshpb_activate_subregion(struct ufshpb_lu *hpb, static enum rq_end_io_ret ufshpb_umap_req_compl_fn(struct request *req, blk_status_t error) { - struct ufshpb_req *umap_req = (struct ufshpb_req *)req->end_io_data; + struct ufshpb_req *umap_req = req->end_io_data; ufshpb_put_req(umap_req->hpb, umap_req); return RQ_END_IO_NONE; @@ -625,7 +625,7 @@ static enum rq_end_io_ret ufshpb_umap_req_compl_fn(struct request *req, static enum rq_end_io_ret ufshpb_map_req_compl_fn(struct request *req, blk_status_t error) { - struct 
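The relocated lpuart_global_reset() above uses read_poll_timeout() to let the transmit engine drain before asserting the global reset. A minimal sketch of that polling helper; MY_STAT_READY, my_read_stat() and my_wait_ready() are hypothetical:

#include <linux/bits.h>
#include <linux/io.h>
#include <linux/iopoll.h>

#define MY_STAT_READY	BIT(0)	/* hypothetical status bit */

static u32 my_read_stat(void __iomem *base)
{
	return readl(base);
}

/* Poll every 1 us, give up after 100 ms; returns 0 or -ETIMEDOUT. */
static int my_wait_ready(void __iomem *base)
{
	u32 val;

	return read_poll_timeout(my_read_stat, val, val & MY_STAT_READY,
				 1, 100000, false, base);
}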
ufshpb_req *map_req = (struct ufshpb_req *) req->end_io_data; + struct ufshpb_req *map_req = req->end_io_data; struct ufshpb_lu *hpb = map_req->hpb; struct ufshpb_subregion *srgn; unsigned long flags; diff --git a/drivers/ufs/host/ufs-qcom-ice.c b/drivers/ufs/host/ufs-qcom-ice.c index 745e48ec598f..62387ccd5b30 100644 --- a/drivers/ufs/host/ufs-qcom-ice.c +++ b/drivers/ufs/host/ufs-qcom-ice.c @@ -118,7 +118,6 @@ int ufs_qcom_ice_init(struct ufs_qcom_host *host) host->ice_mmio = devm_ioremap_resource(dev, res); if (IS_ERR(host->ice_mmio)) { err = PTR_ERR(host->ice_mmio); - dev_err(dev, "Failed to map ICE registers; err=%d\n", err); return err; } diff --git a/drivers/usb/cdns3/host.c b/drivers/usb/cdns3/host.c index 9643b905e2d8..6164fc4c96a4 100644 --- a/drivers/usb/cdns3/host.c +++ b/drivers/usb/cdns3/host.c @@ -24,11 +24,37 @@ #define CFG_RXDET_P3_EN BIT(15) #define LPM_2_STB_SWITCH_EN BIT(25) -static int xhci_cdns3_suspend_quirk(struct usb_hcd *hcd); +static void xhci_cdns3_plat_start(struct usb_hcd *hcd) +{ + struct xhci_hcd *xhci = hcd_to_xhci(hcd); + u32 value; + + /* set usbcmd.EU3S */ + value = readl(&xhci->op_regs->command); + value |= CMD_PM_INDEX; + writel(value, &xhci->op_regs->command); + + if (hcd->regs) { + value = readl(hcd->regs + XECP_AUX_CTRL_REG1); + value |= CFG_RXDET_P3_EN; + writel(value, hcd->regs + XECP_AUX_CTRL_REG1); + + value = readl(hcd->regs + XECP_PORT_CAP_REG); + value |= LPM_2_STB_SWITCH_EN; + writel(value, hcd->regs + XECP_PORT_CAP_REG); + } +} + +static int xhci_cdns3_resume_quirk(struct usb_hcd *hcd) +{ + xhci_cdns3_plat_start(hcd); + return 0; +} static const struct xhci_plat_priv xhci_plat_cdns3_xhci = { .quirks = XHCI_SKIP_PHY_INIT | XHCI_AVOID_BEI, - .suspend_quirk = xhci_cdns3_suspend_quirk, + .plat_start = xhci_cdns3_plat_start, + .resume_quirk = xhci_cdns3_resume_quirk, }; static int __cdns_host_init(struct cdns *cdns) @@ -90,32 +116,6 @@ err1: return ret; } -static int xhci_cdns3_suspend_quirk(struct usb_hcd *hcd) -{ - struct xhci_hcd *xhci = hcd_to_xhci(hcd); - u32 value; - - if (pm_runtime_status_suspended(hcd->self.controller)) - return 0; - - /* set usbcmd.EU3S */ - value = readl(&xhci->op_regs->command); - value |= CMD_PM_INDEX; - writel(value, &xhci->op_regs->command); - - if (hcd->regs) { - value = readl(hcd->regs + XECP_AUX_CTRL_REG1); - value |= CFG_RXDET_P3_EN; - writel(value, hcd->regs + XECP_AUX_CTRL_REG1); - - value = readl(hcd->regs + XECP_PORT_CAP_REG); - value |= LPM_2_STB_SWITCH_EN; - writel(value, hcd->regs + XECP_PORT_CAP_REG); - } - - return 0; -} - static void cdns_host_exit(struct cdns *cdns) { kfree(cdns->xhci_plat_data); diff --git a/drivers/usb/chipidea/otg_fsm.c b/drivers/usb/chipidea/otg_fsm.c index ada78daba6df..c17516c29b63 100644 --- a/drivers/usb/chipidea/otg_fsm.c +++ b/drivers/usb/chipidea/otg_fsm.c @@ -256,8 +256,10 @@ static void ci_otg_del_timer(struct ci_hdrc *ci, enum otg_fsm_timer t) ci->enabled_otg_timer_bits &= ~(1 << t); if (ci->next_otg_timer == t) { if (ci->enabled_otg_timer_bits == 0) { + spin_unlock_irqrestore(&ci->lock, flags); /* No enabled timers after delete it */ hrtimer_cancel(&ci->otg_fsm_hrtimer); + spin_lock_irqsave(&ci->lock, flags); ci->next_otg_timer = NUM_OTG_FSM_TIMERS; } else { /* Find the next timer */ diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c index 0722d2131305..079e183cf3bf 100644 --- a/drivers/usb/core/quirks.c +++ b/drivers/usb/core/quirks.c @@ -362,6 +362,9 @@ static const struct usb_device_id usb_quirk_list[] = { { USB_DEVICE(0x0781, 0x5583), .driver_info 
= USB_QUIRK_NO_LPM }, { USB_DEVICE(0x0781, 0x5591), .driver_info = USB_QUIRK_NO_LPM }, + /* Realforce 87U Keyboard */ + { USB_DEVICE(0x0853, 0x011b), .driver_info = USB_QUIRK_NO_LPM }, + /* M-Systems Flash Disk Pioneers */ { USB_DEVICE(0x08ec, 0x1000), .driver_info = USB_QUIRK_RESET_RESUME }, diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c index ea51624461b5..1f348bc867c2 100644 --- a/drivers/usb/dwc3/core.c +++ b/drivers/usb/dwc3/core.c @@ -23,6 +23,7 @@ #include <linux/delay.h> #include <linux/dma-mapping.h> #include <linux/of.h> +#include <linux/of_graph.h> #include <linux/acpi.h> #include <linux/pinctrl/consumer.h> #include <linux/reset.h> @@ -85,7 +86,7 @@ static int dwc3_get_dr_mode(struct dwc3 *dwc) * mode. If the controller supports DRD but the dr_mode is not * specified or set to OTG, then set the mode to peripheral. */ - if (mode == USB_DR_MODE_OTG && + if (mode == USB_DR_MODE_OTG && !dwc->edev && (!IS_ENABLED(CONFIG_USB_ROLE_SWITCH) || !device_property_read_bool(dwc->dev, "usb-role-switch")) && !DWC3_VER_IS_PRIOR(DWC3, 330A)) @@ -1690,6 +1691,56 @@ static void dwc3_check_params(struct dwc3 *dwc) } } +static struct extcon_dev *dwc3_get_extcon(struct dwc3 *dwc) +{ + struct device *dev = dwc->dev; + struct device_node *np_phy; + struct extcon_dev *edev = NULL; + const char *name; + + if (device_property_read_bool(dev, "extcon")) + return extcon_get_edev_by_phandle(dev, 0); + + /* + * Device tree platforms should get extcon via phandle. + * On ACPI platforms, we get the name from a device property. + * This device property is for kernel internal use only and + * is expected to be set by the glue code. + */ + if (device_property_read_string(dev, "linux,extcon-name", &name) == 0) + return extcon_get_extcon_dev(name); + + /* + * Check explicitly if "usb-role-switch" is used since + * extcon_find_edev_by_node() can not be used to check the absence of + * an extcon device. In the absence of an device it will always return + * EPROBE_DEFER. + */ + if (IS_ENABLED(CONFIG_USB_ROLE_SWITCH) && + device_property_read_bool(dev, "usb-role-switch")) + return NULL; + + /* + * Try to get an extcon device from the USB PHY controller's "port" + * node. Check if it has the "port" node first, to avoid printing the + * error message from underlying code, as it's a valid case: extcon + * device (and "port" node) may be missing in case of "usb-role-switch" + * or OTG mode. 
+ */ + np_phy = of_parse_phandle(dev->of_node, "phys", 0); + if (of_graph_is_present(np_phy)) { + struct device_node *np_conn; + + np_conn = of_graph_get_remote_node(np_phy, -1, -1); + if (np_conn) + edev = extcon_find_edev_by_node(np_conn); + of_node_put(np_conn); + } + of_node_put(np_phy); + + return edev; +} + static int dwc3_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; @@ -1840,6 +1891,12 @@ static int dwc3_probe(struct platform_device *pdev) goto err2; } + dwc->edev = dwc3_get_extcon(dwc); + if (IS_ERR(dwc->edev)) { + ret = dev_err_probe(dwc->dev, PTR_ERR(dwc->edev), "failed to get extcon\n"); + goto err3; + } + ret = dwc3_get_dr_mode(dwc); if (ret) goto err3; diff --git a/drivers/usb/dwc3/drd.c b/drivers/usb/dwc3/drd.c index 8cad9e7d3368..039bf241769a 100644 --- a/drivers/usb/dwc3/drd.c +++ b/drivers/usb/dwc3/drd.c @@ -8,7 +8,6 @@ */ #include <linux/extcon.h> -#include <linux/of_graph.h> #include <linux/of_platform.h> #include <linux/platform_device.h> #include <linux/property.h> @@ -439,51 +438,6 @@ static int dwc3_drd_notifier(struct notifier_block *nb, return NOTIFY_DONE; } -static struct extcon_dev *dwc3_get_extcon(struct dwc3 *dwc) -{ - struct device *dev = dwc->dev; - struct device_node *np_phy; - struct extcon_dev *edev = NULL; - const char *name; - - if (device_property_read_bool(dev, "extcon")) - return extcon_get_edev_by_phandle(dev, 0); - - /* - * Device tree platforms should get extcon via phandle. - * On ACPI platforms, we get the name from a device property. - * This device property is for kernel internal use only and - * is expected to be set by the glue code. - */ - if (device_property_read_string(dev, "linux,extcon-name", &name) == 0) { - edev = extcon_get_extcon_dev(name); - if (!edev) - return ERR_PTR(-EPROBE_DEFER); - - return edev; - } - - /* - * Try to get an extcon device from the USB PHY controller's "port" - * node. Check if it has the "port" node first, to avoid printing the - * error message from underlying code, as it's a valid case: extcon - * device (and "port" node) may be missing in case of "usb-role-switch" - * or OTG mode. 
- */ - np_phy = of_parse_phandle(dev->of_node, "phys", 0); - if (of_graph_is_present(np_phy)) { - struct device_node *np_conn; - - np_conn = of_graph_get_remote_node(np_phy, -1, -1); - if (np_conn) - edev = extcon_find_edev_by_node(np_conn); - of_node_put(np_conn); - } - of_node_put(np_phy); - - return edev; -} - #if IS_ENABLED(CONFIG_USB_ROLE_SWITCH) #define ROLE_SWITCH 1 static int dwc3_usb_role_switch_set(struct usb_role_switch *sw, @@ -588,10 +542,6 @@ int dwc3_drd_init(struct dwc3 *dwc) device_property_read_bool(dwc->dev, "usb-role-switch")) return dwc3_setup_role_switch(dwc); - dwc->edev = dwc3_get_extcon(dwc); - if (IS_ERR(dwc->edev)) - return PTR_ERR(dwc->edev); - if (dwc->edev) { dwc->edev_nb.notifier_call = dwc3_drd_notifier; ret = extcon_register_notifier(dwc->edev, EXTCON_USB_HOST, diff --git a/drivers/usb/dwc3/dwc3-st.c b/drivers/usb/dwc3/dwc3-st.c index 6c14a79279f9..fea5290de83f 100644 --- a/drivers/usb/dwc3/dwc3-st.c +++ b/drivers/usb/dwc3/dwc3-st.c @@ -251,7 +251,7 @@ static int st_dwc3_probe(struct platform_device *pdev) /* Manage SoftReset */ reset_control_deassert(dwc3_data->rstc_rst); - child = of_get_child_by_name(node, "usb"); + child = of_get_compatible_child(node, "snps,dwc3"); if (!child) { dev_err(&pdev->dev, "failed to find dwc3 core node\n"); ret = -ENODEV; diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c index 079cd333632e..026d4029bda6 100644 --- a/drivers/usb/dwc3/gadget.c +++ b/drivers/usb/dwc3/gadget.c @@ -1029,7 +1029,7 @@ static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep) dep->endpoint.desc = NULL; } - dwc3_remove_requests(dwc, dep, -ECONNRESET); + dwc3_remove_requests(dwc, dep, -ESHUTDOWN); dep->stream_capable = false; dep->type = 0; @@ -1292,8 +1292,8 @@ static void dwc3_prepare_one_trb(struct dwc3_ep *dep, trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS; } - /* always enable Interrupt on Missed ISOC */ - trb->ctrl |= DWC3_TRB_CTRL_ISP_IMI; + if (!no_interrupt && !chain) + trb->ctrl |= DWC3_TRB_CTRL_ISP_IMI; break; case USB_ENDPOINT_XFER_BULK: @@ -1698,6 +1698,16 @@ static int __dwc3_stop_active_transfer(struct dwc3_ep *dep, bool force, bool int cmd |= DWC3_DEPCMD_PARAM(dep->resource_index); memset(¶ms, 0, sizeof(params)); ret = dwc3_send_gadget_ep_cmd(dep, cmd, ¶ms); + /* + * If the End Transfer command was timed out while the device is + * not in SETUP phase, it's possible that an incoming Setup packet + * may prevent the command's completion. Let's retry when the + * ep0state returns to EP0_SETUP_PHASE. + */ + if (ret == -ETIMEDOUT && dep->dwc->ep0state != EP0_SETUP_PHASE) { + dep->flags |= DWC3_EP_DELAY_STOP; + return 0; + } WARN_ON_ONCE(ret); dep->resource_index = 0; @@ -3238,6 +3248,10 @@ static int dwc3_gadget_ep_reclaim_completed_trb(struct dwc3_ep *dep, if (event->status & DEPEVT_STATUS_SHORT && !chain) return 1; + if ((trb->ctrl & DWC3_TRB_CTRL_ISP_IMI) && + DWC3_TRB_SIZE_TRBSTS(trb->size) == DWC3_TRBSTS_MISSED_ISOC) + return 1; + if ((trb->ctrl & DWC3_TRB_CTRL_IOC) || (trb->ctrl & DWC3_TRB_CTRL_LST)) return 1; @@ -3719,7 +3733,7 @@ void dwc3_stop_active_transfer(struct dwc3_ep *dep, bool force, * timeout. Delay issuing the End Transfer command until the Setup TRB is * prepared. 
*/ - if (dwc->ep0state != EP0_SETUP_PHASE) { + if (dwc->ep0state != EP0_SETUP_PHASE && !dwc->delayed_status) { dep->flags |= DWC3_EP_DELAY_STOP; return; } diff --git a/drivers/usb/dwc3/host.c b/drivers/usb/dwc3/host.c index a7154fe8206d..f6f13e7f1ba1 100644 --- a/drivers/usb/dwc3/host.c +++ b/drivers/usb/dwc3/host.c @@ -11,13 +11,8 @@ #include <linux/of.h> #include <linux/platform_device.h> -#include "../host/xhci-plat.h" #include "core.h" -static const struct xhci_plat_priv dwc3_xhci_plat_priv = { - .quirks = XHCI_SKIP_PHY_INIT, -}; - static void dwc3_host_fill_xhci_irq_res(struct dwc3 *dwc, int irq, char *name) { @@ -97,11 +92,6 @@ int dwc3_host_init(struct dwc3 *dwc) goto err; } - ret = platform_device_add_data(xhci, &dwc3_xhci_plat_priv, - sizeof(dwc3_xhci_plat_priv)); - if (ret) - goto err; - memset(props, 0, sizeof(struct property_entry) * ARRAY_SIZE(props)); if (dwc->usb3_lpm_capable) diff --git a/drivers/usb/gadget/function/uvc_queue.c b/drivers/usb/gadget/function/uvc_queue.c index ec500ee499ee..0aa3d7e1f3cc 100644 --- a/drivers/usb/gadget/function/uvc_queue.c +++ b/drivers/usb/gadget/function/uvc_queue.c @@ -304,6 +304,7 @@ int uvcg_queue_enable(struct uvc_video_queue *queue, int enable) queue->sequence = 0; queue->buf_used = 0; + queue->flags &= ~UVC_QUEUE_DROP_INCOMPLETE; } else { ret = vb2_streamoff(&queue->queue, queue->queue.type); if (ret < 0) @@ -329,10 +330,11 @@ int uvcg_queue_enable(struct uvc_video_queue *queue, int enable) void uvcg_complete_buffer(struct uvc_video_queue *queue, struct uvc_buffer *buf) { - if ((queue->flags & UVC_QUEUE_DROP_INCOMPLETE) && - buf->length != buf->bytesused) { - buf->state = UVC_BUF_STATE_QUEUED; + if (queue->flags & UVC_QUEUE_DROP_INCOMPLETE) { + queue->flags &= ~UVC_QUEUE_DROP_INCOMPLETE; + buf->state = UVC_BUF_STATE_ERROR; vb2_set_plane_payload(&buf->buf.vb2_buf, 0, 0); + vb2_buffer_done(&buf->buf.vb2_buf, VB2_BUF_STATE_ERROR); return; } diff --git a/drivers/usb/gadget/function/uvc_video.c b/drivers/usb/gadget/function/uvc_video.c index bb037fcc90e6..dd1c6b2ca7c6 100644 --- a/drivers/usb/gadget/function/uvc_video.c +++ b/drivers/usb/gadget/function/uvc_video.c @@ -88,6 +88,7 @@ uvc_video_encode_bulk(struct usb_request *req, struct uvc_video *video, struct uvc_buffer *buf) { void *mem = req->buf; + struct uvc_request *ureq = req->context; int len = video->req_size; int ret; @@ -113,13 +114,14 @@ uvc_video_encode_bulk(struct usb_request *req, struct uvc_video *video, video->queue.buf_used = 0; buf->state = UVC_BUF_STATE_DONE; list_del(&buf->queue); - uvcg_complete_buffer(&video->queue, buf); video->fid ^= UVC_STREAM_FID; + ureq->last_buf = buf; video->payload_size = 0; } if (video->payload_size == video->max_payload_size || + video->queue.flags & UVC_QUEUE_DROP_INCOMPLETE || buf->bytesused == video->queue.buf_used) video->payload_size = 0; } @@ -155,10 +157,10 @@ uvc_video_encode_isoc_sg(struct usb_request *req, struct uvc_video *video, sg = sg_next(sg); for_each_sg(sg, iter, ureq->sgt.nents - 1, i) { - if (!len || !buf->sg || !sg_dma_len(buf->sg)) + if (!len || !buf->sg || !buf->sg->length) break; - sg_left = sg_dma_len(buf->sg) - buf->offset; + sg_left = buf->sg->length - buf->offset; part = min_t(unsigned int, len, sg_left); sg_set_page(iter, sg_page(buf->sg), part, buf->offset); @@ -180,7 +182,8 @@ uvc_video_encode_isoc_sg(struct usb_request *req, struct uvc_video *video, req->length -= len; video->queue.buf_used += req->length - header_len; - if (buf->bytesused == video->queue.buf_used || !buf->sg) { + if (buf->bytesused == 
video->queue.buf_used || !buf->sg || + video->queue.flags & UVC_QUEUE_DROP_INCOMPLETE) { video->queue.buf_used = 0; buf->state = UVC_BUF_STATE_DONE; buf->offset = 0; @@ -195,6 +198,7 @@ uvc_video_encode_isoc(struct usb_request *req, struct uvc_video *video, struct uvc_buffer *buf) { void *mem = req->buf; + struct uvc_request *ureq = req->context; int len = video->req_size; int ret; @@ -209,12 +213,13 @@ uvc_video_encode_isoc(struct usb_request *req, struct uvc_video *video, req->length = video->req_size - len; - if (buf->bytesused == video->queue.buf_used) { + if (buf->bytesused == video->queue.buf_used || + video->queue.flags & UVC_QUEUE_DROP_INCOMPLETE) { video->queue.buf_used = 0; buf->state = UVC_BUF_STATE_DONE; list_del(&buf->queue); - uvcg_complete_buffer(&video->queue, buf); video->fid ^= UVC_STREAM_FID; + ureq->last_buf = buf; } } @@ -255,6 +260,11 @@ uvc_video_complete(struct usb_ep *ep, struct usb_request *req) case 0: break; + case -EXDEV: + uvcg_dbg(&video->uvc->func, "VS request missed xfer.\n"); + queue->flags |= UVC_QUEUE_DROP_INCOMPLETE; + break; + case -ESHUTDOWN: /* disconnect from host. */ uvcg_dbg(&video->uvc->func, "VS request cancelled.\n"); uvcg_queue_cancel(queue, 1); @@ -431,7 +441,8 @@ static void uvcg_video_pump(struct work_struct *work) /* Endpoint now owns the request */ req = NULL; - video->req_int_count++; + if (buf->state != UVC_BUF_STATE_DONE) + video->req_int_count++; } if (!req) diff --git a/drivers/usb/gadget/udc/aspeed-vhub/dev.c b/drivers/usb/gadget/udc/aspeed-vhub/dev.c index b0dfca43fbdc..4f3bc27c1c62 100644 --- a/drivers/usb/gadget/udc/aspeed-vhub/dev.c +++ b/drivers/usb/gadget/udc/aspeed-vhub/dev.c @@ -591,6 +591,7 @@ int ast_vhub_init_dev(struct ast_vhub *vhub, unsigned int idx) d->gadget.max_speed = USB_SPEED_HIGH; d->gadget.speed = USB_SPEED_UNKNOWN; d->gadget.dev.of_node = vhub->pdev->dev.of_node; + d->gadget.dev.of_node_reused = true; rc = usb_add_gadget_udc(d->port_dev, &d->gadget); if (rc != 0) diff --git a/drivers/usb/gadget/udc/bdc/bdc_udc.c b/drivers/usb/gadget/udc/bdc/bdc_udc.c index 5ac0ef88334e..53ffaf4e2e37 100644 --- a/drivers/usb/gadget/udc/bdc/bdc_udc.c +++ b/drivers/usb/gadget/udc/bdc/bdc_udc.c @@ -151,6 +151,7 @@ static void bdc_uspc_disconnected(struct bdc *bdc, bool reinit) bdc->delayed_status = false; bdc->reinit = reinit; bdc->test_mode = false; + usb_gadget_set_state(&bdc->gadget, USB_STATE_NOTATTACHED); } /* TNotify wkaeup timer */ diff --git a/drivers/usb/host/bcma-hcd.c b/drivers/usb/host/bcma-hcd.c index 2df52f75f6b3..7558cc4d90cc 100644 --- a/drivers/usb/host/bcma-hcd.c +++ b/drivers/usb/host/bcma-hcd.c @@ -285,7 +285,7 @@ static void bcma_hci_platform_power_gpio(struct bcma_device *dev, bool val) { struct bcma_hcd_device *usb_dev = bcma_get_drvdata(dev); - if (IS_ERR_OR_NULL(usb_dev->gpio_desc)) + if (!usb_dev->gpio_desc) return; gpiod_set_value(usb_dev->gpio_desc, val); @@ -406,9 +406,11 @@ static int bcma_hcd_probe(struct bcma_device *core) return -ENOMEM; usb_dev->core = core; - if (core->dev.of_node) - usb_dev->gpio_desc = devm_gpiod_get(&core->dev, "vcc", - GPIOD_OUT_HIGH); + usb_dev->gpio_desc = devm_gpiod_get_optional(&core->dev, "vcc", + GPIOD_OUT_HIGH); + if (IS_ERR(usb_dev->gpio_desc)) + return dev_err_probe(&core->dev, PTR_ERR(usb_dev->gpio_desc), + "error obtaining VCC GPIO"); switch (core->id.id) { case BCMA_CORE_USB20_HOST: diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c index 9e56aa28efcd..81ca2bc1f0be 100644 --- a/drivers/usb/host/xhci-mem.c +++ b/drivers/usb/host/xhci-mem.c @@ 
-889,15 +889,19 @@ void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id) if (dev->eps[i].stream_info) xhci_free_stream_info(xhci, dev->eps[i].stream_info); - /* Endpoints on the TT/root port lists should have been removed - * when usb_disable_device() was called for the device. - * We can't drop them anyway, because the udev might have gone - * away by this point, and we can't tell what speed it was. + /* + * Endpoints are normally deleted from the bandwidth list when + * endpoints are dropped, before device is freed. + * If host is dying or being removed then endpoints aren't + * dropped cleanly, so delete the endpoint from list here. + * Only applicable for hosts with software bandwidth checking. */ - if (!list_empty(&dev->eps[i].bw_endpoint_list)) - xhci_warn(xhci, "Slot %u endpoint %u " - "not removed from BW list!\n", - slot_id, i); + + if (!list_empty(&dev->eps[i].bw_endpoint_list)) { + list_del_init(&dev->eps[i].bw_endpoint_list); + xhci_dbg(xhci, "Slot %u endpoint %u not removed from BW list!\n", + slot_id, i); + } } /* If this is a hub, free the TT(s) from the TT list */ xhci_free_tt_info(xhci, dev, slot_id); diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c index 40228a3d77a0..7bccbe50bab1 100644 --- a/drivers/usb/host/xhci-pci.c +++ b/drivers/usb/host/xhci-pci.c @@ -58,25 +58,13 @@ #define PCI_DEVICE_ID_INTEL_CML_XHCI 0xa3af #define PCI_DEVICE_ID_INTEL_TIGER_LAKE_XHCI 0x9a13 #define PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_XHCI 0x1138 -#define PCI_DEVICE_ID_INTEL_ALDER_LAKE_XHCI 0x461e -#define PCI_DEVICE_ID_INTEL_ALDER_LAKE_N_XHCI 0x464e -#define PCI_DEVICE_ID_INTEL_ALDER_LAKE_PCH_XHCI 0x51ed -#define PCI_DEVICE_ID_INTEL_RAPTOR_LAKE_XHCI 0xa71e -#define PCI_DEVICE_ID_INTEL_METEOR_LAKE_XHCI 0x7ec0 +#define PCI_DEVICE_ID_INTEL_ALDER_LAKE_PCH_XHCI 0x51ed #define PCI_DEVICE_ID_AMD_RENOIR_XHCI 0x1639 #define PCI_DEVICE_ID_AMD_PROMONTORYA_4 0x43b9 #define PCI_DEVICE_ID_AMD_PROMONTORYA_3 0x43ba #define PCI_DEVICE_ID_AMD_PROMONTORYA_2 0x43bb #define PCI_DEVICE_ID_AMD_PROMONTORYA_1 0x43bc -#define PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_1 0x161a -#define PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_2 0x161b -#define PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_3 0x161d -#define PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_4 0x161e -#define PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_5 0x15d6 -#define PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_6 0x15d7 -#define PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_7 0x161c -#define PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_8 0x161f #define PCI_DEVICE_ID_ASMEDIA_1042_XHCI 0x1042 #define PCI_DEVICE_ID_ASMEDIA_1042A_XHCI 0x1142 @@ -258,6 +246,10 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) xhci->quirks |= XHCI_MISSING_CAS; if (pdev->vendor == PCI_VENDOR_ID_INTEL && + pdev->device == PCI_DEVICE_ID_INTEL_ALDER_LAKE_PCH_XHCI) + xhci->quirks |= XHCI_RESET_TO_DEFAULT; + + if (pdev->vendor == PCI_VENDOR_ID_INTEL && (pdev->device == PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_XHCI || pdev->device == PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_4C_XHCI || pdev->device == PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_XHCI || @@ -268,12 +260,7 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) pdev->device == PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_XHCI || pdev->device == PCI_DEVICE_ID_INTEL_ICE_LAKE_XHCI || pdev->device == PCI_DEVICE_ID_INTEL_TIGER_LAKE_XHCI || - pdev->device == PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_XHCI || - pdev->device == PCI_DEVICE_ID_INTEL_ALDER_LAKE_XHCI || - pdev->device == PCI_DEVICE_ID_INTEL_ALDER_LAKE_N_XHCI || - pdev->device == PCI_DEVICE_ID_INTEL_ALDER_LAKE_PCH_XHCI 
|| - pdev->device == PCI_DEVICE_ID_INTEL_RAPTOR_LAKE_XHCI || - pdev->device == PCI_DEVICE_ID_INTEL_METEOR_LAKE_XHCI)) + pdev->device == PCI_DEVICE_ID_INTEL_MAPLE_RIDGE_XHCI)) xhci->quirks |= XHCI_DEFAULT_PM_RUNTIME_ALLOW; if (pdev->vendor == PCI_VENDOR_ID_ETRON && @@ -306,8 +293,14 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) } if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA && - pdev->device == PCI_DEVICE_ID_ASMEDIA_1042_XHCI) + pdev->device == PCI_DEVICE_ID_ASMEDIA_1042_XHCI) { + /* + * try to tame the ASMedia 1042 controller which reports 0.96 + * but appears to behave more like 1.0 + */ + xhci->quirks |= XHCI_SPURIOUS_SUCCESS; xhci->quirks |= XHCI_BROKEN_STREAMS; + } if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA && pdev->device == PCI_DEVICE_ID_ASMEDIA_1042A_XHCI) { xhci->quirks |= XHCI_TRUST_TX_LENGTH; @@ -336,15 +329,8 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) pdev->device == PCI_DEVICE_ID_AMD_PROMONTORYA_4)) xhci->quirks |= XHCI_NO_SOFT_RETRY; - if (pdev->vendor == PCI_VENDOR_ID_AMD && - (pdev->device == PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_1 || - pdev->device == PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_2 || - pdev->device == PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_3 || - pdev->device == PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_4 || - pdev->device == PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_5 || - pdev->device == PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_6 || - pdev->device == PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_7 || - pdev->device == PCI_DEVICE_ID_AMD_YELLOW_CARP_XHCI_8)) + /* xHC spec requires PCI devices to support D3hot and D3cold */ + if (xhci->hci_version >= 0x120) xhci->quirks |= XHCI_DEFAULT_PM_RUNTIME_ALLOW; if (xhci->quirks & XHCI_RESET_ON_RESUME) diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index 5176765c4013..79d7931c048a 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c @@ -810,9 +810,15 @@ void xhci_shutdown(struct usb_hcd *hcd) spin_lock_irq(&xhci->lock); xhci_halt(xhci); - /* Workaround for spurious wakeups at shutdown with HSW */ - if (xhci->quirks & XHCI_SPURIOUS_WAKEUP) + + /* + * Workaround for spurious wakeups at shutdown with HSW, and for boot + * firmware delay in ADL-P PCH if ports are left in U3 at shutdown + */ + if (xhci->quirks & XHCI_SPURIOUS_WAKEUP || + xhci->quirks & XHCI_RESET_TO_DEFAULT) xhci_reset(xhci, XHCI_RESET_SHORT_USEC); + spin_unlock_irq(&xhci->lock); xhci_cleanup_msix(xhci); diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h index c0964fe8ac12..cc084d9505cd 100644 --- a/drivers/usb/host/xhci.h +++ b/drivers/usb/host/xhci.h @@ -1897,6 +1897,7 @@ struct xhci_hcd { #define XHCI_BROKEN_D3COLD BIT_ULL(41) #define XHCI_EP_CTX_BROKEN_DCS BIT_ULL(42) #define XHCI_SUSPEND_RESUME_CLKS BIT_ULL(43) +#define XHCI_RESET_TO_DEFAULT BIT_ULL(44) unsigned int num_active_eps; unsigned int limit_active_eps; diff --git a/drivers/usb/misc/sisusbvga/sisusb_struct.h b/drivers/usb/misc/sisusbvga/sisusb_struct.h index 3df64d2a9d43..a86032a26d36 100644 --- a/drivers/usb/misc/sisusbvga/sisusb_struct.h +++ b/drivers/usb/misc/sisusbvga/sisusb_struct.h @@ -91,7 +91,7 @@ struct SiS_Ext { unsigned char VB_ExtTVYFilterIndex; unsigned char VB_ExtTVYFilterIndexROM661; unsigned char REFindex; - char ROMMODEIDX661; + signed char ROMMODEIDX661; }; struct SiS_Ext2 { diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index 697683e3fbff..c3b7f1d98e78 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c @@ -162,6 +162,8 @@ static void option_instat_callback(struct urb *urb);
#define NOVATELWIRELESS_PRODUCT_G2 0xA010 #define NOVATELWIRELESS_PRODUCT_MC551 0xB001 +#define UBLOX_VENDOR_ID 0x1546 + /* AMOI PRODUCTS */ #define AMOI_VENDOR_ID 0x1614 #define AMOI_PRODUCT_H01 0x0800 @@ -240,7 +242,6 @@ static void option_instat_callback(struct urb *urb); #define QUECTEL_PRODUCT_UC15 0x9090 /* These u-blox products use Qualcomm's vendor ID */ #define UBLOX_PRODUCT_R410M 0x90b2 -#define UBLOX_PRODUCT_R6XX 0x90fa /* These Yuga products use Qualcomm's vendor ID */ #define YUGA_PRODUCT_CLM920_NC5 0x9625 @@ -581,6 +582,9 @@ static void option_instat_callback(struct urb *urb); #define OPPO_VENDOR_ID 0x22d9 #define OPPO_PRODUCT_R11 0x276c +/* Sierra Wireless products */ +#define SIERRA_VENDOR_ID 0x1199 +#define SIERRA_PRODUCT_EM9191 0x90d3 /* Device flags */ @@ -1124,8 +1128,16 @@ static const struct usb_device_id option_ids[] = { /* u-blox products using Qualcomm vendor ID */ { USB_DEVICE(QUALCOMM_VENDOR_ID, UBLOX_PRODUCT_R410M), .driver_info = RSVD(1) | RSVD(3) }, - { USB_DEVICE(QUALCOMM_VENDOR_ID, UBLOX_PRODUCT_R6XX), + { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x908b), /* u-blox LARA-R6 00B */ + .driver_info = RSVD(4) }, + { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x90fa), .driver_info = RSVD(3) }, + /* u-blox products */ + { USB_DEVICE(UBLOX_VENDOR_ID, 0x1341) }, /* u-blox LARA-L6 */ + { USB_DEVICE(UBLOX_VENDOR_ID, 0x1342), /* u-blox LARA-L6 (RMNET) */ + .driver_info = RSVD(4) }, + { USB_DEVICE(UBLOX_VENDOR_ID, 0x1343), /* u-blox LARA-L6 (ECM) */ + .driver_info = RSVD(4) }, /* Quectel products using Quectel vendor ID */ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC21, 0xff, 0xff, 0xff), .driver_info = NUMEP2 }, @@ -2167,6 +2179,7 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x010a, 0xff) }, /* Fibocom MA510 (ECM mode) */ { USB_DEVICE_AND_INTERFACE_INFO(0x2cb7, 0x010b, 0xff, 0xff, 0x30) }, /* Fibocom FG150 Diag */ { USB_DEVICE_AND_INTERFACE_INFO(0x2cb7, 0x010b, 0xff, 0, 0) }, /* Fibocom FG150 AT */ + { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0111, 0xff) }, /* Fibocom FM160 (MBIM mode) */ { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a0, 0xff) }, /* Fibocom NL668-AM/NL652-EU (laptop MBIM) */ { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a2, 0xff) }, /* Fibocom FM101-GL (laptop MBIM) */ { USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a4, 0xff), /* Fibocom FM101-GL (laptop MBIM) */ @@ -2176,6 +2189,8 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1405, 0xff) }, /* GosunCn GM500 MBIM */ { USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1406, 0xff) }, /* GosunCn GM500 ECM/NCM */ { USB_DEVICE_AND_INTERFACE_INFO(OPPO_VENDOR_ID, OPPO_PRODUCT_R11, 0xff, 0xff, 0x30) }, + { USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9191, 0xff, 0xff, 0x30) }, + { USB_DEVICE_AND_INTERFACE_INFO(SIERRA_VENDOR_ID, SIERRA_PRODUCT_EM9191, 0xff, 0, 0) }, { } /* Terminating entry */ }; MODULE_DEVICE_TABLE(usb, option_ids); diff --git a/drivers/usb/typec/mux/intel_pmc_mux.c b/drivers/usb/typec/mux/intel_pmc_mux.c index e1f4df7238bf..fdbf3694e21f 100644 --- a/drivers/usb/typec/mux/intel_pmc_mux.c +++ b/drivers/usb/typec/mux/intel_pmc_mux.c @@ -369,13 +369,24 @@ pmc_usb_mux_usb4(struct pmc_usb_port *port, struct typec_mux_state *state) return pmc_usb_command(port, (void *)&req, sizeof(req)); } -static int pmc_usb_mux_safe_state(struct pmc_usb_port *port) +static int pmc_usb_mux_safe_state(struct pmc_usb_port *port, + struct typec_mux_state *state) { u8 msg; if (IOM_PORT_ACTIVITY_IS(port->iom_status, SAFE_MODE)) return 0; + 
if ((IOM_PORT_ACTIVITY_IS(port->iom_status, DP) || + IOM_PORT_ACTIVITY_IS(port->iom_status, DP_MFD)) && + state->alt && state->alt->svid == USB_TYPEC_DP_SID) + return 0; + + if ((IOM_PORT_ACTIVITY_IS(port->iom_status, TBT) || + IOM_PORT_ACTIVITY_IS(port->iom_status, ALT_MODE_TBT_USB)) && + state->alt && state->alt->svid == USB_TYPEC_TBT_SID) + return 0; + msg = PMC_USB_SAFE_MODE; msg |= port->usb3_port << PMC_USB_MSG_USB3_PORT_SHIFT; @@ -443,7 +454,7 @@ pmc_usb_mux_set(struct typec_mux_dev *mux, struct typec_mux_state *state) return 0; if (state->mode == TYPEC_STATE_SAFE) - return pmc_usb_mux_safe_state(port); + return pmc_usb_mux_safe_state(port, state); if (state->mode == TYPEC_STATE_USB) return pmc_usb_connect(port, port->role); diff --git a/drivers/usb/typec/tipd/core.c b/drivers/usb/typec/tipd/core.c index b637e8b378b3..2a77bab948f5 100644 --- a/drivers/usb/typec/tipd/core.c +++ b/drivers/usb/typec/tipd/core.c @@ -474,7 +474,7 @@ static void tps6598x_handle_plug_event(struct tps6598x *tps, u32 status) static irqreturn_t cd321x_interrupt(int irq, void *data) { struct tps6598x *tps = data; - u64 event; + u64 event = 0; u32 status; int ret; @@ -519,8 +519,8 @@ err_unlock: static irqreturn_t tps6598x_interrupt(int irq, void *data) { struct tps6598x *tps = data; - u64 event1; - u64 event2; + u64 event1 = 0; + u64 event2 = 0; u32 status; int ret; diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c index 74fb5a4c6f21..a7987fc764cc 100644 --- a/drivers/usb/typec/ucsi/ucsi.c +++ b/drivers/usb/typec/ucsi/ucsi.c @@ -183,16 +183,6 @@ out: } EXPORT_SYMBOL_GPL(ucsi_send_command); -int ucsi_resume(struct ucsi *ucsi) -{ - u64 command; - - /* Restore UCSI notification enable mask after system resume */ - command = UCSI_SET_NOTIFICATION_ENABLE | ucsi->ntfy; - - return ucsi_send_command(ucsi, command, NULL, 0); -} -EXPORT_SYMBOL_GPL(ucsi_resume); /* -------------------------------------------------------------------------- */ struct ucsi_work { @@ -744,6 +734,7 @@ static void ucsi_partner_change(struct ucsi_connector *con) static int ucsi_check_connection(struct ucsi_connector *con) { + u8 prev_flags = con->status.flags; u64 command; int ret; @@ -754,10 +745,13 @@ static int ucsi_check_connection(struct ucsi_connector *con) return ret; } + if (con->status.flags == prev_flags) + return 0; + if (con->status.flags & UCSI_CONSTAT_CONNECTED) { - if (UCSI_CONSTAT_PWR_OPMODE(con->status.flags) == - UCSI_CONSTAT_PWR_OPMODE_PD) - ucsi_partner_task(con, ucsi_check_altmodes, 30, 0); + ucsi_register_partner(con); + ucsi_pwr_opmode_change(con); + ucsi_partner_change(con); } else { ucsi_partner_change(con); ucsi_port_psy_changed(con); @@ -1276,6 +1270,28 @@ err: return ret; } +int ucsi_resume(struct ucsi *ucsi) +{ + struct ucsi_connector *con; + u64 command; + int ret; + + /* Restore UCSI notification enable mask after system resume */ + command = UCSI_SET_NOTIFICATION_ENABLE | ucsi->ntfy; + ret = ucsi_send_command(ucsi, command, NULL, 0); + if (ret < 0) + return ret; + + for (con = ucsi->connector; con->port; con++) { + mutex_lock(&con->lock); + ucsi_check_connection(con); + mutex_unlock(&con->lock); + } + + return 0; +} +EXPORT_SYMBOL_GPL(ucsi_resume); + static void ucsi_init_work(struct work_struct *work) { struct ucsi *ucsi = container_of(work, struct ucsi, work.work); diff --git a/drivers/usb/typec/ucsi/ucsi_acpi.c b/drivers/usb/typec/ucsi/ucsi_acpi.c index 8873c1644a29..ce0c8ef80c04 100644 --- a/drivers/usb/typec/ucsi/ucsi_acpi.c +++ b/drivers/usb/typec/ucsi/ucsi_acpi.c @@ -185,6 +185,15 
@@ static int ucsi_acpi_remove(struct platform_device *pdev) return 0; } +static int ucsi_acpi_resume(struct device *dev) +{ + struct ucsi_acpi *ua = dev_get_drvdata(dev); + + return ucsi_resume(ua->ucsi); +} + +static DEFINE_SIMPLE_DEV_PM_OPS(ucsi_acpi_pm_ops, NULL, ucsi_acpi_resume); + static const struct acpi_device_id ucsi_acpi_match[] = { { "PNP0CA0", 0 }, { }, @@ -194,6 +203,7 @@ MODULE_DEVICE_TABLE(acpi, ucsi_acpi_match); static struct platform_driver ucsi_acpi_platform_driver = { .driver = { .name = "ucsi_acpi", + .pm = pm_ptr(&ucsi_acpi_pm_ops), .acpi_match_table = ACPI_PTR(ucsi_acpi_match), }, .probe = ucsi_acpi_probe, diff --git a/drivers/vfio/pci/vfio_pci_core.c b/drivers/vfio/pci/vfio_pci_core.c index badc9d828cac..e030c2120183 100644 --- a/drivers/vfio/pci/vfio_pci_core.c +++ b/drivers/vfio/pci/vfio_pci_core.c @@ -2488,12 +2488,12 @@ static bool vfio_pci_dev_set_needs_reset(struct vfio_device_set *dev_set) struct vfio_pci_core_device *cur; bool needs_reset = false; - list_for_each_entry(cur, &dev_set->device_list, vdev.dev_set_list) { - /* No VFIO device in the set can have an open device FD */ - if (cur->vdev.open_count) - return false; + /* No other VFIO device in the set can be open. */ + if (vfio_device_set_open_count(dev_set) > 1) + return false; + + list_for_each_entry(cur, &dev_set->device_list, vdev.dev_set_list) needs_reset |= cur->needs_reset; - } return needs_reset; } diff --git a/drivers/vfio/vfio_main.c b/drivers/vfio/vfio_main.c index 2d168793d4e1..6e8804fe0095 100644 --- a/drivers/vfio/vfio_main.c +++ b/drivers/vfio/vfio_main.c @@ -125,6 +125,19 @@ static void vfio_release_device_set(struct vfio_device *device) xa_unlock(&vfio_device_set_xa); } +unsigned int vfio_device_set_open_count(struct vfio_device_set *dev_set) +{ + struct vfio_device *cur; + unsigned int open_count = 0; + + lockdep_assert_held(&dev_set->lock); + + list_for_each_entry(cur, &dev_set->device_list, dev_set_list) + open_count += cur->open_count; + return open_count; +} +EXPORT_SYMBOL_GPL(vfio_device_set_open_count); + /* * Group objects - create, release, get, put, search */ @@ -801,8 +814,9 @@ static struct file *vfio_device_open(struct vfio_device *device) err_close_device: mutex_lock(&device->dev_set->lock); mutex_lock(&device->group->group_lock); - if (device->open_count == 1 && device->ops->close_device) { - device->ops->close_device(device); + if (device->open_count == 1) { + if (device->ops->close_device) + device->ops->close_device(device); vfio_device_container_unregister(device); } @@ -1017,10 +1031,12 @@ static int vfio_device_fops_release(struct inode *inode, struct file *filep) mutex_lock(&device->dev_set->lock); vfio_assert_device_open(device); mutex_lock(&device->group->group_lock); - if (device->open_count == 1 && device->ops->close_device) - device->ops->close_device(device); + if (device->open_count == 1) { + if (device->ops->close_device) + device->ops->close_device(device); - vfio_device_container_unregister(device); + vfio_device_container_unregister(device); + } mutex_unlock(&device->group->group_lock); device->open_count--; if (device->open_count == 0) diff --git a/drivers/video/aperture.c b/drivers/video/aperture.c index 9e6bcc03a1a4..41e77de1ea82 100644 --- a/drivers/video/aperture.c +++ b/drivers/video/aperture.c @@ -340,12 +340,9 @@ int aperture_remove_conflicting_pci_devices(struct pci_dev *pdev, const char *na size = pci_resource_len(pdev, bar); ret = aperture_remove_conflicting_devices(base, size, primary, name); if (ret) - break; + return ret; } - if (ret) - 
return ret; - /* * WARNING: Apparently we must kick fbdev drivers before vgacon, * otherwise the vga fbdev driver falls over. diff --git a/drivers/video/fbdev/cyber2000fb.c b/drivers/video/fbdev/cyber2000fb.c index 585af90a68a5..31ff1da82c05 100644 --- a/drivers/video/fbdev/cyber2000fb.c +++ b/drivers/video/fbdev/cyber2000fb.c @@ -1796,6 +1796,7 @@ failed_ioremap: failed_regions: cyberpro_free_fb_info(cfb); failed_release: + pci_disable_device(dev); return err; } @@ -1812,6 +1813,7 @@ static void cyberpro_pci_remove(struct pci_dev *dev) int_cfb_info = NULL; pci_release_regions(dev); + pci_disable_device(dev); } } diff --git a/drivers/video/fbdev/da8xx-fb.c b/drivers/video/fbdev/da8xx-fb.c index ae76a2111c77..11922b009ed7 100644 --- a/drivers/video/fbdev/da8xx-fb.c +++ b/drivers/video/fbdev/da8xx-fb.c @@ -1076,7 +1076,8 @@ static int fb_remove(struct platform_device *dev) if (par->lcd_supply) { ret = regulator_disable(par->lcd_supply); if (ret) - return ret; + dev_warn(&dev->dev, "Failed to disable regulator (%pe)\n", + ERR_PTR(ret)); } lcd_disable_raster(DA8XX_FRAME_WAIT); diff --git a/drivers/video/fbdev/gbefb.c b/drivers/video/fbdev/gbefb.c index 1582c718329c..000b4aa44241 100644 --- a/drivers/video/fbdev/gbefb.c +++ b/drivers/video/fbdev/gbefb.c @@ -1060,14 +1060,14 @@ static const struct fb_ops gbefb_ops = { static ssize_t gbefb_show_memsize(struct device *dev, struct device_attribute *attr, char *buf) { - return snprintf(buf, PAGE_SIZE, "%u\n", gbe_mem_size); + return sysfs_emit(buf, "%u\n", gbe_mem_size); } static DEVICE_ATTR(size, S_IRUGO, gbefb_show_memsize, NULL); static ssize_t gbefb_show_rev(struct device *device, struct device_attribute *attr, char *buf) { - return snprintf(buf, PAGE_SIZE, "%d\n", gbe_revision); + return sysfs_emit(buf, "%d\n", gbe_revision); } static DEVICE_ATTR(revision, S_IRUGO, gbefb_show_rev, NULL); diff --git a/drivers/video/fbdev/sis/sis_accel.c b/drivers/video/fbdev/sis/sis_accel.c index 1914ab5a5a91..5850e4325f07 100644 --- a/drivers/video/fbdev/sis/sis_accel.c +++ b/drivers/video/fbdev/sis/sis_accel.c @@ -202,7 +202,7 @@ SiS310SubsequentScreenToScreenCopy(struct sis_video_info *ivideo, int src_x, int * and destination blitting areas overlap and * adapt the bitmap addresses synchronously * if the coordinates exceed the valid range. - * The the areas do not overlap, we do our + * The areas do not overlap, we do our * normal check. */ if((mymax - mymin) < height) { diff --git a/drivers/video/fbdev/sis/vstruct.h b/drivers/video/fbdev/sis/vstruct.h index ea94d214dcff..d7a14e63ba5a 100644 --- a/drivers/video/fbdev/sis/vstruct.h +++ b/drivers/video/fbdev/sis/vstruct.h @@ -148,7 +148,7 @@ struct SiS_Ext { unsigned char VB_ExtTVYFilterIndex; unsigned char VB_ExtTVYFilterIndexROM661; unsigned char REFindex; - char ROMMODEIDX661; + signed char ROMMODEIDX661; }; struct SiS_Ext2 { diff --git a/drivers/video/fbdev/sm501fb.c b/drivers/video/fbdev/sm501fb.c index fce6cfbadfd6..f743bfbde2a6 100644 --- a/drivers/video/fbdev/sm501fb.c +++ b/drivers/video/fbdev/sm501fb.c @@ -1166,7 +1166,7 @@ static ssize_t sm501fb_crtsrc_show(struct device *dev, ctrl = smc501_readl(info->regs + SM501_DC_CRT_CONTROL); ctrl &= SM501_DC_CRT_CONTROL_SEL; - return snprintf(buf, PAGE_SIZE, "%s\n", ctrl ? "crt" : "panel"); + return sysfs_emit(buf, "%s\n", ctrl ? 
"crt" : "panel"); } /* sm501fb_crtsrc_show diff --git a/drivers/video/fbdev/smscufx.c b/drivers/video/fbdev/smscufx.c index e65bdc499c23..9343b7a4ac89 100644 --- a/drivers/video/fbdev/smscufx.c +++ b/drivers/video/fbdev/smscufx.c @@ -97,7 +97,6 @@ struct ufx_data { struct kref kref; int fb_count; bool virtualized; /* true when physical usb device not present */ - struct delayed_work free_framebuffer_work; atomic_t usb_active; /* 0 = update virtual buffer, but no usb traffic */ atomic_t lost_pixels; /* 1 = a render op failed. Need screen refresh */ u8 *edid; /* null until we read edid from hw or get from sysfs */ @@ -1117,15 +1116,24 @@ static void ufx_free(struct kref *kref) { struct ufx_data *dev = container_of(kref, struct ufx_data, kref); - /* this function will wait for all in-flight urbs to complete */ - if (dev->urbs.count > 0) - ufx_free_urb_list(dev); + kfree(dev); +} - pr_debug("freeing ufx_data %p", dev); +static void ufx_ops_destory(struct fb_info *info) +{ + struct ufx_data *dev = info->par; + int node = info->node; - kfree(dev); + /* Assume info structure is freed after this point */ + framebuffer_release(info); + + pr_debug("fb_info for /dev/fb%d has been freed", node); + + /* release reference taken by kref_init in probe() */ + kref_put(&dev->kref, ufx_free); } + static void ufx_release_urb_work(struct work_struct *work) { struct urb_node *unode = container_of(work, struct urb_node, @@ -1134,14 +1142,9 @@ static void ufx_release_urb_work(struct work_struct *work) up(&unode->dev->urbs.limit_sem); } -static void ufx_free_framebuffer_work(struct work_struct *work) +static void ufx_free_framebuffer(struct ufx_data *dev) { - struct ufx_data *dev = container_of(work, struct ufx_data, - free_framebuffer_work.work); struct fb_info *info = dev->info; - int node = info->node; - - unregister_framebuffer(info); if (info->cmap.len != 0) fb_dealloc_cmap(&info->cmap); @@ -1153,11 +1156,6 @@ static void ufx_free_framebuffer_work(struct work_struct *work) dev->info = NULL; - /* Assume info structure is freed after this point */ - framebuffer_release(info); - - pr_debug("fb_info for /dev/fb%d has been freed", node); - /* ref taken in probe() as part of registering framebfufer */ kref_put(&dev->kref, ufx_free); } @@ -1169,11 +1167,13 @@ static int ufx_ops_release(struct fb_info *info, int user) { struct ufx_data *dev = info->par; + mutex_lock(&disconnect_mutex); + dev->fb_count--; /* We can't free fb_info here - fbmem will touch it when we return */ if (dev->virtualized && (dev->fb_count == 0)) - schedule_delayed_work(&dev->free_framebuffer_work, HZ); + ufx_free_framebuffer(dev); if ((dev->fb_count == 0) && (info->fbdefio)) { fb_deferred_io_cleanup(info); @@ -1186,6 +1186,8 @@ static int ufx_ops_release(struct fb_info *info, int user) kref_put(&dev->kref, ufx_free); + mutex_unlock(&disconnect_mutex); + return 0; } @@ -1292,6 +1294,7 @@ static const struct fb_ops ufx_ops = { .fb_blank = ufx_ops_blank, .fb_check_var = ufx_ops_check_var, .fb_set_par = ufx_ops_set_par, + .fb_destroy = ufx_ops_destory, }; /* Assumes &info->lock held by caller @@ -1673,9 +1676,6 @@ static int ufx_usb_probe(struct usb_interface *interface, goto destroy_modedb; } - INIT_DELAYED_WORK(&dev->free_framebuffer_work, - ufx_free_framebuffer_work); - retval = ufx_reg_read(dev, 0x3000, &id_rev); check_warn_goto_error(retval, "error %d reading 0x3000 register from device", retval); dev_dbg(dev->gdev, "ID_REV register value 0x%08x", id_rev); @@ -1748,10 +1748,12 @@ e_nomem: static void ufx_usb_disconnect(struct usb_interface 
*interface) { struct ufx_data *dev; + struct fb_info *info; mutex_lock(&disconnect_mutex); dev = usb_get_intfdata(interface); + info = dev->info; pr_debug("USB disconnect starting\n"); @@ -1765,12 +1767,15 @@ static void ufx_usb_disconnect(struct usb_interface *interface) /* if clients still have us open, will be freed on last close */ if (dev->fb_count == 0) - schedule_delayed_work(&dev->free_framebuffer_work, 0); + ufx_free_framebuffer(dev); - /* release reference taken by kref_init in probe() */ - kref_put(&dev->kref, ufx_free); + /* this function will wait for all in-flight urbs to complete */ + if (dev->urbs.count > 0) + ufx_free_urb_list(dev); - /* consider ufx_data freed */ + pr_debug("freeing ufx_data %p", dev); + + unregister_framebuffer(info); mutex_unlock(&disconnect_mutex); } diff --git a/drivers/video/fbdev/stifb.c b/drivers/video/fbdev/stifb.c index 7753e586e65a..3feb6e40d56d 100644 --- a/drivers/video/fbdev/stifb.c +++ b/drivers/video/fbdev/stifb.c @@ -1055,7 +1055,8 @@ stifb_fillrect(struct fb_info *info, const struct fb_fillrect *rect) { struct stifb_info *fb = container_of(info, struct stifb_info, info); - if (rect->rop != ROP_COPY) + if (rect->rop != ROP_COPY || + (fb->id == S9000_ID_HCRX && fb->info.var.bits_per_pixel == 32)) return cfb_fillrect(info, rect); SETUP_HW(fb); diff --git a/drivers/video/fbdev/xilinxfb.c b/drivers/video/fbdev/xilinxfb.c index 438e2c78142f..1ac83900a21c 100644 --- a/drivers/video/fbdev/xilinxfb.c +++ b/drivers/video/fbdev/xilinxfb.c @@ -376,7 +376,7 @@ err_cmap: return rc; } -static int xilinxfb_release(struct device *dev) +static void xilinxfb_release(struct device *dev) { struct xilinxfb_drvdata *drvdata = dev_get_drvdata(dev); @@ -402,8 +402,6 @@ static int xilinxfb_release(struct device *dev) if (!(drvdata->flags & BUS_ACCESS_FLAG)) dcr_unmap(drvdata->dcr_host, drvdata->dcr_len); #endif - - return 0; } /* --------------------------------------------------------------------- @@ -480,7 +478,9 @@ static int xilinxfb_of_probe(struct platform_device *pdev) static int xilinxfb_of_remove(struct platform_device *op) { - return xilinxfb_release(&op->dev); + xilinxfb_release(&op->dev); + + return 0; } /* Match table for of_platform binding */ diff --git a/drivers/watchdog/exar_wdt.c b/drivers/watchdog/exar_wdt.c index 35058d8b21bc..7c61ff343271 100644 --- a/drivers/watchdog/exar_wdt.c +++ b/drivers/watchdog/exar_wdt.c @@ -355,8 +355,10 @@ static int __init exar_wdt_register(struct wdt_priv *priv, const int idx) &priv->wdt_res, 1, priv, sizeof(*priv)); if (IS_ERR(n->pdev)) { + int err = PTR_ERR(n->pdev); + kfree(n); - return PTR_ERR(n->pdev); + return err; } list_add_tail(&n->list, &pdev_list); diff --git a/drivers/watchdog/sp805_wdt.c b/drivers/watchdog/sp805_wdt.c index 78ba36689eec..2756ed54ca3d 100644 --- a/drivers/watchdog/sp805_wdt.c +++ b/drivers/watchdog/sp805_wdt.c @@ -88,7 +88,7 @@ static bool wdt_is_running(struct watchdog_device *wdd) return (wdtcontrol & ENABLE_MASK) == ENABLE_MASK; } -/* This routine finds load value that will reset system in required timout */ +/* This routine finds load value that will reset system in required timeout */ static int wdt_setload(struct watchdog_device *wdd, unsigned int timeout) { struct sp805_wdt *wdt = watchdog_get_drvdata(wdd); diff --git a/drivers/watchdog/watchdog_core.c b/drivers/watchdog/watchdog_core.c index 3fe8a7edc252..c777a612d932 100644 --- a/drivers/watchdog/watchdog_core.c +++ b/drivers/watchdog/watchdog_core.c @@ -38,6 +38,9 @@ #include "watchdog_core.h" /* For watchdog_dev_register/... 
*/ +#define CREATE_TRACE_POINTS +#include <trace/events/watchdog.h> + static DEFINE_IDA(watchdog_ida); static int stop_on_reboot = -1; @@ -163,6 +166,7 @@ static int watchdog_reboot_notifier(struct notifier_block *nb, int ret; ret = wdd->ops->stop(wdd); + trace_watchdog_stop(wdd, ret); if (ret) return NOTIFY_BAD; } diff --git a/drivers/watchdog/watchdog_dev.c b/drivers/watchdog/watchdog_dev.c index 744b2ab75288..55574ed42504 100644 --- a/drivers/watchdog/watchdog_dev.c +++ b/drivers/watchdog/watchdog_dev.c @@ -47,6 +47,8 @@ #include "watchdog_core.h" #include "watchdog_pretimeout.h" +#include <trace/events/watchdog.h> + /* the dev_t structure to store the dynamically allocated watchdog devices */ static dev_t watchdog_devt; /* Reference to watchdog device behind /dev/watchdog */ @@ -157,10 +159,13 @@ static int __watchdog_ping(struct watchdog_device *wdd) wd_data->last_hw_keepalive = now; - if (wdd->ops->ping) + if (wdd->ops->ping) { err = wdd->ops->ping(wdd); /* ping the watchdog */ - else + trace_watchdog_ping(wdd, err); + } else { err = wdd->ops->start(wdd); /* restart watchdog */ + trace_watchdog_start(wdd, err); + } if (err == 0) watchdog_hrtimer_pretimeout_start(wdd); @@ -259,6 +264,7 @@ static int watchdog_start(struct watchdog_device *wdd) } } else { err = wdd->ops->start(wdd); + trace_watchdog_start(wdd, err); if (err == 0) { set_bit(WDOG_ACTIVE, &wdd->status); wd_data->last_keepalive = started_at; @@ -297,6 +303,7 @@ static int watchdog_stop(struct watchdog_device *wdd) if (wdd->ops->stop) { clear_bit(WDOG_HW_RUNNING, &wdd->status); err = wdd->ops->stop(wdd); + trace_watchdog_stop(wdd, err); } else { set_bit(WDOG_HW_RUNNING, &wdd->status); } @@ -369,6 +376,7 @@ static int watchdog_set_timeout(struct watchdog_device *wdd, if (wdd->ops->set_timeout) { err = wdd->ops->set_timeout(wdd, timeout); + trace_watchdog_set_timeout(wdd, timeout, err); } else { wdd->timeout = timeout; /* Disable pretimeout if it doesn't fit the new timeout */ diff --git a/drivers/xen/grant-dma-ops.c b/drivers/xen/grant-dma-ops.c index 860f37c93af4..daa525df7bdc 100644 --- a/drivers/xen/grant-dma-ops.c +++ b/drivers/xen/grant-dma-ops.c @@ -31,12 +31,12 @@ static DEFINE_XARRAY_FLAGS(xen_grant_dma_devices, XA_FLAGS_LOCK_IRQ); static inline dma_addr_t grant_to_dma(grant_ref_t grant) { - return XEN_GRANT_DMA_ADDR_OFF | ((dma_addr_t)grant << PAGE_SHIFT); + return XEN_GRANT_DMA_ADDR_OFF | ((dma_addr_t)grant << XEN_PAGE_SHIFT); } static inline grant_ref_t dma_to_grant(dma_addr_t dma) { - return (grant_ref_t)((dma & ~XEN_GRANT_DMA_ADDR_OFF) >> PAGE_SHIFT); + return (grant_ref_t)((dma & ~XEN_GRANT_DMA_ADDR_OFF) >> XEN_PAGE_SHIFT); } static struct xen_grant_dma_data *find_xen_grant_dma_data(struct device *dev) @@ -79,7 +79,7 @@ static void *xen_grant_dma_alloc(struct device *dev, size_t size, unsigned long attrs) { struct xen_grant_dma_data *data; - unsigned int i, n_pages = PFN_UP(size); + unsigned int i, n_pages = XEN_PFN_UP(size); unsigned long pfn; grant_ref_t grant; void *ret; @@ -91,14 +91,14 @@ static void *xen_grant_dma_alloc(struct device *dev, size_t size, if (unlikely(data->broken)) return NULL; - ret = alloc_pages_exact(n_pages * PAGE_SIZE, gfp); + ret = alloc_pages_exact(n_pages * XEN_PAGE_SIZE, gfp); if (!ret) return NULL; pfn = virt_to_pfn(ret); if (gnttab_alloc_grant_reference_seq(n_pages, &grant)) { - free_pages_exact(ret, n_pages * PAGE_SIZE); + free_pages_exact(ret, n_pages * XEN_PAGE_SIZE); return NULL; } @@ -116,7 +116,7 @@ static void xen_grant_dma_free(struct device *dev, size_t size, void *vaddr, 
dma_addr_t dma_handle, unsigned long attrs) { struct xen_grant_dma_data *data; - unsigned int i, n_pages = PFN_UP(size); + unsigned int i, n_pages = XEN_PFN_UP(size); grant_ref_t grant; data = find_xen_grant_dma_data(dev); @@ -138,7 +138,7 @@ static void xen_grant_dma_free(struct device *dev, size_t size, void *vaddr, gnttab_free_grant_reference_seq(grant, n_pages); - free_pages_exact(vaddr, n_pages * PAGE_SIZE); + free_pages_exact(vaddr, n_pages * XEN_PAGE_SIZE); } static struct page *xen_grant_dma_alloc_pages(struct device *dev, size_t size, @@ -168,7 +168,9 @@ static dma_addr_t xen_grant_dma_map_page(struct device *dev, struct page *page, unsigned long attrs) { struct xen_grant_dma_data *data; - unsigned int i, n_pages = PFN_UP(offset + size); + unsigned long dma_offset = xen_offset_in_page(offset), + pfn_offset = XEN_PFN_DOWN(offset); + unsigned int i, n_pages = XEN_PFN_UP(dma_offset + size); grant_ref_t grant; dma_addr_t dma_handle; @@ -187,10 +189,11 @@ static dma_addr_t xen_grant_dma_map_page(struct device *dev, struct page *page, for (i = 0; i < n_pages; i++) { gnttab_grant_foreign_access_ref(grant + i, data->backend_domid, - xen_page_to_gfn(page) + i, dir == DMA_TO_DEVICE); + pfn_to_gfn(page_to_xen_pfn(page) + i + pfn_offset), + dir == DMA_TO_DEVICE); } - dma_handle = grant_to_dma(grant) + offset; + dma_handle = grant_to_dma(grant) + dma_offset; return dma_handle; } @@ -200,8 +203,8 @@ static void xen_grant_dma_unmap_page(struct device *dev, dma_addr_t dma_handle, unsigned long attrs) { struct xen_grant_dma_data *data; - unsigned long offset = dma_handle & (PAGE_SIZE - 1); - unsigned int i, n_pages = PFN_UP(offset + size); + unsigned long dma_offset = xen_offset_in_page(dma_handle); + unsigned int i, n_pages = XEN_PFN_UP(dma_offset + size); grant_ref_t grant; if (WARN_ON(dir == DMA_NONE)) diff --git a/drivers/xen/pcpu.c b/drivers/xen/pcpu.c index 47aa3a1ccaf5..fd3a644b0855 100644 --- a/drivers/xen/pcpu.c +++ b/drivers/xen/pcpu.c @@ -228,7 +228,7 @@ static int register_pcpu(struct pcpu *pcpu) err = device_register(dev); if (err) { - pcpu_release(dev); + put_device(dev); return err; } diff --git a/drivers/xen/platform-pci.c b/drivers/xen/platform-pci.c index 18f0ed8b1f93..cd07e3fed0fa 100644 --- a/drivers/xen/platform-pci.c +++ b/drivers/xen/platform-pci.c @@ -54,7 +54,8 @@ static uint64_t get_callback_via(struct pci_dev *pdev) pin = pdev->pin; /* We don't know the GSI. Specify the PCI INTx line instead. 
*/ - return ((uint64_t)0x01 << HVM_CALLBACK_VIA_TYPE_SHIFT) | /* PCI INTx identifier */ + return ((uint64_t)HVM_PARAM_CALLBACK_TYPE_PCI_INTX << + HVM_CALLBACK_VIA_TYPE_SHIFT) | ((uint64_t)pci_domain_nr(pdev->bus) << 32) | ((uint64_t)pdev->bus->number << 16) | ((uint64_t)(pdev->devfn & 0xff) << 8) | @@ -144,7 +145,7 @@ static int platform_pci_probe(struct pci_dev *pdev, if (ret) { dev_warn(&pdev->dev, "Unable to set the evtchn callback " "err=%d\n", ret); - goto out; + goto irq_out; } } @@ -152,13 +153,16 @@ static int platform_pci_probe(struct pci_dev *pdev, grant_frames = alloc_xen_mmio(PAGE_SIZE * max_nr_gframes); ret = gnttab_setup_auto_xlat_frames(grant_frames); if (ret) - goto out; + goto irq_out; ret = gnttab_init(); if (ret) goto grant_out; return 0; grant_out: gnttab_free_auto_xlat_frames(); +irq_out: + if (!xen_have_vector_callback) + free_irq(pdev->irq, pdev); out: pci_release_region(pdev, 0); mem_out: diff --git a/drivers/xen/xen-pciback/conf_space_capability.c b/drivers/xen/xen-pciback/conf_space_capability.c index 5e53b4817f16..097316a74126 100644 --- a/drivers/xen/xen-pciback/conf_space_capability.c +++ b/drivers/xen/xen-pciback/conf_space_capability.c @@ -190,13 +190,16 @@ static const struct config_field caplist_pm[] = { }; static struct msi_msix_field_config { - u16 enable_bit; /* bit for enabling MSI/MSI-X */ - unsigned int int_type; /* interrupt type for exclusiveness check */ + u16 enable_bit; /* bit for enabling MSI/MSI-X */ + u16 allowed_bits; /* bits allowed to be changed */ + unsigned int int_type; /* interrupt type for exclusiveness check */ } msi_field_config = { .enable_bit = PCI_MSI_FLAGS_ENABLE, + .allowed_bits = PCI_MSI_FLAGS_ENABLE, .int_type = INTERRUPT_TYPE_MSI, }, msix_field_config = { .enable_bit = PCI_MSIX_FLAGS_ENABLE, + .allowed_bits = PCI_MSIX_FLAGS_ENABLE | PCI_MSIX_FLAGS_MASKALL, .int_type = INTERRUPT_TYPE_MSIX, }; @@ -229,7 +232,7 @@ static int msi_msix_flags_write(struct pci_dev *dev, int offset, u16 new_value, return 0; if (!dev_data->allow_interrupt_control || - (new_value ^ old_value) & ~field_config->enable_bit) + (new_value ^ old_value) & ~field_config->allowed_bits) return PCIBIOS_SET_FAILED; if (new_value & field_config->enable_bit) { |